prompt (stringlengths 19–1.03M) | completion (stringlengths 4–2.12k) | api (stringlengths 8–90)
---|---|---|
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from featuretools.primitives import (
Age,
EmailAddressToDomain,
IsFreeEmailDomain,
TimeSince,
URLToDomain,
URLToProtocol,
URLToTLD,
Week,
get_transform_primitives
)
def test_time_since():
time_since = TimeSince()
# class datetime.datetime(year, month, day[, hour[, minute[, second[, microsecond[,
times = pd.Series([datetime(2019, 3, 1, 0, 0, 0, 1),
datetime(2019, 3, 1, 0, 0, 1, 0),
datetime(2019, 3, 1, 0, 2, 0, 0)])
cutoff_time = datetime(2019, 3, 1, 0, 0, 0, 0)
values = time_since(array=times, time=cutoff_time)
assert(list(map(int, values)) == [0, -1, -120])
time_since = TimeSince(unit='nanoseconds')
values = time_since(array=times, time=cutoff_time)
assert(list(map(round, values)) == [-1000, -1000000000, -120000000000])
time_since = TimeSince(unit='milliseconds')
values = time_since(array=times, time=cutoff_time)
assert(list(map(int, values)) == [0, -1000, -120000])
time_since = TimeSince(unit='Milliseconds')
values = time_since(array=times, time=cutoff_time)
assert(list(map(int, values)) == [0, -1000, -120000])
time_since = TimeSince(unit='Years')
values = time_since(array=times, time=cutoff_time)
assert(list(map(int, values)) == [0, 0, 0])
times_y = pd.Series([datetime(2019, 3, 1, 0, 0, 0, 1),
datetime(2020, 3, 1, 0, 0, 1, 0),
datetime(2017, 3, 1, 0, 0, 0, 0)])
time_since = TimeSince(unit='Years')
values = time_since(array=times_y, time=cutoff_time)
assert(list(map(int, values)) == [0, -1, 1])
error_text = 'Invalid unit given, make sure it is plural'
with pytest.raises(ValueError, match=error_text):
time_since = TimeSince(unit='na')
time_since(array=times, time=cutoff_time)
def test_age():
age = Age()
dates = pd.Series(datetime(2010, 2, 26))
ages = age(dates, time=datetime(2020, 2, 26))
correct_ages = [10.005] # .005 added due to leap years
np.testing.assert_array_almost_equal(ages, correct_ages, decimal=3)
def test_age_two_years_quarterly():
age = Age()
dates = pd.Series(pd.date_range('2010-01-01', '2011-12-31', freq='Q'))
ages = age(dates, time=datetime(2020, 2, 26))
correct_ages = [9.915, 9.666, 9.414, 9.162, 8.915, 8.666, 8.414, 8.162]
np.testing.assert_array_almost_equal(ages, correct_ages, decimal=3)
def test_age_leap_year():
age = Age()
dates = pd.Series([datetime(2016, 1, 1)])
ages = age(dates, time=datetime(2016, 3, 1))
correct_ages = [(31 + 29) / 365.0]
np.testing.assert_array_almost_equal(ages, correct_ages, decimal=3)
# born leap year date
dates = pd.Series([datetime(2016, 2, 29)])
ages = age(dates, time=datetime(2020, 2, 29))
correct_ages = [4.0027] # .0027 added due to leap year
np.testing.assert_array_almost_equal(ages, correct_ages, decimal=3)
def test_age_nan():
age = Age()
dates = pd.Series([datetime(2010, 1, 1), np.nan, datetime(2012, 1, 1)])
ages = age(dates, time=datetime(2020, 2, 26))
correct_ages = [10.159, np.nan, 8.159]
np.testing.assert_array_almost_equal(ages, correct_ages, decimal=3)
def test_week_no_deprecation_message():
dates = [datetime(2019, 1, 3),
datetime(2019, 6, 17, 11, 10, 50),
datetime(2019, 11, 30, 19, 45, 15)
]
with pytest.warns(None) as record:
week = Week()
week(dates).tolist()
assert not record
def test_url_to_domain_urls():
url_to_domain = URLToDomain()
urls = pd.Series(['https://play.google.com/store/apps/details?id=com.skgames.trafficracer%22',
'http://mplay.google.co.in/sadfask/asdkfals?dk=10',
'http://lplay.google.co.in/sadfask/asdkfals?dk=10',
'http://play.google.co.in/sadfask/asdkfals?dk=10',
'http://tplay.google.co.in/sadfask/asdkfals?dk=10',
'http://www.google.co.in/sadfask/asdkfals?dk=10',
'www.google.co.in/sadfask/asdkfals?dk=10',
'http://user:[email protected]/?a=b#asdd',
'https://www.compzets.com?asd=10',
'www.compzets.com?asd=10',
'facebook.com',
'https://www.compzets.net?asd=10',
'http://www.featuretools.org'])
correct_urls = ['play.google.com',
'mplay.google.co.in',
'lplay.google.co.in',
'play.google.co.in',
'tplay.google.co.in',
'google.co.in',
'google.co.in',
'google.com',
'compzets.com',
'compzets.com',
'facebook.com',
'compzets.net',
'featuretools.org']
np.testing.assert_array_equal(url_to_domain(urls), correct_urls)
def test_url_to_domain_long_url():
url_to_domain = URLToDomain()
urls = pd.Series(["http://chart.apis.google.com/chart?chs=500x500&chma=0,0,100, \
100&cht=p&chco=FF0000%2CFFFF00%7CFF8000%2C00FF00%7C00FF00%2C0 \
000FF&chd=t%3A122%2C42%2C17%2C10%2C8%2C7%2C7%2C7%2C7%2C6%2C6% \
2C6%2C6%2C5%2C5&chl=122%7C42%7C17%7C10%7C8%7C7%7C7%7C7%7C7%7C \
6%7C6%7C6%7C6%7C5%7C5&chdl=android%7Cjava%7Cstack-trace%7Cbro \
adcastreceiver%7Candroid-ndk%7Cuser-agent%7Candroid-webview%7 \
Cwebview%7Cbackground%7Cmultithreading%7Candroid-source%7Csms \
%7Cadb%7Csollections%7Cactivity|Chart"])
correct_urls = ['chart.apis.google.com']
results = url_to_domain(urls)
np.testing.assert_array_equal(results, correct_urls)
def test_url_to_domain_nan():
url_to_domain = URLToDomain()
urls = pd.Series(['www.featuretools.com', np.nan], dtype='object')
correct_urls = pd.Series(['featuretools.com', np.nan], dtype='object')
results = url_to_domain(urls)
pd.testing.assert_series_equal(results, correct_urls)
def test_url_to_protocol_urls():
url_to_protocol = URLToProtocol()
urls = pd.Series(['https://play.google.com/store/apps/details?id=com.skgames.trafficracer%22',
'http://mplay.google.co.in/sadfask/asdkfals?dk=10',
'http://lplay.google.co.in/sadfask/asdkfals?dk=10',
'www.google.co.in/sadfask/asdkfals?dk=10',
'http://user:[email protected]/?a=b#asdd',
'https://www.compzets.com?asd=10',
'www.compzets.com?asd=10',
'facebook.com',
'https://www.compzets.net?asd=10',
'http://www.featuretools.org',
'https://featuretools.com'])
correct_urls = pd.Series(['https',
'http',
'http',
np.nan,
'http',
'https',
np.nan,
np.nan,
'https',
'http',
'https'])
results = url_to_protocol(urls)
pd.testing.assert_series_equal(results, correct_urls)
def test_url_to_protocol_long_url():
url_to_protocol = URLToProtocol()
urls = pd.Series(["http://chart.apis.google.com/chart?chs=500x500&chma=0,0,100, \
100&cht=p&chco=FF0000%2CFFFF00%7CFF8000%2C00FF00%7C00FF00%2C0 \
000FF&chd=t%3A122%2C42%2C17%2C10%2C8%2C7%2C7%2C7%2C7%2C6%2C6% \
2C6%2C6%2C5%2C5&chl=122%7C42%7C17%7C10%7C8%7C7%7C7%7C7%7C7%7C \
6%7C6%7C6%7C6%7C5%7C5&chdl=android%7Cjava%7Cstack-trace%7Cbro \
adcastreceiver%7Candroid-ndk%7Cuser-agent%7Candroid-webview%7 \
Cwebview%7Cbackground%7Cmultithreading%7Candroid-source%7Csms \
%7Cadb%7Csollections%7Cactivity|Chart"])
correct_urls = ['http']
results = url_to_protocol(urls)
np.testing.assert_array_equal(results, correct_urls)
def test_url_to_protocol_nan():
url_to_protocol = URLToProtocol()
urls = pd.Series(['www.featuretools.com', np.nan, ''], dtype='object')
correct_urls = pd.Series([np.nan, np.nan, np.nan], dtype='object')
results = url_to_protocol(urls)
pd.testing.assert_series_equal(results, correct_urls)
def test_url_to_tld_urls():
url_to_tld = URLToTLD()
urls = pd.Series(['https://play.google.com/store/apps/details?id=com.skgames.trafficracer%22',
'http://mplay.google.co.in/sadfask/asdkfals?dk=10',
'http://lplay.google.co.in/sadfask/asdkfals?dk=10',
'http://play.google.co.in/sadfask/asdkfals?dk=10',
'http://tplay.google.co.in/sadfask/asdkfals?dk=10',
'http://www.google.co.in/sadfask/asdkfals?dk=10',
'www.google.co.in/sadfask/asdkfals?dk=10',
'http://user:[email protected]/?a=b#asdd',
'https://www.compzets.dev?asd=10',
'www.compzets.com?asd=10',
'https://www.compzets.net?asd=10',
'http://www.featuretools.org',
'featuretools.org'])
correct_urls = ['com',
'in',
'in',
'in',
'in',
'in',
'in',
'com',
'dev',
'com',
'net',
'org',
'org']
np.testing.assert_array_equal(url_to_tld(urls), correct_urls)
def test_url_to_tld_long_url():
url_to_tld = URLToTLD()
urls = pd.Series(["http://chart.apis.google.com/chart?chs=500x500&chma=0,0,100, \
100&cht=p&chco=FF0000%2CFFFF00%7CFF8000%2C00FF00%7C00FF00%2C0 \
000FF&chd=t%3A122%2C42%2C17%2C10%2C8%2C7%2C7%2C7%2C7%2C6%2C6% \
2C6%2C6%2C5%2C5&chl=122%7C42%7C17%7C10%7C8%7C7%7C7%7C7%7C7%7C \
6%7C6%7C6%7C6%7C5%7C5&chdl=android%7Cjava%7Cstack-trace%7Cbro \
adcastreceiver%7Candroid-ndk%7Cuser-agent%7Candroid-webview%7 \
Cwebview%7Cbackground%7Cmultithreading%7Candroid-source%7Csms \
%7Cadb%7Csollections%7Cactivity|Chart"])
correct_urls = ['com']
np.testing.assert_array_equal(url_to_tld(urls), correct_urls)
def test_url_to_tld_nan():
url_to_tld = URLToTLD()
urls = pd.Series(['www.featuretools.com', np.nan, 'featuretools', ''], dtype='object')
correct_urls = pd.Series(['com', np.nan, np.nan, np.nan], dtype='object')
results = url_to_tld(urls)
pd.testing.assert_series_equal(results, correct_urls, check_names=False)
def test_is_free_email_domain_valid_addresses():
is_free_email_domain = IsFreeEmailDomain()
array = pd.Series(['<EMAIL>', '<EMAIL>', '<EMAIL>', 'free<EMAIL>'])
answers = pd.Series(is_free_email_domain(array))
correct_answers = pd.Series([True, False, True, True])
pd.testing.assert_series_equal(answers, correct_answers)
def test_is_free_email_domain_valid_addresses_whitespace():
is_free_email_domain = IsFreeEmailDomain()
array = pd.Series([' <EMAIL>', ' <EMAIL>', '<EMAIL> ', ' <EMAIL> '])
answers = pd.Series(is_free_email_domain(array))
correct_answers = pd.Series([True, False, True, True])
pd.testing.assert_series_equal(answers, correct_answers)
def test_is_free_email_domain_nan():
is_free_email_domain = IsFreeEmailDomain()
array = pd.Series([np.nan, '<EMAIL>', '<EMAIL>'])
answers = pd.Series(is_free_email_domain(array))
correct_answers = pd.Series([np.nan, False, True])
pd.testing.assert_series_equal(answers, correct_answers)
def test_is_free_email_domain_empty_string():
is_free_email_domain = IsFreeEmailDomain()
array = pd.Series(['', '<EMAIL>', '<EMAIL>'])
answers = pd.Series(is_free_email_domain(array))
correct_answers = pd.Series([np.nan, False, True])
pd.testing.assert_series_equal(answers, correct_answers)
def test_is_free_email_domain_empty_series():
is_free_email_domain = IsFreeEmailDomain()
array = pd.Series([])
answers = pd.Series(is_free_email_domain(array))
correct_answers = pd.Series([])
pd.testing.assert_series_equal(answers, correct_answers)
def test_is_free_email_domain_invalid_email():
is_free_email_domain = IsFreeEmailDomain()
array = pd.Series([np.nan, 'this is not an email address', '<EMAIL>', '<EMAIL>', 1234, 1.23, True])
answers = pd.Series(is_free_email_domain(array))
correct_answers = pd.Series([np.nan, np.nan, False, True, np.nan, np.nan, np.nan])
pd.testing.assert_series_equal(answers, correct_answers)
def test_is_free_email_domain_all_nan():
is_free_email_domain = IsFreeEmailDomain()
array = pd.Series([np.nan, np.nan])
answers = pd.Series(is_free_email_domain(array))
correct_answers = pd.Series([np.nan, np.nan], dtype=object)
pd.testing.assert_series_equal(answers, correct_answers)
def test_email_address_to_domain_valid_addresses():
email_address_to_domain = EmailAddressToDomain()
array = pd.Series(['<EMAIL>', '<EMAIL>', '<EMAIL>', '<EMAIL>'])
answers = pd.Series(email_address_to_domain(array))
correct_answers = pd.Series(['hotmail.com', 'featuretools.com', 'yahoo.com', 'gmail.com'])
pd.testing.assert_series_equal(answers, correct_answers)
def test_email_address_to_domain_valid_addresses_whitespace():
email_address_to_domain = EmailAddressToDomain()
array = pd.Series([' <EMAIL>', ' <EMAIL>', '<EMAIL> ', ' <EMAIL> '])
answers = pd.Series(email_address_to_domain(array))
correct_answers = pd.Series(['hotmail.com', 'featuretools.com', 'yahoo.com', 'gmail.com'])
pd.testing.assert_series_equal(answers, correct_answers)
def test_email_address_to_domain_nan():
email_address_to_domain = EmailAddressToDomain()
array = pd.Series([np.nan, 'name<EMAIL>', '<EMAIL>'])
answers = pd.Series(email_address_to_domain(array))
correct_answers = pd.Series([np.nan, 'featuretools.com', 'yahoo.com'])
pd.testing.assert_series_equal(answers, correct_answers)
def test_email_address_to_domain_empty_string():
email_address_to_domain = EmailAddressToDomain()
array =
| pd.Series(['', '<EMAIL>', '<EMAIL>']) | pandas.Series |
"""
matplotcheck.base
=================
Base plot checking class and methods that should apply to all plots
whether they are spatial or not.
"""
import numpy as np
import matplotlib.dates as mdates
import matplotlib
from matplotlib.backend_bases import RendererBase
import math
from scipy import stats
import pandas as pd
import geopandas as gpd
import numbers
class InvalidPlotError(Exception):
pass
class PlotTester(object):
"""
Object to grab elements from Matplotlib plots
Temporarily removing parameters and returns as it's breaking sphinx
Parameters
----------
axis : mpl axis object
"""
def __init__(self, ax):
"""Initialize TestPlot object"""
self.ax = ax
def _is_line(self):
"""Boolean expressing if ax contains scatter points.
If plot contains scatter points and lines return True.
Returns
-------
is_line : boolean
True if Axes ax is a line plot, False if not
"""
if self.ax.lines:
for l in self.ax.lines:
if (
not l.get_linestyle()
or not l.get_linewidth()
or l.get_linewidth() > 0
):
return True
def _is_scatter(self):
"""Boolean expressing if ax contains scatter points.
If plot contains scatter points as well as lines, functions will return
true.
Returns
-------
is_scatter : boolean
True if Axes ax is a scatter plot, False if not
"""
if self.ax.collections:
return True
elif self.ax.lines:
for l in self.ax.lines:
if (
l.get_linestyle() == "None"
or l.get_linewidth() == "None"
or l.get_linewidth() == 0
):
return True
return False
def assert_string_contains(
self,
string,
strings_expected,
message_default="String does not contain expected string: {0}",
message_or="String does not contain at least one of: {0}",
):
"""Asserts that `string` contains the expected strings from
`strings_expected`.
Parameters
----------
strings_expected : list
            Any string in `strings_expected` must be in `string` for the
            assertion to pass. If there is a list of strings in
            `strings_expected`, at least one of the strings in that list must
            be in `string` for the assertion to pass. For example, if
            ``strings_expected=['a', 'b', 'c']``, then ``'a'`` AND ``'b'`` AND
            ``'c'`` must be in `string` for the assertion to pass.
            Alternatively, if ``strings_expected=['a', 'b', ['c', 'd']]``, then
            ``'a'`` AND ``'b'`` AND (at least one of: ``'c'``, ``'d'``) must be
            in `string` for the assertion to pass. Case insensitive.
message_default : string
The error message to be displayed if the `string` does not contain
a string in strings_expected. If `message` contains ``'{0}'``, it
will be replaced with the first expected string not found in the
label.
message_or : string
Similar to `message_default`, `message_or` is the error message to
            be displayed if `string` does not contain at least one of
the strings in an inner list in `strings_expected`. If `message`
contains ``'{0}'``, it will be replaced with the first failing
inner list in `strings_expected`.
Raises
-------
AssertionError
if `string` does not contain expected strings
"""
# Assertion passes if strings_expected == [] or
# strings_expected == None
if not strings_expected:
return
string = string.lower().replace(" ", "")
if isinstance(strings_expected, str):
strings_expected = [strings_expected]
for check in strings_expected:
if isinstance(check, str):
if not check.lower().replace(" ", "") in string:
raise AssertionError(message_default.format(check))
elif isinstance(check, list):
if not any(
[c.lower().replace(" ", "") in string for c in check]
):
if len(check) == 1:
raise AssertionError(message_default.format(check[0]))
else:
raise AssertionError(message_or.format(check))
else:
raise ValueError(
"str_lst must be a list of: lists or strings."
)
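    # Hedged usage sketch (not part of the original module): the nested-list
    # semantics documented above mean top-level entries are ANDed while inner
    # lists are ORed. Assuming `pt` is a PlotTester instance, a call such as
    # >>> pt.assert_string_contains(
    # ...     "Monthly Precipitation in Boulder",
    # ...     ["precipitation", ["boulder", "colorado"]],
    # ... )
    # passes because "precipitation" is present AND at least one of
    # "boulder"/"colorado" is present; the comparison ignores case and spaces.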
def assert_plot_type(
self, plot_type=None, message="Plot is not of type {0}"
):
"""Asserts Axes `ax` contains the type of plot specified in `plot_type`.
if `plot_type` is ``None``, assertion is passed.
Parameters
----------
plot_type: string
String specifying the expected plot type. Options:
`scatter`, `bar`, `line`
message : string
The error message to be displayed if Plot does not match
`plot_type`. If `message` contains ``'{0}'``, it will be replaced
            with the expected plot type.
Raises
-------
AssertionError
if Plot does not match `plot_type`
"""
if plot_type:
if plot_type == "scatter":
assert self._is_scatter(), message.format(plot_type)
elif plot_type == "bar":
assert self.ax.patches, message.format(plot_type)
elif plot_type == "line":
assert self._is_line(), message.format(plot_type)
else:
raise ValueError(
"Plot_type to test must be either: scatter, bar or line"
)
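    # Hedged usage sketch (not part of the original module): assuming `ax` was
    # produced with `ax.scatter(x, y)`, the scatter check passes while the
    # line check would raise an AssertionError.
    # >>> pt = PlotTester(ax)
    # >>> pt.assert_plot_type("scatter")
    # >>> pt.assert_plot_type("line")   # AssertionError: Plot is not of type line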
""" TITLES TESTS/HELPER FUNCTIONS """
def get_titles(self):
"""Returns the suptitle (Figure title) and axes title of `ax`.
Returns
-------
suptitle : string
Figure title of the Figure that the `ax` object is on. If figure
title is ``None``, this is an empty string.
title : string
Title on the axes. If title is ``None``, this is an empty string.
"""
fig, suptitle = self.ax.get_figure(), ""
if fig._suptitle:
suptitle += fig._suptitle.get_text()
return suptitle, self.ax.get_title()
def assert_title_contains(
self,
strings_expected,
title_type="either",
message_default="Title does not contain expected string: {0}",
message_or="Title does not contain at least one of: {0}",
message_no_title="Expected title is not displayed",
):
"""Asserts that title defined by `title_type` contains the expected
strings from `strings_expected`.
Parameters
----------
strings_expected : list
Any string in `strings_expected` must be in the title for the
assertion to pass. If there is a list of strings in
`strings_expected`, at least one of the strings in that list must
be in the title for the assertion to pass. For example, if
``strings_expected=['a', 'b', 'c']``, then ``'a'`` AND ``'b'`` AND
``'c'`` must be in the title for the assertion to pass.
Alternatively, if ``strings_expected=['a', 'b', ['c', 'd']]``, then
``'a'`` AND ``'b'`` AND (at least one of: ``'c'``, ``'d'``) must be
in the title for the assertion to pass. Case insensitive.
title_type : string
One of the following strings ["figure", "axes", "either"]
            'figure': only the figure title (suptitle) will be tested
            'axes': only the axes title will be tested
            'either': either the figure title or the axes title will pass this
            assertion; the concatenated figure and axes titles are tested.
        message_default : string
            The error message to be displayed if the title does not
            contain a string in strings_expected. If `message` contains
            ``'{0}'``, it will be replaced with the first expected string not
            found in the title.
        message_or : string
            Similar to `message_default`, `message_or` is the error message to
            be displayed if the title does not contain at least one of
            the strings in an inner list in `strings_expected`. If `message`
            contains ``'{0}'``, it will be replaced with the first failing
            inner list in `strings_expected`.
message_no_title : string
The error message to be displayed if the expected title is not
displayed.
Raises
-------
AssertionError
if title does not contain expected strings
"""
suptitle, axtitle = self.get_titles()
if title_type == "either":
title = axtitle + suptitle
elif title_type == "figure":
title = suptitle
elif title_type == "axes":
title = axtitle
else:
raise ValueError(
"title_type must be one of the following "
+ '["figure", "axes", "either"]'
)
assert title, message_no_title
self.assert_string_contains(
title,
strings_expected,
message_default=message_default,
message_or=message_or,
)
"""CAPTION TEST/HELPER FUNCTIONS """
def get_caption(self):
"""Returns the text that is located in the bottom right, just below the
right side of ax
If no text is found in location, ``None`` is returned.
Returns
-------
caption : string
the text that is found in bottom right, ``None`` if no text is
found
"""
caption = None
ax_position = self.ax.get_position()
for tex in self.ax.get_figure().texts:
tex_position = tex.get_position()
if (
ax_position.ymin - 0.1 < tex_position[1] < ax_position.ymin
) and (
ax_position.xmax - 0.5 < tex_position[0] < ax_position.xmax
):
caption = tex
break
if isinstance(caption, matplotlib.text.Text):
caption = caption.get_text()
return caption
def assert_caption_contains(
self,
strings_expected,
message_default="Caption does not contain expected string: {0}",
message_or="Caption does not contain at least one of: {0}",
message_no_caption="No caption exists in appropriate location",
):
"""
Asserts that caption contains expected strings from `strings_expected`.
Parameters
----------
strings_expected : list
            Any string in `strings_expected` must be in the caption for the
            assertion to pass. If there is a list of strings in
            `strings_expected`, at least one of the strings in that list must
            be in the caption for the assertion to pass. For example, if
            ``strings_expected=['a', 'b', 'c']``, then ``'a'`` AND ``'b'`` AND
            ``'c'`` must be in the caption for the assertion to pass.
            Alternatively, if ``strings_expected=['a', 'b', ['c', 'd']]``, then
            ``'a'`` AND ``'b'`` AND (at least one of: ``'c'``, ``'d'``) must be
            in the caption for the assertion to pass. Case insensitive.
        message_default : string
            The error message to be displayed if the caption does not
            contain a string in strings_expected. If `message` contains
            ``'{0}'``, it will be replaced with the first expected string
            not found in the caption.
        message_or : string
            Similar to `message_default`, `message_or` is the error message to
            be displayed if the caption does not contain at least one of
            the strings in an inner list in `strings_expected`. If `message`
            contains ``'{0}'``, it will be replaced with the first failing
            inner list in `strings_expected`.
message_no_caption : string
The error message to be displayed if no caption exists in the
appropriate location.
Raises
-------
AssertionError
if caption does not contain strings matching `strings_expected`
"""
caption = self.get_caption()
if strings_expected is None:
return
assert caption, message_no_caption
self.assert_string_contains(
caption,
strings_expected,
message_default=message_default,
message_or=message_or,
)
""" AXIS TEST/HELPER FUNCTIONS """
def assert_axis_off(self, message="Axis lines are displayed on plot"):
"""Asserts one of the three cases holds true with error message m:
1) axis have been turned off
2) both x and y axis have visibility set to false
3) both x and y axis ticks have been set to empty lists
Parameters
----------
message : string
The error message to be displayed if the assertion is not met.
Raises
----------
AssertionError
            with `message` if axis lines are displayed on the plot
"""
flag = False
# Case 1: check if axis have been turned off
if not self.ax.axison:
flag = True
# Case 2: Check if both axis visibilities set to false
elif not self.ax.xaxis._visible and not self.ax.yaxis._visible:
flag = True
# Case 3: Check if both axis ticks are set to empty lists
elif (
self.ax.xaxis.get_gridlines() == []
and self.ax.yaxis.get_gridlines() == []
):
flag = True
assert flag, message
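    # Hedged usage sketch (not part of the original module): any of the three
    # cases above satisfies the check, e.g. turning the axis off entirely:
    # >>> ax.axis("off")
    # >>> PlotTester(ax).assert_axis_off()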
def assert_axis_label_contains(
self,
axis="x",
strings_expected=None,
message_default="{1}-axis label does not contain expected string: {0}",
message_or="{1}-axis label does not contain at least one of: {0}",
message_not_displayed="Expected {0} axis label is not displayed",
):
"""
Asserts that the axis label contains the expected strings from
`strings_expected`. Tests x or y axis based on 'axis' param.
Parameters
----------
axis : string
            One of the following ['x', 'y'], stating which axis label is to be tested
strings_expected : list
Any string in `strings_expected` must be in the axis label for the
assertion to pass. If there is a list of strings in
`strings_expected`, at least one of the strings in that list must
be in the axis label for the assertion to pass. For example, if
``strings_expected=['a', 'b', 'c']``, then ``'a'`` AND ``'b'`` AND
            ``'c'`` must be in the label for the assertion to pass.
            Alternatively, if ``strings_expected=['a', 'b', ['c', 'd']]``, then
            ``'a'`` AND ``'b'`` AND (at least one of: ``'c'``, ``'d'``) must be
            in the label for the assertion to pass. Case insensitive.
message_default : string
The error message to be displayed if the axis label does not
contain a string in strings_expected. If `message` contains
``'{1}'``, it will be replaced with `axis`. If `message` contains
``'{0}'``, it will be replaced with the first expected string not
found in the label.
message_or : string
Similar to `message_default`, `message_or` is the error message to
            be displayed if the axis label does not contain at least one of
the strings in an inner list in `strings_expected`. If `message`
contains ``'{1}'``, it will be replaced with `axis`. If `message`
contains ``'{0}'``, it will be replaced with the first failing
inner list in `strings_expected`.
message_not_displayed : string
The error message to be displayed if the expected axis label is not
displayed. If `message_not_displayed` contains ``'{0}'``, it will
be replaced with `axis`.
Raises
----------
AssertionError
if axis label does not contain expected strings
"""
# Retrieve appropriate axis label, error if axis param is not x or y
if axis == "x":
label = self.ax.get_xlabel()
elif axis == "y":
label = self.ax.get_ylabel()
else:
raise ValueError('axis must be one of the following ["x", "y"]')
# Check that axis label contains the expected strings in lst
if strings_expected is None:
return
assert label, "Expected {0} axis label is not displayed".format(axis)
message_default = message_default.replace("{1}", axis)
message_or = message_or.replace("{1}", axis)
self.assert_string_contains(
label,
strings_expected,
message_default=message_default,
message_or=message_or,
)
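    # Hedged usage sketch (assumption: `pt` wraps an Axes whose y label is
    # "Daily precipitation (mm)"):
    # >>> pt.assert_axis_label_contains(
    # ...     axis="y", strings_expected=["precipitation", ["mm", "millimeters"]]
    # ... )
    # passes because "precipitation" is present AND one of "mm"/"millimeters" is.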
def assert_lims(
self,
lims_expected,
axis="x",
message="Incorrect limits on the {0} axis",
):
"""Assert the lims of ax match lims_expected. Tests x or y axis based on
'axis' param
Parameters
---------
lims_expected : list of numbers (float or int)
List of length 2 containing expected min and max vals for axis
limits
axis : string
From ['x','y'], which axis to be tested
message : string
The error message to be displayed if the limits of ax do not match
the expected limits. If `message` contains ``'{0}'``, it will be
replaced with `axis`.
Raises
----------
AssertionError
if `lims_expected` does not match the limits of ax
"""
# Get axis limit values
if axis == "x":
lims = [int(l) for l in self.ax.get_xlim()]
elif axis == "y":
lims = [int(l) for l in self.ax.get_ylim()]
else:
raise ValueError(
"axis must be one of the following string ['x', 'y']"
)
# Check retrieved limits against expected min and max values
assert np.array_equal(lims, lims_expected), message.format(axis)
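    # Hedged usage sketch (assumption: `pt` wraps an Axes whose x limits were
    # set to 0 and 100). Note the retrieved limits are cast to int before the
    # comparison.
    # >>> pt.ax.set_xlim(0, 100)
    # >>> pt.assert_lims([0, 100], axis="x")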
def assert_lims_range(
self,
lims_range,
axis="x",
message_min="Incorrect min limit on the {0} axis",
message_max="Incorrect max limit on the {0} axis",
):
"""Asserts axis limits fall within lims_range (INCLUSIVE).
Parameters
----------
lims_range: tuple of tuples.
            if axis == 'x': the first tuple is the range the left x limit must
            be in, the second tuple is the range the right x limit must be in.
            if axis == 'y': the first tuple is the range the bottom y limit
            must be in, the second tuple is the range the top y limit must be in.
axis: string
from list ['x','y'] declaring which axis to be tested
message_min : string
The error message to be displayed if the limits of ax do not fall
within the expected limit minimum. If `message` contains ``'{0}'``,
it will be replaced with `axis`.
message_max : string
The error message to be displayed if the limits of ax do not fall
within the expected limit maximum. If `message` contains ``'{0}'``,
it will be replaced with the specified `axis` (i.e. it will be
replaced with 'x' or 'y').
Raises
----------
AssertionError
            if the axis limits do not fall within `lims_range`
"""
# Get ax axis limits
if axis == "x":
lims = self.ax.get_xlim()
elif axis == "y":
lims = self.ax.get_ylim()
else:
raise ValueError(
"axis must be one of the following string ['x', 'y']"
)
        # Check if the min falls within lims_range[0]
assert (
lims_range[0][0] <= lims[0] <= lims_range[0][1]
), message_min.format(axis)
        # Check if the max falls within lims_range[1]
assert (
lims_range[1][0] <= lims[1] <= lims_range[1][1]
), message_max.format(axis)
def assert_equal_xlims_ylims(
self, message="xlims and ylims are not equal"
):
"""Assert the x and y lims of Axes ax are exactly equal to each other
Parameters
---------
message : string
            The error message to be displayed if the x limits and y limits are
            not equal.
Raises
----------
AssertionError
            with `message` if the x and y limits are not equal
"""
xlims = self.ax.get_xlim()
ylims = self.ax.get_ylim()
assert np.array_equal(xlims, ylims), message
""" LEGEND TESTS """
def get_legends(self):
"""Retrieve the list of legends on ax
Returns
-------
legends : list
List of matplotlib.legend.Legend objects
"""
return self.ax.findobj(match=matplotlib.legend.Legend)
def assert_legend_titles(
self,
titles_exp,
message="Legend title does not contain expected string: {0}",
message_num_titles="I was expecting {0} legend titles but instead "
+ "found {1}",
):
"""Asserts legend titles contain expected text in titles_exp list.
Parameters
----------
titles_exp : list of strings
Each string is expected be be in one legend title. The number of
strings is equal to the number of expected legends.
message : string
The error message to be displayed if the legend titles do not match
the expected strings. If `message` contains ``'{0}'``,
it will be replaced with the first expected string that does not
exist in the legend title.
message_num_titles : string
            The error message to be displayed if there is a different number
            of legend titles than expected. If `message_num_titles` contains
            ``'{0}'`` it will be replaced with the expected number of titles.
            If `message_num_titles` contains ``'{1}'`` it will be replaced
            with the number of titles found.
Raises
-------
AssertionError
if legend titles do not contain expected text
"""
legends = self.get_legends()
        # Test number of legends - edge case when a student might have, e.g.,
        # two legends rather than one
num_legends = len(legends)
num_exp_legends = len(titles_exp)
        assert num_legends == num_exp_legends, message_num_titles.format(
            num_exp_legends, num_legends
        )
# Check that each expected legend title is in a legend title in ax
titles = [leg.get_title().get_text().lower() for leg in legends]
for title_exp in titles_exp:
assert any(title_exp.lower() in s for s in titles), message.format(
title_exp
)
def assert_legend_labels(
self,
labels_exp,
message="Legend does not have expected labels",
message_no_legend="Legend does not exist",
message_num_labels="I was expecting {0} legend entries, but found "
+ "{1}. Are there extra labels in your legend?",
):
"""Asserts legends on ax have the correct entry labels
Parameters
----------
labels_exp : list of strings
Each string is an expected legend entry label. Checks that
the legend entry labels match exactly (except for case).
message : string
The error message to be displayed if the expected labels are not
found.
message_no_legend : string
The error message to be displayed if no legend is found.
message_num_labels: string
            The error message to be displayed if there is a different number
            of legend labels than expected. If `message_num_labels` contains
            ``'{0}'`` it will be replaced with the expected number of labels.
            If `message_num_labels` contains ``'{1}'`` it will be replaced
            with the number of labels found.
Raises
-------
AssertionError
            if legend labels do not match `labels_exp`
Notes
-----
If there are multiple legends, it combines all the legend labels into
one set and checks that set against the list labels_exp
"""
legends = self.get_legends()
assert legends, message_no_legend
# Lowercase both the expected and actual legend labels
legend_texts = [
t.get_text().lower() for leg in legends for t in leg.get_texts()
]
labels_exp = [l.lower() for l in labels_exp]
num_exp_labs = len(labels_exp)
num_actual_labs = len(legend_texts)
assert num_actual_labs == num_exp_labs, message_num_labels.format(
num_exp_labs, num_actual_labs
)
assert set(legend_texts) == set(labels_exp), message
def assert_legend_no_overlay_content(
self, message="Legend overlays plot window"
):
"""Asserts that each legend does not overlay plot window
Parameters
----------
message : string
The error message to be displayed if the legend overlays the plot
window.
Raises
-------
AssertionError
            with `message` if a legend overlays the plot window
"""
# RendererBase() is needed to get extent, otherwise raises an error
plot_extent = self.ax.get_window_extent(RendererBase()).get_points()
legends = self.get_legends()
for leg in legends:
# RendererBase() is needed to get extent, otherwise raises error
leg_extent = leg.get_window_extent(RendererBase()).get_points()
legend_left = leg_extent[1][0] < plot_extent[0][0]
legend_right = leg_extent[0][0] > plot_extent[1][0]
legend_below = leg_extent[1][1] < plot_extent[0][1]
assert legend_left or legend_right or legend_below, message
def legends_overlap(self, b1, b2):
"""Helper function for assert_no_legend_overlap.
True if points of window extents for b1 and b2 overlap, False otherwise
Parameters
----------
b1 : list of lists
            2x2 array containing numbers, bounding box of window extents
        b2 : list of lists
            2x2 array containing numbers, bounding box of window extents
Returns
-------
overlap : boolean
True if bounding boxes b1 and b2 overlap
"""
x_overlap = (b1[0][0] <= b2[1][0] and b1[0][0] >= b2[0][0]) or (
b1[1][0] <= b2[1][0] and b1[1][0] >= b2[0][0]
)
y_overlap = (b1[0][1] <= b2[1][1] and b1[0][1] >= b2[0][1]) or (
b1[1][1] <= b2[1][1] and b1[1][1] >= b2[0][1]
)
return x_overlap and y_overlap
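    # Hedged sketch of the bounding-box convention assumed above: each box is
    # [[xmin, ymin], [xmax, ymax]] in display coordinates, as returned by
    # `get_window_extent(...).get_points()`.
    # >>> pt.legends_overlap([[0, 0], [2, 2]], [[1, 1], [3, 3]])   # shared region
    # True
    # >>> pt.legends_overlap([[0, 0], [1, 1]], [[2, 2], [3, 3]])   # disjoint boxes
    # False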
    def assert_no_legend_overlap(self, message="Legends overlap each other"):
"""When multiple legends on ax, asserts that there are no two legends
in ax that overlap each other
Parameters
----------
message : string
The error message to be displayed if two legends overlap.
Raises
-------
AssertionError
            with `message` if two legends overlap
"""
legends = self.get_legends()
n = len(legends)
for i in range(n - 1):
# Get extent of first legend in check, RendererBase() avoids error
leg_extent1 = (
legends[i].get_window_extent(RendererBase()).get_points()
)
for j in range(i + 1, n):
# Get extent of second legend in check
leg_extent2 = (
legends[j].get_window_extent(RendererBase()).get_points()
)
assert not self.legends_overlap(
leg_extent1, leg_extent2
), message
""" BASIC PLOT DATA FUNCTIONS """
def get_xy(self, points_only=False, xtime=False):
"""Returns a pandas dataframe with columns "x" and "y" holding the x
and y coords on Axes `ax`
Parameters
----------
ax : matplotlib.axes.Axes
Matplotlib Axes object to be tested
points_only : boolean
Set ``True`` to check only points, set ``False`` to check all data
on plot.
xtime : boolean
Set equal to True if the x axis of the plot contains datetime
values
Returns
-------
df : pandas.DataFrame
Pandas dataframe with columns "x" and "y" containing the x and y
coords of each point on Axes `ax`
"""
if points_only:
xy_coords = [
val
for l in self.ax.lines
if (l.get_linestyle() == "None" or l.get_linewidth() == "None")
for val in l.get_xydata()
] # .plot()
xy_coords += [
val
for c in self.ax.collections
if type(c) != matplotlib.collections.PolyCollection
for val in c.get_offsets()
] # .scatter()
else:
xy_coords = [
val for l in self.ax.lines for val in l.get_xydata()
] # .plot()
xy_coords += [
val for c in self.ax.collections for val in c.get_offsets()
] # .scatter()
xy_coords += [
[(p.get_x() + (p.get_width() / 2)), p.get_height()]
for p in self.ax.patches
] # .bar()
xy_data =
| pd.DataFrame(data=xy_coords, columns=["x", "y"]) | pandas.DataFrame |
from linescanning.plotting import LazyPlot
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.interpolate import interp1d
import seaborn as sns
from nilearn.glm.first_level import first_level
from nilearn.glm.first_level import hemodynamic_models
from nilearn import plotting
import warnings
class GenericGLM():
"""GenericGLM
    Main class to perform a simple GLM with Python. Most of the processing is done internally, and you can plot the various intermediate steps along the way.
Parameters
----------
onset: pandas.DataFrame
        Dataframe containing the onset times for all events in an experiment. Specifically designed to work smoothly with :func:`linescanning.utils.ParseExpToolsFile`. You should insert the output from :func:`linescanning.utils.ParseExpToolsFile.get_onset_df()` as `onset`
data: numpy.ndarray, pandas.DataFrame
<time,voxels> numpy array or pandas DataFrame; required for creating the appropriate length of the stimulus vectors
hrf_pars: dict, optional
dictionary collecting the parameters required for :func:`linescanning.glm.double_gamma` (generally the defaults are fine though!)
>>> pars = {'lag': 6,
>>> 'a2': 12,
>>> 'b1': 12,
>>> 'b2': 12,
>>> 'c': 12,
>>> 'scale': True}
TR: float
repetition time of acquisition
osf: int, optional
        Oversampling factor used to account for decimal onset times, by default None. The larger this factor, the more accurately decimal onset times are processed, but also the bigger your upsampled convolved timecourse becomes, which means convolving will take longer.
type: str, optional
        Use block design or event-related design, by default 'event'. If set to 'block', `block_length` is required.
block_length: int, optional
Duration of block in seconds, by default None
amplitude: int, list, optional
        Amplitude to be used when creating the stimulus vector, by default None. If nothing is specified, the amplitude will be set to '1', like you would in a regular FSL 1-/3-column file. If you want variable amplitudes for different events in a simulation, you can specify a list with a length equal to the number of events present in `onset_df`.
regressors: pandas.DataFrame, numpy.ndarray, optional
Add a bunch of regressors to the design
make_figure: bool, optional
Create overview figure of HRF, stimulus vector, and convolved stimulus vector, by default False
scan_length: int
number of volumes in `data` (= `scan_length` in :func:`linescanning.glm.make_stimulus_vector`)
xkcd: bool, optional
        Plot the figure in XKCD-style (cartoon), by default False
plot_vox: int, optional
Instead of plotting the best-fitting voxel, specify which voxel to plot the timecourse and fit of, by default None
plot_event: str, int, list, optional
        If a larger design matrix was inputted with multiple events, you can specify here the name of the event you'd like to plot the betas from. It also accepts a list of indices of events to plot, so you could plot the first two events by specifying `plot_event=[1,2]`. Remember, the 0th index is the intercept! By default we'll plot the event right after the intercept
contrast_matrix: numpy.ndarray, optional
contrast array for the event regressors. If none, we'll create a contrast matrix that estimates the effect of each regressor and the baseline
nilearn: bool, optional
        use nilearn implementation of `FirstLevelModel` (True) or bare python (False). The latter gives easier access to betas, while the former allows implementation of AR-noise models.
Returns
----------
dict
Dictionary collecting outputs under the following keys
* betas: <n_regressors (+intercept), n_voxels> beta values
* tstats: <n_regressors (+intercept), n_voxels> t-statistics (FSL-way)
* x_conv: <n_timepoints, n_regressors (+intercept)> design matrix
        * resids: <n_timepoints, n_voxels> residuals
matplotlib.pyplot
plots along the process if `make_figure=True`
Example
----------
>>> # import modules
>>> from linescanning.glm import GenericGLM
>>> from linescanning import utils
>>>
>>> # define file with fMRI-data and the output from Exptools2
>>> func_file = "some_func_file.mat"
>>> exp_file = "some_exp_file.tsv"
>>>
>>> # load in functional data
>>> func = utils.ParseFuncFile(func_file,
>>> subject=1,
>>> run=1,
>>> deleted_first_timepoints=200,
>>> deleted_last_timepoints=200,
>>> bp_filter="rolling")
>>>
>>> # fetch HP-filtered, percent-signal changed data
>>> data = func.dct_psc_df.copy()
>>>
>>> # load in exptools-file, use attributes from 'func'
>>> onset = utils.ParseExpToolsFile(exp_file,
>>> subject=func.subject,
>>> run=func.run,
>>> delete_vols=(func.deleted_first_timepoints),
>>> TR=func.TR)
>>>
>>> # fetch the onset times and event names in a dataframe
>>> onsets = onset.get_onset_df()
>>>
>>> # do the fitting
>>> fitting = GenericGLM(onsets, data.values, TR=func.TR, osf=1000)
Notes
----------
For `FirstLevelModel` to work with our type of data, I had to add the following to `https://github.com/nilearn/nilearn/blob/main/nilearn/glm/first_level/first_level.py#L683`:
```python
for output_type_ in output_types:
estimate_ = getattr(contrast, output_type_)()
if return_type == "imgs":
# Prepare the returned images
output = self.masker_.inverse_transform(estimate_)
contrast_name = str(con_vals)
output.header['descrip'] = (
'%s of contrast %s' % (output_type_, contrast_name))
outputs[output_type_] = output
else:
output = estimate_
outputs[output_type_] = output
```
This ensures we're getting an array back, rather than a nifti-image for our statistics
"""
def __init__(self, onsets, data, hrf_pars=None, TR=None, osf=1, contrast_matrix=None, exp_type='event', block_length=None, amplitude=None, regressors=None, make_figure=False, xkcd=False, plot_event=[1, 2], plot_vox=None, verbose=False, nilearn=False, derivative=False, dispersion=False):
# %%
# instantiate
self.onsets = onsets
self.hrf_pars = hrf_pars
self.TR = TR
self.osf = osf
self.exp_type = exp_type
self.block_length = block_length
self.amplitude = amplitude
self.regressors = regressors
self.make_figure = make_figure
self.xkcd = xkcd
self.plot_event = plot_event
self.plot_vox = plot_vox
self.verbose = verbose
self.contrast_matrix = contrast_matrix
self.nilearn_method = nilearn
self.dispersion = dispersion
self.derivative = derivative
if isinstance(data, np.ndarray):
self.data = data.copy()
elif isinstance(data, pd.DataFrame):
self.data = data.values
else:
raise ValueError("Data must be 'np.ndarray' or 'pandas.DataFrame'")
# %%
# make the stimulus vectors
if verbose:
print("Creating stimulus vector(s)")
self.stims = make_stimulus_vector(self.onsets, scan_length=self.data.shape[0], osf=self.osf, type=self.exp_type)
# %%
# define HRF
self.hrf_kernel = []
if verbose:
print("Defining HRF")
self.hrf = glover_hrf(osf=osf, TR=self.TR, dispersion=self.dispersion, derivative=self.derivative)
# %%
# convolve stimulus vectors
if verbose:
print("Convolve stimulus vectors with HRF")
self.stims_convolved = convolve_hrf(self.hrf, self.stims, make_figure=self.make_figure, xkcd=self.xkcd)
if self.osf > 1:
if verbose:
print("Resample convolved stimulus vectors")
self.stims_convolved_resampled = resample_stim_vector(self.stims_convolved, self.data.shape[0])
else:
self.stims_convolved_resampled = self.stims_convolved.copy()
self.condition_names = list(self.stims_convolved_resampled.keys())
# %%
# finalize design matrix (with regressors)
if verbose:
print("Creating design matrix")
self.design = first_level_matrix(self.stims_convolved_resampled, regressors=self.regressors)
if self.make_figure:
self.plot_design_matrix()
# %%
# Fit all
if verbose:
print("Running fit")
if self.nilearn_method:
# we're going to hack Nilearn's FirstLevelModel to be compatible with our line-data. First, we specify the model as usual
self.fmri_glm = first_level.FirstLevelModel(t_r=self.TR,
noise_model='ar1',
standardize=False,
hrf_model='spm',
drift_model='cosine',
high_pass=.01)
# Normally, we'd run `fmri_glm = fmri_glm.fit()`, but because this requires nifti-like inputs, we run `run_glm` outside of that function to get the labels:
if isinstance(data, pd.DataFrame):
data = data.values
elif isinstance(data, np.ndarray):
data = data.copy()
else:
raise ValueError(f"Unknown input type {type(data)} for functional data. Must be pd.DataFrame or np.ndarray [time, voxels]")
self.labels, self.results = first_level.run_glm(data, self.design, noise_model='ar1')
# Then, we inject this into the `fmri_glm`-class so we can compute contrasts
self.fmri_glm.labels_ = [self.labels]
self.fmri_glm.results_ = [self.results]
# insert the design matrix:
self.fmri_glm.design_matrices_ = []
self.fmri_glm.design_matrices_.append(self.design)
# Then we specify our contrast matrix:
            if self.contrast_matrix is None:
if self.verbose:
print("Defining standard contrast matrix")
matrix = np.eye(len(self.condition_names))
icept = np.zeros((len(self.condition_names), 1))
matrix = np.hstack((icept, matrix)).astype(int)
self.contrast_matrix = matrix.copy()
self.conditions = {}
for idx, name in enumerate(self.condition_names):
self.conditions[name] = self.contrast_matrix[idx, ...]
if self.verbose:
print("Computing contrasts")
self.tstats = []
for event in self.conditions:
tstat = self.fmri_glm.compute_contrast(self.conditions[event],
stat_type='t',
output_type='stat',
return_type=None)
self.tstats.append(tstat)
self.tstats = np.array(self.tstats)
else:
self.results = fit_first_level(self.design, self.data, make_figure=self.make_figure, xkcd=self.xkcd, plot_vox=self.plot_vox, plot_event=self.plot_event)
def plot_contrast_matrix(self, save_as=None):
if self.nilearn_method:
fig,axs = plt.subplots(figsize=(10,10))
plotting.plot_contrast_matrix(self.contrast_matrix, design_matrix=self.design, ax=axs)
if save_as:
fig.savefig(save_as)
else:
raise NotImplementedError("Can't use this function without nilearn-fitting. Set 'nilearn=True'")
def plot_design_matrix(self, save_as=None):
fig,axs = plt.subplots(figsize=(10,10))
plotting.plot_design_matrix(self.design, ax=axs)
if save_as:
fig.savefig(save_as)
def glover_hrf(osf=1, TR=0.105, dispersion=False, derivative=False, time_length=25):
# osf factor is different in `hemodynamic_models`
osf /= 10
# set kernel
hrf_kernel = []
hrf = hemodynamic_models.glover_hrf(TR, oversampling=osf, time_length=time_length)
hrf /= hrf.max()
hrf_kernel.append(hrf)
if derivative:
tderiv_hrf = hemodynamic_models.glover_time_derivative(tr=TR, oversampling=osf, time_length=time_length)
tderiv_hrf /= tderiv_hrf.max()
hrf_kernel.append(tderiv_hrf)
if dispersion:
tdisp_hrf = hemodynamic_models.glover_dispersion_derivative(TR, oversampling=osf, time_length=time_length)
tdisp_hrf /= tdisp_hrf.max()
hrf_kernel.append(tdisp_hrf)
return hrf_kernel
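# Hedged usage sketch (not part of the original source): building a kernel set
# containing the canonical Glover HRF plus its temporal derivative, assuming an
# oversampling factor of 1000 and the default TR of 0.105 s.
# >>> kernels = glover_hrf(osf=1000, TR=0.105, derivative=True)
# >>> len(kernels)   # [canonical HRF, temporal derivative]
# 2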
def make_stimulus_vector(onset_df, scan_length=None, TR=0.105, osf=None, type='event', block_length=None, amplitude=None):
"""make_stimulus_vector
Creates a stimulus vector for each of the conditions found in `onset_df`. You can account for onset times being in decimal using the oversampling factor `osf`. This would return an upsampled stimulus vector which should be convolved with an equally upsampled HRF. This can be ensured by using the same `osf` in :func:`linescanning.glm.double_gamma`.
Parameters
----------
onset_df: pandas.DataFrame
onset times as read in with :class:`linescanning.utils.ParseExpToolsFile`
scan_length: float, optional
        length of the scan (number of volumes), by default None
TR: float, optional
Repetition time, by default 0.105. Will be used to calculate the required length of the stimulus vector
osf: [type], optional
Oversampling factor used to account for decimal onset times, by default None
type: str, optional
        Use block design or event-related design, by default 'event'. If set to 'block', `block_length` is required.
block_length: int, optional
Duration of block in seconds, by default None
amplitude: int, list, optional
        Amplitude to be used when creating the stimulus vector, by default None. If nothing is specified, the amplitude will be set to '1', like you would in a regular FSL 1-/3-column file. If you want variable amplitudes for different events in a simulation, you can specify a list with a length equal to the number of events present in `onset_df`.
Returns
----------
dict
Dictionary collecting numpy array stimulus vectors for each event present in `onset_df` under the keys <event name>
Raises
----------
ValueError
`onset_df` should contain event names
ValueError
if multiple amplitudes are requested but the length of `amplitude` does not match the number of events
ValueError
`block_length` should be an integer
Example
----------
>>> from linescanning import utils
>>> from linescanning import glm
>>> exp_file = 'path/to/exptools2_file.tsv'
    >>> exp_df = utils.ParseExpToolsFile(exp_file, subject=1, run=1)
>>> times = exp_df.get_onset_df()
>>> # oversample with factor 1000 to get rid of 3 decimals in onset times
>>> osf = 1000
>>> # make stimulus vectors
>>> stims = glm.make_stimulus_vector(times, scan_length=400, osf=osf, type='event')
>>> stims
{'left': array([0., 0., 0., ..., 0., 0., 0.]),
'right': array([0., 0., 0., ..., 0., 0., 0.])}
"""
# check if we should reset or not
try:
onset_df = onset_df.reset_index()
except:
onset_df = onset_df
# check conditions we have
try:
names_cond = onset_df['event_type'].unique()
names_cond.sort()
except:
raise ValueError('Could not extract condition names; are you sure you formatted the dataframe correctly?')
# check if we got multiple amplitudes
if isinstance(amplitude, np.ndarray):
ampl_array = amplitude
elif isinstance(amplitude, list):
ampl_array = np.array(amplitude)
else:
ampl_array = False
# loop through unique conditions
stim_vectors = {}
for idx,condition in enumerate(names_cond):
if isinstance(ampl_array, np.ndarray):
if ampl_array.shape[0] == names_cond.shape[0]:
ampl = amplitude[idx]
print(f"Amplitude for event '{names_cond[idx]}' = {round(ampl,2)}")
else:
raise ValueError(f"Nr of amplitudes ({ampl_array.shape[0]}) does not match number of conditions ({names_cond.shape[0]})")
else:
ampl = 1
Y = np.zeros(int((scan_length*TR)*osf))
if type == "event":
for rr, ii in enumerate(onset_df['onset']):
if onset_df['event_type'][rr] == condition:
try:
Y[int(ii*osf)] = ampl
except:
warnings.warn(f"Warning: could not include event {rr} with t = {ii}. Probably experiment continued after functional acquisition")
elif type == 'block':
if not isinstance(block_length, int):
raise ValueError("Please specify the length of the block in seconds (integer)")
for rr, ii in enumerate(onset_df['onset']):
if onset_df['event_type'][rr] == condition:
Y[int(ii*osf):int((ii+block_length)*osf)] = ampl
stim_vectors[condition] = Y
return stim_vectors
def convolve_hrf(hrf, stim_v, make_figure=False, xkcd=False):
"""convolve_hrf
Convolve :func:`linescanning.glm.double_gamma` with :func:`linescanning.glm.make_stimulus_vector`. There's an option to plot the result in a nice overview figure, though python-wise it's not the prettiest..
Parameters
----------
hrf: numpy.ndarray
HRF across given timepoints with shape (,`x.shape[0]`)
stim_v: numpy.ndarray, list
Stimulus vector as per :func:`linescanning.glm.make_stimulus_vector` or numpy array containing one stimulus vector (e.g., a *key* from :func:`linescanning.glm.make_stimulus_vector`)
make_figure: bool, optional
Create overview figure of HRF, stimulus vector, and convolved stimulus vector, by default False
osf: [type], optional
Oversampling factor used to account for decimal onset times, by default None
scan_length: int
number of volumes in `data` (= `scan_length` in :func:`linescanning.glm.make_stimulus_vector`)
xkcd: bool, optional
        Plot the figure in XKCD-style (cartoon), by default False
add_array1: numpy.ndarray, optional
additional stimulus vector to be plotted on top of `stim_v`, by default None
add_array2: numpy.ndarray, optional
additional **convolved** stimulus vector to be plotted on top of `stim_v`, by default None
regressors: pandas.DataFrame
add a bunch of regressors with shape <time,voxels> to the design matrix. Should be in the dimensions of the functional data, not the oversampled..
Returns
----------
matplotlib.plot
if `make_figure=True`, a figure will be displayed
pandas.DataFrame
if `osf > 1`, then resampled stimulus vector DataFrame is returned. If not, the convolved stimulus vectors are returned in a dataframe as is
Example
----------
>>> from linescanning.glm import convolve_hrf
>>> convolved_stim_vector_left = convolve_hrf(hrf_custom, stims, make_figure=True, xkcd=True) # creates figure too
>>> convolved_stim_vector_left = convolve_hrf(hrf_custom, stims) # no figure
"""
def plot(stim_v, hrf, convolved, xkcd=False):
fig = plt.figure(figsize=(20,6))
gs = fig.add_gridspec(2, 2, width_ratios=[20, 10], hspace=0.7)
ax0 = fig.add_subplot(gs[0,0])
LazyPlot(stim_v,
color="#B1BDBD",
axs=ax0,
title="Events",
y_lim=[-.5, 1.5],
x_label='Time (*osf)',
y_label='Activity (A.U.)',
xkcd=xkcd,
font_size=16)
# check if we got derivatives; if so, select first element (= standard HRF)
if isinstance(convolved, list):
convolved = np.array(convolved)
if convolved.shape[-1] > 1:
convolved = convolved[:,0]
ax1 = fig.add_subplot(gs[1, 0])
LazyPlot(convolved,
axs=ax1,
title="Convolved stimulus-vector",
x_label='Time (*osf)',
y_label='Activity (A.U.)',
xkcd=xkcd,
font_size=16)
ax2 = fig.add_subplot(gs[:, 1])
LazyPlot(hrf,
axs=ax2,
title="HRF",
x_label='Time (*osf)',
xkcd=xkcd,
font_size=16)
# check hrf input
if isinstance(hrf, list):
hrfs = hrf.copy()
elif isinstance(hrf, np.ndarray):
hrfs = [hrf]
else:
raise ValueError(f"Unknown input type '{type(hrf)}' for HRF. Must be list or array")
# convolve stimulus vectors
if isinstance(stim_v, np.ndarray):
        if len(hrfs) >= 1:
            convolved_stim_vector = np.zeros((stim_v.shape[0], len(hrfs)))
            for ix, rf in enumerate(hrfs):
                convolved_stim_vector[:, ix] = np.convolve(stim_v, rf, 'full')[:stim_v.shape[0]]
if make_figure:
plot(stim_v, hrf[0], convolved_stim_vector, xkcd=xkcd)
plt.show()
elif isinstance(stim_v, dict):
        if len(hrfs) >= 1:
convolved_stim_vector = {}
for event in list(stim_v.keys()):
                hrf_conv = np.zeros((stim_v[event].shape[0], len(hrfs)))
                for ix, rf in enumerate(hrfs):
hrf_conv[...,ix] = np.convolve(stim_v[event], rf, 'full')[:stim_v[event].shape[0]]
convolved_stim_vector[event] = hrf_conv
if make_figure:
if xkcd:
with plt.xkcd():
plot(stim_v[event], hrf[0], convolved_stim_vector[event])
else:
plot(stim_v[event], hrf[0], convolved_stim_vector[event])
plt.show()
else:
raise ValueError("Data must be 'np.ndarray' or 'dict'")
return convolved_stim_vector
def resample_stim_vector(convolved_array, scan_length, interpolate='nearest'):
"""resample_stim_vector
Resample the oversampled stimulus vector back in to functional time domain
Parameters
----------
convolved_array: dict, numpy.ndarray
oversampled convolved stimulus vector as per :func:`linescanning.glm.convolve_hrf`
scan_length: int
number of volumes in `data` (= `scan_length` in :func:`linescanning.glm.make_stimulus_vector`)
interpolate: str, optional
interpolation method, by default 'nearest'
Returns
----------
dict, numpy.ndarray
convolved stimulus vector in time domain that matches the fMRI acquisition
Example
----------
>>> from linescanning.glm import resample_stim_vector
>>> convolved_stim_vector_left_ds = resample_stim_vector(convolved_stim_vector_left, <`scan_length`>)
"""
if isinstance(convolved_array, np.ndarray):
interpolated = interp1d(np.arange(len(convolved_array)), convolved_array, kind=interpolate, axis=0, fill_value='extrapolate')
downsampled = interpolated(np.linspace(0, len(convolved_array), scan_length))
elif isinstance(convolved_array, dict):
downsampled = {}
for event in list(convolved_array.keys()):
event_arr = convolved_array[event]
if event_arr.shape[-1] > 1:
tmp = np.zeros((scan_length, event_arr.shape[-1]))
for elem in range(event_arr.shape[-1]):
data = event_arr[..., elem]
interpolated = interp1d(
np.arange(len(data)), data, kind=interpolate, axis=0, fill_value='extrapolate')
tmp[...,elem] = interpolated(np.linspace(0, len(data), scan_length))
downsampled[event] = tmp
else:
interpolated = interp1d(np.arange(len(convolved_array[event])), convolved_array[event], kind=interpolate, axis=0, fill_value='extrapolate')
downsampled[event] = interpolated(np.linspace(0, len(convolved_array[event]), scan_length))
else:
raise ValueError("Data must be 'np.ndarray' or 'dict'")
return downsampled
def first_level_matrix(stims_dict, regressors=None, add_intercept=True, names=None):
# make dataframe of stimulus vectors
if isinstance(stims_dict, np.ndarray):
if names:
stims = pd.DataFrame(stims_dict, columns=names)
else:
stims = pd.DataFrame(stims_dict, columns=[f'event {ii}' for ii in range(stims_dict.shape[-1])])
elif isinstance(stims_dict, dict):
# check if we got time/dispersion derivatives
cols = []
data = []
keys = list(stims_dict.keys())
for key in keys:
if stims_dict[key].shape[-1] == 1:
cols.extend([key])
elif stims_dict[key].shape[-1] == 2:
cols.extend([key, f'{key}_1st_derivative'])
elif stims_dict[key].shape[-1] == 3:
cols.extend([key, f'{key}_1st_derivative', f'{key}_2nd_derivative'])
data.append(stims_dict[key])
data = np.concatenate(data, axis=-1)
stims = pd.DataFrame(data, columns=cols)
else:
raise ValueError("Data must be 'np.ndarray' or 'dict'")
# check if we should add intercept
if add_intercept:
intercept = np.ones((stims.shape[0], 1))
intercept_df = pd.DataFrame(intercept, columns=['intercept'])
X_matrix = pd.concat([intercept_df, stims], axis=1)
else:
X_matrix = stims.copy()
# check if we should add regressors
if isinstance(regressors, np.ndarray):
regressors_df = pd.DataFrame(regressors, columns=[f'regressor {ii}' for ii in range(regressors.shape[-1])])
return pd.concat([X_matrix, regressors_df], axis=1)
elif isinstance(regressors, dict):
regressors_df = pd.DataFrame(regressors)
return pd.concat([X_matrix, regressors_df], axis=1)
else:
return X_matrix
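# Sketch of typical usage (event names and array shapes are hypothetical):
# >>> stims = {"left": np.random.rand(300, 1), "right": np.random.rand(300, 3)}
# >>> X = first_level_matrix(stims, add_intercept=True)
# >>> list(X.columns)
# ['intercept', 'left', 'right', 'right_1st_derivative', 'right_2nd_derivative']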
def fit_first_level(stim_vector, data, make_figure=False, copes=None, xkcd=False, plot_vox=None, plot_event=1):
"""fit_first_level
First level models are, in essence, linear regression models run at the level of a single session or single subject. The model is applied on a voxel-wise basis, either on the whole brain or within a region of interest. The timecourse of each voxel is regressed against a predicted BOLD response created by convolving the haemodynamic response function (HRF) with a set of predictors defined within the design matrix (source: https://nilearn.github.io/glm/first_level_model.html)
Parameters
----------
stim_vector: pandas.DataFrame, numpy.ndarray
either the output from :func:`linescanning.glm.resample_stim_vector` (convolved stimulus vector in fMRI-acquisition time domain) or a pandas.DataFrame containing the full design matrix as per the output of :func:`linescanning.glm.first_level_matrix`.
data: numpy.ndarray
<time,voxels> numpy array; same input as **data** from :func:`linescanning.glm.make_stimulus_vector`
make_figure: bool, optional
Create a figure of best-voxel fit, by default False
copes: [type], optional
[description], by default None
xkcd: bool, optional
Plot the figure in XKCD-style (cartoon), by default False
plot_vox: int, optional
Instead of plotting the best-fitting voxel, specify which voxel to plot the timecourse and fit of, by default None
plot_event: str, int, list, optional
If a larger design matrix was passed in with multiple events, you can specify here the name of the event whose betas you'd like to plot. It also accepts a list of event indices, so you could plot the first two events by specifying `plot_event=[1,2]`. Remember, the 0th index is the intercept! By default we plot the event right after the intercept
Returns
----------
numpy.ndarray
betas for each voxel for the intercept and the number of stim_vectors used (in case you also add regressors)
numpy.ndarray
the design matrix `X_conv`
Example
----------
>>> from linescanning.glm import fit_first_level
>>> betas_left,x_conv_left = fit_first_level(convolved_stim_vector_left_ds, data, make_figure=True, xkcd=True) # plots first event
>>> betas_left,x_conv_left = fit_first_level(convolved_stim_vector_left_ds, data, make_figure=True, xkcd=True, plot_event=[1,2]) # plots first two events
"""
# add intercept if input is simple numpy array.
if isinstance(stim_vector, np.ndarray):
if stim_vector.ndim == 1:
stim_vector = stim_vector[:, np.newaxis]
if data.ndim == 1:
data = data[:, np.newaxis]
if stim_vector.shape[0] != data.shape[0]:
stim_vector = stim_vector[:data.shape[0],:]
# create design matrix with intercept
intercept = np.ones((data.shape[0], 1))
intercept_df = pd.DataFrame(intercept, columns=['intercept'])
X_matrix = pd.concat([intercept_df, stim_vector], axis=1)
import math
from contextlib import ExitStack as does_not_raise # noqa: N813
import numpy as np
import pandas as pd
import pytest
from pandas.api.types import is_categorical_dtype
from resources import CONTACT_MODELS
from resources import meet_two
from sid.config import INDEX_NAMES
from sid.simulate import _add_default_duration_to_models
from sid.simulate import _create_group_codes_and_info
from sid.simulate import _create_group_codes_names
from sid.simulate import _process_assort_bys
from sid.simulate import _process_initial_states
from sid.simulate import get_simulate_func
from sid.validation import validate_params
@pytest.mark.end_to_end
def test_simulate_a_simple_model(params, initial_states, tmp_path):
simulate = get_simulate_func(
params=params,
initial_states=initial_states,
contact_models=CONTACT_MODELS,
saved_columns={"other": ["channel_infected_by_contact"]},
path=tmp_path,
seed=144,
)
result = simulate(params)
time_series = result["time_series"].compute()
last_states = result["last_states"]
for df in [time_series, last_states]:
assert isinstance(df, pd.DataFrame)
assert set(df["channel_infected_by_contact"].cat.categories) == {
"not_infected_by_contact",
"standard",
}
@pytest.mark.end_to_end
def test_resume_a_simulation(params, initial_states, tmp_path):
simulate = get_simulate_func(
params=params,
initial_states=initial_states,
contact_models=CONTACT_MODELS,
saved_columns={"other": ["channel_infected_by_contact"]},
path=tmp_path,
seed=144,
)
result = simulate(params)
time_series = result["time_series"].compute()
last_states = result["last_states"]
for df in [time_series, last_states]:
assert isinstance(df, pd.DataFrame)
assert set(df["channel_infected_by_contact"].cat.categories) == {
"not_infected_by_contact",
"standard",
}
resumed_simulate = get_simulate_func(
params=params,
initial_states=last_states,
contact_models=CONTACT_MODELS,
saved_columns={"other": ["channel_infected_by_contact"]},
duration={"start": "2020-02-06", "periods": 5},
path=tmp_path,
seed=144,
)
resumed_result = resumed_simulate(params)
resumed_time_series = resumed_result["time_series"].compute()
resumed_last_states = resumed_result["last_states"]
for df in [resumed_time_series, resumed_last_states]:
assert isinstance(df, pd.DataFrame)
assert set(df["channel_infected_by_contact"].cat.categories) == {
"not_infected_by_contact",
"standard",
}
@pytest.mark.end_to_end
def test_simulate_a_simple_model_without_assort_by(params, initial_states, tmp_path):
contact_models = {
"without_assort": {
"model": meet_two,
"assort_by": [],
"is_recurrent": False,
}
}
params.loc[("infection_prob", "without_assort", "without_assort"), "value"] = 0.1
simulate = get_simulate_func(
params=params,
initial_states=initial_states,
contact_models=contact_models,
saved_columns={"other": ["channel_infected_by_contact"]},
path=tmp_path,
seed=144,
)
result = simulate(params)
time_series = result["time_series"].compute()
last_states = result["last_states"]
for df in [time_series, last_states]:
assert isinstance(df, pd.DataFrame)
assert set(df["channel_infected_by_contact"].cat.categories) == {
"not_infected_by_contact",
"without_assort",
}
@pytest.mark.unit
def test_check_assort_by_are_categoricals(initial_states):
assort_bys = _process_assort_bys(CONTACT_MODELS)
virus_strains = {"names": ["base_strain"], "factors": np.ones(1)}
_ = _process_initial_states(initial_states, assort_bys, virus_strains)
initial_states = initial_states.astype(str)
processed = _process_initial_states(initial_states, assort_bys, virus_strains)
for var in ["age_group", "region"]:
assert is_categorical_dtype(processed[var].dtype)
@pytest.mark.unit
def test_prepare_params(params):
index = pd.MultiIndex.from_tuples([("a", "b", np.nan)], names=INDEX_NAMES)
s = pd.DataFrame(index=index, data={"value": 0, "note": None, "source": None})
import os
from nose.tools import *
import unittest
import pandas as pd
from py_entitymatching.utils.generic_helper import get_install_path
import py_entitymatching.catalog.catalog_manager as cm
import py_entitymatching.utils.catalog_helper as ch
from py_entitymatching.io.parsers import read_csv_metadata
datasets_path = os.sep.join([get_install_path(), 'tests', 'test_datasets'])
catalog_datasets_path = os.sep.join([get_install_path(), 'tests',
'test_datasets', 'catalog'])
path_a = os.sep.join([datasets_path, 'A.csv'])
path_b = os.sep.join([datasets_path, 'B.csv'])
path_c = os.sep.join([datasets_path, 'C.csv'])
class CatalogManagerTestCases(unittest.TestCase):
def setUp(self):
cm.del_catalog()
def tearDown(self):
cm.del_catalog()
def test_get_property_valid_df_name_1(self):
# cm.del_catalog()
df = read_csv_metadata(path_a)
self.assertEqual(cm.get_property(df, 'key'), 'ID')
# cm.del_catalog()
def test_get_property_valid_df_name_2(self):
# cm.del_catalog()
self.assertEqual(cm.get_catalog_len(), 0)
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
self.assertEqual(cm.get_property(C, 'key'), '_id')
self.assertEqual(cm.get_property(C, 'fk_ltable'), 'ltable_ID')
self.assertEqual(cm.get_property(C, 'fk_rtable'), 'rtable_ID')
self.assertEqual(cm.get_property(C, 'ltable').equals(A), True)
self.assertEqual(cm.get_property(C, 'rtable').equals(B), True)
# cm.del_catalog()
@raises(AssertionError)
def test_get_property_invalid_df_1(self):
cm.get_property(10, 'key')
@raises(AssertionError)
def test_get_property_invalid_path_1(self):
# cm.del_catalog()
A = read_csv_metadata(path_a)
cm.get_property(A, None)
# cm.del_catalog()
@raises(KeyError)
def test_get_property_df_notin_catalog(self):
# cm.del_catalog()
A = pd.read_csv(path_a)
cm.get_property(A, 'key')
# cm.del_catalog()
def test_set_property_valid_df_name_value(self):
# cm.del_catalog()
df = pd.read_csv(path_a)
cm.set_property(df, 'key', 'ID')
self.assertEqual(cm.get_property(df, 'key'), 'ID')
# cm.del_catalog()
@raises(AssertionError)
def test_set_property_invalid_df(self):
# cm.del_catalog()
cm.set_property(None, 'key', 'ID')
# cm.del_catalog()
@raises(AssertionError)
def test_set_property_valid_df_invalid_prop(self):
# cm.del_catalog()
A = pd.read_csv(path_a)
cm.set_property(A, None, 'ID')
# cm.del_catalog()
def test_init_properties_valid(self):
# cm.del_catalog()
A = pd.read_csv(path_a)
cm.init_properties(A)
self.assertEqual(cm.is_dfinfo_present(A), True)
# cm.del_catalog()
@raises(AssertionError)
def test_init_properties_invalid_df(self):
cm.init_properties(None)
def test_get_all_properties_valid_1(self):
# cm.del_catalog()
A = read_csv_metadata(path_a)
m = cm.get_all_properties(A)
self.assertEqual(len(m), 1)
self.assertEqual(m['key'], 'ID')
# cm.del_catalog()
def test_get_all_properties_valid_2(self):
# cm.del_catalog()
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
m = cm.get_all_properties(C)
self.assertEqual(len(m), 5)
self.assertEqual(m['key'], '_id')
self.assertEqual(m['fk_ltable'], 'ltable_ID')
self.assertEqual(m['fk_rtable'], 'rtable_ID')
self.assertEqual(m['ltable'].equals(A), True)
self.assertEqual(m['rtable'].equals(B), True)
# cm.del_catalog()
@raises(AssertionError)
def test_get_all_properties_invalid_df_1(self):
# cm.del_catalog()
C = cm.get_all_properties(None)
@raises(KeyError)
def test_get_all_properties_invalid_df_2(self):
# cm.del_catalog()
A = pd.read_csv(path_a)
C = cm.get_all_properties(A)
def test_del_property_valid_df_name(self):
A = read_csv_metadata(path_a)
cm.del_property(A, 'key')
self.assertEqual(len(cm.get_all_properties(A)), 0)
@raises(AssertionError)
def test_del_property_invalid_df(self):
cm.del_property(None, 'key')
@raises(AssertionError)
def test_del_property_invalid_property(self):
A = read_csv_metadata(path_a)
cm.del_property(A, None)
@raises(KeyError)
def test_del_property_df_notin_catalog(self):
A = pd.read_csv(path_a)
cm.del_property(A, 'key')
@raises(KeyError)
def test_del_property_prop_notin_catalog(self):
A = read_csv_metadata(path_a)
cm.del_property(A, 'key1')
def test_del_all_properties_valid_1(self):
A = read_csv_metadata(path_a)
cm.del_all_properties(A)
self.assertEqual(cm.is_dfinfo_present(A), False)
def test_del_all_properties_valid_2(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = read_csv_metadata(path_c, ltable=A, rtable=B)
cm.del_all_properties(C)
self.assertEqual(cm.is_dfinfo_present(C), False)
@raises(AssertionError)
def test_del_all_properties_invalid_df(self):
cm.del_all_properties(None)
@raises(KeyError)
def test_del_all_properties_df_notin_catalog(self):
A = pd.read_csv(path_a)
cm.del_all_properties(A)
def test_get_catalog_valid(self):
A = read_csv_metadata(path_a)
cg = cm.get_catalog()
self.assertEqual(len(cg), 1)
def test_del_catalog_valid(self):
A = read_csv_metadata(path_a)
cm.del_catalog()
cg = cm.get_catalog()
self.assertEqual(len(cg), 0)
def test_is_catalog_empty(self):
A = read_csv_metadata(path_a)
cm.del_catalog()
self.assertEqual(cm.is_catalog_empty(), True)
def test_is_dfinfo_present_valid_1(self):
A = read_csv_metadata(path_a)
status = cm.is_dfinfo_present(A)
self.assertEqual(status, True)
def test_is_dfinfo_present_valid_2(self):
A = pd.read_csv(path_a)
status = cm.is_dfinfo_present(A)
self.assertEqual(status, False)
@raises(AssertionError)
def test_is_dfinfo_present_invalid(self):
cm.is_dfinfo_present(None)
def test_is_property_present_for_df_valid_1(self):
A = read_csv_metadata(path_a)
status = cm.is_property_present_for_df(A, 'key')
self.assertEqual(status, True)
def test_is_property_present_for_df_valid_2(self):
A = read_csv_metadata(path_a)
status = cm.is_property_present_for_df(A, 'key1')
self.assertEqual(status, False)
@raises(AssertionError)
def test_is_property_present_for_df_invalid_df(self):
cm.is_property_present_for_df(None, 'key')
@raises(KeyError)
def test_is_property_present_for_df_notin_catalog(self):
A = pd.read_csv(path_a)
cm.is_property_present_for_df(A, 'key')
def test_catalog_len(self):
A = read_csv_metadata(path_a)
self.assertEqual(cm.get_catalog_len(), 1)
def test_set_properties_valid_1(self):
A = read_csv_metadata(path_a)
p = cm.get_all_properties(A)
B = pd.read_csv(path_b)
cm.init_properties(B)
cm.set_properties(B,p)
self.assertEqual(cm.get_all_properties(B)==p, True)
def test_set_properties_valid_2(self):
A = read_csv_metadata(path_a)
p = cm.get_all_properties(A)
B = pd.read_csv(path_b)
cm.set_properties(B,p)
self.assertEqual(cm.get_all_properties(B)==p, True)
@raises(AssertionError)
def test_set_properties_invalid_df_1(self):
cm.set_properties(None, {})
@raises(AssertionError)
def test_set_properties_invalid_dict_1(self):
A = read_csv_metadata(path_a)
cm.set_properties(A, None)
def test_set_properties_df_notin_catalog_replace_false(self):
A = read_csv_metadata(path_a)
cm.set_properties(A, {}, replace=False)
self.assertEqual(cm.get_key(A), 'ID')
# def test_has_property_valid_1(self):
# A = read_csv_metadata(path_a)
# self.assertEqual(cm.has_property(A, 'key'), True)
#
# def test_has_property_valid_2(self):
# A = read_csv_metadata(path_a)
# self.assertEqual(cm.has_property(A, 'key1'), False)
#
# @raises(AssertionError)
# def test_has_property_invalid_df(self):
# cm.has_property(None, 'key')
#
# @raises(AssertionError)
# def test_has_property_invalid_prop_name(self):
# A = read_csv_metadata(path_a)
# cm.has_property(A, None)
#
# @raises(KeyError)
# def test_has_property_df_notin_catalog(self):
# A = pd.read_csv(path_a)
# cm.has_property(A, 'key')
def test_copy_properties_valid_1(self):
A = read_csv_metadata(path_a)
A1 = pd.read_csv(path_a)
cm.copy_properties(A, A1)
self.assertEqual(cm.is_dfinfo_present(A1), True)
p = cm.get_all_properties(A)
p1 = cm.get_all_properties(A1)
self.assertEqual(p, p1)
self.assertEqual(cm.get_key(A1), cm.get_key(A))
def test_copy_properties_valid_2(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = read_csv_metadata(path_c, ltable=A, rtable=B)
C1 = pd.read_csv(path_c)
cm.copy_properties(C, C1)
self.assertEqual(cm.is_dfinfo_present(C1), True)
p = cm.get_all_properties(C1)
p1 = cm.get_all_properties(C1)
self.assertEqual(p, p1)
self.assertEqual(cm.get_key(C1), cm.get_key(C))
self.assertEqual(cm.get_ltable(C1).equals(A), True)
self.assertEqual(cm.get_rtable(C1).equals(B), True)
self.assertEqual(cm.get_fk_ltable(C1), cm.get_fk_ltable(C))
self.assertEqual(cm.get_fk_rtable(C1), cm.get_fk_rtable(C))
@raises(AssertionError)
def test_copy_properties_invalid_tar_df(self):
A = read_csv_metadata(path_a)
cm.copy_properties(A, None)
@raises(AssertionError)
def test_copy_properties_invalid_src_df(self):
A = read_csv_metadata(path_a)
cm.copy_properties(None, A)
def test_copy_properties_update_false_1(self):
A = read_csv_metadata(path_a)
A1 = read_csv_metadata(path_a)
status=cm.copy_properties(A, A1, replace=False)
self.assertEqual(status, False)
def test_copy_properties_update_false_2(self):
A = read_csv_metadata(path_a)
A1 = pd.read_csv(path_a)
cm.copy_properties(A, A1, replace=False)
p = cm.get_all_properties(A)
p1 = cm.get_all_properties(A1)
self.assertEqual(p, p1)
self.assertEqual(cm.get_key(A1), cm.get_key(A))
@raises(KeyError)
def test_copy_properties_src_df_notin_catalog(self):
A = pd.read_csv(path_a)
A1 = pd.read_csv(path_a)
cm.copy_properties(A, A1)
def test_get_key_valid(self):
A = pd.read_csv(path_a)
cm.set_key(A, 'ID')
self.assertEqual(cm.get_key(A), 'ID')
@raises(AssertionError)
def test_get_key_invalid_df(self):
cm.get_key(None)
@raises(KeyError)
def test_get_key_df_notin_catalog(self):
A = pd.read_csv(path_a)
cm.get_key(A)
def test_set_key_valid(self):
A = pd.read_csv(path_a)
cm.set_key(A, 'ID')
self.assertEqual(cm.get_key(A), 'ID')
@raises(AssertionError)
def test_set_key_invalid_df(self):
cm.set_key(None, 'ID')
@raises(KeyError)
def test_set_key_notin_df(self):
A = pd.read_csv(path_a)
cm.set_key(A, 'ID1')
def test_set_key_with_dupids(self):
p = os.sep.join([catalog_datasets_path, 'A_dupid.csv'])
A = pd.read_csv(p)
status = cm.set_key(A, 'ID')
self.assertEqual(status, False)
def test_set_key_with_mvals(self):
p = os.sep.join([catalog_datasets_path, 'A_mvals.csv'])
A = pd.read_csv(p)
status = cm.set_key(A, 'ID')
self.assertEqual(status, False)
def test_get_fk_ltable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = read_csv_metadata(path_c, ltable=A, rtable=B)
self.assertEqual(cm.get_fk_ltable(C), cm.get_property(C, 'fk_ltable'))
self.assertEqual(cm.get_fk_ltable(C), 'ltable_ID')
@raises(AssertionError)
def test_get_fk_ltable_invalid_df(self):
cm.get_fk_ltable(None)
def test_get_fk_rtable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = read_csv_metadata(path_c, ltable=A, rtable=B)
self.assertEqual(cm.get_fk_rtable(C), cm.get_property(C, 'fk_rtable'))
self.assertEqual(cm.get_fk_rtable(C), 'rtable_ID')
@raises(AssertionError)
def test_get_fk_rtable_invalid_df(self):
cm.get_fk_rtable(None)
def test_set_fk_ltable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = pd.read_csv(path_c)
cm.set_fk_ltable(C, 'ltable_ID')
self.assertEqual(cm.get_fk_ltable(C), 'ltable_ID')
@raises(AssertionError)
def test_set_fk_ltable_invalid_df(self):
cm.set_fk_ltable(None, 'ltable_ID')
@raises(KeyError)
def test_set_fk_ltable_invalid_col(self):
C = pd.read_csv(path_c)
cm.set_fk_ltable(C, 'ltable_ID1')
def test_set_fk_rtable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = pd.read_csv(path_c)
cm.set_fk_rtable(C, 'rtable_ID')
self.assertEqual(cm.get_fk_rtable(C), 'rtable_ID')
@raises(AssertionError)
def test_set_fk_rtable_invalid_df(self):
cm.set_fk_rtable(None, 'rtable_ID')
@raises(KeyError)
def test_set_fk_rtable_invalid_col(self):
C = pd.read_csv(path_c)
cm.set_fk_rtable(C, 'rtable_ID1')
def test_validate_and_set_fk_ltable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b)
C = pd.read_csv(path_c)
cm.validate_and_set_fk_ltable(C, 'ltable_ID', A, 'ID')
self.assertEqual(cm.get_fk_ltable(C), 'ltable_ID')
def test_validate_and_set_fk_ltable_err_case_1(self):
C = pd.read_csv(path_c)
p = os.sep.join([catalog_datasets_path, 'A_dupid.csv'])
A = pd.read_csv(p)
status = cm.validate_and_set_fk_ltable(C, 'ltable_ID', A, 'ID')
self.assertEqual(status, False)
self.assertEqual(cm.is_dfinfo_present(C), False)
def test_validate_and_set_fk_ltable_err_case_2(self):
C = pd.read_csv(path_c)
p = os.sep.join([catalog_datasets_path, 'A_inv_fk.csv'])
A = pd.read_csv(p)
status = cm.validate_and_set_fk_ltable(C, 'ltable_ID', A, 'ID')
self.assertEqual(status, False)
self.assertEqual(cm.is_dfinfo_present(C), False)
def test_validate_and_set_fk_rtable_valid(self):
A = read_csv_metadata(path_a)
C = pd.read_csv(path_c)
cm.validate_and_set_fk_rtable(C, 'ltable_ID', A, 'ID')
self.assertEqual(cm.get_fk_rtable(C), 'ltable_ID')
def test_validate_and_set_fk_rtable_err_case_1(self):
C = pd.read_csv(path_c)
p = os.sep.join([catalog_datasets_path, 'A_dupid.csv'])
A = pd.read_csv(p)
status = cm.validate_and_set_fk_rtable(C, 'ltable_ID', A, 'ID')
self.assertEqual(status, False)
self.assertEqual(cm.is_dfinfo_present(C), False)
def test_validate_and_set_fk_rtable_err_case_2(self):
C = pd.read_csv(path_c)
p = os.sep.join([catalog_datasets_path, 'A_inv_fk.csv'])
A = pd.read_csv(p)
status = cm.validate_and_set_fk_rtable(C, 'ltable_ID', A, 'ID')
self.assertEqual(status, False)
self.assertEqual(cm.is_dfinfo_present(C), False)
# def test_get_reqd_metadata_from_catalog_valid_1(self):
# A = read_csv_metadata(path_a)
# d = cm.get_reqd_metadata_from_catalog(A, 'key')
# self.assertEqual(d['key'], cm.get_key(A))
#
# def test_get_reqd_metadata_from_catalog_valid_2(self):
# A = read_csv_metadata(path_a)
# d = cm.get_reqd_metadata_from_catalog(A, ['key'])
# self.assertEqual(d['key'], cm.get_key(A))
#
# def test_get_reqd_metadata_from_catalog_valid_3(self):
# A = read_csv_metadata(path_a)
# B = read_csv_metadata(path_b, key='ID')
# C = read_csv_metadata(path_c, ltable=A, rtable=B)
# d = cm.get_reqd_metadata_from_catalog(C, ['key', 'fk_ltable', 'fk_rtable', 'ltable', 'rtable'])
# self.assertEqual(d['key'], cm.get_key(C))
# self.assertEqual(d['fk_ltable'], cm.get_fk_ltable(C))
# self.assertEqual(d['fk_rtable'], cm.get_fk_rtable(C))
# self.assertEqual(cm.get_ltable(C).equals(A), True)
# self.assertEqual(cm.get_rtable(C).equals(B), True)
#
# @raises(AssertionError)
# def test_get_reqd_metadata_from_catalog_err_1(self):
# cm.get_reqd_metadata_from_catalog(None, ['key'])
#
# @raises(AssertionError)
# def test_get_reqd_metadata_from_catalog_err_2(self):
# A = read_csv_metadata(path_a)
# B = read_csv_metadata(path_b, key='ID')
# C = read_csv_metadata(path_c, ltable=A, rtable=B)
# d = cm.get_reqd_metadata_from_catalog(C, ['key', 'fk_ltable1', 'fk_rtable', 'ltable', 'rtable'])
#
#
# def test_update_reqd_metadata_with_kwargs_valid_1(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# metadata = {}
# cm._update_reqd_metadata_with_kwargs(metadata, d, ['key'])
# self.assertEqual(metadata['key'], d['key'])
#
# def test_update_reqd_metadata_with_kwargs_valid_2(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# metadata = {}
# cm._update_reqd_metadata_with_kwargs(metadata, d, 'key')
# self.assertEqual(metadata['key'], d['key'])
#
# @raises(AssertionError)
# def test_update_reqf_metadata_with_kwargs_invalid_dict_1(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# cm._update_reqd_metadata_with_kwargs(None, d, 'key')
#
# @raises(AssertionError)
# def test_update_reqf_metadata_with_kwargs_invalid_dict_2(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# cm._update_reqd_metadata_with_kwargs(d, None, 'key')
#
# @raises(AssertionError)
# def test_update_reqd_metadata_with_kwargs_invalid_elts(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# metadata = {}
# cm._update_reqd_metadata_with_kwargs(metadata, d, ['key1'])
# def test_get_diff_with_reqd_metadata_valid_1(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# d1 = cm._get_diff_with_required_metadata(d, 'key1')
# self.assertEqual(len(d1), 1)
#
# def test_get_diff_with_reqd_metadata_valid_2(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# d1 = cm._get_diff_with_required_metadata(d, ['key1'])
# self.assertEqual(len(d1), 1)
#
# @raises(AssertionError)
# def test_get_diff_with_reqd_metadata_invalid_dict(self):
# d1 = cm._get_diff_with_required_metadata(None, ['key1'])
# def test_is_all_reqd_metadata_present_valid_1(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# self.assertEqual(cm.is_all_reqd_metadata_present(d, 'key'),True)
#
# def test_is_all_reqd_metadata_present_valid_2(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# self.assertEqual(cm.is_all_reqd_metadata_present(d, ['key']),True)
#
# def test_is_all_reqd_metadata_present_valid_3(self):
# A = read_csv_metadata(path_a)
# d = cm.get_all_properties(A)
# self.assertEqual(cm.is_all_reqd_metadata_present(d, ['key1']), False)
#
# @raises(AssertionError)
# def test_is_all_reqd_metadata_present_invalid_dict(self):
# cm.is_all_reqd_metadata_present(None, 'key')
def test_show_properties_for_df_valid_1(self):
A = read_csv_metadata(path_a)
cm.show_properties(A)
def test_show_properties_for_df_valid_2(self):
A = pd.read_csv(path_a)
cm.show_properties(A)
def test_show_properties_for_df_valid_3(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
cm.show_properties(C)
def test_show_properties_for_objid_valid_1(self):
A = read_csv_metadata(path_a)
cm.show_properties_for_id(id(A))
@raises(KeyError)
def test_show_properties_for_objid_err_1(self):
A = pd.read_csv(path_a)
cm.show_properties_for_id(id(A))
def test_show_properties_for_objid_valid_3(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
cm.show_properties_for_id(id(C))
def test_validate_metadata_for_table_valid_1(self):
A = pd.read_csv(path_a)
status = cm._validate_metadata_for_table(A, 'ID', 'table', None, False)
self.assertEqual(status, True)
def test_validate_metadata_for_table_valid_2(self):
import logging
logger = logging.getLogger(__name__)
A = pd.read_csv(path_a)
status = cm._validate_metadata_for_table(A, 'ID', 'table', logger, True)
self.assertEqual(status, True)
@raises(AssertionError)
def test_validate_metadata_for_table_invalid_df(self):
status = cm._validate_metadata_for_table(None, 'ID', 'table', None, False)
@raises(KeyError)
def test_validate_metadata_for_table_key_notin_catalog(self):
A = pd.read_csv(path_a)
status = cm._validate_metadata_for_table(A, 'ID1', 'table', None, False)
@raises(KeyError)
def test_validate_metadata_for_table_key_notstring(self):
A = pd.read_csv(path_a)
status = cm._validate_metadata_for_table(A, None, 'table', None, False)
@raises(AssertionError)
def test_validate_metadata_for_table_invalid_key(self):
A = pd.read_csv(path_a)
status = cm._validate_metadata_for_table(A, 'zipcode', 'table', None, False)
def test_validate_metadata_for_candset_valid_1(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtable_ID', A, B, 'ID', 'ID', None, False)
self.assertEqual(status, True)
@raises(AssertionError)
def test_validate_metadata_for_candset_invalid_df(self):
status = cm._validate_metadata_for_candset(None, '_id', 'ltable_ID', 'rtable_ID', None, None,
'ID', 'ID', None, False)
@raises(KeyError)
def test_validate_metadata_for_candset_id_notin(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
status = cm._validate_metadata_for_candset(C, 'id', 'ltable_ID', 'rtable_ID', A, B, 'ID', 'ID', None, False)
@raises(KeyError)
def test_validate_metadata_for_candset_fk_ltable_notin(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
status = cm._validate_metadata_for_candset(C, '_id', 'ltableID', 'rtable_ID', A, B, 'ID', 'ID', None, False)
@raises(KeyError)
def test_validate_metadata_for_candset_fk_rtable_notin(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtableID', A, B, 'ID', 'ID', None, False)
@raises(AssertionError)
def test_validate_metadata_for_candset_invalid_ltable(self):
B = pd.read_csv(path_b)
C = pd.read_csv(path_c)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtable_ID', None, B, 'ID', 'ID', None, False)
@raises(AssertionError)
def test_validate_metadata_for_candset_invalid_rtable(self):
B = pd.read_csv(path_b)
C = pd.read_csv(path_c)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtable_ID', B, None, 'ID', 'ID', None, False)
@raises(KeyError)
def test_validate_metadata_for_candset_lkey_notin_ltable(self):
A = pd.read_csv(path_a)
B = pd.read_csv(path_b)
C = pd.read_csv(path_c)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtable_ID', A, B, 'ID1', 'ID', None, False)
@raises(KeyError)
def test_validate_metadata_for_candset_rkey_notin_rtable(self):
A = pd.read_csv(path_a)
B = pd.read_csv(path_b)
C = pd.read_csv(path_c)
status = cm._validate_metadata_for_candset(C, '_id', 'ltable_ID', 'rtable_ID', A, B, 'ID', 'ID1', None, False)
def test_get_keys_for_ltable_rtable_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
l_key, r_key = cm.get_keys_for_ltable_rtable(A, B, None, False)
self.assertEqual(l_key, 'ID')
self.assertEqual(r_key, 'ID')
@raises(AssertionError)
def test_get_keys_for_ltable_rtable_invalid_ltable(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
l_key, r_key = cm.get_keys_for_ltable_rtable(None, B, None, False)
@raises(AssertionError)
def test_get_keys_for_ltable_rtable_invalid_rtable(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
l_key, r_key = cm.get_keys_for_ltable_rtable(A, None, None, False)
def test_get_metadata_for_candset_valid(self):
A = read_csv_metadata(path_a)
B = read_csv_metadata(path_b, key='ID')
C = read_csv_metadata(path_c, ltable=A, rtable=B)
key, fk_ltable, fk_rtable, ltable, rtable, l_key, r_key = cm.get_metadata_for_candset(C, None, False)
self.assertEqual(key, '_id')
self.assertEqual(fk_ltable, 'ltable_ID')
self.assertEqual(fk_rtable, 'rtable_ID')
self.assertEqual(l_key, 'ID')
self.assertEqual(r_key, 'ID')
self.assertEqual(ltable.equals(A), True)
self.assertEqual(rtable.equals(B), True)
@raises(AssertionError)
def test_get_metadata_for_candset_invalid_df(self):
cm.get_metadata_for_candset(None, None, False)
#--- catalog ---
def test_catalog_singleton_isinstance(self):
from py_entitymatching.catalog.catalog import Singleton
x = Singleton(object)
x.__instancecheck__(object)
@raises(TypeError)
def test_catalog_singleton_call(self):
from py_entitymatching.catalog.catalog import Singleton
x = Singleton(object)
x.__call__()
# -- catalog helper --
def test_check_attrs_present_valid_1(self):
A = pd.read_csv(path_a)
status = ch.check_attrs_present(A, 'ID')
self.assertEqual(status, True)
def test_check_attrs_present_valid_2(self):
A = pd.read_csv(path_a)
status = ch.check_attrs_present(A, ['ID'])
self.assertEqual(status, True)
def test_check_attrs_present_valid_3(self):
A = pd.read_csv(path_a)
status = ch.check_attrs_present(A, ['_ID'])
self.assertEqual(status, False)
@raises(AssertionError)
def test_check_attrs_present_invalid_df(self):
ch.check_attrs_present(None, 'ID')
def test_check_attrs_invalid_None(self):
A = pd.read_csv(path_a)
status = ch.check_attrs_present(A, None)
self.assertEqual(status, False)
@raises(AssertionError)
def test_are_all_attrs_present_invalid_df(self):
ch.are_all_attrs_in_df(None, 'id')
def test_are_all_attrs_present_invalid_None(self):
A = pd.read_csv(path_a)
status = ch.are_all_attrs_in_df(A, None)
self.assertEqual(status, False)
def test_is_attr_unique_valid_1(self):
A = pd.read_csv(path_a)
status = ch.is_attr_unique(A, 'ID')
self.assertEqual(status, True)
def test_is_attr_unique_valid_2(self):
A = pd.read_csv(path_a)
status = ch.is_attr_unique(A, 'zipcode')
self.assertEqual(status, False)
@raises(AssertionError)
def test_is_attr_unique_invalid_df(self):
ch.is_attr_unique(None, 'zipcode')
@raises(AssertionError)
def test_is_attr_unique_invalid_attr(self):
A = pd.read_csv(path_a)
ch.is_attr_unique(A, None)
def test_does_contain_missing_values_valid_1(self):
A = pd.read_csv(path_a)
status = ch.does_contain_missing_vals(A, 'ID')
self.assertEqual(status, False)
def test_does_contain_missing_values_valid_2(self):
p = os.sep.join([catalog_datasets_path, 'A_mvals.csv'])
A = pd.read_csv(p)
status = ch.does_contain_missing_vals(A, 'ID')
self.assertEqual(status, True)
@raises(AssertionError)
def test_does_contain_missing_values_invalid_df(self):
ch.does_contain_missing_vals(None, 'zipcode')
@raises(AssertionError)
def test_does_invalid_attr(self):
A = pd.read_csv(path_a)
ch.does_contain_missing_vals(A, None)
def test_is_key_attribute_valid_1(self):
A = pd.read_csv(path_a)
status = ch.is_key_attribute(A, 'ID', True)
self.assertEqual(status, True)
def test_is_key_attribute_valid_2(self):
A = pd.read_csv(path_a)
status = ch.is_key_attribute(A, 'zipcode', True)
self.assertEqual(status, False)
def test_is_key_attribute_valid_3(self):
p = os.sep.join([catalog_datasets_path, 'A_mvals.csv'])
A = pd.read_csv(p)
status = ch.is_key_attribute(A, 'ID', True)
self.assertEqual(status, False)
def test_is_key_attribute_valid_4(self):
A = pd.DataFrame(columns=['id', 'name'])
status = ch.is_key_attribute(A, 'id')
self.assertEqual(status, True)
@raises(AssertionError)
def test_is_key_attribute_invalid_df(self):
ch.is_key_attribute(None, 'id')
@raises(AssertionError)
def test_is_key_attribute_invalid_attr(self):
A = pd.read_csv(path_a)
ch.is_key_attribute(A, None)
def test_check_fk_constraint_valid_1(self):
A = pd.read_csv(path_a)
B = pd.read_csv(path_b)
C = pd.read_csv(path_c)
status = ch.check_fk_constraint(C, 'ltable_ID', A, 'ID')
self.assertEqual(status, True)
status = ch.check_fk_constraint(C, 'rtable_ID', B, 'ID')
self.assertEqual(status, True)
@raises(AssertionError)
def test_check_fk_constraint_invalid_foreign_df(self):
ch.check_fk_constraint(None, 'rtable_ID', pd.DataFrame(), 'ID')
@raises(AssertionError)
def test_check_fk_constraint_invalid_base_df(self):
ch.check_fk_constraint(pd.DataFrame(), 'rtable_ID', None, 'ID')
@raises(AssertionError)
def test_check_fk_constraint_invalid_base_attr(self):
ch.check_fk_constraint(pd.DataFrame(), 'rtable_ID', pd.DataFrame(), None)
@raises(AssertionError)
def test_check_fk_constraint_invalid_foreign_attr(self):
ch.check_fk_constraint(pd.DataFrame(), None, pd.DataFrame(), 'ID')
def test_check_fk_constraint_invalid_attr_notin(self):
A = pd.read_csv(path_a)
B = pd.read_csv(path_b)
C = pd.read_csv(path_c)
status = ch.check_fk_constraint(C, 'ltable_ID', A, 'ID1')
self.assertEqual(status, False)
def test_check_fk_constraint_invalid_attr_mval(self):
A = pd.read_csv(path_a)
B = pd.read_csv(path_b)
C = pd.read_csv(path_c)
C.loc[0, 'ltable_ID'] = float('nan')  # .ix and pd.np are removed in modern pandas
status = ch.check_fk_constraint(C, 'ltable_ID', A, 'ID')
self.assertEqual(status, False)
def test_does_contain_rows_valid_1(self):
A = pd.read_csv(path_a)
status = ch.does_contain_rows(A)
self.assertEqual(status, True)
def test_does_contain_rows_valid_2(self):
A = pd.DataFrame()
status = ch.does_contain_rows(A)
self.assertEqual(status, False)
@raises(AssertionError)
def test_does_contain_rows_invalid(self):
ch.does_contain_rows(None)
def test_get_name_for_key_valid_1(self):
A = pd.read_csv(path_a)
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 1 10:26:52 2021
@author: dlvilla
Created on Tue Apr 20 14:50:39 2021
Copyright Notice
=================
Copyright 2021 National Technology and Engineering Solutions of Sandia, LLC.
Under the terms of Contract DE-NA0003525, there is a non-exclusive license
for use of this work by or on behalf of the U.S. Government.
Export of this program may require a license from the
United States Government.
Please refer to the LICENSE.md file for a full description of the license
terms for MEWS.
The license for MEWS is the Modified BSD License and copyright information
must be replicated in any derivative works that use the source code.
@author: dlvilla
MEWS = Multi-scenario Extreme Weather Simulator
METHODS mews.weather.DOE2Weather.read_doe2_bin and
mews.weather.DOE2Weather.write_doe2_bin
Are translations of parts of BIN2TXT.F and TXT2BIN.F translated with permission
from James and <NAME> and Associates (JJH&A). The license for these utilities
must be formed with (JJH&A) before they are distributed in any other package
besides MEWS
"""
from numpy import zeros,cumsum,arange, int64, float64,array
import numpy as np
import pandas as pd
from subprocess import Popen, PIPE
from os.path import isfile, dirname,basename, join
from os import chdir as cd
from os import remove as rm
from os import getcwd as pwd
import os
from shutil import copy as cp
from pandas import DataFrame, DatetimeIndex, DateOffset, Series
from datetime import datetime, timedelta
import warnings
import logging
import struct
class DataFormats():
# changing this will cause significant portions of the code to fail
header_dtype = np.dtype([('location_IWDID','a20'),
('year_IWYR', 'i4'),
('latitude_WLAT','f4'),
('longitude_WLONG','f4'),
('timezone_IWTZN','i4'),
('record_length_LRECX','i4'),
('number_days_NUMDAY','i4'),
('clearness_number_CLN_IM1','f4'),
('ground_temperature_GT_IM1','f4'),
('solar_flag_IWSOL','i4')])
column_description = ['MONTH (1-12)',
'DAY OF MONTH',
'HOUR OF DAY',
'WET BULB TEMP (DEG F)',
'DRY BULB TEMP (DEG F)',
'PRESSURE (INCHES OF HG)',
'CLOUD AMOUNT (0 - 10)',
'SNOW FLAG (1 = SNOWFALL)',
'RAIN FLAG (1 = RAINFALL)',
'WIND DIRECTION (0 - 15; 0=N, 1=NNE, ETC)',
'HUMIDITY RATIO (LB H2O/LB AIR)',
'DENSITY OF AIR (LB/CU FT)',
'SPECIFIC ENTHALPY (BTU/LB)',
'TOTAL HOR. SOLAR (BTU/HR-SQFT)',
'DIR. NORMAL SOLAR (BTU/HR-SQFT)',
'CLOUD TYPE (0 - 2)',
'WIND SPEED (KNOTS)']
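# Sketch: a single header record can be allocated straight from the structured
# dtype above (field values here are placeholders, not from a real BIN file):
# >>> hdr = np.zeros(1, dtype=DataFormats.header_dtype)
# >>> hdr['location_IWDID'] = b'EXAMPLE CITY'
# >>> hdr['year_IWYR'], hdr['timezone_IWTZN'] = 2021, 7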
class DOE2_Weather_Error(Exception):
def __init__(self,error_message):
self.error_message = error_message
class DOE2Weather(object):
def __init__(self):
self.column_description = DataFormats.column_description
def _rm_file(self,filenamepath):
if isfile(filenamepath):
try:
rm(filenamepath)
except:
raise DOE2_Weather_Error("The operating system will not allow python to remove the " +
filenamepath + " file!")
def bin2txt(self,binfilename,bin2txtpath):
txtfilename = ""
if isfile(bin2txtpath) and isfile(binfilename):
curdir = pwd()
try:
cd(dirname(bin2txtpath))
self._rm_file("WEATHER.FMT")
self._rm_file("WEATHER.BIN")
if not os.path.isabs(binfilename):
binfilename2 = os.path.join(curdir,binfilename)
else:
binfilename2 = binfilename
cp(binfilename2,"WEATHER.BIN")
# no arguments needed
pp = Popen(basename(bin2txtpath),stdout=PIPE, stderr=PIPE, shell=True)
output, errors = pp.communicate()
if not errors == b'':
warnings.warn("WARNING! An error was recorded by Popen.communicate but this does not mean the BIN2TXT did not work. Investigate further to verify it worked.")
txtfilename = join(dirname(binfilename2) , basename(binfilename2).split(".")[0] + ".txt")
cp("WEATHER.FMT",txtfilename)
cd(curdir)
except:
# return to the correct directory
try:
cd(curdir)
raise DOE2_Weather_Error("The bin to text process failed!")
except:
raise DOE2_Weather_Error("The OS will not allow return to the original directory. " + curdir)
else:
if not isfile(bin2txtpath):
raise DOE2_Weather_Error("doe2bin2txt.conver_bin_to_txt: the requested bin2txtpath" +
" executable does not exist! A valid path to the BIN2TXT.EXE" +
" utility must be provided.")
else:
raise DOE2_Weather_Error("doe2bin2txt.conver_bin_to_txt: the requested binfilename" +
" does not exist! A valid path to a valid DOE2 weather binary" +
" file must be provided.")
return txtfilename
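# Hypothetical usage (paths are placeholders; BIN2TXT.EXE must be obtained and
# licensed from JJH&A as noted in the module docstring):
# >>> w = DOE2Weather()
# >>> txtfile = w.bin2txt("my_weather.bin", "C:/DOE2/utilities/BIN2TXT.EXE")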
def txt2bin(self,txtfilename,txt2binpath,binfilename):
if isfile(txt2binpath) and isfile(txtfilename):
curdir = pwd()
try:
change_dir = len(dirname(txt2binpath)) != 0
if change_dir:
cd(dirname(txt2binpath))
self._rm_file("WEATHER.BIN")
if txtfilename != "WEATHER.FMT":
cp(txtfilename,"WEATHER.FMT")
# no arguments needed
p = Popen(basename(txt2binpath),stdout=PIPE, stderr=PIPE, shell=True)
output, errors = p.communicate()
if not errors == b'':
warnings.warn("The process produced an error make sure the process worked!\n\n" + str(errors) + "\n\n" + str(output))
cp("WEATHER.BIN",binfilename)
self._rm_file("WEATHER.BIN")
cd(curdir)
except:
# return to the correct directory
try:
cd(curdir)
except:
raise DOE2_Weather_Error("The OS will not allow return to the original directory.\n\n " + curdir)
else:
if not isfile(txt2binpath):
raise DOE2_Weather_Error("doe2bin2txt.txt2bin: the requested bin2txtpath" +
" executable does not exist! A valid path to the TXT2BIN.EXE" +
" utility must be provided.")
else:
raise DOE2_Weather_Error("doe2bin2txt.txt2bin: the requested txtfilename" +
" does not exist! A valid path to a valid DOE2 weather textfile" +
" file must be provided.")
def df2bin(self,df, binfilename, use_exe=False, start_datetime=None, hour_in_file=None, txt2bin_exepath=None,
location=None,fyear=None,latitude=None,longitude=None,timezone=None,
iwsz=2,iftyp=3,clearness_number=None,ground_temp=None):
"""
df2bin(df, binfilename, start_datetime, hour_in_file, txt2bin_exepath,
location=None,fyear=None,latitude=None,longitude=None,timezone=None,
iwsz=2,iftyp=3,clearness_number=None,ground_temp=None)
Parameters
----------
df : pd.Dataframe: must contain the columns originally read from a
DOE2 BIN weather file format (in the table below)
if use_exe = False, df must also have a 'headers' attribute
(a list of monthly header records with dtype DataFormats.header_dtype)
binfilename : str : path and filename that will be output with the
weather signals in df.
use_exe : bool : optional : Default=False
Set to True if using TXT2BIN is desired
instead of python. Changing use_exe between
reads and writes is not allowed
All other parameters only apply if use_exe = True
start_datetime : datetime :start date and time to be output in the weather file
typically this is Jan 1st, fyear
hour_in_file : int : Either 8760 or 8784
txt2bin_exepath : str : path and filename that point to TXT2BIN.EXE
DOE2 utility that can be obtained from www.doe2.com
after forming a license agreement with <NAME>
and associates.
location .. and all other inputs
Returns
=======
None
%% From TXT2BIN.FMT - this gives the exact format needed to output a
% text file that TXT2BIN.EXE can process.
% THIS DOCUMENTS A FORMATTED WEATHER FILE (WEATHER.FMT) MADE FROM
% A PACKED BINARY DOE2 WEATHER FILE (WEATHER.BIN) USING WTHFMT2.EXE
% AND THE EXTRA FILE NEEDED (not really!) TO PACK IT WITH FMTWTH2.EXE
%
% on input.dat:
%
% Record 1 IWSZ,IFTYP
% FORMAT(12X,I1,17X,I1)
%
% IWSZ WORD SIZE 1 = 60-BIT, 2 = 30-BIT
% IFTYP FILE TYPE 1 = OLD, 2 = NORMAL (NO SOLAR),
% 3 = THE DATA HAS SOLAR
% on weather.fmt:
%
% Record 1 (IWDID(I),I=1,5),IWYR,WLAT,WLONG,IWTZN,IWSOL
% FORMAT(5A4,I5,2F8.2,2I5)
%
% Record 2 (CLN(I),I=1,12)
% FORMAT(12F6.2)
%
% Record 3 (GT(I),I=1,12)
% FORMAT(12F6.1)
%
% Records 4,8763
% KMON, KDAY, KH, WBT, DBT, PATM, CLDAMT, ISNOW,
% IRAIN, IWNDDR, HUMRAT, DENSTY, ENTHAL, SOLRAD,
% DIRSOL, ICLDTY, WNDSPD
% FORMAT(3I2,2F5.0,F6.1,F5.0,2I3,I4,F7.4,F6.3,F6.1,2F7.1,I3,F5.0)
% IWDID LOCATION I.D.
% IWYR YEAR
% WLAT LATITUDE
% WLONG LONGITUDE
% IWTZN TIME ZONE NUMBER
% IWSOL SOLAR FLAG IWSOL = IWSZ + (IFTYP-1)*2 - 1
% CLN CLEARNESS NO.
% GT GROUND TEMP. (DEG R)
% KMON MONTH (1-12)
% KDAY DAY OF MONTH
% KH HOUR OF DAY
% WBT WET BULB TEMP (DEG F)
% DBT DRY BULB TEMP (DEG F)
% PATM PRESSURE (INCHES OF HG)
% CLDAMT CLOUD AMOUNT (0 - 10)
% ISNOW SNOW FLAG (1 = SNOWFALL)
% IRAIN RAIN FLAG (1 = RAINFALL)
% IWNDDR WIND DIRECTION (0 - 15; 0=N, 1=NNE, ETC)
% HUMRAT HUMIDITY RATIO (LB H2O/LB AIR)
% DENSTY DENSITY OF AIR (LB/CU FT)
% ENTHAL SPECIFIC ENTHALPY (BTU/LB)
% SOLRAD TOTAL HOR. SOLAR (BTU/HR-SQFT)
% DIRSOL DIR. NORMAL SOLAR (BTU/HR-SQFT)
% ICLDTY CLOUD TYPE (0 - 2)
% WNDSPD WIND SPEED KNOTS"""
if hasattr(self,'use_exe'):
if self.use_exe != use_exe:
raise ValueError("This class does not support switching between"+
" using Python and using the BIN2TXT.F and TXT2BIN.F"+
" executables!")
else:
self.use_exe = use_exe
if use_exe:
cdir = pwd()
#try:
change_dir = len(dirname(binfilename)) != 0
if change_dir:
cd(dirname(binfilename))
self._rm_file("WEATHER.FMT")
self._rm_file("WEATHER.BIN")
with open("INPUT.DAT",'w') as dat:
dat.write(' {0:1.0f} {1:1.0f}'.format(iwsz,iftyp))
with open("WEATHER.FMT",'w') as fmt:
# 3 header rows
# header_dtype = np.dtype([('location_IWDID','a20'),
# ('year_IWYR', 'i4'),
# ('latitude_WLAT','f4'),
# ('longitude_WLONG','f4'),
# ('timezone_IWTZN','i4'),
# ('record_length_LRECX','i4'),
# ('number_days_NUMDAY','i4'),
# ('clearness_number_CLN_IM1','f4'),
# ('ground_temperature_GT_IM1','f4'),
# ('solar_flag_IWSOL','i4')])
if isinstance(df.headers[0]['location_IWDID'][0],np.bytes_):
location_str = df.headers[0]['location_IWDID'][0].decode('ascii')
else:
location_str = df.headers[0]['location_IWDID'][0]
row1 = '{0:20s}{1:5d}{2:8.2f}{3:8.2f}{4:5d}{5:5d}\n'.format(
location_str,
df.headers[0]['year_IWYR'][0],
df.headers[0]['latitude_WLAT'][0],
df.headers[0]['longitude_WLONG'][0],
df.headers[0]['timezone_IWTZN'][0],
df.headers[0]['solar_flag_IWSOL'][0])
fmt.write(row1)
clearness_number = [head['clearness_number_CLN_IM1'][0] for head in df.headers]
ground_temp = [head['ground_temperature_GT_IM1'][0] for head in df.headers]
fmt.write((12*'{:6.2f}'+'\n').format(*clearness_number))
fmt.write((12*'{:6.2f}'+'\n').format(*ground_temp))
for index, row in df.iterrows():
fmt.write((3*'{:2.0f}'+2*'{:5.0f}' + '{:6.1f}{:5.0f}' + 2*'{:3.0f}' +
'{:4.0f}{:7.4f}{:6.3f}{:6.1f}' + 2*'{:7.1f}'+'{:3.0f}{:5.0f}\n').format(
*row.tolist()))
if isfile(txt2bin_exepath):
if change_dir:
cp(txt2bin_exepath,".")
else:
cp(txt2bin_exepath,".")
new_exe_path = join(".",basename(txt2bin_exepath))
self.txt2bin("WEATHER.FMT",new_exe_path,os.path.basename(binfilename))
else:
cd(cdir)
raise DOE2_Weather_Error("The txt2bin.exe utility is not present at: \n\n" + txt2bin_exepath)
cd(cdir)
else:
# size and type checking
m = df[self.column_description].values
if m.shape[1] != 17:
raise ValueError("This function only handles dataframes with"+
" 17 columns as defined for DOE-2 BIN weather files!")
elif not hasattr(df,'headers'):
raise ValueError("The input dataframe df must have an attribute "+
"'headers' that contains a list of ")
headers = df.headers
DOE2Weather.write_doe2_bin(m, headers, binfilename)
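# Hypothetical round-trip using the pure-Python path (file names are
# placeholders; with use_exe=False no external DOE2 utilities are needed):
# >>> w = DOE2Weather()
# >>> df = w.bin2df("my_weather.bin", use_exe=False)
# >>> w.df2bin(df, "my_weather_copy.bin", use_exe=False)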
def bin2df(self,binfilename, start_datetime=None, hour_in_file=None, bin2txt_exepath=None, timezone=None, dst=None, use_exe=False):
""" This function was originally written in matlab and is the
"ReadDOE2BINTXTFile.m" function except that it also includes conversion
of the BIN file into a text file
dst is a two-element list with the start and end of daylight savings time so
that the time stamps can be adjusted accordingly.
% the input *.txt "filename" must come from a DOE2 bin file that has been
% converted to a text file. It has the following columns of information:
% Column Number Variable Description Units
%C 1 IM2 MONTH (1-12)
%C 2 ID DAY OF MONTH
%C 3 IH HOUR OF DAY
%C 4 CALC(1) WET BULB TEMP (DEG F)
%C 5 CALC(2) DRY BULB TEMP (DEG F)
%C 6 CALC(3) PRESSURE (INCHES OF HG)
%C 7 CALC(4) CLOUD AMOUNT (0 - 10)
%C 8 ISNOW SNOW FLAG (1 = SNOWFALL)
%C 9 IRAIN RAIN FLAG (1 = RAINFALL)
%C 10 IWNDDR WIND DIRECTION (0 - 15; 0=N, 1=NNE, ETC)
%C 11 CALC(8) HUMIDITY RATIO (LB H2O/LB AIR)
%C 12 CALC(9) DENSITY OF AIR (LB/CU FT)
%C 13 CALC(10) SPECIFIC ENTHALPY (BTU/LB)
%C 14 CALC(11) TOTAL HOR. SOLAR (BTU/HR-SQFT)
%C 15 CALC(12) DIR. NORMAL SOLAR (BTU/HR-SQFT)
%C 16 ICLDTY CLOUD TYPE (0 - 2)
%C 17 CALC(14) WIND SPEED KNOTS
"""
if (timezone is None and not dst is None) or (not timezone is None and dst is None):
raise ValueError("The timezone and dst values must be specified together!")
hour_in_day = 24
if hasattr(self,'use_exe'):
if use_exe != self.use_exe:
raise ValueError("This class does not support switching between" +
" using BIN2TXT.EXE and TXT2BIN.EXE and using" +
" Python for translation! None returned as a result!")
else:
# set the mode of operation of the class
self.use_exe = use_exe
if use_exe:
# this is the old way of doing things.
txtname = self.bin2txt(binfilename, bin2txt_exepath)
if len(txtname)==0:
raise DOE2_Weather_Error("bom2df:The bin file was not successfully converted please troubleshoot!")
num = 0
m = zeros((hour_in_file,17))
# this is specific to the conversion utility and how it writes out ASCII.
EntryLength = [0, 2, 2, 2, 5, 5, 6, 5, 3, 3, 4, 7, 6, 6, 7, 7, 3, 5]
j = 0
i = 0
b_lines = []
with open(txtname,'r') as h:
for text_line in h:
if num <= 2:
num += 1 # skip three lines
b_lines.append(text_line)
else:
for mm,nn in zip(cumsum(EntryLength[0:-1]),cumsum(EntryLength[1:])):
m[j,i] = float(text_line[mm:nn])
i+=1
j +=1
i = 0
else:
# use pure python to do this
m, headers = self.read_doe2_bin(binfilename)
# adjust for leap year by repeating February 28th on February 29th.
if hour_in_file == 8784:
# February 28th is the 59th day of the year
# February 29th is the 60th day of a leap year
sid = 59 * 24
# shift all of March1st to December 31 over 24 hours
m[sid+24:] = m[sid:-24]
# repeat February 28th
m[sid:sid+24] = m[sid-24:sid]
# reassign Feb 28th to Feb 29th -
# day of month column
ind = self.column_description.index("DAY OF MONTH")
m[sid:sid+24,ind] = 29
MDAYS = [31,29,31,30,31,30,31,31,30,31,30,31]
else:
MDAYS = [31,28,31,30,31,30,31,31,30,31,30,31]
dateVec = []
reached_hours_to_next = True
start_datetime_was_None = False
get_year_from_headers = False
month = 0 # 0 = Jan
for i in arange(m.shape[0]):
# see whether a replacement year has been provided - if not, use what
# is in the BIN file - if Python is used, the the header of each
# month can have a different year if it is TMY3
if start_datetime is None and use_exe:
start_datetime = datetime(year=int(b_lines[0][20:25]),day=1,month=1)
start_datetime_was_None = True
elif start_datetime is None and reached_hours_to_next and not use_exe:
hour_count = 0
hours_to_next = MDAYS[month]* hour_in_day
year = headers[month]['year_IWYR'][0]
month += 1
start_datetime_was_None = True
get_year_from_headers = True
elif not use_exe and start_datetime is None:
hour_count += 1
if get_year_from_headers:
if hour_count > hours_to_next:
reached_hours_to_next = True
else:
reached_hours_to_next = False
# for cases with a replacement year OR use_exe where the text file
# does not convey the year at every header
if use_exe or not start_datetime_was_None:
current_time = start_datetime+timedelta(hours=float(i))
dateVec.append(datetime(current_time.year,current_time.month,
current_time.day,current_time.hour,0,0))
#
else:
dateVec.append(datetime(year,int(m[i,0]),int(m[i,1]),int(m[i,2])-1,0,0))
if not dst is None:
#Handle daylight savings correctly
if dateVec[-1] == dst[0]:
dateVec[-1] = dateVec[-1] + DateOffset(hours=1)  # 'hours' adds an hour; 'hour' would overwrite it
elif dateVec[-1] == dst[1]:
dateVec[-1] = dateVec[-1] - DateOffset(hours=1)
dateTimeIn = DatetimeIndex(dateVec)
import numpy as np
import statsmodels
import pandas as pd
import statsmodels.formula.api as smf
import statsmodels.stats.api as sms
import sys
import statistics
beta = pd.read_csv('data/beta_organoids.csv')
meta = pd.read_csv('data/meta_organoids.csv')
# prepare passage column for linear modelling
meta.rename(columns={"passage.or.rescope.no": "passage", "sample.type": "sampletype"}, inplace=True)
#df['score_num'] = df['score'].apply(score_to_numeric)
meta['passage'] = meta['passage'].str.replace('P','')
meta['passage'] = meta['passage'].str.replace('RE1.','')
meta['passage'] = pd.to_numeric(meta['passage'])
import random
permstart = int(sys.argv[1])
permend = int(sys.argv[1])+10
CpGnum = beta.shape[0]
pval_all_BP = []
pval_all_diff = []
db_all_diff = []
fdr_all_BP = []
fdr_all_diff = []
for n in range(permstart,permend):
random.seed(n)
## Subsample the cohort among the lower-passage-number samples.
#Pull 5 random samples from each of those with 1,2,3 or 4 passages.
meta_sampled_high_passage = meta[meta['passage'] > 4]
meta_sampled = meta[meta['passage'] <= 4]
meta_sampled_grouped = meta_sampled.groupby('passage')
meta_sampled_subset = []
for name, group in meta_sampled_grouped:
meta_sampled_subset.append(group.sample(5))
meta_sampled_subset = pd.concat([pd.concat(meta_sampled_subset),meta_sampled_high_passage])
## collect a p value for each CpG
beta_sampled = beta[meta_sampled_subset['array.id'].values.tolist()]
CpG_pval_passage_subset = []
CpG_pval_BP_subset = []
CpG_db_passage_subset = []
for cpg in range(0, CpGnum): #beta_sampled.shape[0]
meta_sampled_subset['beta'] = beta_sampled.iloc[cpg,0:42].values.tolist()
meta_sampled_subset['constant'] = 1
reg = smf.ols('beta ~ passage', data=meta_sampled_subset).fit()
# Differential p value is interesting as well
pval_passage = reg.pvalues[1]
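# delta-beta over the modelled passage range: slope * (1 - 16), i.e. the
# predicted change in beta going from passage 16 down to passage 1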
db = (reg.params[1]*1)-(reg.params[1]*16)
pred_val = reg.fittedvalues.copy()
true_val = meta_sampled_subset['beta'].values.copy()
residual = true_val - pred_val
# Breusch-Pagan heteroskedasticity test
_, pval_BP, __, f_pval = statsmodels.stats.diagnostic.het_breuschpagan(residual, meta_sampled_subset[['passage','constant']])
# studentized or not (p vs f) values do match the ones from bptest in R
CpG_pval_BP_subset.append(pval_BP)
CpG_pval_passage_subset.append(pval_passage)
CpG_db_passage_subset.append(db)
pval_all_BP.append(CpG_pval_BP_subset)
pval_all_diff.append(CpG_pval_passage_subset)
db_all_diff.append(CpG_db_passage_subset)
fdr_all_BP.append(statsmodels.stats.multitest.multipletests(CpG_pval_BP_subset, method='fdr_bh', is_sorted=False, returnsorted=False)[1])
fdr_all_diff.append(statsmodels.stats.multitest.multipletests(CpG_pval_passage_subset, method='fdr_bh', is_sorted=False, returnsorted=False)[1])
pval_BP_df = pd.DataFrame(pval_all_BP)
pval_diff_df = pd.DataFrame(pval_all_diff)
db_all_diff = pd.DataFrame(db_all_diff)
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.dummy import DummyClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV, learning_curve
from sklearn.metrics import get_scorer
from sklearn.model_selection import ParameterGrid
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import os
sns.set(style="ticks")
os.chdir("C:/Users/AE250016/Desktop/ACA_DS/Untitled Folder")
titanic = pd.read_csv('train.csv')
titanic = titanic[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']]
class ModelingStage:
@staticmethod
def dict_model(inputs):
""" Handles dictionary of Sk Learn models
Args:
inputs (String or List or Dict):
1) String with required model name
2) List of required models
3) Dictionary with tuples (model() , {Parameter dictionary} )
Returns: Dictionary with tuples (model() , {Parameter dictionary} )
"""
dictionary = {"Trees": (DecisionTreeClassifier(), {'max_depth': np.arange(3, 10)}),
"Logistic": (LogisticRegression(), {'C': [0.001, 0.01, 0.05, 0.1, 10, 100]}),
'K-nearest-neighbour': (KNeighborsClassifier(),
{'n_neighbors': [5, 6, 7, 8, 9],
'metric': ['minkowski', 'euclidean', 'manhattan'],
'weights': ['uniform', 'distance']})}
if inputs:
if isinstance(inputs, dict):
return inputs
elif isinstance(inputs, str):
filtered_dictionary = {inputs: dictionary[inputs]}
return filtered_dictionary
elif isinstance(inputs, list):
filtered_dictionary = {}
for a in inputs:
filtered_dictionary[a] = dictionary[a]
return filtered_dictionary
else:
return dictionary
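# Sketch of typical usage (keys must match the dictionary defined above):
# >>> models = ModelingStage.dict_model("Logistic")
# >>> estimator, param_grid = models["Logistic"]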
def plot_learning_curve(self, loading_eda, scores='neg_log_loss'):
"""
Args:
loading_eda (LoadingEDA): object of the LoadingEDA class
scores (String): Type of scoring
Returns:
Plot with learning curves
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
plt.figure()
plt.title('title')
plt.xlabel("Training examples")
plt.ylabel(scores)
model = self.best_model.fit(loading_eda.X_train, loading_eda.y_train)
train_sizes, train_scores, test_scores = learning_curve(model, loading_eda.X_train, loading_eda.y_train,
cv=5, scoring=scores)
train_scores_mean = np.mean(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
plt.grid()
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
@staticmethod
def dummy_model(x_train, y_train, x_test, y_test, dummy_strategy):
""" calculates accuracy score of the dummy model on test data
Args:
x_train(numpy array): for training data
y_train(numpy array): for training target labels
x_test(numpy array): for testing data
y_test(numpy array): for testing target labels
dummy_strategy (String): type of dummy model to use
Returns: accuracy score of the dummy model on test data
"""
dummy_model = DummyClassifier(strategy=dummy_strategy).fit(x_train, y_train)
y_dummy = dummy_model.predict(x_test)
return accuracy_score(y_test, y_dummy)
@staticmethod
def modeling_stage_k_folds(model_dictionary, x_train, y_train, k_folds, performance_metric):
""" Choosing the best model applying cross_fold validation
Args:
model_dictionary: Dictionary with tuples( model(), {Parameter dictionary})
x_train(numpy array): for training data
y_train(numpy array): for training target labels
k_folds (int): Number of cross folds
performance_metric (String): Metric to be used
Returns:
            model_dicts (dict): Dictionary with best accuracy per model as key, and the model as value
cross_val_results (pd.Dataframe): Results of each run of validation
best_model (Sklearn.Model): model object with best model
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
cross_val_results =
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: -all
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Experiment KPI Analysis
# %% [markdown]
# ## Table of Contents
#
# * [Process KPIs](#Process-KPIs)
# * [Sensitivity Analysis](#Sensitivity-Analysis)
# * [Control Parameter Analysis and Selection](#Control-Parameter-Analysis-and-Selection)
# %%
import pandas as pd
from pandarallel import pandarallel
pandarallel.initialize(progress_bar=False)
# %%
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# import plotly.io as pio
#pio.renderers.default = "png"
from pprint import pprint
# %%
# Update dataframe display settings
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 50)
pd.options.plotting.backend = "plotly"
# %% [markdown]
# # Load Dataset
# %%
import os
os.chdir('../../../')
os.getcwd()
# %%
from experiments.system_model_v3.post_process import post_process_results
from experiments.system_model_v3.experiment_monte_carlo import SIMULATION_TIMESTEPS, params
from radcad.core import generate_parameter_sweep
# %%
processed_results = 'experiments/system_model_v3/experiment_monte_carlo/experiment_results.hdf5'
# %%
experiment_results_keys = []
with pd.HDFStore(processed_results) as store:
experiment_results_keys = list(filter(lambda x: "results" in x, store.keys()))
exceptions_keys = list(filter(lambda x: "exceptions" in x, store.keys()))
experiment_results_keys
# %%
df_raw = pd.read_hdf(processed_results, key=experiment_results_keys[-1])
# %%
df = post_process_results(df_raw, params, set_params=['kp', 'ki', 'alpha', 'liquidation_ratio', 'controller_enabled', 'control_period', 'liquidity_demand_shock'])
# %%
#df = pd.read_hdf(processed_results, key='results')
#df
# %%
df = df[df['timestep'] >= 1]
# %% [markdown]
# # Process KPIs
# %%
df_kpis = df.copy()
# %%
# Rescale target price according to liquidation ratio, if rescale_target_price set
#cols = ['timestep', 'target_price', 'liquidation_ratio', 'rescale_target_price']
#f = lambda x: (x['target_price'] * x['liquidation_ratio']) if x['rescale_target_price'] and x['timestep'] > 0 else x['target_price']
#df_kpis['target_price_scaled'] = df_kpis[cols].parallel_apply(f, axis=1)
#df_kpis['target_price_scaled'].head(10)
# %% [markdown]
# ## Stability
# %% [markdown]
# **Stability** threshold of the system: defined as the maximum allowed relative frequency of unstable simulation runs. A run is counted as unstable when (see the sketch after the list):
# - market price runs to infinity/zero (e.g. upper bound 10xPI; lower bound 0.10xPI if initial price is PI);
# - redemption price runs to infinity/zero (e.g. upper bound 10xPI; lower bound 0.10xPI if initial price is PI);
# - Uniswap liquidity (RAI reserve) runs to zero;
# - CDP position (total ETH collateral) runs to infinity/zero.
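#
# As an informal sketch (exact thresholds are taken from the decile statistics computed below), a run is
# counted as stable when $0.1 \cdot p_0 \leq p_{market} \leq 10 \cdot p_0$ and
# $0.1 \cdot p_0 \leq p_{redemption} \leq 10 \cdot p_0$, the Uniswap RAI reserve stays above its lower
# bound, and the total ETH collateral stays strictly positive, where $p_0$ is the initial target price.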
# %%
# Get initial target price for calculations
initial_target_price = df_kpis['target_price'].iloc[0]
initial_target_price
# %%
# Get decile stats. for system states, to set KPI thresholds
df_kpis[['market_price', 'target_price', 'RAI_balance', 'eth_collateral']].describe([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.90])
# %%
df_stability = df_kpis.groupby(['subset'])
# Calculate aggregate values for each subset
df_stability = df_stability.agg({
'market_price': ['min', 'max'],
'target_price': ['min', 'max'],
'RAI_balance': ['min', 'max'],
'eth_collateral': ['min', 'max'],
})
df_stability.columns = [
'market_price_min', 'market_price_max',
'target_price_min', 'target_price_max',
'RAI_balance_min', 'RAI_balance_max',
'eth_collateral_min', 'eth_collateral_max'
]
df_stability = df_stability.reset_index()
# Set stability KPI for market price
df_stability['stability_market_price'] = df_stability \
.apply(lambda x: x['market_price_min'] >= 0.1*initial_target_price and x['market_price_max'] <= 10*initial_target_price, axis=1)
# Set stability KPI for target price
df_stability['stability_target_price'] = df_stability \
.apply(lambda x: x['target_price_min'] >= 0.1*initial_target_price and x['target_price_max'] <= 10*initial_target_price, axis=1)
# Set stability KPI for Uniswap RAI balance
# NOTE: threshold set according to decile stats.
df_stability['stability_uniswap_liquidity'] = df_stability \
.apply(lambda x: x['RAI_balance_min'] >= 50e3, axis=1)
# Set stability KPI for CDP ETH collateral
# NOTE: threshold set according to decile stats.
df_stability['stability_cdp_system'] = df_stability \
.apply(lambda x: x['eth_collateral_min'] > 0, axis=1)
# Calculate aggregate stability KPI
df_stability['kpi_stability'] = df_stability \
.apply(lambda x: ( \
x.stability_cdp_system == True and \
x.stability_uniswap_liquidity == True and \
x.stability_market_price == True and \
x.stability_target_price == True) \
, axis=1)
# Get all subsets where stability KPI is met
df_stability.query('kpi_stability == True')
# %%
df_stability.query('kpi_stability == False')
# %%
df_stability.columns
# %% [markdown]
# ## Volatility
# %% [markdown]
# **Volatility** threshold of the market price: defined as the maximum allowed value of the computed **standard deviation**, measured relative to ETH price volatility. Concretely, the ratio of RAI price volatility to ETH price volatility must not exceed 0.5 (see the formula after the list), evaluated:
# - over simulation period;
# - as moving average with 10-day window.
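#
# As a formula (restating the definition above, where $\sigma$ is the standard deviation of the respective
# price series over the simulation period or the moving window):
#
# $\text{volatility ratio} = \sigma_{RAI} / \sigma_{ETH} \leq 0.5$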
# %%
df_volatility_grouped = df_kpis.groupby(['subset'])
# Calculate aggregate values for each subset
df_volatility_grouped = df_volatility_grouped.agg({'market_price': ['std'], 'eth_price': ['std']})
df_volatility_grouped.columns = ['market_price_std', 'eth_price_std']
df_volatility_grouped = df_volatility_grouped.reset_index()
# Set volatility ratio for each subset
df_volatility_grouped['volatility_ratio_simulation'] = \
df_volatility_grouped[['subset', 'market_price_std', 'eth_price_std']] \
.apply(lambda x: x['market_price_std'] / x['eth_price_std'], axis=1)
# Calculate per subset volatility KPI based on ratio
df_volatility_grouped['kpi_volatility_simulation'] = df_volatility_grouped.apply(lambda x: x['volatility_ratio_simulation'] <= 0.5, axis=1)
# Get all subsets where subset volatility KPI is met
df_volatility_grouped.query('kpi_volatility_simulation == True')
# %%
df_volatility_series = pd.DataFrame()
group = df_kpis.groupby(['subset', 'run'])
# Calculate rolling average standard deviation for each subset/run combination
df_volatility_series['market_price_moving_average_std'] = group['market_price'].rolling(24*10, 1).std()
df_volatility_series['eth_price_moving_average_std'] = group['eth_price'].rolling(24*10, 1).std()
df_volatility_series
# %%
# Calculate volatility ratio for each subset/run combination
f = lambda x: x['market_price_moving_average_std'] / x['eth_price_moving_average_std']
df_volatility_series['volatility_ratio_window'] = df_volatility_series.parallel_apply(f, axis=1)
df_volatility_series.head(5)
# %%
# Flag, for each timestep, whether the window volatility ratio is NaN (x != x) or within the 0.5 threshold,
# then group by subset and take the mean of the flags as the per-subset window volatility
f = lambda x: x['volatility_ratio_window'] != x['volatility_ratio_window'] or x['volatility_ratio_window'] <= 0.5
df_volatility_series['volatility_window_series'] = df_volatility_series.parallel_apply(f, axis=1)
df_volatility_series['volatility_window_mean'] = (df_volatility_series.groupby(['subset'])
['volatility_window_series']
.transform(lambda x: x.mean()))
df_volatility_series.head(5)
# %%
# Check volatility stats
df_volatility_series['volatility_window_mean'].describe()
# %%
# Set volatility KPI threshold based on volatility stats
df_volatility_series['kpi_volatility_window'] = df_volatility_series.groupby(['subset'])['volatility_window_mean'].transform(lambda x: x > 0.98)
df_volatility_series
# %%
# Get all subsets where window volatility KPI is met
df_volatility_series.query('kpi_volatility_window == True')
# %%
# Count window volatility KPI values (True/False)
df_volatility_series['kpi_volatility_window'].value_counts()
# %% [markdown]
# ## Merge KPI dataframes
# %%
# Select columns to drop from final dataset
cols_to_drop = {
'volatility_window_series',
'market_price_moving_average_std',
'eth_price_moving_average_std',
'index'
}
# Select column to groupby
index_cols = ['subset']
# Set dataframes to join
dfs_to_join = [df_volatility_grouped, df_volatility_series, df_stability]
# Join dataframes, dropping columns and re-setting the index
for i, df_to_join in enumerate(dfs_to_join):
_df = df_to_join.reset_index()
remaining_cols = list(set(_df.columns) - cols_to_drop)
_df = (_df.reset_index()
.loc[:, remaining_cols]
.groupby(index_cols)
.first()
)
dfs_to_join[i] = _df
df_kpis = (dfs_to_join[0].join(dfs_to_join[1], how='inner')
.join(dfs_to_join[2], how='inner')
)
# %%
# Calculate volatility KPI
df_kpis['kpi_volatility'] = df_kpis.apply(lambda x: x['kpi_volatility_simulation'] and x['kpi_volatility_window'], axis=1)
# %%
# Get all subsets where volatility KPI is not met
df_kpis.query('kpi_volatility == False and kpi_stability == False')
# %% [markdown]
# ## Liquidity
# %% [markdown]
# **Liquidity** threshold of secondary market: defined as the maximum slippage value below which the controller is allowed to operate.
# * __NB__: The threshold value will be determined by experimental outcomes, e.g. the sample mean of the Monte Carlo slippage values at the point where the system becomes unstable. Ideally the variance/standard deviation of the Monte Carlo slippage series is small (a tight estimate), but both the mean and the variance can be reported as part of the recommendations (see the sketch in the next bullet).
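# * As a rough sketch of the estimator used in the cells below: for every subset that fails the stability
#   KPI, take the 90th-percentile market slippage, and use the mean of those percentiles across the failed
#   subsets as the critical liquidity threshold.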
# %%
# Placeholder value, which must be determined
critical_liquidity_threshold = None
# %%
# To calculate liquidity threshold, create a copy of the timeseries dataframe with market slippage, and merge with KPI dataframe
df_liquidity = df[['subset', 'run', 'timestep', 'market_slippage']].copy()
df_liquidity = pd.merge(df_liquidity, df_kpis, how='inner', on=['subset', 'run'])
# Take the absolute value of market slippage (swap direction in or out of the liquidity pool)
df_liquidity['market_slippage_abs'] = df_liquidity['market_slippage'].transform(lambda x: abs(x))
df_liquidity
# %%
# Check absolute value market slippage decile stats
df_liquidity.query('subset == 0')['market_slippage_abs'].describe([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.90])
# %%
# For each subset, calculate the 90th percentile market slippage
df_liquidity['market_slippage_percentile'] = df_liquidity.groupby(['subset'])['market_slippage'].transform(lambda x: x.quantile(.90))
df_liquidity
# %%
# %%capture
# Select all subsets that failed the volatility and stability KPIs
# NOTE: updated to just use stability KPI and not volatility KPI to not over-tune
df_liquidity_failed = df_liquidity.query('kpi_stability == False')
df_liquidity_failed['market_slippage_percentile_mean'] = df_liquidity_failed.groupby(['subset'])['market_slippage_percentile'].transform(lambda x: x.mean())
# %%
# Get the mean liquidity threshold of all failed runs
critical_liquidity_threshold = df_liquidity_failed['market_slippage_percentile_mean'].mean()
critical_liquidity_threshold
# %%
# Calculate liquidity KPI based on critical liquidity threshold found above
df_liquidity_grouped = df_liquidity.groupby(['subset']).mean()
df_liquidity_grouped = df_liquidity_grouped.reset_index()
df_liquidity_grouped['kpi_liquidity'] = df_liquidity_grouped.apply(lambda x: x['market_slippage_percentile'] <= critical_liquidity_threshold, axis=1)
df_liquidity_grouped
# %%
# Save interim results
df_liquidity_grouped.to_pickle('experiments/system_model_v3/experiment_monte_carlo/df_liquidity_grouped.pickle')
# %%
# Aggregate KPIs per subset
df_kpis = df_liquidity_grouped[['subset', 'run', 'kpi_stability', 'kpi_volatility', 'kpi_liquidity']]
df_kpis = df_kpis.groupby(['subset']).first()
# %%
print(f'''
{round(df_kpis.query('kpi_stability == True and kpi_volatility == True and kpi_liquidity == True').count().iloc[0]*100/df_kpis.count().iloc[0])}% successful KPIs
''')
# %%
# %% [markdown]
# ## Save KPI Results
# %%
# Save interim results
df_kpis.to_pickle('experiments/system_model_v3/experiment_monte_carlo/df_kpis.pickle')
# %%
# Load interim results
df_kpis = pd.read_pickle('experiments/system_model_v3/experiment_monte_carlo/df_kpis.pickle')
# %% [markdown]
# # Sensitivity Analysis
# %%
# Merge KPI dataframe with timeseries dataframe, and grouped liquidity dataframe,
# including volatility_ratio_simulation and volatility_ratio_window for sensitivity analysis later
df_sensitivity = pd.merge(df, df_kpis, on=['run','subset'], how='inner')
df_sensitivity = pd.merge(df_sensitivity, df_liquidity_grouped[[
'run',
'subset',
'volatility_ratio_simulation',
'volatility_ratio_window'
]], on=['run','subset'], how='inner')
df_sensitivity.head(1)
# %%
df_sensitivity = df_sensitivity.reset_index()
# %%
# Save interim results
df_sensitivity.to_pickle('experiments/system_model_v3/experiment_monte_carlo/df_sensitivity.pickle')
# %%
# Set control parameters for sensitivity analysis
control_params = [
'ki',
'kp',
'control_period',
]
# %%
from cadcad_machine_search.visualizations import kpi_sensitivity_plot
goals = {
'low_volatility' : lambda metrics: metrics['kpi_volatility'].mean(),
'high_stability' : lambda metrics: metrics['kpi_stability'].mean(),
'liquidity_threshold': lambda metrics: metrics['kpi_liquidity'].mean(),
}
# TODO: configure visualizations for analysis
for scenario in df_sensitivity['controller_enabled'].unique():
_df = df_sensitivity.query(f'controller_enabled == {scenario}')
for goal in goals:
kpi_sensitivity_plot(_df, goals[goal], control_params)
# %%
enabled = df_sensitivity.query(f'controller_enabled == True')
for goal in goals:
kpi_sensitivity_plot(enabled, goals[goal], control_params)
# %%
for scenario in df_sensitivity['liquidity_demand_shock'].unique():
_df = df_sensitivity.query(f'liquidity_demand_shock == {scenario}')
for goal in goals:
kpi_sensitivity_plot(_df, goals[goal], control_params)
# %%
critical_liquidity_threshold
# %%
from cadcad_machine_search.visualizations import plot_goal_ternary
# danlessa notes:
# 1. Make sure that the KPIs are well behaved, e.g. no NaNs
# 2. Be aware of division by zero on the goals
# 3. KPIs must transform a (N_s x N_t) array into a (N_s, 1) vector
# - N_s: number of 'experiments' (N_sweeps + N_mc)
kpis = {
'volatility_simulation' : lambda df: df['volatility_ratio_simulation'].max(),
'volatility_window_mean' : lambda df: df['volatility_ratio_window'].mean(),
'market_price_max' : lambda df: df['market_price'].max(),
'market_price_min' : lambda df: df['market_price'].min(),
'redemption_price_max' : lambda df: df['target_price'].max(),
'redemption_price_min' : lambda df: df['target_price'].min(),
'rai_balance_uniswap_min' : lambda df: df['RAI_balance'].min(),
'cdp_collateral_balance_min' : lambda df: df['eth_collateral'].min(),
    #'price_change_percentile_mean' : lambda df: critical_liquidity_threshold # This is ill-conditioned! df -> float
}
from typing import Callable, Dict, List
def low_volatility_goal(metrics: Dict[str, List[float]]) -> float:
utility = metrics['volatility_simulation']
#utility += metrics['price_change_percentile_mean']
utility *= -1.0
return utility
def high_stability_goal(metrics: Dict[str, List[float]]) -> float:
# Note: I've changed the 1/x terms to -1 terms due to div by zero problems
utility = metrics['market_price_max']
utility -= metrics['market_price_min']
utility -= metrics['redemption_price_max']
utility -= metrics['redemption_price_min']
utility -= metrics['rai_balance_uniswap_min']
utility -= metrics['cdp_collateral_balance_min']
utility *= -1.0
return utility
def liquidity_goal(metrics: Dict[str, List[float]]) -> float:
    # danlessa note: price_change_percentile_mean seems to be ill-conditioned
# utility = -metrics['price_change_percentile_mean']
utility = -metrics['market_price_min'] # Just for debugging
return utility
goals = {
'low_volatility': low_volatility_goal,
'high_stability': high_stability_goal,
'liquidity': liquidity_goal,
'combined': lambda goals: goals[0] + goals[1] + goals[2]
}
for scenario in df_sensitivity['controller_enabled'].unique():
print("---")
print(scenario)
_df = df_sensitivity.query(f'controller_enabled == {scenario}')
plot_goal_ternary(_df, kpis, goals, control_params)
print("---")
# %% [markdown]
# # Control Parameter Analysis and Selection
# %%
df_liquidity_grouped =
|
pd.read_pickle('experiments/system_model_v3/experiment_monte_carlo/df_liquidity_grouped.pickle')
|
pandas.read_pickle
|
## code written by <NAME> and <NAME>
import pandas as pd
import numpy as np
import gzip
import os
import random
import math
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Reshape, LocallyConnected1D
from tensorflow.keras.models import Model
from tensorflow.keras import optimizers
from sklearn.preprocessing import normalize
from sklearn.model_selection import train_test_split
from scipy.stats import spearmanr
from datetime import datetime
import matplotlib
matplotlib.use("Agg") # use a non-interactive backend
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
def plot_loss(m1, start, file_name, df_filename):
# plot the training loss and accuracy
end = len(m1.history['loss'])
N = np.arange(start, end)
s = slice(start,end)
#
plt.style.use("ggplot")
plt.figure(figsize=(5, 4))
#
plt.plot(N, (m1.history["loss"][s]), label="train_loss")
plt.plot(N, (m1.history["val_loss"][s]), label="val_loss")
#
plt.title("Training Loss")
plt.xlabel("Epoch #")
plt.ylabel("Loss")
plt.legend()
plt.savefig(file_name)
plt.clf()
#
epoch_idx = list(N)
train_loss = m1.history["loss"][s]
valid_loss = m1.history["val_loss"][s]
df_loss = pd.DataFrame(list(zip(epoch_idx, train_loss, valid_loss)),
columns = ['epoch_idx', 'train_loss', 'valid_loss'])
df_loss.to_csv(df_filename, index = False)
# regular autoencoder
def get_regular_AE(input_shape1, latent_size = 20, hidden_sizes = [71]):
tf.keras.backend.clear_session()
input_e = Input(shape=(input_shape1,))
encoded = Dense(hidden_sizes[0], activation='relu')(input_e)
encoded = Dense(latent_size, activation='relu')(encoded)
decoded = Dense(hidden_sizes[0], activation='relu')(encoded)
decoded = Dense(input_shape1, activation='sigmoid')(decoded)
#
AE = Model(input_e, decoded)
return AE
# an autoencoder using locally connected layer
def get_lc_AE(input_shape1, latent_size = 20, n_filter = 1, kernel_size = 100,\
strides = 20, hidden_sizes = [71]):
tf.keras.backend.clear_session()
input_e = Input(shape=(input_shape1,))
rslayer = Reshape((input_shape1, 1), \
input_shape = (input_shape1, ))(input_e)
encoded = LocallyConnected1D(filters = n_filter, kernel_size = kernel_size, \
strides = strides, \
activation='relu')(rslayer)
rslayer = Reshape((hidden_sizes[0],), \
input_shape = (hidden_sizes[0], 1))(encoded)
encoded = Dense(latent_size, activation='relu')(rslayer)
decoded = Dense(hidden_sizes[0], activation='relu')(encoded)
decoded = Dense(input_shape1, activation='sigmoid')(decoded)
#
lc_AE = Model(input_e, decoded)
return lc_AE
# model_type = "lc_AE"
# split_method = "sklearn"
# number_of_epochs = 500
# batch_size = 32
# learning_rate = 1e-3
# testing_fraction = 0.2
def main(model_type = "lc_AE", split_method = "sklearn",
number_of_epochs = 500, batch_size = 32,
learning_rate = 1e-3, testing_fraction = 0.2):
data_file = "../data/cts_all_but_Micro_Endo_ordered_by_annotation.txt.gz"
data = pd.read_csv(data_file, compression='gzip', sep=",", header=0)
data.shape
data.iloc[:6, :5]
gene_names = data["gene_name"]
len(gene_names)
col_names = list(data.columns)[1:]
len(col_names)
values = data.iloc[0: , 1:]
data_array = values.to_numpy()
data_array[:7, :5]
    # this is an arbitrary choice to use the top 1500 genes
    # to make it easier to generate the autoencoder
    # so 19 genes are skipped
data_array = data_array[:1500, ]
data_array = data_array.T
# transform gene expression
depth = np.sum(data_array,1)
depth.shape
pd.Series(depth).describe()
data_normalized = (data_array.T/depth).T
print('data after normalizing by depth:')
data_normalized.shape
print('summation by sample:')
pd.Series(np.sum(data_normalized,1)).describe()
print('')
data_normalized = normalize(data_normalized, norm='max', axis=0)
print('check data_normalized')
print(data_normalized.shape)
print(data_normalized[0:7,0:5])
print('summary of maximum per gene')
print(pd.Series(np.amax(data_normalized, 0)).describe())
print('summary of maximum per sample')
print(pd.Series(np.amax(data_normalized, 1)).describe())
train_size = math.floor((1 - testing_fraction) * data_normalized.shape[0])
if split_method == "manual":
random.seed(1243)
index_list = list(range(data_normalized.shape[0]))
random.shuffle(index_list)
#
trainX = data_normalized[index_list[:train_size], ]
testX = data_normalized[index_list[train_size:], ]
trainX.shape
testX.shape
#
train_cellname = [col_names[i] for i in index_list[:train_size]]
test_cellname = [col_names[i] for i in index_list[train_size:]]
else:
# sklearn style data splitting
data_label = np.array(col_names)
trainX, testX = train_test_split(data_normalized,
test_size=testing_fraction, random_state=1999)
train_cellname, test_cellname = train_test_split(data_label,
test_size=testing_fraction, random_state=1999)
print('training and testing data dimension:')
print(trainX.shape)
print(testX.shape)
print('testX[0:2,0:9]:')
print(testX[0:2,0:9])
# get model
if model_type == "AE":
cur_model = get_regular_AE(input_shape1 = trainX.shape[1])
else:
cur_model = get_lc_AE(input_shape1 = trainX.shape[1])
cur_model.summary()
adam1 = optimizers.Adam(lr=learning_rate)
cur_model.compile(optimizer=adam1, loss='mean_absolute_error')
# model training
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print("Current Time =", current_time)
m1 = cur_model.fit(trainX, trainX,
epochs=number_of_epochs,
batch_size=batch_size,
verbose=0,
validation_data=(testX, testX))
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print("Current Time =", current_time) # 29 minutes
# write out plots for loss
plot_path = os.path.join('../auto_encoder_plots',
model_type + '_' + split_method + '_track_loss.png')
df_path = os.path.join('../auto_encoder_plots',
model_type + '_' + split_method + '_loss.csv')
plot_loss(m1, 1, plot_path, df_path)
# write out test correlation
test_pred = cur_model.predict(testX)
#rho_train = np.zeros(trainX.shape[1])
rho_test = np.zeros(testX.shape[1])
for i in range(0,testX.shape[1]):
#rho, pval = spearmanr(trainX[:,i], train_pred[:,i])
#rho_train[i] = rho
rho, pval = spearmanr(testX[:,i], test_pred[:,i])
rho_test[i] = rho
print('spearman gene-by-gene correlation for testing data:')
print(
|
pd.Series(rho_test)
|
pandas.Series
|
import argparse
import numpy as np
import os
import pandas as pd
import cv2
from matplotlib import pyplot as plt
from tqdm import tqdm
import random
import gc
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SequentialSampler, RandomSampler
from apex import amp
from models import get_effdet
from warmup_scheduler import GradualWarmupScheduler
from dataset import WheatDataset
## uncomment to train with more workers
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (8192, rlimit[1]))
import warnings
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--backbone", default="ed7", type=str, choices=['ed0', 'ed1', 'ed2', 'ed3', 'ed4', 'ed5', 'ed6', 'ed7'])
parser.add_argument("--img-size", default=768, type=int)
parser.add_argument("--batch-size", default=8, type=int)
parser.add_argument("--workers", default=16, type=int)
parser.add_argument("--warm-epochs", default=20, type=int)
parser.add_argument("--epochs", default=100, type=int)
parser.add_argument("--patience", default=40, type=int)
parser.add_argument("--folds", nargs="+", type=int)
parser.add_argument("--init_lr", default=5e-4, type=float)
parser.add_argument("--warmup-factor", default=10, type=int)
parser.add_argument("--use-amp", default=True, type=lambda x: (str(x).lower() == "true"))
args = parser.parse_args()
print(args)
torch.backends.cudnn.benchmark = True
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def collate_fn(batch):
return tuple(zip(*batch))
if __name__ == "__main__":
os.makedirs('checkpoints', exist_ok = True)
os.makedirs('logs', exist_ok = True)
df = pd.read_csv('dataset/trainset.csv')
wheat2017_df = pd.read_csv('dataset/wheat2017.csv')
wheat2017_df = wheat2017_df[['image_id','fold','xmin','ymin','xmax','ymax','isbox','source']].reset_index(drop=True)
spike_df = pd.read_csv('dataset/spike-wheat.csv')
spike_df = spike_df[['image_id','fold','xmin','ymin','xmax','ymax','isbox','source']].reset_index(drop=True)
for fold in args.folds:
valid_df = df.loc[df['fold'] == fold]
train_df = df.loc[~df.index.isin(valid_df.index)]
valid_df = valid_df.loc[valid_df['isbox']==True].reset_index(drop=True)
warm_df =
|
pd.concat([train_df, wheat2017_df, spike_df], ignore_index=True)
|
pandas.concat
|
import logging
import re
from typing import List
import pandas as pd
from pandas import DataFrame, Series
from autogluon.core.features.types import S_TEXT, S_TEXT_SPECIAL
from .abstract import AbstractFeatureGenerator
from .binned import BinnedFeatureGenerator
logger = logging.getLogger(__name__)
class TextSpecialFeatureGenerator(AbstractFeatureGenerator):
"""
    TextSpecialFeatureGenerator generates text-specific features from incoming raw text features.
These include word counts, character counts, symbol counts, capital letter ratios, and much more.
Features generated by this generator will have 'text_special' as a special type.
Parameters
----------
symbols : List[str], optional
List of string symbols to compute counts and ratios for as features.
If not specified, defaults to ['!', '?', '@', '%', '$', '*', '&', '#', '^', '.', ':', ' ', '/', ';', '-', '=']
bin_features : bool, default True
If True, adds a BinnedFeatureGenerator to the front of post_generators such that all features generated from this generator are then binned.
This is useful for 'text_special' features because it lowers the chance models will overfit on the features and reduces their memory usage.
post_drop_duplicates : bool, default True
Identical to AbstractFeatureGenerator's post_drop_duplicates, except it is defaulted to True instead of False.
This helps to clean the output of this generator when symbols aren't present in the data.
**kwargs :
Refer to AbstractFeatureGenerator documentation for details on valid key word arguments.
"""
def __init__(self, symbols: List[str] = None, bin_features: bool = True, post_drop_duplicates: bool = True, **kwargs):
super().__init__(post_drop_duplicates=post_drop_duplicates, **kwargs)
if symbols is None:
symbols = ['!', '?', '@', '%', '$', '*', '&', '#', '^', '.', ':', ' ', '/', ';', '-', '=']
self._symbols = symbols # Symbols to generate count and ratio features for.
if bin_features:
self._post_generators = [BinnedFeatureGenerator(inplace=True)] + self._post_generators
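    # A minimal, hypothetical usage sketch (the column name 'description' is an assumption; fit_transform
    # follows the AbstractFeatureGenerator interface):
    #   generator = TextSpecialFeatureGenerator(symbols=['!', '?'])
    #   X_out = generator.fit_transform(X=df[['description']])  # binned 'text_special' features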
def _fit_transform(self, X: DataFrame, **kwargs) -> (DataFrame, dict):
X_out = self._transform(X)
type_family_groups_special = {
S_TEXT_SPECIAL: list(X_out.columns)
}
return X_out, type_family_groups_special
def _transform(self, X: DataFrame) -> DataFrame:
return self._generate_features_text_special(X)
@staticmethod
def get_default_infer_features_in_args() -> dict:
return dict(required_special_types=[S_TEXT])
def _generate_features_text_special(self, X: DataFrame) -> DataFrame:
if self.features_in:
X_text_special_combined = []
for nlp_feature in self.features_in:
df_text_special = self._generate_text_special(X[nlp_feature], nlp_feature)
X_text_special_combined.append(df_text_special)
X_text_special_combined = pd.concat(X_text_special_combined, axis=1)
else:
X_text_special_combined = pd.DataFrame(index=X.index)
return X_text_special_combined
def _generate_text_special(self, X: Series, feature: str) -> DataFrame:
X_text_special: DataFrame =
|
DataFrame(index=X.index)
|
pandas.DataFrame
|
"""
A solver based on the elaboration of a flow graph to quantify interfaces, connected by flow or scale relationships.
It is assumed that other kinds of relationship (part-of, upscale, ...) are translated into these two basic ones.
Another type of relationship considered is linear transform from InterfaceType to InterfaceType, which is cascaded into
appearances of its instances.
Before the elaboration of flow graphs, several preparatory steps:
* Find the separate contexts. Each context is formed by the "local", "environment" and "external" sets of processors,
and is -totally- isolated from other contexts
    - Context defining attributes are defined in the Problem Statement command. If not defined, a "context" attribute in
Processors would be assumed
If nothing is found, all Processors are assumed to be under the same context (what will happen??!!)
- Elaborate (add necessary entities) the "environment", "external" top level processors if none have been specified.
- Opposite processor can be specified when defining Interface
- This attribute is taken into account if NO relationship originates or ends in this Interface. Then, a default
relationship would be created
- If Processors are defined for environment or
* Unexecuted model parts
- Connection of Interfaces
- Dataset expansion
* [Datasets]
* Scenarios
- Parameters
* Time. Classify QQs by time, on storage
* Observers (different versions). Take average always
"""
import traceback
from collections import defaultdict
from copy import deepcopy
from enum import Enum
from typing import Dict, List, Set, Any, Tuple, Union, Optional, NamedTuple, Generator, NoReturn, Sequence
import lxml
import networkx as nx
import pandas as pd
from lxml import etree
from nexinfosys import case_sensitive
from nexinfosys.command_field_definitions import orientations
from nexinfosys.command_generators import Issue, global_functions_extended
from nexinfosys.command_generators.parser_ast_evaluators import ast_evaluator, obtain_subset_of_processors, \
get_adapted_case_dataframe_filter
from nexinfosys.command_generators.parser_field_parsers import string_to_ast, expression_with_parameters, is_year, \
is_month, indicator_expression, parse_string_as_simple_ident_list, number_interval
from nexinfosys.common.constants import SubsystemType, Scope
from nexinfosys.common.helper import create_dictionary, PartialRetrievalDictionary, ifnull, istr, strcmp, \
FloatExp, precedes_in_list, replace_string_from_dictionary, brackets, get_interfaces_and_weights_from_expression
from nexinfosys.ie_exports.xml_export import export_model_to_xml
from nexinfosys.model_services import get_case_study_registry_objects, State
from nexinfosys.models import CodeImmutable
from nexinfosys.models.musiasem_concepts import ProblemStatement, Parameter, FactorsRelationDirectedFlowObservation, \
FactorsRelationScaleObservation, Processor, FactorQuantitativeObservation, Factor, \
ProcessorsRelationPartOfObservation, FactorType, Indicator, MatrixIndicator, IndicatorCategories, Benchmark
from nexinfosys.models.musiasem_concepts_helper import find_quantitative_observations
from nexinfosys.models.statistical_datasets import Dataset, Dimension, CodeList
from nexinfosys.solving.graph.computation_graph import ComputationGraph
from nexinfosys.solving.graph.flow_graph import FlowGraph, IType
class SolvingException(Exception):
pass
class Computed(Enum):
No = 1
Yes = 2
class ComputationSource(Enum):
Flow = 1
Scale = 2
ScaleChange = 3
PartOfAggregation = 4
InterfaceTypeAggregation = 5
def is_aggregation(self) -> bool:
return self in (self.PartOfAggregation, self.InterfaceTypeAggregation)
class FloatComputedTuple(NamedTuple):
value: FloatExp
computed: Computed
observer: str = None
computation_source: ComputationSource = None
class ConflictResolution(Enum):
No = 1
Taken = 2
Dismissed = 3
class AggregationConflictResolutionPolicy(Enum):
TakeUpper = 1
TakeLowerAggregation = 2
@staticmethod
def get_key():
return "NISSolverAggregationConflictResolutionPolicy"
def resolve(self, computed_value: FloatComputedTuple, existing_value: FloatComputedTuple) \
-> Tuple[FloatComputedTuple, FloatComputedTuple]:
if self == self.TakeLowerAggregation:
# Take computed aggregation over existing value
return computed_value, existing_value
elif self == self.TakeUpper:
# Take existing value over computed aggregation
return existing_value, computed_value
class MissingValueResolutionPolicy(Enum):
UseZero = 0
Invalidate = 1
@staticmethod
def get_key():
return "NISSolverMissingValueResolutionPolicy"
class ConflictResolutionAlgorithm:
def __init__(self, computation_sources_priority_list: List[ComputationSource], aggregation_conflict_policy: AggregationConflictResolutionPolicy):
self.computation_sources_priority_list = computation_sources_priority_list
self.aggregation_conflict_policy = aggregation_conflict_policy
def resolve(self, value1: FloatComputedTuple, value2: FloatComputedTuple) -> Tuple[FloatComputedTuple, FloatComputedTuple]:
        assert value1.computation_source != value2.computation_source, \
            f"The computation sources of both conflicting values cannot be the same: {value1.computation_source}"
# Both values have been computed
if value1.computation_source is not None and value2.computation_source is not None:
value1_position = self.computation_sources_priority_list.index(value1.computation_source)
value2_position = self.computation_sources_priority_list.index(value2.computation_source)
if value1_position < value2_position:
return value1, value2
else:
return value2, value1
# One of the values has been computed by aggregation while the other is an observation
if ifnull(value1.computation_source, value2.computation_source) in (ComputationSource.PartOfAggregation, ComputationSource.InterfaceTypeAggregation):
if value1.computation_source is None:
# value2 is computed value, value1 is existing value
return self.aggregation_conflict_policy.resolve(value2, value1)
else:
# value1 is computed value, value2 is existing value
return self.aggregation_conflict_policy.resolve(value1, value2)
# One of the values has been computed by a non-aggregation computation while the other is an observation
else:
# Return the observation first
if value1.computation_source is None:
return value1, value2
else:
return value2, value1
def get_computation_sources_priority_list(s: str) -> List[ComputationSource]:
""" Convert a list of strings into a list of valid ComputationSource values and also check its validity
according to the parameter "NISSolverComputationSourcesPriority".
The input list should contain all values of ComputationSource, without duplicates, in any order.
"""
identifiers = parse_string_as_simple_ident_list(s)
sources: List[ComputationSource] = []
if identifiers is None:
raise SolvingException(f"The priority list of computation sources is invalid: {identifiers}")
for identifier in identifiers:
try:
sources.append(ComputationSource[identifier])
except KeyError:
raise SolvingException(f"The priority list of computation sources have an invalid value: {identifier}")
if len(sources) != len(ComputationSource):
raise SolvingException(
f"The priority list of computation sources should have length {len(ComputationSource)} but has length: {len(sources)}")
if len(sources) != len(set(sources)):
raise SolvingException(f"The priority list of computation sources cannot have duplicated values: {sources}")
return sources
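# Illustrative example (assuming the comma-separated identifier syntax accepted by
# parse_string_as_simple_ident_list):
#   get_computation_sources_priority_list(
#       "Flow, Scale, ScaleChange, PartOfAggregation, InterfaceTypeAggregation")
#   returns the five ComputationSource members in that order; unknown, missing or duplicated names
#   raise SolvingException.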
class InterfaceNode:
"""
Identifies an interface which value should be computed by the solver.
An interface can be identified in two different ways:
1. In the common case there is an interface declared in the Interfaces command. The interface is identified
with "ProcessorName:InterfaceName".
2. When we are aggregating by the interface type and there isn't a declared interface. The interface is
identified with "ProcessorName:InterfaceTypeName:Orientation"
"""
def __init__(self, interface_or_type: Union[Factor, FactorType], processor: Optional[Processor] = None,
orientation: Optional[str] = None, processor_name: Optional[str] = None):
if isinstance(interface_or_type, Factor):
self.interface: Optional[Factor] = interface_or_type
self.interface_type = self.interface.taxon
self.orientation: Optional[str] = orientation if orientation else self.interface.orientation
self.interface_name: str = interface_or_type.name
self.processor = processor if processor else self.interface.processor
elif isinstance(interface_or_type, FactorType):
self.interface: Optional[Factor] = None
self.interface_type = interface_or_type
self.orientation = orientation
self.interface_name: str = ""
self.processor = processor
else:
raise Exception(f"Invalid object type '{type(interface_or_type)}' for the first parameter. "
f"Valid object types are [Factor, FactorType].")
self.processor_name: str = self.processor.full_hierarchy_name if self.processor else processor_name
@property
def key(self) -> Tuple:
return self.processor_name, self.interface_name
@property
def alternate_key(self) -> Tuple:
return self.processor_name, self.type, self.orientation
@property
def full_key(self) -> Tuple:
return self.processor_name, self.interface_name, self.type, self.orientation
@staticmethod
def full_key_labels() -> List[str]:
return ["Processor", "Interface", "InterfaceType", "Orientation"]
@property
def name(self) -> str:
if self.interface_name:
return ":".join(self.key)
else:
return ":".join(self.alternate_key)
@property
def type(self) -> str:
return self.interface_type.name
@property
def unit(self):
return self.interface_type.unit
@property
def roegen_type(self):
if self.interface and self.interface.roegen_type:
if isinstance(self.interface.roegen_type, str):
return self.interface.roegen_type
else:
return self.interface.roegen_type.name.title()
elif self.interface_type and self.interface_type.roegen_type:
if isinstance(self.interface_type.roegen_type, str):
return self.interface_type.roegen_type
else:
return self.interface_type.roegen_type.name.title()
else:
return ""
@property
def sphere(self) -> Optional[str]:
if self.interface and self.interface.sphere:
return self.interface.sphere
else:
return self.interface_type.sphere
@property
def system(self) -> Optional[str]:
return self.processor.processor_system if self.processor else None
@property
def subsystem(self) -> Optional[SubsystemType]:
return SubsystemType.from_str(self.processor.subsystem_type) if self.processor else None
def has_interface(self) -> bool:
return self.interface is not None
def no_interface_copy(self) -> "InterfaceNode":
return InterfaceNode(self.interface_type, self.processor, self.orientation)
def __str__(self):
return self.name
def __eq__(self, other):
return self.name == other.name
def __repr__(self):
return istr(str(self))
def __hash__(self):
return hash(repr(self))
def __lt__(self, other):
return self.name < other.name
class ResultKey(NamedTuple):
scenario: str
period: str
scope: Scope
conflict: ConflictResolution = ConflictResolution.No
def as_string_tuple(self) -> Tuple[str, str, str, str]:
return self.scenario, self.period, self.scope.name, self.conflict.name
ProcessorsRelationWeights = Dict[Tuple[Processor, Processor], Any]
InterfaceNodeHierarchy = Dict[InterfaceNode, Set[InterfaceNode]]
NodeFloatDict = Dict[InterfaceNode, FloatExp]
NodeFloatComputedDict = Dict[InterfaceNode, FloatComputedTuple]
ResultDict = Dict[ResultKey, NodeFloatComputedDict]
AstType = Dict
ObservationListType = List[Tuple[Optional[Union[float, AstType]], FactorQuantitativeObservation]]
TimeObservationsType = Dict[str, ObservationListType]
InterfaceNodeAstDict = Dict[InterfaceNode, Tuple[AstType, FactorQuantitativeObservation]]
class ProcessingItem(NamedTuple):
source: ComputationSource
hierarchy: Union[InterfaceNodeHierarchy, ComputationGraph]
results: NodeFloatComputedDict
partof_weights: Optional[ProcessorsRelationWeights] = None
def get_circular_dependencies(parameters: Dict[str, Tuple[Any, list]]) -> list:
# Graph, for evaluation of circular dependencies
G = nx.DiGraph()
for param, (_, dependencies) in parameters.items():
for param2 in dependencies:
G.add_edge(param2, param) # We need "param2" to obtain "param"
return list(nx.simple_cycles(G))
def evaluate_parameters_for_scenario(base_params: List[Parameter], scenario_params: Dict[str, str]):
"""
Obtain a dictionary (parameter -> value), where parameter is a string and value is a literal: number, boolean,
category or string.
Start from the base parameters then overwrite with the values in the current scenario.
Parameters may depend on other parameters, so this has to be considered before evaluation.
No cycles are allowed in the dependencies, i.e., if P2 depends on P1, P1 cannot depend on P2.
To analyze this, first expressions are evaluated, extracting which parameters appear in each of them. Then a graph
is elaborated based on this information. Finally, an algorithm to find cycles is executed.
:param base_params:
:param scenario_params:
:return:
"""
# Create dictionary without evaluation
result_params = create_dictionary()
result_params.update({p.name: p.default_value for p in base_params if p.default_value is not None})
param_types = create_dictionary()
param_types.update({p.name: p.type for p in base_params})
# Overwrite with scenario expressions or constants
result_params.update(scenario_params)
state = State()
known_params = create_dictionary()
unknown_params = create_dictionary()
# Now, evaluate ALL expressions
for param, expression in result_params.items():
ptype = param_types[param]
if strcmp(ptype, "Number") or strcmp(ptype, "Boolean"):
value, ast, params, issues = evaluate_numeric_expression_with_parameters(expression, state)
if value is None: # It is not a constant, store the parameters on which this depends
unknown_params[param] = (ast, set([istr(p) for p in params]))
else: # It is a constant, store it
result_params[param] = value # Overwrite
known_params[param] = value
elif strcmp(ptype, "Code") or strcmp(ptype, "String"):
result_params[param] = expression
known_params[param] = expression
cycles = get_circular_dependencies(unknown_params)
if len(cycles) > 0:
raise SolvingException(
f"Parameters cannot have circular dependencies. {len(cycles)} cycles were detected: {':: '.join(cycles)}")
# Initialize state with known parameters
state.update(known_params)
known_params_set = set([istr(p) for p in known_params.keys()])
# Loop until no new parameters can be evaluated
previous_len_unknown_params = len(unknown_params) + 1
while len(unknown_params) < previous_len_unknown_params:
previous_len_unknown_params = len(unknown_params)
for param in list(unknown_params): # A list(...) is used because the dictionary can be modified inside
ast, params = unknown_params[param]
if params.issubset(known_params_set):
value, _, _, issues = evaluate_numeric_expression_with_parameters(ast, state)
if value is None:
raise SolvingException(
f"It should be possible to evaluate the parameter '{param}'. Issues: {', '.join(issues)}")
else:
del unknown_params[param]
result_params[param] = value
# known_params[param] = value # Not necessary
known_params_set.add(istr(param))
state.set(param, value)
if len(unknown_params) > 0:
raise SolvingException(f"Could not evaluate the following parameters: {', '.join(unknown_params)}")
return result_params
def get_evaluated_observations_by_time(prd: PartialRetrievalDictionary) -> TimeObservationsType:
"""
Get all interface observations (intensive or extensive) by time.
Also resolve expressions without parameters. Cannot resolve expressions depending only on global parameters
because some of them can be overridden by scenario parameters.
Each evaluated observation is stored as a tuple:
* First: the evaluated result as a float or the prepared AST
* Second: the observation
:param prd: the global objects dictionary
:return: a time dictionary with a list of observation on each time
"""
observations: TimeObservationsType = defaultdict(list)
state = State()
# Get all observations by time
for observation in find_quantitative_observations(prd, processor_instances_only=True):
# Try to evaluate the observation value
value, ast, _, issues = evaluate_numeric_expression_with_parameters(observation.value, state)
# Store: (Value, FactorQuantitativeObservation)
time = observation.attributes["time"].lower()
observations[time].append((ifnull(value, ast), observation))
if len(observations) == 0:
return {}
# Check all time periods are consistent. All should be Year or Month, but not both.
time_period_type = check_type_consistency_from_all_time_periods(list(observations.keys()))
assert(time_period_type in ["year", "month"])
# Remove generic period type and insert it into all specific periods. E.g. "Year" into "2010", "2011" and "2012"
if time_period_type in observations:
# Generic monthly ("Month") or annual ("Year") data
periodic_observations = observations.pop(time_period_type)
for time in observations:
observations[time] += periodic_observations
return observations
def evaluate_numeric_expression_with_parameters(expression: Union[float, str, dict], state: State) \
-> Tuple[Optional[float], Optional[AstType], Set, List[str]]:
issues: List[Tuple[int, str]] = []
ast: Optional[AstType] = None
value: Optional[float] = None
params = set()
if expression is None:
value = None
elif isinstance(expression, float):
value = expression
elif isinstance(expression, dict):
ast = expression
value, params = ast_evaluator(ast, state, None, issues)
if value is not None:
ast = None
elif isinstance(expression, str):
try:
value = float(expression)
except ValueError:
# print(f"{expression} before")
ast = string_to_ast(expression_with_parameters, expression)
# print(f"{expression} after")
value, params = ast_evaluator(ast, state, None, issues)
if value is not None:
ast = None
else:
issues.append((3, f"Invalid type '{type(expression)}' for expression '{expression}'"))
return value, ast, params, [i[1] for i in issues]
def check_type_consistency_from_all_time_periods(time_periods: List[str]) -> str:
""" Check if all time periods are of the same period type, either Year or Month:
- general "Year" & specific year (YYYY)
- general "Month" & specific month (mm-YYYY or YYYY-mm, separator can be any of "-/")
:param time_periods:
:return:
"""
# Based on the first element we will check the rest of elements
period = next(iter(time_periods))
if period == "year" or is_year(period):
period_type = "year"
period_check = is_year
elif period == "month" or is_month(period):
period_type = "month"
period_check = is_month
else:
raise SolvingException(f"Found invalid period type '{period}'")
for time_period in time_periods:
if time_period != period_type and not period_check(time_period):
raise SolvingException(
f"Found period type inconsistency: accepting '{period_type}' but found '{time_period}'")
return period_type
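# Illustrative example (based on the checks above): ["year", "2010", "2011"] -> "year", while mixing
# "2010" with the monthly period "2010-01" raises SolvingException.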
def split_observations_by_relativeness(observations_by_time: TimeObservationsType) -> Tuple[TimeObservationsType, TimeObservationsType]:
observations_by_time_norelative = defaultdict(list)
observations_by_time_relative = defaultdict(list)
for time, observations in observations_by_time.items():
for value, obs in observations:
if obs.is_relative:
observations_by_time_relative[time].append((value, obs))
else:
observations_by_time_norelative[time].append((value, obs))
return observations_by_time_norelative, observations_by_time_relative
def compute_graph_results(comp_graph: ComputationGraph,
existing_results: NodeFloatComputedDict,
previous_known_nodes: Set[InterfaceNode],
computation_source: ComputationSource) -> NodeFloatComputedDict:
# Filter results in graph
graph_params: NodeFloatDict = {k: v.value for k, v in existing_results.items() if k in comp_graph.nodes}
# Obtain nodes without a value
compute_nodes = comp_graph.nodes_not_in_container(graph_params)
if len(compute_nodes) == 0:
print("All nodes have a value. Nothing to solve.")
return {}
print(f"****** NODES: {comp_graph.nodes}")
print(f"****** UNKNOWN NODES: {compute_nodes}")
new_computed_nodes: Set[InterfaceNode] = {k for k in existing_results if k not in previous_known_nodes}
conflicts = comp_graph.compute_conflicts(new_computed_nodes, previous_known_nodes)
raise_error_if_conflicts(conflicts, graph_params, comp_graph.name)
results, _ = comp_graph.compute_values(compute_nodes, graph_params)
# Return only entries with a valid value and set the name
return_values: NodeFloatComputedDict = {}
for k, v in results.items():
if v is not None:
# v.name = k.name
return_values[k] = FloatComputedTuple(v, Computed.Yes, computation_source=computation_source)
return return_values
def raise_error_if_conflicts(conflicts: Dict[InterfaceNode, Set[InterfaceNode]], graph_params: NodeFloatDict, graph_name: str):
conflict_strings: List[str] = []
for param, conf_params in conflicts.items():
if conf_params:
conf_params_string = "{" + ', '.join([f"{p} ({graph_params[p]})" for p in conf_params]) + "}"
conflict_strings.append(f"{param} ({graph_params[param]}) -> {conf_params_string}")
if conflict_strings:
raise SolvingException(f"There are conflicts in the '{graph_name}' computation graph: {', '.join(conflict_strings)}")
def create_interface_edges(edges: List[Tuple[Factor, Factor, Optional[str]]]) \
-> Generator[Tuple[InterfaceNode, InterfaceNode, Dict], None, None]:
for src, dst, weight in edges:
src_node = InterfaceNode(src)
dst_node = InterfaceNode(dst)
if "Archetype" in [src.processor.instance_or_archetype, dst.processor.instance_or_archetype]:
print(f"WARNING: excluding relation from '{src_node}' to '{dst_node}' because of Archetype processor")
else:
yield src_node, dst_node, dict(weight=weight)
def resolve_weight_expressions(graph_list: List[nx.DiGraph], state: State, raise_error=False) -> None:
for graph in graph_list:
for u, v, data in graph.edges(data=True):
expression = data["weight"]
if expression is not None and not isinstance(expression, FloatExp):
value, ast, params, issues = evaluate_numeric_expression_with_parameters(expression, state)
if raise_error and value is None:
raise SolvingException(
f"Cannot evaluate expression "
f"'{expression}' for weight from interface '{u}' to interface '{v}'. Params: {params}. "
f"Issues: {', '.join(issues)}"
)
data["weight"] = ast if value is None else FloatExp(value, None, str(expression))
def resolve_partof_weight_expressions(weights: ProcessorsRelationWeights, state: State, raise_error=False) \
-> ProcessorsRelationWeights:
evaluated_weights: ProcessorsRelationWeights = {}
for (parent, child), expression in weights.items():
if expression is not None and not isinstance(expression, FloatExp):
value, ast, params, issues = evaluate_numeric_expression_with_parameters(expression, state)
if raise_error and value is None:
raise SolvingException(
f"Cannot evaluate expression '{expression}' for weight from child processor '{parent}' "
f"to parent processor '{child}'. Params: {params}. Issues: {', '.join(issues)}"
)
evaluated_weights[(parent, child)] = ast if value is None else FloatExp(value, None, str(expression))
return evaluated_weights
def create_scale_change_relations_and_update_flow_relations(relations_flow: nx.DiGraph, registry,
interface_nodes: Set[InterfaceNode]) -> nx.DiGraph:
relations_scale_change = nx.DiGraph()
edges = [(r.source_factor, r.target_factor, r.back_factor, r.weight, r.scale_change_weight)
for r in registry.get(FactorsRelationDirectedFlowObservation.partial_key())
if r.scale_change_weight is not None or r.back_factor is not None]
for src, dst, bck, weight, scale_change_weight in edges:
source_node = InterfaceNode(src)
dest_node = InterfaceNode(dst)
back_node = InterfaceNode(bck) if bck else None
if "Archetype" in [src.processor.instance_or_archetype,
dst.processor.instance_or_archetype,
bck.processor.instance_or_archetype if bck else None]:
print(f"WARNING: excluding relation from '{source_node}' to '{dest_node}' "
f"and back to '{back_node}' because of Archetype processor")
continue
hidden_node = InterfaceNode(src.taxon,
processor_name=f"{src.processor.full_hierarchy_name}-"
f"{dst.processor.full_hierarchy_name}",
orientation="Input/Output")
relations_flow.add_edge(source_node, hidden_node, weight=weight)
relations_scale_change.add_edge(hidden_node, dest_node, weight=scale_change_weight, add_reverse_weight="yes")
if back_node:
relations_scale_change.add_edge(hidden_node, back_node, weight=scale_change_weight, add_reverse_weight="yes")
relations_scale_change.nodes[hidden_node]["add_split"] = "yes"
real_dest_node = InterfaceNode(source_node.interface_type, dest_node.processor,
orientation="Input" if source_node.orientation.lower() == "output" else "Output")
# Check if synthetic interface is equal to an existing one
matching_interfaces = [n for n in interface_nodes if n.alternate_key == real_dest_node.alternate_key]
if len(matching_interfaces) == 1:
real_dest_node = matching_interfaces[0]
else:
interface_nodes.add(real_dest_node)
if relations_flow.has_edge(source_node, real_dest_node):
# weight = relations_flow[source_node][real_dest_node]['weight']
relations_flow.remove_edge(source_node, real_dest_node)
# relations_flow.add_edge(source_node, hidden_node, weight=weight) # This "weight" should be the same
relations_flow.add_edge(hidden_node, real_dest_node, weight=1.0)
return relations_scale_change
def convert_params_to_extended_interface_names(params: Set[str], obs: FactorQuantitativeObservation, registry) \
-> Tuple[Dict[str, str], List[str], List[str]]:
extended_interface_names: Dict[str, str] = {}
unresolved_params: List[str] = []
issues: List[str] = []
for param in params:
# Check if param is valid interface name
interfaces: Sequence[Factor] = registry.get(Factor.partial_key(processor=obs.factor.processor, name=param))
if len(interfaces) == 1:
node = InterfaceNode(interfaces[0])
extended_interface_names[param] = node.name
else:
unresolved_params.append(param)
if len(interfaces) > 1:
issues.append(f"Multiple interfaces with name '{param}' exist, "
f"rename them to uniquely identify the desired one.")
else: # len(interfaces) == 0
issues.append(f"No global parameter or interface exist with name '{param}'.")
return extended_interface_names, unresolved_params, issues
def replace_ast_variable_parts(ast: AstType, variable_conversion: Dict[str, str]) -> AstType:
new_ast = deepcopy(ast)
for term in new_ast['terms']:
if term['type'] == 'h_var':
variable = term['parts'][0]
if variable in variable_conversion:
term['parts'] = [variable_conversion[variable]]
return new_ast
def resolve_observations_with_parameters(state: State, observations: ObservationListType,
observers_priority_list: Optional[List[str]], registry) \
-> Tuple[NodeFloatComputedDict, InterfaceNodeAstDict]:
resolved_observations: NodeFloatComputedDict = {}
unresolved_observations_with_interfaces: InterfaceNodeAstDict = {}
for expression, obs in observations:
interface_params: Dict[str, str] = {}
obs_new_value: Optional[str] = None
value, ast, params, issues = evaluate_numeric_expression_with_parameters(expression, state)
if value is None:
interface_params, params, issues = convert_params_to_extended_interface_names(params, obs, registry)
if interface_params and not issues:
ast = replace_ast_variable_parts(ast, interface_params)
obs_new_value = replace_string_from_dictionary(obs.value, interface_params)
else:
raise SolvingException(
f"Cannot evaluate expression '{expression}' for observation at interface '{obs.factor.name}'. "
f"Params: {params}. Issues: {', '.join(issues)}"
)
# Get observer name
observer_name = obs.observer.name if obs.observer else None
if observer_name and observers_priority_list and observer_name not in observers_priority_list:
raise SolvingException(
f"The specified observer '{observer_name}' for the interface '{obs.factor.name}' has not been included "
f"in the observers' priority list: {observers_priority_list}"
)
# Create node from the interface
node = InterfaceNode(obs.factor)
if node in resolved_observations or node in unresolved_observations_with_interfaces:
previous_observer_name: str = resolved_observations[node].observer \
if node in resolved_observations else unresolved_observations_with_interfaces[node][1].observer.name
if observer_name is None and previous_observer_name is None:
raise SolvingException(
f"Multiple observations exist for the 'same interface '{node.name}' without a specified observer."
)
elif not observers_priority_list:
raise SolvingException(
f"Multiple observations exist for the same interface '{node.name}' but an observers' priority list "
f"has not been (correctly) defined: {observers_priority_list}"
)
elif not precedes_in_list(observers_priority_list, observer_name, previous_observer_name):
# Ignore this observation because a higher-priority observation has previously been set
continue
if interface_params:
new_obs = deepcopy(obs)
if obs_new_value is not None:
new_obs.value = obs_new_value
unresolved_observations_with_interfaces[node] = (ast, new_obs)
resolved_observations.pop(node, None)
else:
resolved_observations[node] = FloatComputedTuple(FloatExp(value, node.name, obs_new_value),
Computed.No, observer_name)
unresolved_observations_with_interfaces.pop(node, None)
return resolved_observations, unresolved_observations_with_interfaces
def resolve_observations_with_interfaces(
state: State, existing_unresolved_observations: InterfaceNodeAstDict, existing_results: NodeFloatComputedDict) \
-> Tuple[NodeFloatComputedDict, InterfaceNodeAstDict]:
state.update({k.name: v.value.val for k, v in existing_results.items()})
results: NodeFloatComputedDict = {}
unresolved_observations: InterfaceNodeAstDict = {}
for node, (ast, obs) in existing_unresolved_observations.items():
value, ast, params, issues = evaluate_numeric_expression_with_parameters(ast, state)
if value is not None:
observer_name = obs.observer.name if obs.observer else None
results[node] = FloatComputedTuple(FloatExp(value, node.name, str(obs.value)), Computed.No, observer_name)
else:
unresolved_observations[node] = (ast, obs)
return results, unresolved_observations
def compute_flow_and_scale_computation_graphs(state: State,
relative_observations: ObservationListType,
relations_flow: nx.DiGraph,
relations_scale: nx.DiGraph,
relations_scale_change: nx.DiGraph) \
-> Tuple[ComputationGraph, ComputationGraph, ComputationGraph]:
# Create a copy of the main relations structures that are modified with time-dependent values
time_relations_flow = relations_flow.copy()
time_relations_scale = relations_scale.copy()
time_relations_scale_change = relations_scale_change.copy()
# Add Processors internal -RelativeTo- relations (time dependent)
# Transform relative observations into graph edges
for expression, obs in relative_observations:
time_relations_scale.add_edge(InterfaceNode(obs.relative_factor, obs.factor.processor),
InterfaceNode(obs.factor),
weight=expression)
# Last pass to resolve weight expressions: expressions with parameters can be solved
resolve_weight_expressions([time_relations_flow, time_relations_scale, time_relations_scale_change],
state, raise_error=True)
# Create computation graphs
comp_graph_flow = create_computation_graph_from_flows(time_relations_flow, time_relations_scale)
comp_graph_flow.name = "Flow"
comp_graph_scale = ComputationGraph(time_relations_scale, "Scale")
comp_graph_scale_change = ComputationGraph(time_relations_scale_change, "Scale Change")
return comp_graph_flow, comp_graph_scale, comp_graph_scale_change
def create_computation_graph_from_flows(relations_flow: nx.DiGraph, relations_scale: Optional[nx.DiGraph] = None) -> ComputationGraph:
flow_graph = FlowGraph(relations_flow)
comp_graph_flow, issues = flow_graph.get_computation_graph(relations_scale)
for issue in issues:
print(issue)
error_issues = [e.description for e in issues if e.itype == IType.ERROR]
if len(error_issues) > 0:
raise SolvingException(f"The computation graph cannot be generated. Issues: {', '.join(error_issues)}")
return comp_graph_flow
def compute_interfacetype_hierarchies(registry, interface_nodes: Set[InterfaceNode]) -> InterfaceNodeHierarchy:
def compute(parent: FactorType):
""" Recursive computation for a depth-first search """
if parent in visited_interface_types:
return
for child in interface_types_parent_relations[parent]:
if child in interface_types_parent_relations:
compute(child)
for processor in {p.processor for p in interface_nodes}: # type: Processor
for orientation in orientations:
child_interfaces = interfaces_dict.get(
(processor.full_hierarchy_name, child.name, orientation), [])
if child_interfaces:
parent_interface = InterfaceNode(parent, processor, orientation)
interfaces = interfaces_dict.get(parent_interface.alternate_key, [])
if len(interfaces) == 1:
# Replace "ProcessorName:InterfaceTypeName:Orientation" -> "ProcessorName:InterfaceName"
parent_interface = interfaces[0]
else: # len(interfaces) != 1
interface_nodes.add(parent_interface)
interfaces_dict.setdefault(parent_interface.alternate_key, []).append(parent_interface)
hierarchies.setdefault(parent_interface, set()).update(child_interfaces)
visited_interface_types.add(parent)
# Get all different existing interface types with children interface types
interface_types_parent_relations: Dict[FactorType, Set[FactorType]] = \
{ft: ft.get_children() for ft in registry.get(FactorType.partial_key()) if len(ft.get_children()) > 0}
# Get the list of interfaces for each combination
interfaces_dict: Dict[Tuple[str, str, str], List[InterfaceNode]] = {}
for interface in interface_nodes:
interfaces_dict.setdefault(interface.alternate_key, []).append(interface)
hierarchies: InterfaceNodeHierarchy = {}
visited_interface_types: Set[FactorType] = set()
# Iterate over all relations
for parent_interface_type in interface_types_parent_relations:
compute(parent_interface_type)
return hierarchies
def compute_partof_hierarchies(registry, interface_nodes: Set[InterfaceNode]) \
-> Tuple[InterfaceNodeHierarchy, ProcessorsRelationWeights]:
def compute(parent: Processor):
""" Recursive computation for a depth-first search """
if parent in visited_processors:
return
for child in processor_partof_relations[parent]:
if child in processor_partof_relations:
compute(child)
child_interface_nodes: List[InterfaceNode] = processor_interface_nodes.get(child, [])
if child_interface_nodes and (parent, child) in behave_as_differences:
# Remove interfaces from the child that don't belong to the behave_as_processor
child_interface_nodes = [n for n in child_interface_nodes if n.interface_name not in behave_as_differences[(parent, child)]]
# Add the interfaces of the child processor to the parent processor
for child_interface_node in child_interface_nodes:
parent_interface_node = InterfaceNode(child_interface_node.interface, parent)
# Search for parent_interface in the set of existing interface_nodes; it can have the same name but a
# different combination of (type, orientation). For example, if we define:
# - interface "ChildProcessor:Water" as (BlueWater, Input)
# - interface "ParentProcessor:Water" as (BlueWater, Output)
# then aggregating the child interface results in a conflict in the parent
if parent_interface_node in interface_nodes:
for interface_node in interface_nodes:
if interface_node == parent_interface_node:
if (interface_node.type, interface_node.orientation) != (parent_interface_node.type, parent_interface_node.orientation):
raise SolvingException(
f"Interface '{parent_interface_node}' already defined with type <{parent_interface_node.type}> and orientation <{parent_interface_node.orientation}> "
f"is being redefined with type <{interface_node.type}> and orientation <{interface_node.orientation}> when aggregating processor "
f"<{child_interface_node.processor_name}> to parent processor <{parent_interface_node.processor_name}>. Rename either the child or the parent interface.")
break
else:
interface_nodes.add(parent_interface_node)
processor_interface_nodes.setdefault(parent_interface_node.processor, []).append(parent_interface_node)
hierarchies.setdefault(parent_interface_node, set()).add(child_interface_node)
visited_processors.add(parent)
# Get the -PartOf- processor relations of the system
processor_partof_relations, weights, behave_as_dependencies = get_processor_partof_relations(registry)
# Get the list of interfaces of each processor
processor_interface_nodes: Dict[Processor, List[InterfaceNode]] = {}
for node in interface_nodes:
processor_interface_nodes.setdefault(node.processor, []).append(node)
check_behave_as_dependencies(behave_as_dependencies, processor_interface_nodes)
behave_as_differences = compute_behave_as_differences(behave_as_dependencies, processor_interface_nodes)
hierarchies: InterfaceNodeHierarchy = {}
visited_processors: Set[Processor] = set()
# Iterate over all relations
for parent_processor in processor_partof_relations:
compute(parent_processor)
return hierarchies, weights
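# Note: the returned hierarchy maps each parent InterfaceNode to the set of child InterfaceNodes that
# aggregate into it, while "weights" keeps the (parent processor, child processor) weights declared in
# the -PartOf- relations; both are later consumed by compute_hierarchy_aggregate_results (after the
# weight expressions have been resolved for each scenario).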
def check_behave_as_dependencies(
behave_as_dependencies: Dict[Tuple[Processor, Processor], Processor],
processor_interface_nodes: Dict[Processor, List[InterfaceNode]]):
""" Make a check for the 'BehaveAs' property that can be defined in the 'BareProcessors' command.
If defined, all the interfaces of the 'BehaveAs' processor must be specified in the selected processor."""
for (_, child_processor), behave_as_processor in behave_as_dependencies.items():
child_interfaces = {n.interface_name for n in processor_interface_nodes[child_processor]}
behave_as_interfaces = {n.interface_name for n in processor_interface_nodes[behave_as_processor]}
difference_interfaces = behave_as_interfaces.difference(child_interfaces)
if difference_interfaces:
raise SolvingException(
f"The processor '{child_processor.name}' cannot behave as processor '{behave_as_processor.name}' on "
f"aggregations because it doesn't have these interfaces: {difference_interfaces}")
def compute_behave_as_differences(
behave_as_dependencies: Dict[Tuple[Processor, Processor], Processor],
processor_interface_nodes: Dict[Processor, List[InterfaceNode]]) -> Dict[Tuple[Processor, Processor], Set[str]]:
""" Compute the difference in interfaces from a processor and the associated BehaveAs processor """
behave_as_differences: Dict[Tuple[Processor, Processor], Set[str]] = {}
for (parent_processor, child_processor), behave_as_processor in behave_as_dependencies.items():
child_interfaces = {n.interface_name for n in processor_interface_nodes[child_processor]}
behave_as_interfaces = {n.interface_name for n in processor_interface_nodes[behave_as_processor]}
behave_as_differences[(parent_processor, child_processor)] = child_interfaces.difference(behave_as_interfaces)
return behave_as_differences
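# Hedged example (interface names are hypothetical): if the child processor exposes
# {"Water", "Energy", "Labour"} and its BehaveAs processor only {"Water", "Energy"}, the stored
# difference is {"Labour"}, i.e. the child interfaces that are ignored when aggregating to the parent.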
def get_processor_partof_relations(glb_idx: PartialRetrievalDictionary) \
-> Tuple[Dict[Processor, Set[Processor]], ProcessorsRelationWeights, Dict[Tuple[Processor, Processor], Processor]]:
""" Get in a dictionary the -PartOf- processor relations, ignoring Archetype processors """
relations: Dict[Processor, Set[Processor]] = {}
weights: ProcessorsRelationWeights = {}
behave_as_dependencies: Dict[Tuple[Processor, Processor], Processor] = {}
for parent, child, weight, behave_as_processor in \
[(r.parent_processor, r.child_processor, r.weight, r.behave_as)
for r in glb_idx.get(ProcessorsRelationPartOfObservation.partial_key())
if "Archetype" not in [r.parent_processor.instance_or_archetype, r.child_processor.instance_or_archetype]]:
relations.setdefault(parent, set()).add(child)
weights[(parent, child)] = weight
if behave_as_processor:
behave_as_dependencies[(parent, child)] = behave_as_processor
return relations, weights, behave_as_dependencies
def compute_hierarchy_graph_results(
graph: ComputationGraph, params: NodeFloatComputedDict,
prev_computed_values: NodeFloatComputedDict,
conflict_resolution_algorithm: ConflictResolutionAlgorithm,
computation_source: ComputationSource) \
-> Tuple[NodeFloatComputedDict, NodeFloatComputedDict, NodeFloatComputedDict]:
"""
Compute nodes in a graph hierarchy and also mark conflicts with existing values (params)
:param graph: hierarchy as a graph of interface nodes
:param params: all nodes with a known value
:param prev_computed_values: all nodes that have been previously computed with same computation source
:param conflict_resolution_algorithm: algorithm for resolution of conflicts
:param computation_source: source of computation
:return: a dict with all values computed now and in previous calls, a dict with conflicted values
that have been taken, a dict with conflicted values that have been dismissed
"""
def solve_inputs(inputs: List[FloatExp.ValueWeightPair], split: bool) -> Optional[FloatExp]:
input_values: List[FloatExp.ValueWeightPair] = []
for n, weight in sorted(inputs):
res_backward = compute_node(n)
# If node 'n' is a 'split', a single resolved input is enough to compute the result
if split:
if res_backward is not None:
return res_backward * weight
else:
if res_backward is not None and weight is not None:
input_values.append((res_backward, weight))
else:
return None
return FloatExp.compute_weighted_addition(input_values)
def compute_node(node: InterfaceNode) -> Optional[FloatExp]:
# If the node has already been computed return the value
if new_values.get(node) is not None:
return new_values[node].value
# We avoid graphs with cycles
if node in pending_nodes:
return None
pending_nodes.append(node)
sum_children = solve_inputs(graph.direct_inputs(node), graph.get_reverse_node_split(node))
if sum_children is None:
sum_children = solve_inputs(graph.reverse_inputs(node), graph.get_direct_node_split(node))
float_value = params.get(node)
if sum_children is not None:
# New value has been computed
sum_children.name = node.name
new_computed_value = FloatComputedTuple(sum_children, Computed.Yes, computation_source=computation_source)
if float_value is not None:
# Conflict here: applies strategy
taken_conflicts[node], dismissed_conflicts[node] = \
conflict_resolution_algorithm.resolve(new_computed_value, float_value)
new_values[node] = taken_conflicts[node]
return_value = taken_conflicts[node].value
else:
new_values[node] = new_computed_value
return_value = new_computed_value.value
else:
# No value obtained from the children; fall back to the value in "params" (if any)
return_value = float_value.value if float_value is not None else None
# if float_value is not None:
# new_values[node] = float_value
# return_value = float_value.value
# else:
# return_value = None
return return_value
new_values: NodeFloatComputedDict = {**prev_computed_values} # All computed aggregations
taken_conflicts: NodeFloatComputedDict = {} # Taken values on conflicting nodes
dismissed_conflicts: NodeFloatComputedDict = {} # Dismissed values on conflicting nodes
for parent_node in graph.nodes:
pending_nodes: List[InterfaceNode] = []
compute_node(parent_node)
return new_values, taken_conflicts, dismissed_conflicts
def compute_hierarchy_aggregate_results(
tree: InterfaceNodeHierarchy, params: NodeFloatComputedDict,
prev_computed_values: NodeFloatComputedDict,
conflict_resolution_algorithm: ConflictResolutionAlgorithm,
missing_values_policy: MissingValueResolutionPolicy,
computation_source: ComputationSource,
processors_relation_weights: ProcessorsRelationWeights = None) \
-> Tuple[NodeFloatComputedDict, NodeFloatComputedDict, NodeFloatComputedDict]:
"""
Compute aggregations of nodes in a hierarchy and also mark conflicts with existing values (params)
:param tree: dictionary representing a hierarchy as a tree of interface nodes in the form [parent, set(child)]
:param params: all nodes with a known value
:param prev_computed_values: all nodes that have been previously computed by aggregation
:param conflict_resolution_algorithm: algorithm for resolution of conflicts
:param missing_values_policy: policy for missing values when aggregating children
:param computation_source: source of computation
:param processors_relation_weights: weights to use computing aggregation for processor hierarchies
:return: a dict with all values computed by aggregation now and in previous calls, a dict with conflicted values
that have been taken, a dict with conflicted values that have been dismissed
"""
def compute_node(node: InterfaceNode) -> Optional[FloatExp]:
# If the node has already been computed return the value
if new_values.get(node) is not None:
return new_values[node].value
# Make a depth-first search
return_value: Optional[FloatExp]
children_values: List[FloatExp.ValueWeightPair] = []
invalidate_sum_children: bool = False
sum_children: Optional[FloatExp] = None
# Try to get the sum from children, if any
for child in sorted(tree.get(node, {})):
child_value = compute_node(child)
if child_value is not None:
weight: FloatExp = None if processors_relation_weights is None \
else processors_relation_weights[(node.processor, child.processor)]
children_values.append((child_value, weight))
elif missing_values_policy == MissingValueResolutionPolicy.Invalidate:
# Invalidate the current children computation and stop evaluating the remaining children
invalidate_sum_children = True
break
if not invalidate_sum_children:
sum_children = FloatExp.compute_weighted_addition(children_values)
float_value = params.get(node)
if sum_children is not None:
# New value has been computed
sum_children.name = node.name
new_computed_value = FloatComputedTuple(sum_children, Computed.Yes, computation_source=computation_source)
if float_value is not None:
# Conflict here: applies strategy
taken_conflicts[node], dismissed_conflicts[node] = \
conflict_resolution_algorithm.resolve(new_computed_value, float_value)
new_values[node] = taken_conflicts[node]
return_value = taken_conflicts[node].value
else:
new_values[node] = new_computed_value
return_value = new_computed_value.value
else:
# No value obtained from the children; fall back to the value in "params" (if any)
return_value = float_value.value if float_value is not None else None
return return_value
new_values: NodeFloatComputedDict = {**prev_computed_values} # All computed aggregations
taken_conflicts: NodeFloatComputedDict = {} # Taken values on conflicting nodes
dismissed_conflicts: NodeFloatComputedDict = {} # Dismissed values on conflicting nodes
for parent_node in tree:
compute_node(parent_node)
return new_values, taken_conflicts, dismissed_conflicts
def init_processor_full_names(registry: PartialRetrievalDictionary):
for processor in registry.get(Processor.partial_key()):
processor.full_hierarchy_name = processor.full_hierarchy_names(registry)[0]
# ##########################################
# ## MAIN ENTRY POINT ######################
# ##########################################
def flow_graph_solver(global_parameters: List[Parameter], problem_statement: ProblemStatement,
global_state: State, dynamic_scenario: bool) -> List[Issue]:
"""
A solver using the graph composed by the interfaces and the relationships (flows, part-of, scale, change-of-scale and relative-to)
:param global_parameters: Parameters including the default value (if defined)
:param problem_statement: ProblemStatement object, with scenarios (parameters changing the default)
and parameters for the solver
:param global_state: All variables available: object model, registry, datasets (inputs and outputs), ...
:param dynamic_scenario: If "True" store results in datasets separated from "fixed" scenarios.
Also "problem_statement" MUST have only one scenario with the parameters.
:return: List of Issues
"""
try:
issues: List[Issue] = []
glb_idx, _, _, datasets, _ = get_case_study_registry_objects(global_state)
init_processor_full_names(glb_idx)
# Get available observations
time_absolute_observations, time_relative_observations = \
split_observations_by_relativeness(get_evaluated_observations_by_time(glb_idx))
if len(time_absolute_observations) == 0:
return [Issue(IType.WARNING, f"No absolute observations have been found. The solver has nothing to solve.")]
# Get available interfaces
interface_nodes: Set[InterfaceNode] = {InterfaceNode(i) for i in glb_idx.get(Factor.partial_key())}
# Get hierarchies of processors and update interfaces to compute
partof_hierarchies, partof_weights = compute_partof_hierarchies(glb_idx, interface_nodes)
# Get hierarchies of interface types and update interfaces to compute
interfacetype_hierarchies = compute_interfacetype_hierarchies(glb_idx, interface_nodes)
relations_flow, relations_scale, relations_scale_change = \
compute_flow_and_scale_relation_graphs(glb_idx, interface_nodes)
total_results: ResultDict = {}
for scenario_name, scenario_params in problem_statement.scenarios.items(): # type: str, Dict[str, Any]
print(f"********************* SCENARIO: {scenario_name}")
scenario_state = State(evaluate_parameters_for_scenario(global_parameters, scenario_params))
scenario_partof_weights = resolve_partof_weight_expressions(partof_weights, scenario_state, raise_error=True)
# Get scenario parameters
observers_priority_list = parse_string_as_simple_ident_list(scenario_state.get('NISSolverObserversPriority'))
missing_value_policy = MissingValueResolutionPolicy[scenario_state.get(MissingValueResolutionPolicy.get_key())]
conflict_resolution_algorithm = ConflictResolutionAlgorithm(
get_computation_sources_priority_list(scenario_state.get('NISSolverComputationSourcesPriority')),
AggregationConflictResolutionPolicy[scenario_state.get(AggregationConflictResolutionPolicy.get_key())]
)
missing_value_policies: List[MissingValueResolutionPolicy] = [MissingValueResolutionPolicy.Invalidate]
if missing_value_policy == MissingValueResolutionPolicy.UseZero:
missing_value_policies.append(MissingValueResolutionPolicy.UseZero)
for time_period, absolute_observations in time_absolute_observations.items():
print(f"********************* TIME PERIOD: {time_period}")
total_taken_results: NodeFloatComputedDict = {}
total_dismissed_results: NodeFloatComputedDict = {}
try:
comp_graph_flow, comp_graph_scale, comp_graph_scale_change = \
compute_flow_and_scale_computation_graphs(scenario_state, time_relative_observations[time_period],
relations_flow,
relations_scale,
relations_scale_change)
# Get final results from the absolute observations
results, unresolved_observations_with_interfaces = \
resolve_observations_with_parameters(scenario_state, absolute_observations,
observers_priority_list, glb_idx)
# Initializations
iteration_number = 1
processing_items = [
ProcessingItem(ComputationSource.Flow, comp_graph_flow, {}),
ProcessingItem(ComputationSource.Scale, comp_graph_scale, {}),
ProcessingItem(ComputationSource.ScaleChange, comp_graph_scale_change, {}),
ProcessingItem(ComputationSource.InterfaceTypeAggregation, interfacetype_hierarchies, {}),
ProcessingItem(ComputationSource.PartOfAggregation, partof_hierarchies, {}, scenario_partof_weights)
]
# START ITERATIVE SOLVING
# We first iterate with the policy MissingValueResolutionPolicy.Invalidate, trying to get as many
# results as we can without assuming zero for missing values.
# Second, if specified in the parameter "NISSolverMissingValueResolutionPolicy", we try to obtain
# further results with the policy MissingValueResolutionPolicy.UseZero.
for missing_value_policy in missing_value_policies:
previous_len_results = len(results) - 1
# Iterate while the number of results is increasing
while len(results) > previous_len_results:
print(f"********************* Solving iteration: {iteration_number}")
previous_len_results = len(results)
for pi in processing_items:
if pi.source.is_aggregation():
new_results, taken_results, dismissed_results = compute_hierarchy_aggregate_results(
pi.hierarchy, results, pi.results, conflict_resolution_algorithm,
missing_value_policy, pi.source, pi.partof_weights)
else:
new_results, taken_results, dismissed_results = compute_hierarchy_graph_results(
pi.hierarchy, results, pi.results, conflict_resolution_algorithm, pi.source)
pi.results.update(new_results)
results.update(new_results)
total_taken_results.update(taken_results)
total_dismissed_results.update(dismissed_results)
if unresolved_observations_with_interfaces:
new_results, unresolved_observations_with_interfaces = \
resolve_observations_with_interfaces(
scenario_state, unresolved_observations_with_interfaces, results
)
results.update(new_results)
iteration_number += 1
if unresolved_observations_with_interfaces:
issues.append(Issue(IType.WARNING, f"Scenario '{scenario_name}' - period '{time_period}'. "
f"The following observations could not be evaluated: "
f"{list(unresolved_observations_with_interfaces.keys())}"))
issues.extend(check_unresolved_nodes_in_computation_graphs(
[comp_graph_flow, comp_graph_scale, comp_graph_scale_change], results, scenario_name, time_period))
current_results: ResultDict = {}
result_key = ResultKey(scenario_name, time_period, Scope.Total)
# Filter out conflicted results from TOTAL results
current_results[result_key] = {k: v for k, v in results.items() if k not in total_taken_results}
if total_taken_results:
current_results[result_key._replace(conflict=ConflictResolution.Taken)] = total_taken_results
current_results[result_key._replace(conflict=ConflictResolution.Dismissed)] = total_dismissed_results
hierarchical_structures = [
HierarchicalNodeStructure.from_flow_computation_graph(comp_graph_flow, True),
HierarchicalNodeStructure.from_partof_aggregation(partof_hierarchies, scenario_partof_weights),
HierarchicalNodeStructure.from_interfacetype_aggregation(interfacetype_hierarchies)]
additional_hierarchical_structure = HierarchicalNodeStructure.from_flow_computation_graph(comp_graph_flow, False)
internal_results, external_results = \
compute_internal_external_results(results, hierarchical_structures, additional_hierarchical_structure)
current_results[result_key._replace(scope=Scope.Internal)] = internal_results
current_results[result_key._replace(scope=Scope.External)] = external_results
total_results.update(current_results)
except SolvingException as e:
return [Issue(IType.ERROR, f"Scenario '{scenario_name}' - period '{time_period}'. {e.args[0]}")]
#
# ---------------------- CREATE PD.DATAFRAMES PREVIOUS TO OUTPUT DATASETS ----------------------
#
data = {result_key.as_string_tuple() + node.full_key:
{"RoegenType": node.roegen_type if node else "-",
"Value": float_computed.value.val,
"Computed": float_computed.computed.name,
"ComputationSource": float_computed.computation_source.name if float_computed.computation_source else None,
"Observer": float_computed.observer,
"Expression": str(float_computed.value.exp),
"Unit": node.unit if node else "-",
"Level": node.processor.attributes.get('level', '') if node else "-",
"System": node.system if node else "-",
"Subsystem": node.subsystem.name if node else "-",
"Sphere": node.sphere if node else "-"
}
for result_key, node_floatcomputed_dict in total_results.items()
for node, float_computed in node_floatcomputed_dict.items()}
export_solver_data(datasets, data, dynamic_scenario, global_state, global_parameters, problem_statement)
dataframe_sankey = compute_dataframe_sankey(total_results)
dataset_name = "flow_graph_solution_sankey"
datasets[dataset_name] = get_dataset(dataframe_sankey, dataset_name, "Flow Graph Solution - Sankey")
return issues
except SolvingException as e:
traceback.print_exc() # Print the Exception to std output
return [Issue(IType.ERROR, e.args[0])]
def compute_dataframe_sankey(results: ResultDict) -> pd.DataFrame:
data: List[Dict] = []
for result_key, node_floatcomputed_dict in results.items():
if result_key.scope == Scope.Total and result_key.conflict != ConflictResolution.Dismissed:
for node, float_computed in node_floatcomputed_dict.items():
if float_computed.computed == Computed.Yes:
for interface_fullname, weight in get_interfaces_and_weights_from_expression(float_computed.value.exp):
data.append(
{"Scenario": result_key.scenario,
"Period": result_key.period,
"OriginProcessor": interface_fullname.split(":")[0],
"OriginInterface": interface_fullname.split(":")[1],
"DestinationProcessor": node.processor_name,
"DestinationInterface": node.interface_name if node.interface_name else node.type+":"+node.orientation,
"RelationType": float_computed.computation_source.name if float_computed.computation_source else None,
"Quantity": weight
}
)
df = pd.DataFrame(data)
df.set_index(["Scenario", "Period", "OriginProcessor", "OriginInterface", "DestinationProcessor", "DestinationInterface"], inplace=True)
return df.sort_index()
def mark_observations_and_scales_as_internal_results(
results: NodeFloatComputedDict, internal_results: NodeFloatComputedDict) -> NoReturn:
for node, value in results.items():
if (value.computed == Computed.No) or \
(value.computation_source and value.computation_source == ComputationSource.Scale):
internal_results[node] = deepcopy(value)
class HierarchicalNodeStructure:
def __init__(self, structure: Union[ComputationGraph, InterfaceNodeHierarchy],
computation_source: ComputationSource,
weights: Optional[ProcessorsRelationWeights] = None,
direct: Optional[bool] = None):
assert(isinstance(structure, ComputationGraph) or isinstance(structure, Dict))
self.structure = structure
self.computation_source = computation_source
self.weights = weights
self.direct = direct
@classmethod
def from_partof_aggregation(cls, structure: InterfaceNodeHierarchy, weights: ProcessorsRelationWeights) -> 'HierarchicalNodeStructure':
return cls(structure, ComputationSource.PartOfAggregation, weights)
@classmethod
def from_interfacetype_aggregation(cls, structure: InterfaceNodeHierarchy) -> 'HierarchicalNodeStructure':
return cls(structure, ComputationSource.InterfaceTypeAggregation)
@classmethod
def from_flow_computation_graph(cls, structure: ComputationGraph, direct: Optional[bool]) -> 'HierarchicalNodeStructure':
return cls(structure, ComputationSource.Flow, direct=direct)
def __iter__(self):
if isinstance(self.structure, ComputationGraph):
return (n for n in self.structure.nodes)
else:
return (n for n in self.structure)
def get_children(self, node: InterfaceNode) -> List[Tuple[InterfaceNode, Optional[FloatExp]]]:
if isinstance(self.structure, ComputationGraph):
if self.direct:
return self.structure.direct_inputs(node)
else:
return self.structure.reverse_inputs(node)
else:
if node in self.structure:
if self.weights:
return [(n, self.weights[(node.processor, n.processor)]) for n in self.structure[node]]
else:
return [(n, None) for n in self.structure[node]]
else:
return []
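# The three alternative constructors above mirror the computation sources used when splitting results
# into Internal/External scopes, e.g. (hedged sketch, the objects are assumed to exist):
#   structure = HierarchicalNodeStructure.from_flow_computation_graph(comp_graph_flow, direct=True)
#   children = structure.get_children(node)  # -> List[Tuple[InterfaceNode, Optional[FloatExp]]]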
def compute_internal_external_results(results: NodeFloatComputedDict, structures: List[HierarchicalNodeStructure],
additional_structure: HierarchicalNodeStructure) \
-> Tuple[NodeFloatComputedDict, NodeFloatComputedDict]:
def compute_structures() -> int:
unknown_nodes: Set[InterfaceNode] = set()
for structure in structures:
unknown_nodes |= compute_hierarchical_structure_internal_external_results(structure, results,
internal_results,
external_results)
return len(unknown_nodes)
internal_results: NodeFloatComputedDict = {}
external_results: NodeFloatComputedDict = {}
mark_observations_and_scales_as_internal_results(results, internal_results)
len_unknown = len(results)
prev_len_unknown = len_unknown + 1
while len_unknown and len_unknown < prev_len_unknown:
prev_len_unknown = len_unknown
len_unknown = compute_structures()
# If resolution is stuck try to solve flow graph in reverse order
if len_unknown and len_unknown == prev_len_unknown:
compute_hierarchical_structure_internal_external_results(additional_structure, results,
internal_results, external_results)
len_unknown = compute_structures()
return internal_results, external_results
def compute_hierarchical_structure_internal_external_results(
structure: HierarchicalNodeStructure,
results: NodeFloatComputedDict,
internal_results: NodeFloatComputedDict, external_results: NodeFloatComputedDict) -> Set[InterfaceNode]:
def compute(node: InterfaceNode) -> Tuple[Optional[FloatComputedTuple], Optional[FloatComputedTuple]]:
if node not in internal_results and node not in external_results:
if not structure.get_children(node):
unknown_nodes.add(node)
return None, None
else:
internal_addends: List[FloatExp.ValueWeightPair] = []
external_addends: List[FloatExp.ValueWeightPair] = []
for child_node, weight in sorted(structure.get_children(node)):
if child_node in results:
child_value = deepcopy(results[child_node])
same_system = node.system == child_node.system and node.subsystem.is_same_scope(child_node.subsystem)
if same_system:
child_internal_value, child_external_value = compute(child_node)
if not child_internal_value and not child_external_value:
unknown_nodes.add(node)
return None, None
if child_internal_value:
child_internal_value.value.name = Scope.Internal.name + brackets(child_node.name)
internal_addends.append((child_internal_value.value, weight))
if child_external_value:
child_external_value.value.name = Scope.External.name + brackets(child_node.name)
external_addends.append((child_external_value.value, weight))
else:
external_addends.append((child_value.value, weight))
if internal_addends:
scope_value = FloatExp.compute_weighted_addition(internal_addends)
scope_value.name = node.name
internal_results[node] = FloatComputedTuple(scope_value, Computed.Yes,
computation_source=structure.computation_source)
if external_addends:
scope_value = FloatExp.compute_weighted_addition(external_addends)
scope_value.name = node.name
external_results[node] = FloatComputedTuple(scope_value, Computed.Yes,
computation_source=structure.computation_source)
return internal_results.get(node), external_results.get(node)
unknown_nodes: Set[InterfaceNode] = set()
for node in structure:
compute(node)
return unknown_nodes
def check_unresolved_nodes_in_computation_graphs(computation_graphs: List[ComputationGraph],
resolved_nodes: NodeFloatComputedDict,
scenario_name: str, time_period: str) -> List[Issue]:
issues: List[Issue] = []
for comp_graph in computation_graphs:
unresolved_nodes = [n for n in comp_graph.nodes if n not in resolved_nodes]
if unresolved_nodes:
issues.append(Issue(IType.WARNING,
f"Scenario '{scenario_name}' - period '{time_period}'. The following nodes in "
f"'{comp_graph.name}' graph could not be evaluated: {unresolved_nodes}"))
return issues
def check_unresolved_nodes_in_aggregation_hierarchies(hierarchies: List[InterfaceNodeHierarchy], resolved_nodes: NodeFloatComputedDict) -> List[Issue]:
issues: List[Issue] = []
unresolved_nodes: Set[InterfaceNode] = set()
for hierarchy in hierarchies:
unresolved_nodes.update({n for n in hierarchy if n not in resolved_nodes})
for parent, children in hierarchy.items():
unresolved_nodes.update({n for n in children if n not in resolved_nodes})
if unresolved_nodes:
issues.append(Issue(IType.WARNING, f"The following nodes in aggregation hierarchies could not be "
f"evaluated: {unresolved_nodes}"))
return issues
def compute_flow_and_scale_relation_graphs(registry, interface_nodes: Set[InterfaceNode]):
# Compute Interfaces -Flow- relations (time independent)
relations_flow = nx.DiGraph(
incoming_graph_data=create_interface_edges(
[(r.source_factor, r.target_factor, r.weight)
for r in registry.get(FactorsRelationDirectedFlowObservation.partial_key())
if r.scale_change_weight is None and r.back_factor is None]
)
)
# Compute Processors -Scale- relations (time independent)
relations_scale = nx.DiGraph(
incoming_graph_data=create_interface_edges(
[(r.origin, r.destination, r.quantity)
for r in registry.get(FactorsRelationScaleObservation.partial_key())]
)
)
# Compute Interfaces -Scale Change- relations (time independent). Also update Flow relations.
relations_scale_change = create_scale_change_relations_and_update_flow_relations(relations_flow, registry, interface_nodes)
# First pass to resolve weight expressions: only expressions without parameters can be solved
# NOT WORKING:
# 1) the method ast_evaluator() doesn't get global Parameters,
# 2) the expression for the FloatExp() is not correctly computed on a second pass
# resolve_weight_expressions([relations_flow, relations_scale, relations_scale_change], state)
return relations_flow, relations_scale, relations_scale_change
def export_solver_data(datasets, data, dynamic_scenario, state, global_parameters, problem_statement) -> NoReturn:
glb_idx, _, _, _, _ = get_case_study_registry_objects(state)
df = pd.DataFrame.from_dict(data, orient='index')
# Round all values to 3 decimals
df = df.round(3)
# Give a name to the dataframe indexes
index_names = [f.title() for f in
ResultKey._fields] + InterfaceNode.full_key_labels()
df.index.names = index_names
# Sort the dataframe based on indexes. Not necessary, only done for debugging purposes.
df = df.sort_index(level=index_names)
# print(df)
# Create Matrix to Sankey graph
ds_flow_values = prepare_sankey_dataset(glb_idx, df)
# Convert model to XML and to DOM tree. Used by XPath expressions (Matrices and Global Indicators)
_, p_map = export_model_to_xml(glb_idx) # p_map: {(processor_full_path_name, Processor), ...}
dom_tree = etree.fromstring(_).getroottree() # dom_tree: DOM against which an XQuery can be executed
# Obtain Analysis objects: Indicators and Benchmarks
indicators = glb_idx.get(Indicator.partial_key())
matrix_indicators = glb_idx.get(MatrixIndicator.partial_key())
benchmarks = glb_idx.get(Benchmark.partial_key())
# Filter out conflicts and prepare for case insensitiveness
# Filter: Conflict!='Dismissed' and remove the column
df_without_conflicts = get_conflicts_filtered_dataframe(df)
inplace_case_sensitiveness_dataframe(df_without_conflicts)
# Calculate ScalarIndicators (Local and Global)
df_local_indicators = calculate_local_scalar_indicators(indicators, dom_tree, p_map, df_without_conflicts, global_parameters, problem_statement, state)
df_global_indicators = calculate_global_scalar_indicators(indicators, dom_tree, p_map, df_without_conflicts, df_local_indicators, global_parameters, problem_statement, state)
# Calculate benchmarks
ds_benchmarks = calculate_local_benchmarks(df_local_indicators, indicators) # Find local indicators, and related benchmarks (indic_to_benchmarks). For each group (scenario, time, scope, processor): for each indicator, frame the related benchmark and add the framing result
ds_global_benchmarks = calculate_global_benchmarks(df_global_indicators, indicators) # Find global indicators, and related benchmarks (indic_to_benchmarks). For each group (scenario, time, scope, processor): for each indicator, frame the related benchmark and add the framing result
# Prepare Benchmarks to Stakeholders DataFrame
ds_stakeholders = prepare_benchmarks_to_stakeholders(benchmarks) # Find all benchmarks. For each benchmark, create a row per stakeholder -> return the dataframe
# Prepare Matrices
# TODO df_attributes
matrices = prepare_matrix_indicators(matrix_indicators, glb_idx, dom_tree, p_map, df, df_local_indicators, dynamic_scenario)
#
# ---------------------- CREATE DATASETS AND STORE IN STATE ----------------------
#
if not dynamic_scenario:
ds_name = "flow_graph_solution"
ds_flows_name = "flow_graph_solution_edges"
ds_indicators_name = "flow_graph_solution_indicators"
df_global_indicators_name = "flow_graph_global_indicators"
ds_benchmarks_name = "flow_graph_solution_benchmarks"
ds_global_benchmarks_name = "flow_graph_solution_global_benchmarks"
ds_stakeholders_name = "benchmarks_and_stakeholders"
else:
ds_name = "dyn_flow_graph_solution"
ds_flows_name = "dyn_flow_graph_solution_edges"
ds_indicators_name = "dyn_flow_graph_solution_indicators"
df_global_indicators_name = "dyn_flow_graph_global_indicators"
ds_benchmarks_name = "dyn_flow_graph_solution_benchmarks"
ds_global_benchmarks_name = "dyn_flow_graph_solution_global_benchmarks"
ds_stakeholders_name = "benchmarks_and_stakeholders"
for d, name, label in [(df, ds_name, "Flow Graph Solver - Interfaces"),
(ds_flow_values, ds_flows_name, "Flow Graph Solver Edges - Interfaces"),
(df_local_indicators, ds_indicators_name, "Flow Graph Solver - Local Indicators"),
(df_global_indicators, df_global_indicators_name, "Flow Graph Solver - Global Indicators"),
(ds_benchmarks, ds_benchmarks_name, "Flow Graph Solver - Local Benchmarks"),
(ds_global_benchmarks, ds_global_benchmarks_name, "Flow Graph Solver - Global Benchmarks"),
(ds_stakeholders, ds_stakeholders_name, "Benchmarks - Stakeholders")
]:
if not d.empty:
datasets[name] = get_dataset(d, name, label)
# Register matrices
for n, ds in matrices.items():
datasets[n] = ds
# Create dataset and store in State (specific of "Biofuel case study")
# datasets["end_use_matrix"] = get_eum_dataset(df)
return []
def prepare_benchmarks_to_stakeholders(benchmarks: List[Benchmark]):
rows = []
for b in benchmarks:
for s in b.stakeholders:
rows.append((b.name, s))
df = pd.DataFrame(data=rows, columns=["Benchmark", "Stakeholder"])
df.set_index("Benchmark", inplace=True)
return df
def add_conflicts_to_results(existing_results: ResultDict, taken_results: ResultDict, dismissed_results: ResultDict,
conflict_type: str) -> ResultDict:
""" Iterate on the existing results and mark which of them have been involved into a conflict """
results: ResultDict = {}
for result_key, node_floatcomputed_dict in existing_results.items():
if result_key in taken_results:
assert result_key in dismissed_results
key_taken = result_key._replace(**{conflict_type: ConflictResolution.Taken})
key_dismissed = result_key._replace(**{conflict_type: ConflictResolution.Dismissed})
for node, float_computed in node_floatcomputed_dict.items():
if node in taken_results[result_key]:
results.setdefault(key_taken, {})[node] = taken_results[result_key][node]
results.setdefault(key_dismissed, {})[node] = dismissed_results[result_key][node]
else:
results.setdefault(result_key, {})[node] = float_computed
else:
results[result_key] = node_floatcomputed_dict
return results
def prepare_sankey_dataset(registry: PartialRetrievalDictionary, df: pd.DataFrame):
# Create Matrix to Sankey graph
FactorsRelationDirectedFlowObservation_list = registry.get(FactorsRelationDirectedFlowObservation.partial_key())
ds_flows = pd.DataFrame({'source': [i._source.full_name for i in FactorsRelationDirectedFlowObservation_list],
'source_processor': [i._source._processor._name for i in
FactorsRelationDirectedFlowObservation_list],
'source_level': [i._source._processor._attributes['level'] if (
'level' in i._source._processor._attributes) else None for i in
FactorsRelationDirectedFlowObservation_list],
'target': [i._target.full_name for i in FactorsRelationDirectedFlowObservation_list],
'target_processor': [i._target._processor._name for i in
FactorsRelationDirectedFlowObservation_list],
'target_level': [i._target._processor._attributes[
'level'] if 'level' in i._target._processor._attributes else None for
i in FactorsRelationDirectedFlowObservation_list],
# 'RoegenType_target': [i.target_factor._attributes['roegen_type']for i in FactorsRelationDirectedFlowObservation_list],
'Sphere_target': [i.target_factor._attributes['sphere'] for i in
FactorsRelationDirectedFlowObservation_list],
'Subsystem_target': [i._target._processor._attributes['subsystem_type'] for i in
FactorsRelationDirectedFlowObservation_list],
'System_target': [i._target._processor._attributes['processor_system'] for i in
FactorsRelationDirectedFlowObservation_list]
}
)
# I suppose that relations between processors (source-target) don't change between different scenarios.
df2 = df.reset_index()
processor = df2["Processor"].apply(lambda x: x.split("."))
df2["lastprocessor"] = [i[-1] for i in processor]
df2["source"] = df2["lastprocessor"] + ":" + df2["Interface"]
# df2 = df2[df2["Orientation"]=="Output"] It is not necessary?
ds_flow_values = pd.merge(df2, ds_flows, on="source")
ds_flow_values = ds_flow_values.drop(
columns=["Orientation", "lastprocessor", "Processor", "Interface", 'RoegenType'], axis=1)
ds_flow_values = ds_flow_values.rename(
columns={'Sphere': 'Sphere_source', 'System': 'System_source', 'Subsystem': 'Subsystem_source'})
# ds_flow_values.reset_index()
# if not ds_flows.empty:
return ds_flow_values
def get_conflicts_filtered_dataframe(in_df: pd.DataFrame) -> pd.DataFrame:
filt = in_df.index.get_level_values("Conflict").isin(["No", "Taken"])
df = in_df[filt]
df = df.droplevel("Conflict")
return df
def inplace_case_sensitiveness_dataframe(df: pd.DataFrame):
if not case_sensitive:
level_processor = df.index._get_level_number("Processor")
level_interface = df.index._get_level_number("Interface")
df.index.set_levels([df.index.levels[level_processor].str.lower(),
df.index.levels[level_interface].str.lower()],
level=[level_processor, level_interface],
verify_integrity=False,
inplace=True)
def calculate_local_scalar_indicators(indicators: List[Indicator],
serialized_model: lxml.etree._ElementTree,
p_map: Dict[str, Processor],
results: pd.DataFrame,
global_parameters: List[Parameter], problem_statement: ProblemStatement,
global_state: State) -> pd.DataFrame:
"""
Compute local scalar indicators using data from "results", and return a pd.DataFrame
:param indicators: List of indicators to compute
:param serialized_model:
:param p_map:
:param results: Result of the graph solving process ("flow_graph_solution")
:param global_parameters: List of parameter definitions
:param problem_statement: Object with a list of scenarios (defining Parameter sets)
:return: pd.DataFrame with all the local indicators
"""
# The "columns" in the index of "results" are:
# 'Scenario', 'Period', 'Scope', 'Processor', 'Interface', 'Orientation'
# Group by: 'Scenario', 'Period', 'Scope', 'Processor'
# Rearrange: 'Interface' and 'Orientation'
idx_names = ["Scenario", "Period", "Scope", "Processor"] # Changing factors
def calculate_local_scalar_indicator(indicator: Indicator) -> pd.DataFrame:
"""
:param indicator:
:return:
"""
df = results
# Parse the expression
ast = string_to_ast(indicator_expression, indicator.formula if case_sensitive else indicator.formula.lower())
# Scenario parameters
scenario_params = create_dictionary()
for scenario_name, scenario_exp_params in problem_statement.scenarios.items(): # type: str, dict
scenario_params[scenario_name] = evaluate_parameters_for_scenario(global_parameters, scenario_exp_params)
issues = []
new_df_rows_idx = []
new_df_rows_data = []
for t, g in df.groupby(idx_names): # "t", the current tuple; "g", the values of the group
params = scenario_params[t[0]]
# Elaborate a dictionary with: <interface>_<orientation>: <Value>
d = {}
# Iterate through available values in a single processor
for row, sdf in g.iterrows():
iface = row[4] # InterfaceType
orientation = row[5] # Orientation
iface_orientation = iface + "_" + orientation
if iface_orientation in d:
print(f"{iface_orientation} found to already exist!")
d[iface_orientation] = sdf["Value"]
if iface not in d:
d[iface] = sdf["Value"] # First appearance allowed, insert, others ignored
# Include parameters (with priority)
d.update(params)
if not case_sensitive:
d = {k.lower(): v for k, v in d.items()}
state = State(d)
state.set("_lcia_methods", global_state.get("_lcia_methods"))
val, variables = ast_evaluator(ast, state, None, issues)
if val is not None: # If it was possible to evaluate ... append a new row
if isinstance(val, dict): # LCIA method returns a Dict
for k, v in val.items():
l = list(t)
l.append(k)
t2 = tuple(l)
new_df_rows_idx.append(t2) # (scenario, period, scope, processor, indicator)
new_df_rows_data.append((v, None)) # (value, unit)
else:
l = list(t)
l.append(indicator.name)
t2 = tuple(l)
new_df_rows_idx.append(t2) # (scenario, period, scope, processor, indicator)
new_df_rows_data.append((val, None)) # (value, unit)
# print(issues)
# Construct pd.DataFrame with the result of the scalar indicator calculation
df2 = pd.DataFrame(data=new_df_rows_data,
index=pd.MultiIndex.from_tuples(new_df_rows_idx, names=idx_names+["Indicator"]),
columns=["Value", "Unit"])
return df2
# -- calculate_local_scalar_indicators --
idx_to_change = ["Interface"]
results.reset_index(idx_to_change, inplace=True)
# For each ScalarIndicator...
dfs = []
for si in indicators:
if si._indicator_category == IndicatorCategories.factors_expression:
dfi = calculate_local_scalar_indicator(si)
if not dfi.empty:
dfs.append(dfi)
# Restore index
results.set_index(idx_to_change, append=True, inplace=True)
if dfs:
return pd.concat(dfs)
else:
return pd.DataFrame()
def calculate_global_scalar_indicators(indicators: List[Indicator],
serialized_model: lxml.etree._ElementTree, p_map: Dict[str, Processor],
results: pd.DataFrame, local_indicators: pd.DataFrame,
global_parameters: List[Parameter], problem_statement: ProblemStatement,
state: State) -> pd.DataFrame:
"""
Compute global scalar indicators using data from "results", and return a pd.DataFrame
:param indicators: List of indicators to compute
:param serialized_model:
:param p_map:
:param results: Result of the graph solving process
:param global_parameters: List of parameter definitions
:param problem_statement: Object with a list of scenarios (defining Parameter sets)
:return: pd.DataFrame with all the global indicators
"""
# The "columns" in the index of "results" are:
# 'Scenario', 'Period', 'Scope', 'Processor', 'Interface', 'Orientation'
# Group by: 'Scenario', 'Period'
# Aggregator function uses a "Processors selector" and a "Scope parameter"
# Then, only one Interface(and its Orientation) allowed
# Filter the passed group by processor and scope, by Interface and Orientation
# Aggregate the Value column according of remaining rows
idx_names = ["Scenario", "Period"] # , "Scope"
def calculate_global_scalar_indicator(indicator: Indicator) -> pd.DataFrame:
"""
:param indicator:
:return:
"""
df = results
# Parse the expression
ast = string_to_ast(indicator_expression, indicator.formula if case_sensitive else indicator.formula.lower())
# Scenario parameters
scenario_params = create_dictionary()
for scenario_name, scenario_exp_params in problem_statement.scenarios.items(): # type: str, dict
scenario_params[scenario_name] = evaluate_parameters_for_scenario(global_parameters, scenario_exp_params)
issues = []
new_df_rows_idx = []
new_df_rows_data = []
for t, g in df.groupby(idx_names): # GROUP BY Scenario, Period
params = scenario_params[t[0]] # Obtain parameter values from scenario, in t[0]
# TODO Local indicators from the selected processors, for the selected Scenario, Period, Scope
local_indicators_extract = pd.DataFrame()
# TODO If a specific indicator or interface from a processor is mentioned, put it as a variable
# Variables for aggregator functions (which should be present in the AST)
d = dict(_processors_map=p_map,
_processors_dom=serialized_model,
_df_group=g,
_df_indicators_group=local_indicators_extract)
# Include parameters (with priority)
d.update(params)
if not case_sensitive:
d = {k.lower(): v for k, v in d.items()}
state = State(d)
val, variables = ast_evaluator(ast, state, None, issues, allowed_functions=global_functions_extended)
if val is not None:
new_df_rows_idx.append(t) # (scenario, period)
new_df_rows_data.append((indicator.name, val, None))
print(issues)
# Construct pd.DataFrame with the result of the scalar indicator calculation
df2 = pd.DataFrame(data=new_df_rows_data,
index=pd.MultiIndex.from_tuples(new_df_rows_idx, names=idx_names),
columns=["Indicator", "Value", "Unit"])
return df2
# -- calculate_global_scalar_indicators --
idx_to_change = []
results.reset_index(idx_to_change, inplace=True)
# For each ScalarIndicator...
dfs = []
for si in indicators:
if si._indicator_category == IndicatorCategories.case_study:
dfi = calculate_global_scalar_indicator(si)
if not dfi.empty:
dfs.append(dfi)
# Restore index
results.set_index(idx_to_change, append=True, inplace=True)
if dfs:
return pd.concat(dfs)
else:
return pd.DataFrame()
range_ast = {}
def get_benchmark_category(b: Benchmark, v):
c = None
for r in b.ranges.values():
cat = r["category"]
range = r["range"]
if range in range_ast:
ast = range_ast[range]
else:
ast = string_to_ast(number_interval, range)
range_ast[range] = ast
in_left = (ast["left"] == "[" and ast["number_left"] <= v) or (ast["left"] == "(" and ast["number_left"] < v)
in_right = (ast["right"] == "]" and ast["number_right"] >= v) or (ast["right"] == ")" and ast["number_right"] > v)
if in_left and in_right:
c = cat
break
return c
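# Hedged example (range syntax inferred from the checks above): for a range "[0, 0.5)" the value 0.5
# fails the right-hand test (')' requires number_right > v) while 0.0 passes both tests, so the category
# attached to that range would be returned for 0.0 but not for 0.5.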
def calculate_local_benchmarks(df_local_indicators, indicators: List[Indicator]):
"""
From the dataframe of local indicators: scenario, period, scope, processor, indicator, value
Prepare a dataframe with columns: scenario, period, scope, processor, indicator, benchmark, value
:param df_local_indicators:
:param indicators: List of all Indicators (inside it is filtered to process only Local Indicators)
:return:
"""
if df_local_indicators.empty:
return pd.DataFrame()
ind_map = create_dictionary()
for si in indicators:
if si._indicator_category == IndicatorCategories.factors_expression:
if len(si.benchmarks) > 0:
ind_map[si.name] = si
idx_names = ["Scenario", "Period", "Scope", "Processor", "Indicator"] # Changing factors
new_df_rows_idx = []
new_df_rows_data = []
indicator_column_idx = 4
value_column_idx = df_local_indicators.columns.get_loc("Value")
unit_column_idx = df_local_indicators.columns.get_loc("Unit")
for r in df_local_indicators.itertuples():
if r[0][indicator_column_idx] in ind_map:
ind = ind_map[r[0][indicator_column_idx]]
val = r[1+value_column_idx]
unit = r[1+unit_column_idx]
for b in ind.benchmarks:
c = get_benchmark_category(b, val)
if not c:
c = f"<out ({val})>"
new_df_rows_idx.append(r[0]) # (scenario, period, scope, processor, indicator)
new_df_rows_data.append((val, b.name, c))
# Construct pd.DataFrame with the result of the scalar indicator calculation
df2 = pd.DataFrame(data=new_df_rows_data,
index=
|
pd.MultiIndex.from_tuples(new_df_rows_idx, names=idx_names)
|
pandas.MultiIndex.from_tuples
|
import numpy as np
import pandas as pd
import astropy.io.fits as fits
import os
def create_folders(path_list):
for item_path in path_list:
if not os.path.exists(item_path):
os.mkdir(item_path)
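# Minimal usage sketch (paths are hypothetical):
#   create_folders(['output', 'output/Match_table'])  # creates missing folders, skips existing ones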
def match_simu_detect(simulated_outcat_path, detected_outcat_path, match_save_path):
# simulated_outcat_path, detected_outcat_path, match_save_path = outcat_name, fit_outcat_name, match_save_path
if not os.path.exists(match_save_path):
os.mkdir(match_save_path)
clump_item = simulated_outcat_path.split('_')[-1].split('.')[0]
Match_table = os.path.join(match_save_path, 'Match_table')
Miss_table = os.path.join(match_save_path, 'Miss_table')
False_table = os.path.join(match_save_path, 'False_table')
create_folders([Match_table, Miss_table, False_table])
Match_table_name = os.path.join(Match_table, 'Match_%s.txt' %clump_item)
Miss_table_name = os.path.join(Miss_table, 'Miss_%s.txt' % clump_item)
False_table_name = os.path.join(False_table, 'False_%s.txt' % clump_item)
table_s = pd.read_csv(simulated_outcat_path, sep='\t')
table_g =
|
pd.read_csv(detected_outcat_path, sep='\t')
|
pandas.read_csv
|
import numpy as np
import pandas as pd
def center(X):
for col in X.columns:
X.loc[:, col] = X.loc[:, col]-np.mean(X.loc[:, col])
return X
def add_intercept(X):
"""Add all 1's column to predictor matrix"""
X['intercept'] = [1]*X.shape[0]
def one_hot_code(df1, sens_dict):
cols = df1.columns
for c in cols:
if isinstance(df1[c][0], str):
column = df1[c]
df1 = df1.drop(columns=c)
unique_values = list(set(column))
n = len(unique_values)
if n > 2:
for i in range(n):
col_name = '{}.{}'.format(c, i)
col_i = [1 if el == unique_values[i] else 0 for el in column]
df1[col_name] = col_i
sens_dict[col_name] = sens_dict[c]
del sens_dict[c]
else:
col_name = c
col = [1 if el == unique_values[0] else 0 for el in column]
df1[col_name] = col
return df1, sens_dict
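# Hedged sketch of one_hot_code on a hypothetical 3-level string column 'color': the column is replaced
# by indicator columns 'color.0', 'color.1', 'color.2', and the sens_dict entry for the original column
# name is expected to be replaced by entries for the new indicator columns; a binary string column keeps
# its original name and becomes a single 0/1 indicator.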
# num_sens in 1:18
def clean_communities(num_sens):
"""Clean communities & crime data set."""
# Data Cleaning and Import
df = pd.read_csv('dataset/communities.csv')
df = df.fillna(0)
y = df['ViolentCrimesPerPop']
q_y = np.percentile(y, 70)  # TODO: confirm the percentile threshold (70 or 20?)
# convert y's to binary predictions on whether the neighborhood is
# especially violent
y = [np.round((1 + np.sign(s - q_y)) / 2) for s in y]
X = df.iloc[:, 0:122]
# hot code categorical variables
sens_df =
|
pd.read_csv('dataset/communities_protected.csv')
|
pandas.read_csv
|
# Author: <NAME>, PhD
#
# Email: <EMAIL>
#
# Organization: National Center for Advancing Translational Sciences (NCATS/NIH)
#
# References
#
# Ref: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.aggregate.html
# Ref: https://stackoverflow.com/questions/27298178/concatenate-strings-from-several-rows-using-pandas-groupby
# Ref: https://stackoverflow.com/questions/32117848/pandas-groupby-concatenate-strings-in-multiple-columns
# Ref; https://stackoverflow.com/questions/60260774/pandas-agg-dropping-columns-lambda-function
import pandas as pd
import sys
def extract_metadata (df, md_cols):
orig_cols = {}
length = df.shape[0]
first = True
md = []
if md_cols != 'na':
md_cols = md_cols.split (';')
for col in md_cols:
col = col.strip()
orig_cols[col] = list (map(str, df[col]))
if first:
first = False
for i in range(length):
combined_md = ''
for col in orig_cols:
md_val = orig_cols [col][i]
if md_val == 'nan':
md_val = ''
combined_md += col + ':' + md_val + ';'
md.append (combined_md[:-1])
else:
md = ['na' for i in range (length)]
return (md)
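# Illustrative sketch (column names are hypothetical): with md_cols == 'source;year' each entry of the
# returned list looks like "source:PubMed;year:2020", while md_cols == 'na' yields a list of 'na'
# strings of the same length as the dataframe.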
def merge_metadata (data_type, df, md):
if data_type == 'host_protein':
host_proteins = list(df['host_protein'])
activations = list(df['activation'])
activation_types = list(df['activation_type'])
df = pd.DataFrame ({'host_protein':host_proteins, 'activation':activations, 'activation_type':activation_types, 'metadata': md})
elif data_type == 'pathogen_protein':
pathogen_proteins = list(df['pathogen_protein'])
df = pd.DataFrame ({'pathogen_protein':pathogen_proteins, 'metadata': md})
elif data_type == 'hpi':
pathogen_proteins = list(df['pathogen_protein'])
host_proteins = list(df['host_protein'])
interactions = list(df['interaction'])
mechanisms = list(df['mechanism'])
df = pd.DataFrame ({ 'pathogen_protein':pathogen_proteins, 'host_protein':host_proteins, 'interaction': interactions, 'mechanism': mechanisms, 'metadata': md})
elif data_type == 'ppi':
host_proteins_a = list(df['host_protein_a'])
host_proteins_b = list(df['host_protein_b'])
interactions = list(df['interaction'])
mechanisms = list(df['mechanism'])
        df = pd.DataFrame({'host_protein_a': host_proteins_a, 'host_protein_b': host_proteins_b, 'interaction': interactions, 'mechanism': mechanisms, 'metadata': md})
elif data_type == 'dti':
drugs = list(df['drug_name'])
host_proteins = list(df['host_protein'])
action_types = list(df['action_type'])
df =
|
pd.DataFrame ({'drug_name':drugs, 'host_protein':host_proteins,'action_type': action_types, 'metadata': md})
|
pandas.DataFrame
|
# coding=utf-8
# Author: SafetyCulture
# Copyright: © SafetyCulture 2016
import argparse
import errno
import json
import os
import re
import shutil
import sys
import time
from builtins import input
from datetime import datetime
from datetime import timedelta
import coloredlogs
import dateutil.parser
import logging
import numpy as np
import pandas as pd
import pytz
import unicodecsv as csv
import yaml
from safetypy import safetypy as sp
from sqlalchemy import *
from sqlalchemy.exc import IntegrityError, OperationalError
from sqlalchemy.orm import sessionmaker
import csvExporter
from model import Base, set_table, SQL_HEADER_ROW, ACTIONS_HEADER_ROW, set_actions_table
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
# Possible values here are DEBUG, INFO, WARN, ERROR and CRITICAL
LOG_LEVEL = logging.DEBUG
# Stores the API access token and other configuration settings
DEFAULT_CONFIG_FILENAME = 'config.yaml'
# Wait 15 minutes by default between sync attempts
DEFAULT_SYNC_DELAY_IN_SECONDS = 900
# Only download audits older than 10 minutes
DEFAULT_MEDIA_SYNC_OFFSET_IN_SECONDS = 600
# The file that stores the "date modified" of the last successfully synced audit
SYNC_MARKER_FILENAME = 'last_successful/last_successful.txt'
# The file that stores the ISO date/time string of the last successful actions export
ACTIONS_SYNC_MARKER_FILENAME = 'last_successful/last_successful_actions_export.txt'
# the file that stores all exported actions in CSV format
ACTIONS_EXPORT_FILENAME = 'iauditor_actions.csv'
# Whether to export inactive items to CSV
DEFAULT_EXPORT_INACTIVE_ITEMS_TO_CSV = True
# When exporting actions to CSV, if property is None, print this value to CSV
EMPTY_RESPONSE = ''
# Not all Audits will actually contain an Audit Title item. For example, when Audit Title rules are set, the Audit
# Title item is not going to be included by default.
# When this item ID is specified in the custom export filename configuration, the audit_data.name property will
# be used to populate the data as it covers all cases.
AUDIT_TITLE_ITEM_ID = 'f3245d40-ea77-11e1-aff1-0800200c9a66'
# Properties kept in settings dictionary which takes its values from config.YAML
API_TOKEN = 'api_token'
CONFIG_NAME = 'config_name'
EXPORT_PATH = 'export_path'
PREFERENCES = 'preferences'
FILENAME_ITEM_ID = 'filename_item_id'
SYNC_DELAY_IN_SECONDS = 'sync_delay_in_seconds'
EXPORT_INACTIVE_ITEMS_TO_CSV = 'export_inactive_items_to_csv'
MEDIA_SYNC_OFFSET_IN_SECONDS = 'media_sync_offset_in_seconds'
EXPORT_FORMATS = 'export_formats'
TEMPLATE_IDS = 'template_ids'
SQL_TABLE = 'sql_table'
DB_TYPE = 'database_type'
DB_USER = 'database_user'
DB_PWD = '<PASSWORD>'
DB_SERVER = 'database_server'
DB_PORT = 'database_port'
DB_NAME = 'database_name'
DB_SCHEMA = 'database_schema'
USE_REAL_TEMPLATE_NAME = 'use_real_template_name'
EXPORT_ARCHIVED = 'export_archived'
EXPORT_COMPLETED = 'export_completed'
MERGE_ROWS = 'merge_rows'
ALLOW_TABLE_CREATION = 'allow_table_creation'
ACTIONS_TABLE = 'actions_table'
ACTIONS_MERGE_ROWS = 'actions_merge_rows'
# Used to create a default config file for new users
DEFAULT_CONFIG_FILE_YAML = [
'API:',
'\n token: ',
'\nconfig_name: '
'\nexport_options: ',
'\n export_path: ',
'\n export_archived: false',
'\n export_completed: true',
'\n use_real_template_name: false'
'\n filename: ',
'\n export_inactive_items: false',
'\n preferences: ',
'\n sync_delay_in_seconds: 300',
'\n media_sync_offset_in_seconds: ',
'\n template_ids: ',
'\n merge_rows: false',
'\n actions_merge_rows: false',
'\n allow_table_creation: false',
'\n sql_table: ',
'\n database_type: ',
'\n database_server: ',
'\n database_user: ',
'\n database_pwd: ',
'\n database_port: ',
'\n database_name: DB-NAME?driver=ODBC Driver 17 for SQL Server',
'\n database_schema: '
]
def log_critical_error(logger, ex, message):
"""
Logs the exception at 'CRITICAL' log level
:param logger: the logger
:param ex: exception to log
:param message: descriptive message to log details of where/why ex occurred
"""
if logger is not None:
logger.critical(message)
logger.critical(ex)
def load_setting_api_access_token(logger, config_settings):
"""
Attempt to parse API token from config settings
:param logger: the logger
:param config_settings: config settings loaded from config file
:return: API token if valid, else None
"""
try:
api_token = config_settings['API']['token']
token_is_valid = re.match('^[a-f0-9]{64}$', api_token)
if token_is_valid:
logger.debug('API token matched expected pattern')
return api_token
else:
logger.error('API token failed to match expected pattern')
return None
except Exception as ex:
log_critical_error(logger, ex, 'Exception parsing API token from config.yaml')
return None
def docker_load_setting_api_access_token(logger, api_token):
"""
Attempt to parse API token from config settings
:param logger: the logger
:param config_settings: config settings loaded from config file
:return: API token if valid, else None
"""
try:
token_is_valid = re.match('^[a-f0-9]{64}$', api_token)
if token_is_valid:
logger.debug('API token matched expected pattern')
return api_token
else:
logger.error('API token failed to match expected pattern')
return None
except Exception as ex:
log_critical_error(logger, ex, 'Exception parsing API token from config.yaml')
return None
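# A minimal sketch of the token check used above: the pattern accepts exactly 64
# lowercase hexadecimal characters. The sample value is a placeholder, not a real token.
def _demo_token_pattern():
    sample = 'a' * 64
    return bool(re.match('^[a-f0-9]{64}$', sample))  # True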
def load_export_inactive_items_to_csv(logger, config_settings):
"""
Attempt to parse export_inactive_items from config settings. Value of true or false is expected.
True means the CSV exporter will include inactive items. False means the CSV exporter will exclude inactive items.
:param logger: the logger
:param config_settings: config settings loaded from config file
:return: value of export_inactive_items_to_csv if valid, else DEFAULT_EXPORT_INACTIVE_ITEMS_TO_CSV
"""
try:
if config_settings['export_options']['merge_rows'] is True:
logger.info('Merge rows is enabled, turning on the export of inactive items.')
export_inactive_items_to_csv = True
else:
export_inactive_items_to_csv = config_settings['export_options']['export_inactive_items']
if not isinstance(export_inactive_items_to_csv, bool):
logger.info('Invalid export_inactive_items value from configuration file, defaulting to true')
export_inactive_items_to_csv = DEFAULT_EXPORT_INACTIVE_ITEMS_TO_CSV
return export_inactive_items_to_csv
except Exception as ex:
log_critical_error(logger, ex,
'Exception parsing export_inactive_items from the configuration file, defaulting to {0}'.
format(str(DEFAULT_EXPORT_INACTIVE_ITEMS_TO_CSV)))
return DEFAULT_EXPORT_INACTIVE_ITEMS_TO_CSV
def load_setting_sync_delay(logger, config_settings):
"""
Attempt to parse delay between sync loops from config settings
:param logger: the logger
:param config_settings: config settings loaded from config file
:return: extracted sync delay if valid, else DEFAULT_SYNC_DELAY_IN_SECONDS
"""
try:
sync_delay = config_settings['export_options']['sync_delay_in_seconds']
sync_delay_is_valid = re.match('^[0-9]+$', str(sync_delay))
if sync_delay_is_valid and sync_delay >= 0:
if sync_delay < DEFAULT_SYNC_DELAY_IN_SECONDS:
                logger.info('Sync delay is less than the minimum recommended value of {0} seconds'.format(
                    DEFAULT_SYNC_DELAY_IN_SECONDS))
return sync_delay
else:
logger.info('Invalid sync_delay_in_seconds from the configuration file, defaulting to {0}'.format(str(
DEFAULT_SYNC_DELAY_IN_SECONDS)))
return DEFAULT_SYNC_DELAY_IN_SECONDS
except Exception as ex:
log_critical_error(logger, ex,
'Exception parsing sync_delay from the configuration file, defaulting to {0}'.format(str(
DEFAULT_SYNC_DELAY_IN_SECONDS)))
return DEFAULT_SYNC_DELAY_IN_SECONDS
def load_setting_preference_mapping(logger, config_settings):
"""
Attempt to parse preference settings from config settings
:param logger: the logger
:param config_settings: config settings loaded from config file
:return: export preference mapping if valid, else None
"""
try:
preference_mapping = {}
preference_settings = config_settings['export_options']['preferences']
if preference_settings is not None:
preference_lines = preference_settings.split(' ')
for preference in preference_lines:
template_id = preference[:preference.index(':')]
if template_id not in preference_mapping.keys():
preference_mapping[template_id] = preference
return preference_mapping
except KeyError:
logger.debug('No preference key in the configuration file')
return None
except Exception as ex:
log_critical_error(logger, ex, 'Exception getting preferences from the configuration file')
return None
def load_setting_export_path(logger, config_settings):
"""
Attempt to extract export path from config settings
:param config_settings: config settings loaded from config file
:param logger: the logger
:return: export path, None if path is invalid or missing
"""
try:
export_path = config_settings['export_options']['export_path']
if export_path is not None:
return export_path
else:
return None
except Exception as ex:
log_critical_error(logger, ex, 'Exception getting export path from the configuration file')
return None
def load_setting_media_sync_offset(logger, config_settings):
"""
:param logger: the logger
:param config_settings: config settings loaded from config file
:return: media sync offset parsed from file, else default media sync offset
defined as global constant
"""
try:
media_sync_offset = config_settings['export_options']['media_sync_offset_in_seconds']
if media_sync_offset is None or media_sync_offset < 0 or not isinstance(media_sync_offset, int):
media_sync_offset = DEFAULT_MEDIA_SYNC_OFFSET_IN_SECONDS
return media_sync_offset
except Exception as ex:
log_critical_error(logger, ex, 'Exception parsing media sync offset from config file')
return DEFAULT_MEDIA_SYNC_OFFSET_IN_SECONDS
def configure_logging(path_to_log_directory):
"""
Configure logger
:param path_to_log_directory: path to directory to write log file in
:return:
"""
log_filename = datetime.now().strftime('%Y-%m-%d') + '.log'
exporter_logger = logging.getLogger('exporter_logger')
exporter_logger.setLevel(LOG_LEVEL)
formatter = logging.Formatter('%(asctime)s : %(levelname)s : %(message)s')
fh = logging.FileHandler(filename=os.path.join(path_to_log_directory, log_filename))
fh.setLevel(LOG_LEVEL)
fh.setFormatter(formatter)
exporter_logger.addHandler(fh)
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(LOG_LEVEL)
sh.setFormatter(formatter)
exporter_logger.addHandler(sh)
def create_directory_if_not_exists(logger, path):
"""
Creates 'path' if it does not exist
If creation fails, an exception will be thrown
:param logger: the logger
:param path: the path to ensure it exists
"""
try:
os.makedirs(path)
except OSError as ex:
if ex.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
log_critical_error(logger, ex, 'An error happened trying to create ' + path)
raise
def save_web_report_link_to_file(logger, export_dir, web_report_data):
"""
Write Web Report links to 'web-report-links.csv' on disk at specified location
Any existing file with the same name will be appended to
:param logger: the logger
:param export_dir: path to directory for exports
:param web_report_data: Data to write to CSV: Template ID, Template name, Audit ID, Audit name, Web Report link
"""
if not os.path.exists(export_dir):
logger.info("Creating directory at {0} for Web Report links.".format(export_dir))
os.makedirs(export_dir)
file_path = os.path.join(export_dir, 'web-report-links.csv')
if os.path.isfile(file_path):
logger.info('Appending Web Report link to ' + file_path)
try:
with open(file_path, 'ab') as web_report_link_csv:
wr = csv.writer(web_report_link_csv, dialect='excel', quoting=csv.QUOTE_ALL)
wr.writerow(web_report_data)
web_report_link_csv.close()
except Exception as ex:
log_critical_error(logger, ex, 'Exception while writing' + file_path + ' to file')
else:
logger.info('Creating ' + file_path)
logger.info('Appending web report to ' + file_path)
try:
with open(file_path, 'wb') as web_report_link_csv:
wr = csv.writer(web_report_link_csv, dialect='excel', quoting=csv.QUOTE_ALL)
wr.writerow(['Template ID', 'Template Name', 'Audit ID', 'Audit Name', 'Web Report Link'])
wr.writerow(web_report_data)
web_report_link_csv.close()
except Exception as ex:
log_critical_error(logger, ex, 'Exception while writing' + file_path + ' to file')
def save_exported_actions_to_db(logger, actions_array, settings, get_started):
"""
    Write Actions to the SQL database configured in settings
    :param get_started: objects returned by sql_setup (status, engine, connection string, metadata, table class)
    :param logger: the logger
    :param settings: settings from command line and configuration file
    :param actions_array: Array of action objects to be saved to the database
"""
engine = get_started[1]
actions_db = get_started[4]
if not actions_array:
logger.info('No actions returned after ' + get_last_successful_actions_export(logger))
return
logger.info('Exporting ' + str(len(actions_array)) + ' actions')
Session = sessionmaker(bind=engine)
session = Session()
bulk_actions = []
for action in actions_array:
action_as_list = transform_action_object_to_list(action)
bulk_actions.append(action_as_list)
df = pd.DataFrame.from_records(bulk_actions, columns=ACTIONS_HEADER_ROW)
df['DatePK'] = pd.to_datetime(df['modifiedDatetime']).values.astype(np.int64) // 10 ** 6
df_dict = df.to_dict(orient='records')
try:
session.bulk_insert_mappings(actions_db, df_dict)
except KeyboardInterrupt:
logger.warning('Interrupted by user, exiting.')
session.rollback()
sys.exit(0)
except OperationalError as ex:
session.rollback()
logger.warning('Something went wrong. Here are the details: {}'.format(ex))
except IntegrityError as ex:
# If the bulk insert fails, we do a slower merge
logger.warning('Duplicate found, attempting to update')
session.rollback()
for action in df_dict:
row_to_dict = actions_db(**action)
session.merge(row_to_dict)
logger.debug('Row successfully updated.')
session.commit()
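# A minimal sketch of the DatePK conversion used above: an ISO timestamp becomes
# epoch milliseconds. The timestamp below is illustrative only.
def _demo_datepk_conversion():
    ts = pd.Series(['2020-01-01T00:00:00.000Z'])
    return pd.to_datetime(ts).values.astype(np.int64) // 10 ** 6  # array([1577836800000])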
def save_exported_actions_to_csv_file(logger, export_path, actions_array):
"""
Write Actions to 'iauditor_actions.csv' on disk at specified location
:param logger: the logger
:param export_path: path to directory for exports
:param actions_array: Array of action objects to be converted to CSV and saved to disk
"""
if not actions_array:
logger.info('No actions returned after ' + get_last_successful_actions_export(logger))
return
filename = ACTIONS_EXPORT_FILENAME
file_path = os.path.join(export_path, filename)
logger.info('Exporting ' + str(len(actions_array)) + ' actions to ' + file_path)
if os.path.isfile(file_path):
actions_csv = open(file_path, 'ab')
actions_csv_wr = csv.writer(actions_csv, dialect='excel', quoting=csv.QUOTE_ALL)
else:
actions_csv = open(file_path, 'wb')
actions_csv_wr = csv.writer(actions_csv, dialect='excel', quoting=csv.QUOTE_ALL)
actions_csv_wr.writerow([
'actionId', 'description', 'assignee', 'priority', 'priorityCode', 'status', 'statusCode', 'dueDatetime',
'audit', 'auditId', 'linkedToItem', 'linkedToItemId', 'creatorName', 'creatorId', 'createdDatetime',
'modifiedDatetime', 'completedDatetime'
])
for action in actions_array:
actions_list = transform_action_object_to_list(action)
actions_csv_wr.writerow(actions_list)
del actions_list
def transform_action_object_to_list(action):
priority_codes = {0: 'None', 10: 'Low', 20: 'Medium', 30: 'High'}
status_codes = {0: 'To Do', 10: 'In Progress', 50: 'Done', 60: 'Cannot Do'}
get_json_property = csvExporter.get_json_property
actions_list = [get_json_property(action, 'action_id'), get_json_property(action, 'description')]
assignee_list = []
for assignee in get_json_property(action, 'assignees'):
assignee_list.append(get_json_property(assignee, 'name'))
actions_list.append(", ".join(assignee_list))
actions_list.append(get_json_property(priority_codes, get_json_property(action, 'priority')))
actions_list.append(get_json_property(action, 'priority'))
actions_list.append(get_json_property(status_codes, get_json_property(action, 'status')))
actions_list.append(get_json_property(action, 'status'))
actions_list.append(get_json_property(action, 'due_at'))
actions_list.append(get_json_property(action, 'audit', 'name'))
actions_list.append(get_json_property(action, 'audit', 'audit_id'))
actions_list.append(get_json_property(action, 'item', 'label'))
actions_list.append(get_json_property(action, 'item', 'item_id'))
actions_list.append(get_json_property(action, 'created_by', 'name'))
actions_list.append(get_json_property(action, 'created_by', 'user_id'))
actions_list.append(get_json_property(action, 'created_at'))
actions_list.append(get_json_property(action, 'modified_at'))
actions_list.append(get_json_property(action, 'completed_at'))
return actions_list
def save_exported_media_to_file(logger, export_dir, media_file, filename, extension):
"""
Write exported media item to disk at specified location with specified file name.
Any existing file with the same name will be overwritten.
:param logger: the logger
:param export_dir: path to directory for exports
:param media_file: media file to write to disc
:param filename: filename to give exported image
:param extension: extension to give exported image
"""
if not os.path.exists(export_dir):
logger.info("Creating directory at {0} for media files.".format(export_dir))
os.makedirs(export_dir)
file_path = os.path.join(export_dir, filename + '.' + extension)
if os.path.isfile(file_path):
logger.info('Overwriting existing report at ' + file_path)
try:
with open(file_path, 'wb') as out_file:
shutil.copyfileobj(media_file.raw, out_file)
del media_file
except Exception as ex:
log_critical_error(logger, ex, 'Exception while writing' + file_path + ' to file')
def save_exported_document(logger, export_dir, export_doc, filename, extension):
"""
Write exported document to disk at specified location with specified file name.
Any existing file with the same name will be overwritten.
:param logger: the logger
:param export_dir: path to directory for exports
:param export_doc: export document to write
:param filename: filename to give exported document
:param extension: extension to give exported document
"""
file_path = os.path.join(export_dir, filename + '.' + extension)
if os.path.isfile(file_path):
logger.info('Overwriting existing report at ' + file_path)
try:
with open(file_path, 'wb') as export_file:
export_file.write(export_doc)
except Exception as ex:
log_critical_error(logger, ex, 'Exception while writing' + file_path + ' to file')
def update_sync_marker_file(date_modified):
"""
Replaces the contents of the sync marker file with the most
recent modified_at date time value from audit JSON data
:param date_modified: modified_at value from most recently downloaded audit JSON
:return:
"""
with open(SYNC_MARKER_FILENAME, 'w') as sync_marker_file:
sync_marker_file.write(date_modified)
def get_last_successful(logger):
"""
Read the date and time of the last successfully exported audit data from the sync marker file
:param logger: the logger
:return: A datetime value (or 2000-01-01 if syncing since the 'beginning of time')
"""
if os.path.isfile(SYNC_MARKER_FILENAME):
with open(SYNC_MARKER_FILENAME, 'r+') as last_run:
last_successful = last_run.readlines()[0]
last_successful = last_successful.strip()
else:
beginning_of_time = '2000-01-01T00:00:00.000Z'
last_successful = beginning_of_time
create_directory_if_not_exists(logger, 'last_successful')
with open(SYNC_MARKER_FILENAME, 'w') as last_run:
last_run.write(last_successful)
logger.info('Searching for audits since the beginning of time: ' + beginning_of_time)
return last_successful
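# A minimal sketch of the sync marker contents: a single ISO 8601 UTC timestamp in
# the same format written elsewhere in this module.
def _demo_sync_marker_format():
    return datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.000Z')  # e.g. '2000-01-01T00:00:00.000Z'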
def update_actions_sync_marker_file(logger, date_modified):
"""
    Replaces the contents of the actions sync marker file with the date/time string provided
:param logger: The logger
:param date_modified: ISO string
"""
try:
with open(ACTIONS_SYNC_MARKER_FILENAME, 'w') as actions_sync_marker_file:
actions_sync_marker_file.write(date_modified)
except Exception as ex:
log_critical_error(logger, ex, 'Unable to open ' + ACTIONS_SYNC_MARKER_FILENAME + ' for writing')
exit()
def get_last_successful_actions_export(logger):
"""
    Reads the actions sync marker file to determine the date and time of the last successfully exported action.
The actions sync marker file is expected to contain a single ISO formatted datetime string.
:param logger: the logger
:return: A datetime value (or 2000-01-01 if syncing since the 'beginning of time')
"""
if os.path.isfile(ACTIONS_SYNC_MARKER_FILENAME):
with open(ACTIONS_SYNC_MARKER_FILENAME, 'r+') as last_run:
last_successful_actions_export = last_run.readlines()[0]
logger.info('Searching for actions modified after ' + last_successful_actions_export)
else:
beginning_of_time = '2000-01-01T00:00:00.000Z'
last_successful_actions_export = beginning_of_time
with open(ACTIONS_SYNC_MARKER_FILENAME, 'w') as last_run:
last_run.write(last_successful_actions_export)
logger.info('Searching for actions since the beginning of time: ' + beginning_of_time)
return last_successful_actions_export
def parse_export_filename(audit_json, filename_item_id):
"""
Get 'response' value of specified header item to use for export file name
    :param audit_json: audit JSON containing header_items and audit_data
:param filename_item_id: item_id from config settings
:return: 'response' value of specified item from audit JSON
"""
if filename_item_id is None:
return None
    # Not all Audits will actually contain an Audit Title item. For example, when Audit Title rules are set,
# the Audit Title item is not going to be included by default.
# When this item ID is specified in the custom export filename configuration, the audit_data.name property
# will be used to populate the data as it covers all cases.
if filename_item_id == AUDIT_TITLE_ITEM_ID and 'audit_data' in audit_json.keys() \
and 'name' in audit_json['audit_data'].keys():
return audit_json['audit_data']['name'].replace('/','_')
for item in audit_json['header_items']:
if item['item_id'] == filename_item_id:
if 'responses' in item.keys():
if 'text' in item['responses'].keys() and item['responses']['text'].strip() != '':
return item['responses']['text']
return None
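# A minimal sketch of parse_export_filename(): the audit JSON fragment and the
# 'item-123' item_id below are made-up placeholders.
def _demo_parse_export_filename():
    audit_json = {'audit_data': {'name': 'Site Walkthrough 01/02'},
                  'header_items': [{'item_id': 'item-123',
                                    'responses': {'text': 'Warehouse A'}}]}
    return (parse_export_filename(audit_json, 'item-123'),           # 'Warehouse A'
            parse_export_filename(audit_json, AUDIT_TITLE_ITEM_ID))  # 'Site Walkthrough 01_02'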
def get_filename_item_id(logger, config_settings):
"""
Attempt to parse item_id for file naming from config settings
:param logger: the logger
:param config_settings: config settings loaded from config file
:return: item_id extracted from config_settings if valid, else None
"""
try:
filename_item_id = config_settings['export_options']['filename']
if filename_item_id is not None:
return filename_item_id
else:
return None
except Exception as ex:
log_critical_error(logger, ex, 'Exception retrieving setting "filename" from the configuration file')
return None
def configure_logger():
"""
Declare and validate existence of log directory; create and configure logger object
:return: instance of configured logger object
"""
log_dir = os.path.join(os.getcwd(), 'log')
create_directory_if_not_exists(None, log_dir)
configure_logging(log_dir)
logger = logging.getLogger('exporter_logger')
coloredlogs.install(logger=logger)
return logger
def set_env_defaults(name, env_var, logger):
# if env_var is None or '':
if not env_var:
if name == 'CONFIG_NAME':
logger.error('You must set the CONFIG_NAME')
sys.exit()
if name == 'DB_SCHEMA':
env_var = 'dbo'
if name.startswith('DB_'):
env_var = None
if name == 'SQL_TABLE':
env_var = None
if name == 'TEMPLATE_IDS':
env_var = None
else:
            env_var = False
print(name, ' set to ', env_var)
return env_var
def load_config_settings(logger, path_to_config_file, docker_enabled):
"""
Load config settings from config file
:param logger: the logger
:param path_to_config_file: location of config file
:return: settings dictionary containing values for:
api_token, export_path, preferences,
filename_item_id, sync_delay_in_seconds loaded from
config file, media_sync_offset_in_seconds
"""
if docker_enabled is True:
settings = {
API_TOKEN: docker_load_setting_api_access_token(logger, os.environ['API_TOKEN']),
EXPORT_PATH: None,
# PREFERENCES: load_setting_preference_mapping(logger, config_settings),
# FILENAME_ITEM_ID: get_filename_item_id(logger, config_settings),
SYNC_DELAY_IN_SECONDS: int(os.environ['SYNC_DELAY_IN_SECONDS']),
# EXPORT_INACTIVE_ITEMS_TO_CSV: load_export_inactive_items_to_csv(logger, config_settings),
MEDIA_SYNC_OFFSET_IN_SECONDS: int(os.environ['MEDIA_SYNC_OFFSET_IN_SECONDS']),
TEMPLATE_IDS: set_env_defaults('TEMPLATE_IDS', os.environ['TEMPLATE_IDS'], logger),
SQL_TABLE: set_env_defaults('SQL_TABLE', os.environ['SQL_TABLE'], logger),
DB_TYPE: set_env_defaults('DB_TYPE', os.environ['DB_TYPE'], logger),
DB_USER: set_env_defaults('DB_USER', os.environ['DB_USER'], logger),
DB_PWD: set_env_defaults('DB_PWD', os.environ['DB_PWD'], logger),
DB_SERVER: set_env_defaults('DB_SERVER', os.environ['DB_SERVER'], logger),
DB_PORT: set_env_defaults('DB_PORT', os.environ['DB_PORT'], logger),
DB_NAME: set_env_defaults('DB_NAME', os.environ['DB_NAME'], logger),
DB_SCHEMA: set_env_defaults('DB_SCHEMA', os.environ['DB_SCHEMA'], logger),
USE_REAL_TEMPLATE_NAME: set_env_defaults('USE_REAL_TEMPLATE_NAME', os.environ['USE_REAL_TEMPLATE_NAME'],
logger),
CONFIG_NAME: set_env_defaults('CONFIG_NAME', os.environ['CONFIG_NAME'], logger),
EXPORT_ARCHIVED: set_env_defaults('EXPORT_ARCHIVED', os.environ['EXPORT_ARCHIVED'], logger),
EXPORT_COMPLETED: set_env_defaults('EXPORT_COMPLETED', os.environ['EXPORT_COMPLETED'], logger),
MERGE_ROWS: set_env_defaults('MERGE_ROWS', os.environ['MERGE_ROWS'], logger),
ALLOW_TABLE_CREATION: set_env_defaults('ALLOW_TABLE_CREATION', os.environ['ALLOW_TABLE_CREATION'], logger),
ACTIONS_TABLE: 'iauditor_actions_data',
ACTIONS_MERGE_ROWS: set_env_defaults('ACTIONS_MERGE_ROWS', os.environ['ACTIONS_MERGE_ROWS'], logger),
PREFERENCES: None,
FILENAME_ITEM_ID: None,
EXPORT_INACTIVE_ITEMS_TO_CSV: None
}
else:
config_settings = yaml.safe_load(open(path_to_config_file))
if config_settings['config_name'] is None:
logger.info('The Config Name has been left blank, defaulting to iauditor.')
config_name = 'iauditor'
elif ' ' in config_settings['config_name']:
config_name = config_settings['config_name'].replace(' ', '_')
else:
config_name = config_settings['config_name']
if re.match("^[A-Za-z0-9_-]*$", config_name):
config_name = config_name
else:
logger.critical('Config name can only contain letters, numbers, hyphens or underscores.')
sys.exit()
if 'allow_table_creation' in config_settings['export_options']:
table_creation = config_settings['export_options']['allow_table_creation']
else:
table_creation = False
if load_setting_export_path(logger, config_settings) is None:
export_path = os.path.join('exports', config_name)
else:
export_path = os.path.join(load_setting_export_path(logger, config_settings), config_name)
settings = {
API_TOKEN: load_setting_api_access_token(logger, config_settings),
EXPORT_PATH: export_path,
PREFERENCES: load_setting_preference_mapping(logger, config_settings),
FILENAME_ITEM_ID: get_filename_item_id(logger, config_settings),
SYNC_DELAY_IN_SECONDS: load_setting_sync_delay(logger, config_settings),
EXPORT_INACTIVE_ITEMS_TO_CSV: load_export_inactive_items_to_csv(logger, config_settings),
MEDIA_SYNC_OFFSET_IN_SECONDS: load_setting_media_sync_offset(logger, config_settings),
TEMPLATE_IDS: config_settings['export_options']['template_ids'],
SQL_TABLE: config_settings['export_options']['sql_table'],
DB_TYPE: config_settings['export_options']['database_type'],
DB_USER: config_settings['export_options']['database_user'],
DB_PWD: config_settings['export_options']['database_pwd'],
DB_SERVER: config_settings['export_options']['database_server'],
DB_PORT: config_settings['export_options']['database_port'],
DB_NAME: config_settings['export_options']['database_name'],
DB_SCHEMA: config_settings['export_options']['database_schema'],
USE_REAL_TEMPLATE_NAME: config_settings['export_options']['use_real_template_name'],
CONFIG_NAME: config_name,
EXPORT_ARCHIVED: config_settings['export_options']['export_archived'],
EXPORT_COMPLETED: config_settings['export_options']['export_completed'],
MERGE_ROWS: config_settings['export_options']['merge_rows'],
ALLOW_TABLE_CREATION: table_creation,
ACTIONS_TABLE: config_settings['export_options']['sql_table']+'_actions',
ACTIONS_MERGE_ROWS: config_settings['export_options']['actions_merge_rows']
}
return settings
def configure(logger, path_to_config_file, export_formats, docker_enabled):
"""
instantiate and configure logger, load config settings from file, instantiate SafetyCulture SDK
:param logger: the logger
:param path_to_config_file: path to config file
:param export_formats: desired export formats
:return: instance of SafetyCulture SDK object, config settings
"""
config_settings = load_config_settings(logger, path_to_config_file, docker_enabled)
config_settings[EXPORT_FORMATS] = export_formats
sc_client = sp.SafetyCulture(config_settings[API_TOKEN])
if config_settings[EXPORT_PATH] is not None:
if config_settings[CONFIG_NAME] is not None:
create_directory_if_not_exists(logger, os.path.join(config_settings[EXPORT_PATH]))
else:
logger.error("You must set the config_name in your config file before continuing.")
sys.exit()
else:
logger.info('No export path was found in ' + path_to_config_file + ', defaulting to /exports')
config_settings[EXPORT_PATH] = os.path.join(os.getcwd(), 'exports')
if config_settings[CONFIG_NAME] is not None:
create_directory_if_not_exists(logger, os.path.join(config_settings[EXPORT_PATH]))
else:
logger.error("You must set the config_name in your config file before continuing.")
sys.exit()
return sc_client, config_settings
def parse_command_line_arguments(logger):
"""
Parse command line arguments received, if any
Print example if invalid arguments are passed
:param logger: the logger
:return: config_filename passed as argument if any, else DEFAULT_CONFIG_FILENAME
export_formats passed as argument if any, else 'pdf'
list_preferences if passed as argument, else None
do_loop False if passed as argument, else True
"""
parser = argparse.ArgumentParser()
parser.add_argument('--config', help='config file to use, defaults to ' + DEFAULT_CONFIG_FILENAME)
parser.add_argument('--docker', nargs='*', help='Switches settings to ENV variables for use with docker.')
parser.add_argument('--format', nargs='*', help='formats to download, valid options are pdf, '
'json, docx, csv, media, web-report-link, actions, pickle, sql')
parser.add_argument('--list_preferences', nargs='*', help='display all preferences, or restrict to specific'
' template_id if supplied as additional argument')
parser.add_argument('--loop', nargs='*', help='execute continuously until interrupted')
parser.add_argument('--setup', action='store_true', help='Automatically create new directory containing the '
                        'necessary config file. '
                        'Directory will be named iauditor_exports_folder, and will '
'be placed in your current directory')
args = parser.parse_args()
if args.setup:
initial_setup(logger)
exit()
if args.config is not None:
config_filename = os.path.join('configs', args.config)
print(args.config)
if os.path.isfile(config_filename):
config_filename = os.path.join('configs', args.config)
logger.debug(config_filename + ' passed as config argument')
else:
logger.error(config_filename + ' is either missing or corrupt.')
sys.exit(1)
else:
config_filename = os.path.join('configs', DEFAULT_CONFIG_FILENAME)
export_formats = ['pdf']
if args.format is not None and len(args.format) > 0:
valid_export_formats = ['json', 'docx', 'pdf', 'csv', 'media', 'web-report-link', 'actions', 'actions-sql', 'sql', 'pickle', 'doc_creation']
export_formats = []
for option in args.format:
if option not in valid_export_formats:
print('{0} is not a valid export format. Valid options are pdf, json, docx, csv, web-report-link, '
                  'media, actions, pickle, actions-sql, or sql'.format(option))
logger.info('invalid export format argument: {0}'.format(option))
else:
export_formats.append(option)
loop_enabled = True if args.loop is not None else False
docker_enabled = True if args.docker is not None else False
return config_filename, export_formats, args.list_preferences, loop_enabled, docker_enabled
def initial_setup(logger):
"""
    Creates a new directory in the current working directory called 'iauditor_exports_folder'. If the folder
    already exists, the setup script notifies the user that it exists and exits. A default config file containing
    the user's API token is placed in the directory; the user is asked for their iAuditor credentials in order to
    generate that API token.
:param logger: the logger
"""
# setup variables
current_directory_path = os.getcwd()
exports_folder_name = 'iauditor_exports_folder'
# get token, set token
token = sp.get_user_api_token(logger)
if not token:
logger.critical("Problem generating API token.")
exit()
DEFAULT_CONFIG_FILE_YAML[1] = '\n token: ' + str(token)
# create new directory
create_directory_if_not_exists(logger, exports_folder_name)
# write config file
path_to_config_file = os.path.join(current_directory_path, exports_folder_name, 'configs', 'config.yaml')
create_directory_if_not_exists(logger, os.path.join(current_directory_path, exports_folder_name,'configs'))
if os.path.exists(path_to_config_file):
logger.critical("Config file already exists at {0}".format(path_to_config_file))
logger.info("Please remove or rename the existing config file, then retry this setup program.")
logger.info('Exiting.')
exit()
try:
config_file = open(path_to_config_file, 'w')
config_file.writelines(DEFAULT_CONFIG_FILE_YAML)
except Exception as ex:
log_critical_error(logger, ex, "Problem creating " + path_to_config_file)
logger.info("Exiting.")
exit()
logger.info("Default config file successfully created at {0}.".format(path_to_config_file))
os.chdir(exports_folder_name)
choice = input('Would you like to start exporting audits from:\n 1. The beginning of time\n '
'2. Now\n Enter 1 or 2: ')
if choice == '1':
logger.info('Audit exporting set to start from earliest audits available')
get_last_successful(logger)
else:
now = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.000Z')
create_directory_if_not_exists(logger, 'last_successful')
update_sync_marker_file(now)
logger.info('Audit exporting set to start from ' + now)
exit()
def show_preferences_and_exit(list_preferences, sc_client):
"""
Display preferences to stdout and exit
:param list_preferences: empty list for all preference, list of template_ids if specified at command line
:param sc_client: instance of SDK object, used to retrieve preferences
"""
row_boundary = '|' + '-' * 136 + '|'
row_format = '|{0:<37} | {1:<40} | {2:<10}| {3:<10}|'
print(row_boundary)
print(row_format.format('Preference ID', 'Preference Name', 'Global', 'Default'))
print(row_boundary)
if len(list_preferences) > 0:
for template_id in list_preferences:
preferences = sc_client.get_preference_ids(template_id)
for preference in preferences['preferences']:
preference_id = str(preference['id'])
preference_name = str(preference['label'])[:35]
is_global = str(preference['is_global'])
is_default = str(preference['is_default'])
print(row_format.format(preference_id, preference_name, is_global, is_default))
print(row_boundary)
sys.exit()
else:
preferences = sc_client.get_preference_ids()
for preference in preferences['preferences']:
preference_id = str(preference['id'])
preference_name = str(preference['label'])[:35]
is_global = str(preference['is_global'])
is_default = str(preference['is_default'])
print(row_format.format(preference_id, preference_name, is_global, is_default))
print(row_boundary)
sys.exit(0)
def export_actions(logger, settings, sc_client, get_started):
"""
Export all actions created after date specified
:param logger: The logger
:param settings: Settings from command line and configuration file
:param sc_client: instance of safetypy.SafetyCulture class
"""
logger.info('Exporting iAuditor actions')
last_successful_actions_export = get_last_successful_actions_export(logger)
actions_array = sc_client.get_audit_actions(last_successful_actions_export)
if actions_array is not None:
logger.info('Found ' + str(len(actions_array)) + ' actions')
if not get_started:
save_exported_actions_to_csv_file(logger, settings[EXPORT_PATH], actions_array)
else:
save_exported_actions_to_db(logger, actions_array, settings, get_started)
utc_iso_datetime_now = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.000Z')
update_actions_sync_marker_file(logger, utc_iso_datetime_now)
def sync_exports(logger, settings, sc_client):
"""
Perform sync, exporting documents modified since last execution
:param logger: the logger
:param settings: Settings from command line and configuration file
:param sc_client: Instance of SDK object
"""
get_started = None
if settings[EXPORT_ARCHIVED] is not None:
archived_setting = settings[EXPORT_ARCHIVED]
else:
archived_setting = False
if settings[EXPORT_COMPLETED] is not None:
completed_setting = settings[EXPORT_COMPLETED]
else:
completed_setting = True
if 'actions-sql' in settings[EXPORT_FORMATS]:
get_started = sql_setup(logger, settings, 'actions')
export_actions(logger, settings, sc_client, get_started)
if 'actions' in settings[EXPORT_FORMATS]:
get_started = None
export_actions(logger, settings, sc_client, get_started)
if not bool(
set(settings[EXPORT_FORMATS]) & {'pdf', 'docx', 'csv', 'media', 'web-report-link', 'json', 'sql', 'pickle',
'doc_creation'}):
return
last_successful = get_last_successful(logger)
if settings[TEMPLATE_IDS] is not None:
if settings[TEMPLATE_IDS].endswith('.txt'):
file = settings[TEMPLATE_IDS].strip()
f = open(file, "r")
ids_to_search = []
for id in f:
ids_to_search.append(id.strip())
elif len(settings[TEMPLATE_IDS]) != 1:
ids_to_search = settings[TEMPLATE_IDS].split(",")
else:
ids_to_search = [settings[TEMPLATE_IDS][0]]
list_of_audits = sc_client.discover_audits(modified_after=last_successful, template_id=ids_to_search, completed=completed_setting, archived=archived_setting)
else:
list_of_audits = sc_client.discover_audits(modified_after=last_successful,completed=completed_setting,archived=archived_setting)
if list_of_audits is not None:
logger.info(str(list_of_audits['total']) + ' audits discovered')
export_count = 1
export_total = list_of_audits['total']
get_started = 'ignored'
for export_format in settings[EXPORT_FORMATS]:
if export_format == 'sql':
get_started = sql_setup(logger, settings, 'audit')
elif export_format in ['pickle']:
get_started = ['complete', 'complete']
if export_format == 'pickle' and os.path.isfile('{}.pkl'.format(settings[SQL_TABLE])):
logger.error(
'The Pickle file already exists. Appending to Pickles isn\'t currently possible, please '
'remove {}.pkl and try again.'.format(
settings[SQL_TABLE]))
sys.exit(0)
for audit in list_of_audits['audits']:
logger.info('Processing audit (' + str(export_count) + '/' + str(export_total) + ')')
process_audit(logger, settings, sc_client, audit, get_started)
export_count += 1
def check_if_media_sync_offset_satisfied(logger, settings, audit):
"""
Check if the media sync offset is satisfied. The media sync offset is a duration in seconds specified in the
configuration file. This duration is the amount of time audit media is given to sync up with SafetyCulture servers
before this tool exports the audit data.
:param logger: The logger
:param settings: Settings from command line and configuration file
:param audit: Audit JSON
:return: Boolean - True if the media sync offset is satisfied, otherwise, returns false.
"""
modified_at = dateutil.parser.parse(audit['modified_at'])
now = datetime.utcnow()
elapsed_time_difference = (pytz.utc.localize(now) - modified_at)
# if the media_sync_offset has been satisfied
if not elapsed_time_difference > timedelta(seconds=settings[MEDIA_SYNC_OFFSET_IN_SECONDS]):
logger.info(
'Audit {0} modified too recently, some media may not have completed syncing. '
'Skipping export until next sync cycle'.format(
audit['audit_id']))
return False
return True
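# A minimal sketch of the offset comparison used above, with hard-coded illustrative
# values (20 minutes elapsed versus the 10 minute default offset).
def _demo_media_sync_offset():
    modified_at = pytz.utc.localize(datetime.utcnow()) - timedelta(minutes=20)
    elapsed = pytz.utc.localize(datetime.utcnow()) - modified_at
    return elapsed > timedelta(seconds=DEFAULT_MEDIA_SYNC_OFFSET_IN_SECONDS)  # True: offset satisfied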
def process_audit(logger, settings, sc_client, audit, get_started):
"""
Export audit in the format specified in settings. Formats include PDF, JSON, CSV, MS Word (docx), media, or
web report link.
:param logger: The logger
:param settings: Settings from command line and configuration file
:param sc_client: instance of safetypy.SafetyCulture class
:param audit: Audit JSON to be exported
"""
if not check_if_media_sync_offset_satisfied(logger, settings, audit):
return
audit_id = audit['audit_id']
logger.info('downloading ' + audit_id)
audit_json = sc_client.get_audit(audit_id)
template_id = audit_json['template_id']
preference_id = None
if settings[PREFERENCES] is not None and template_id in settings[PREFERENCES].keys():
preference_id = settings[PREFERENCES][template_id]
export_filename = parse_export_filename(audit_json, settings[FILENAME_ITEM_ID]) or audit_id
for export_format in settings[EXPORT_FORMATS]:
if export_format in ['pdf', 'docx']:
export_audit_pdf_word(logger, sc_client, settings, audit_id, preference_id, export_format, export_filename)
elif export_format == 'json':
export_audit_json(logger, settings, audit_json, export_filename)
elif export_format == 'csv':
export_audit_csv(settings, audit_json)
elif export_format == 'doc_creation':
print('Not currently implemented')
sys.exit()
# media_list = []
# # media_list = export_audit_media(logger, sc_client, settings, audit_json, audit_id, export_filename)
# export_audit_doc_creation(logger, settings, audit_json, media_list)
# elif export_format == 'doc_template':
# export_template_creation(logger, settings, audit_json)
elif export_format in ['sql', 'pickle']:
if get_started[0] == 'complete':
export_audit_pandas(logger, settings, audit_json, get_started)
elif get_started[0] != 'complete':
logger.error('Something went wrong connecting to the database, please check your settings.')
sys.exit(1)
elif export_format == 'media':
export_audit_media(logger, sc_client, settings, audit_json, audit_id, export_filename)
elif export_format == 'web-report-link':
export_audit_web_report_link(logger, settings, sc_client, audit_json, audit_id, template_id)
logger.debug('setting last modified to ' + audit['modified_at'])
update_sync_marker_file(audit['modified_at'])
def export_audit_pdf_word(logger, sc_client, settings, audit_id, preference_id, export_format, export_filename):
"""
Save Audit to disk in PDF or MS Word format
:param logger: The logger
:param sc_client: instance of safetypy.SafetyCulture class
:param settings: Settings from command line and configuration file
:param audit_id: Unique audit UUID
:param preference_id: Unique preference UUID
:param export_format: 'pdf' or 'docx' string
:param export_filename: String indicating what to name the exported audit file
"""
export_doc = sc_client.get_export(audit_id, preference_id, export_format)
save_exported_document(logger, settings[EXPORT_PATH], export_doc, export_filename, export_format)
def export_audit_json(logger, settings, audit_json, export_filename):
"""
Save audit JSON to disk
:param logger: The logger
:param settings: Settings from the command line and configuration file
:param audit_json: Audit JSON
:param export_filename: String indicating what to name the exported audit file
"""
export_format = 'json'
export_doc = json.dumps(audit_json, indent=4)
save_exported_document(logger, settings[EXPORT_PATH], export_doc.encode(), export_filename, export_format)
def export_audit_csv(settings, audit_json):
"""
Save audit CSV to disk.
:param settings: Settings from command line and configuration file
:param audit_json: Audit JSON
"""
csv_exporter = csvExporter.CsvExporter(audit_json, settings[EXPORT_INACTIVE_ITEMS_TO_CSV])
count = 0
if settings[USE_REAL_TEMPLATE_NAME] is False:
csv_export_filename = audit_json['template_id']
elif settings[USE_REAL_TEMPLATE_NAME] is True:
csv_export_filename = audit_json['template_data']['metadata']['name']+' - '+audit_json['template_id']
csv_export_filename = csv_export_filename.replace('/', ' ').replace('\\', ' ')
elif settings[USE_REAL_TEMPLATE_NAME].startswith('single_file'):
csv_export_filename = settings[CONFIG_NAME]
else:
csv_export_filename = audit_json['template_id']
for row in csv_exporter.audit_table:
count += 1
row[0] = count
# if settings[CONFIG_NAME] is not None:
# csv_exporter.append_converted_audit_to_bulk_export_file(
# os.path.join(settings[EXPORT_PATH], settings[CONFIG_NAME], csv_export_filename + '.csv'))
# else:
csv_exporter.append_converted_audit_to_bulk_export_file(
os.path.join(settings[EXPORT_PATH], csv_export_filename + '.csv'))
def sql_setup(logger, settings, action_or_audit):
    if isinstance(settings[MERGE_ROWS], bool):
merge = settings[MERGE_ROWS]
else:
merge = False
    if isinstance(settings[ACTIONS_MERGE_ROWS], bool):
actions_merge = settings[ACTIONS_MERGE_ROWS]
else:
actions_merge = False
Base.metadata.clear()
if action_or_audit == 'audit':
if settings[SQL_TABLE] is not None:
table = settings[SQL_TABLE]
else:
table = 'iauditor_data'
Database = set_table(table, merge)
elif action_or_audit == 'actions':
if settings[ACTIONS_TABLE] is not None:
table = settings[ACTIONS_TABLE]
else:
table = 'iauditor_actions_data'
ActionsDatabase = set_actions_table(table, actions_merge)
else:
print('No Match')
sys.exit()
connection_string = '{}://{}:{}@{}:{}/{}'.format(settings[DB_TYPE],
settings[DB_USER],
settings[DB_PWD],
settings[DB_SERVER],
settings[DB_PORT],
settings[DB_NAME])
engine = create_engine(connection_string)
meta = MetaData()
logger.debug('Making connection to ' + str(engine))
if action_or_audit == 'audit':
if not engine.dialect.has_table(engine, settings[SQL_TABLE], schema=settings[DB_SCHEMA]):
logger.info(settings[SQL_TABLE] + ' not Found.')
if settings[ALLOW_TABLE_CREATION] == 'true':
Database.__table__.create(engine)
elif settings[ALLOW_TABLE_CREATION] == 'false':
logger.error('You need to create the table {} in your database before continuing. If you want the script '
'to do it for you, set ALLOW_TABLE_CREATION to '
'True in your config file'.format(settings[SQL_TABLE]))
sys.exit()
else:
validation = input('It doesn\'t look like a table called {} exists on your server. Would you like the '
'script to try and create the table for you now? (If you\'re using '
'docker, you need to set APPROVE_TABLE_CREATION to true in your config file) '
'(y/n) '.format(settings[SQL_TABLE]))
validation = validation.lower()
if validation.startswith('y'):
Database.__table__.create(engine)
else:
logger.info('Stopping the script. Please either re-run the script or create your table manually.')
sys.exit()
setup = 'complete'
logger.info('Successfully setup Database and connection')
else:
if not engine.dialect.has_table(engine, settings[ACTIONS_TABLE], schema=settings[DB_SCHEMA]):
logger.info(settings[ACTIONS_TABLE] + ' not Found.')
if settings[ALLOW_TABLE_CREATION] == 'true':
ActionsDatabase.__table__.create(engine)
elif settings[ALLOW_TABLE_CREATION] == 'false':
logger.error('You need to create the table {} in your database before continuing. If you want the '
'script to do it for you, set ALLOW_TABLE_CREATION to True in your '
                             'config file'.format(settings[ACTIONS_TABLE]))
sys.exit()
else:
validation = input('It doesn\'t look like a table called {} exists on your server. Would you like the '
'script to try and create the table for you now? (If you\'re using '
'docker, you need to set APPROVE_TABLE_CREATION to true in your config file) '
'(y/n) '.format(settings[ACTIONS_TABLE]))
validation = validation.lower()
if validation.startswith('y'):
ActionsDatabase.__table__.create(engine)
else:
logger.info('Stopping the script. Please either re-run the script or create your table manually.')
sys.exit()
setup = 'complete'
logger.info('Successfully setup Database and connection')
if action_or_audit == 'audit':
return setup, engine, connection_string, meta, Database
else:
return setup, engine, connection_string, meta, ActionsDatabase
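# A minimal sketch of the SQLAlchemy URL shape built in sql_setup. Every value below
# is a placeholder, not a real credential or server.
def _demo_connection_string():
    return '{}://{}:{}@{}:{}/{}'.format('postgresql', 'user', 'password',
                                        'localhost', 5432, 'iauditor')
    # -> 'postgresql://user:password@localhost:5432/iauditor'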
def export_audit_sql(logger, settings, audit_json, get_started):
"""
Save audit to a database.
:param logger: The logger
:param settings: Settings from command line and configuration file
:param audit_json: Audit JSON
"""
engine = get_started[1]
database = get_started[4]
csv_exporter = csvExporter.CsvExporter(audit_json, settings[EXPORT_INACTIVE_ITEMS_TO_CSV])
df = csv_exporter.audit_table
df =
|
pd.DataFrame.from_records(df, columns=SQL_HEADER_ROW)
|
pandas.DataFrame.from_records
|
# -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core import ops
from pandas.errors import NullFrequencyError
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
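# A minimal sketch: an equivalent way to build the fixture's timedelta64[ns] Series
# via pd.to_timedelta, shown here for readers unfamiliar with the dtype string form.
def _demo_tdser_equivalent():
    return Series(pd.to_timedelta(['59 Days', '59 Days', 'NaT']))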
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = pd.timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
Series([2, 3, 4]) + tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_sub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser - Series([2, 3, 4])
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
with pytest.raises(TypeError):
Series([2, 3, 4]) - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_intlike(self, box):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box)
err = TypeError if box is not pd.Index else NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser):
if box is pd.DataFrame and isinstance(scalar, np.ndarray):
# raises ValueError
pytest.xfail(reason="DataFrame to broadcast incorrectly")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser):
if type(vec) is Series and not dtype.startswith('float'):
pytest.xfail(reason='GH#19123 integer interpreted as nanos')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
# TODO: parametrize over these four ops?
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with datetime-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype "
"instead of "
"datetime64[ns]",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdser = Series(pd.timedelta_range('1 day', periods=3))
expected = Series(pd.date_range('2012-01-02', periods=3))
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
tm.assert_equal(ts + tdser, expected)
tm.assert_equal(tdser + ts, expected)
expected2 = Series(pd.date_range('2011-12-31',
periods=3, freq='-1D'))
expected2 = tm.box_expected(expected2, box)
tm.assert_equal(ts - tdser, expected2)
tm.assert_equal(ts + (-tdser), expected2)
with pytest.raises(TypeError):
tdser - ts
# ------------------------------------------------------------------
# Operations with timedelta-like others (including DateOffsets)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly leading "
"to alignment error",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - NaT
tm.assert_equal(res, expected)
class TestTimedeltaArraylikeMulDivOps(object):
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly returns "
"m8[ns] instead of f8",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()])
def test_td64arr_floordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = td1 // scalar_td
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly casts to f8",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()])
def test_td64arr_rfloordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = scalar_td // td1
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns m8[ns] dtype "
"instead of f8",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()])
def test_td64arr_rfloordiv_tdscalar_explicit(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with timedelta-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="__mul__ op treats "
"timedelta other as i8; "
"rmul OK",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()])
def test_td64arr_mul_tdscalar_invalid(self, box, scalar_td):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = 'operate|unsupported|cannot|not supported'
with
|
tm.assert_raises_regex(TypeError, pattern)
|
pandas.util.testing.assert_raises_regex
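A minimal usage sketch, assuming an older pandas where pandas.util.testing is still public: assert_raises_regex acts as a context manager that checks both the exception type and that its message matches the regex.
import pandas.util.testing as tm

# Adding an int and a str raises TypeError whose message matches "unsupported".
with tm.assert_raises_regex(TypeError, "unsupported|cannot|not supported"):
    1 + "a"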
|
"""
Author: <NAME>
Date: Nov 28, 2020
This file defines a predictive model class library for this project.
"""
import os
import re
import sys
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.compose import (
ColumnTransformer,
TransformedTargetRegressor,
make_column_transformer,
)
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.model_selection import (
GridSearchCV,
RandomizedSearchCV,
cross_validate,
train_test_split,
)
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, StandardScaler
from sklearn.svm import SVC, SVR
from lightgbm.sklearn import LGBMClassifier
from catboost import CatBoostClassifier
from tqdm import tqdm
class CharacterPredictiveModel:
"""A character model predictor function class.
Parameters
----------
verbose : boolean
A boolean flag determining if verbose output is enabled.
Examples
--------
>>> cpm = CharacterPredictiveModel(False)
>>> cpm.fit(X_train, y_train)
>>> cpm.optimize_model(model)
>>> cpm.return_model()
"""
def __init__(self, verbose = False):
"""See help(CharacterPredictiveModel)"""
self.verbose = verbose
def fit(self, X, y):
"""
Stores the input features and targets and builds the preprocessor.
Parameters
----------
X : pandas DataFrame
Input feature data.
y : pandas Series or DataFrame
Target data.
Returns
-------
None
"""
self.X = X
self.y = y
self.preprocessor = self.build_preprocessor()
if self.verbose: print('Fit input features and targets.')
return
def store_cross_val_results(self, model, X_train, y_train,
scoring_metric = "accuracy"):
"""
Returns mean and std of cross validation.
This function is adapted from Varada's lecture code in DSCI571
Parameters
----------
model :
scikit-learn classification model
X_train : DataFrame
Training data, independent variables
y_train : DataFrame
Training data, dependent variables
scoring_metric: string
Metric to use for scoring
Returns
----------
Dict
"""
scores = cross_validate(model,
X_train, y_train,
return_train_score=True,
n_jobs=-1,
scoring=scoring_metric)
mean_scores =
|
pd.DataFrame(scores)
|
pandas.DataFrame
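A small self-contained sketch of the pattern the snippet above is completing: wrap the dict returned by cross_validate in a DataFrame so the per-fold scores can be summarised (toy data; the names below are illustrative, not from the project).
import pandas as pd
from sklearn.dummy import DummyClassifier
from sklearn.model_selection import cross_validate

X = [[0], [1]] * 10
y = [0, 1] * 10
scores = cross_validate(DummyClassifier(strategy="most_frequent"), X, y,
                        cv=5, return_train_score=True)
# Each key of `scores` (fit_time, test_score, ...) becomes a column.
summary = pd.DataFrame(scores).agg(["mean", "std"])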
|
import matplotlib.pyplot as plt
import numpy as np
import os,glob,sys,importlib,pickle#,scipy,coolbox,pybedtools,
# from tqdm import tqdm
from scipy.stats import rankdata
import pandas as pd
import networkx as nx
import seaborn as sns
from joblib import delayed, wrap_non_picklable_objects
from pathlib import Path
import plotly
from numba import jit
from joblib import Parallel
import sklearn.utils as sku
import plotly.graph_objects as go
import plotly.express as px
# j=sys.argv[1]
from urllib import request
import xml.etree.ElementTree as ET
import urllib
sys.path.insert(1, './nestedness_analysis/')
import nestedness_metrics_other_functions
from nestedness_metrics_other_functions import from_edges_to_matrix
# importlib.reload(sys.modules['EO_functions_bipartite'])
import extremal_bi
@delayed
@wrap_non_picklable_objects
def bip(cc,net,ff,C,patt):
# print(net)
# dd=cc[['spec','gene',net]]
dd=pd.read_csv('data/gcn/cc_'+patt+'.txt',index_col=False,sep='\t',usecols=['spec','gene',net])
# try:
dd=dd[dd[net]!=0]
# except:
# pass
# ee=nx.from_pandas_edgelist(dd,source='spec',target='gene')
# remove = [node for node,degree in dict(ee.degree()).items() if degree <5]
# ee.remove_nodes_from(remove)
# ff.append(ee)
B = nx.Graph()
B.add_nodes_from(dd['spec'], bipartite=0)
B.add_nodes_from(dd['gene'], bipartite=1)
B.add_weighted_edges_from(tuple(dd[['spec','gene',net]].itertuples(index=False, name=None)))
remove = [node for node,degree in dict(B.degree()).items() if degree <5]
B.remove_nodes_from(remove)
# C.append(B)
xx=nx.from_pandas_edgelist(dd,source='spec',target='gene',edge_attr=net)
remove = [node for node,degree in dict(xx.degree()).items() if degree <5]
xx.remove_nodes_from(remove)
# with open('data/gcn/NX_'+str(patt)+'_hypert.pkl', 'ab+') as f:
# pickle.dump(ff, f)
# with open('data/gcn/BX_'+str(patt)+'_hypert.pkl', 'ab+') as f:
# pickle.dump(C, f)
return xx,B
def load_list_of_dicts(filename, create_using=nx.Graph):
with open(filename, 'rb') as f:
list_of_dicts = pickle.load(f)
graphs = [create_using(graph) for graph in list_of_dicts]
return graphs
# @delayed
# @wrap_non_picklable_objects
def meas(measur,uni_bact,relgene,graphs,patt):
HTXX=uni_bact[uni_bact.index.isin(relgene.columns[1:-2].str.split('-').str[0])]
HTXX['index']=np.arange(len(HTXX))
# measur=eval(measur)
S = [eval(measur)(graphs[i]) for i in HTXX[HTXX['HT']==0]['index'].values]
T = [eval(measur)(graphs[i]) for i in HTXX[HTXX['HT']!=0]['index'].values]
if measur!='nx.degree':
non=pd.DataFrame(S).melt()
yes=pd.DataFrame(T).melt()
elif measur=='nx.degree':
non=pd.DataFrame(S.pop())
non=non.rename(columns={0:'variable',1:'value'})
yes=pd.DataFrame(T.pop())
yes=yes.rename(columns={0:'variable',1:'value'})
non['type']='NoHT'
non.dropna(inplace=True)
non=non[non.value!=0]
non=non[~non['variable'].str.contains('UniRef90')]
non.value=non.value/np.sum(non.value)
yes['type']='HT'
yes.dropna(inplace=True)
yes=yes[yes.value!=0]
yes=yes[~yes['variable'].str.contains('UniRef90')]
yes.value=yes.value/np.sum(yes.value)
df=non.append(yes)
# df=df.dropna()
df['gen']=df.variable.str.split('_').str[2]
df.to_csv("data/gcn/"+patt+"_"+str(measur)+".txt",sep='\t')
plt.figure(figsize=(10,30))
sns.set_theme(style="whitegrid")
sns.violinplot(data=df, y="gen", x="value",hue="type",
split=True, inner="quart", linewidth=1,
orient="h")
sns.despine(left=True)
plt.savefig("data/gcn/"+patt+"_"+str(measur)+"_violin.png",dpi=300,bbox_inches = "tight")
return df
def time_bar(data,XX,rank='rank',species='all'):
if rank=='rank':
data['rank']=rankdata(data.value,method='min')
elif rank=='rank_diff' or rank=='diff':
data['vx']=rankdata(data.value_x,method='min')
data['vy']=rankdata(data.value_y,method='min')
data['rank_diff']=data['vx'].astype('int')-data['vy'].astype('int')
data['diff']=data['value_x']-data['value_y']
# elif rank=='value':
# rank=data.value
if species!='all':
data=data[data['species']==species]
# clust = ll.groupby(['species','target','time'], as_index=False)['diff'].sum()
df = data[['species','target','time',rank]]#.sort_values(['time'], ascending=[True]).groupby(['species','time']).max(5)
jeff=pd.DataFrame(df.groupby(['species','time'])[rank].nlargest(XX))
jeff.reset_index(inplace=True)
for cc in np.unique(jeff.species):
jeff2=jeff[jeff['species']==cc]
if species!='all':
jeff2=df.loc[jeff2['level_2']]
else:
jeff2=df.iloc[jeff2['level_2']]
plt.figure(figsize=(15,5))
ax = sns.histplot(jeff2, x='time', hue='target', weights=rank,
multiple='stack', palette='icefire', shrink=0.6,bins=len(pd.unique(jeff2.time))+5)
ax.set_ylabel(str(rank)+'_HT')
ax.set_title(cc)
# Fix the legend so it's not on top of the bars.
# legend = ax.get_legend()
plt.legend([],[], frameon=False)
Path("data/gcn/img/"+cc).mkdir(parents=True, exist_ok=True)
plt.savefig("data/gcn/img/"+cc+"/"+str(data)+"_"+cc+"_"+str(rank)+".png",dpi=300,bbox_inches = "tight")
def proc_dat(noHT):
# noHT=jj.filter(regex=str(focus)).dropna(how='all')
noHT.columns=noHT.columns.str.split('_').str[0]
noHT=noHT.groupby(by=noHT.columns, axis=1).mean()
noHT=noHT.dropna(how='any')
noHT.reset_index(inplace=True)
jj=noHT.melt(['source','target'])
jj.rename(columns={'variable':'time'},inplace=True)
jj['t']=jj['time']
# jj['time']=jj['time'].astype('int')+2000
# jj['time'] = pd.to_datetime(jj['time'], format='%Y')
# jj=jj[jj['value']>5]
jj['species']=jj['source'].str.split('_').str[2]
jj=jj.dropna(how='any')
return jj
# @delayed
# @wrap_non_picklable_objects
def rev_tbar(jj,XX,gg,species='all'):
data=jj[['species','target','time','t','value']]
# df=data.copy()
# data.reset_index(inplace=True)
data['sum']=pd.DataFrame(data.groupby(['species','t','target'])['value'].transform('sum'))
# jeff.reset_index(inplace=True)
del data['value']
data.drop_duplicates(inplace=True)
data.reset_index(inplace=True)
del data['index'],data['time']
jeff=pd.DataFrame(data.groupby(['species','t'])['sum'].nlargest(XX))
jeff.reset_index(inplace=True)
jeffA=data.iloc[jeff['level_2']]
tim_len=len(np.unique(jeffA['t']))
if species!='all':
jeff=jeff[jeff['species']==species]
JJ=pd.DataFrame()
rr=[]
for q,ee in enumerate((np.unique(jeff.species))):
jeff2=jeffA[jeffA['species']==ee]#.explode('target')
dd=pd.DataFrame(jeff2['target'].to_numpy().reshape(int(len(jeff2)/tim_len),tim_len,order='F'))
if len(dd.melt())==(tim_len*XX):
JJ=JJ.append(dd)
rr=np.append(rr, ee)
jeffA=jeffA.sort_values(['species', 't'], ascending=[True, True])
labels,levels=pd.factorize(sku.shuffle(JJ.melt()['value']))
cc=pd.DataFrame(np.array(labels).reshape((XX)*len(rr),tim_len,order='F'))
for i in np.arange(0,len(cc),XX+1):
for col in cc:
cc.iloc[i:i+XX,col] = cc.iloc[i:i+XX,col].sort_values(ignore_index=True)
cc.loc[i+XX]=0
plt.figure(figsize=(10,30))
ax=sns.heatmap(cc,cmap='rocket_r',annot=True, fmt="d",cbar=False,xticklabels=False,
yticklabels=False).set(ylabel=' - '.join(rr))
# plt.show()
data.to_csv('data/gcn/'+str(gg)+'.csv',sep='\t')
# Path("data/gcn/img/"+cc).mkdir(parents=True, exist_ok=True)
plt.savefig("data/gcn/img/full_"+str(gg)+"_10.png",dpi=300,bbox_inches = "tight")
def group_time_plot(noHT,steps,XX,spec_spec):
noHT.columns=noHT.columns.str.split('_').str[0]
noHT.columns=pd.qcut((noHT.columns).astype('int'), steps, labels=False)
noHT=noHT.groupby(by=noHT.columns, axis=1).mean()
noHT=noHT.dropna(how='all')
noHT.reset_index(inplace=True)
jj=noHT.melt(['source','target'])
jj.rename(columns={'variable':'time'},inplace=True)
jj['t']=jj['time']
# jj['time']=jj['time'].astype('int')+2000
# jj['time'] = pd.to_datetime(jj['time'], format='%Y')
# jj=jj[jj['value']>5]
jj['species']=jj['source'].str.split('_').str[2]
jj=jj.dropna(how='any')
jj['rank']=rankdata(jj.value,method='min')
XX=50 #10
# df = noHT[['species','target','time','rank']]
del jj['value'], jj['t'], jj['source']
if spec_spec=='1':
jeff=pd.DataFrame(jj.groupby(['species','time'])['rank_diff'].nlargest(XX))
jeff=jeff.dropna(how='any')
jeff.reset_index(inplace=True)
jeff2=jj.loc[jeff['level_2']]
else:
jeff=pd.DataFrame(jj.groupby(['time'])['rank'].nlargest(XX))
jeff=jeff.dropna(how='any')
jeff.reset_index(inplace=True)
jeff2=jj.loc[jeff['level_1']]
plt.figure(figsize=(15,5))
ax = sns.histplot(jeff2, x='time', hue='target', weights='rank',
multiple='stack', palette='icefire', shrink=0.6,bins=len(
|
pd.unique(jeff2['time'])
|
pandas.unique
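For reference, pd.unique returns the distinct values of a 1-D array-like in order of first appearance (unlike np.unique, which sorts), so len(pd.unique(...)) is a convenient bin count as used above.
import pandas as pd

s = pd.Series(["b", "a", "b", "c"])
print(pd.unique(s))       # ['b' 'a' 'c']
print(len(pd.unique(s)))  # 3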
|
from collections import defaultdict
from functools import partial
import itertools
import operator
import re
from typing import List, Optional, Sequence, Tuple, Union
import numpy as np
from pandas._libs import Timedelta, Timestamp, internals as libinternals, lib
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import (
find_common_type,
infer_dtype_from_scalar,
maybe_convert_objects,
maybe_promote,
)
from pandas.core.dtypes.common import (
_NS_DTYPE,
is_extension_array_dtype,
is_list_like,
is_scalar,
is_sparse,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCExtensionArray, ABCSeries
from pandas.core.dtypes.missing import isna
import pandas.core.algorithms as algos
from pandas.core.base import PandasObject
from pandas.core.index import Index, MultiIndex, ensure_index
from pandas.core.indexers import maybe_convert_indices
from pandas.io.formats.printing import pprint_thing
from .blocks import (
Block,
CategoricalBlock,
DatetimeTZBlock,
ExtensionBlock,
ObjectValuesExtensionBlock,
_extend_blocks,
_merge_blocks,
_safe_reshape,
get_block_type,
make_block,
)
from .concat import ( # all for concatenate_block_managers
combine_concat_plans,
concatenate_join_units,
get_mgr_concatenation_plan,
is_uniform_join_units,
)
# TODO: flexible with index=None and/or items=None
class BlockManager(PandasObject):
"""
Core internal data structure to implement DataFrame, Series, etc.
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Attributes
----------
shape
ndim
axes
values
items
Methods
-------
set_axis(axis, new_labels)
copy(deep=True)
get_dtype_counts
get_ftype_counts
get_dtypes
get_ftypes
apply(func, axes, block_filter_fn)
get_bool_data
get_numeric_data
get_slice(slice_like, axis)
get(label)
iget(loc)
take(indexer, axis)
reindex_axis(new_labels, axis)
reindex_indexer(new_labels, indexer, axis)
delete(label)
insert(loc, label, value)
set(label, value)
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = [
"axes",
"blocks",
"_ndim",
"_shape",
"_known_consolidated",
"_is_consolidated",
"_blknos",
"_blklocs",
]
def __init__(
self,
blocks: Sequence[Block],
axes: Sequence[Index],
do_integrity_check: bool = True,
):
self.axes = [ensure_index(ax) for ax in axes]
self.blocks = tuple(blocks) # type: Tuple[Block, ...]
for block in blocks:
if self.ndim != block.ndim:
raise AssertionError(
"Number of Block dimensions ({block}) must equal "
"number of axes ({self})".format(block=block.ndim, self=self.ndim)
)
if do_integrity_check:
self._verify_integrity()
self._consolidate_check()
self._rebuild_blknos_and_blklocs()
def make_empty(self, axes=None):
""" return an empty BlockManager with the items axis of len 0 """
if axes is None:
axes = [ensure_index([])] + [ensure_index(a) for a in self.axes[1:]]
# preserve dtype if possible
if self.ndim == 1:
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return self.__class__(blocks, axes)
def __nonzero__(self):
return True
# Python3 compat
__bool__ = __nonzero__
@property
def shape(self):
return tuple(len(ax) for ax in self.axes)
@property
def ndim(self) -> int:
return len(self.axes)
def set_axis(self, axis, new_labels):
new_labels = ensure_index(new_labels)
old_len = len(self.axes[axis])
new_len = len(new_labels)
if new_len != old_len:
raise ValueError(
"Length mismatch: Expected axis has {old} elements, new "
"values have {new} elements".format(old=old_len, new=new_len)
)
self.axes[axis] = new_labels
def rename_axis(self, mapper, axis, copy=True, level=None):
"""
Rename one of axes.
Parameters
----------
mapper : unary callable
axis : int
copy : boolean, default True
level : int, default None
"""
obj = self.copy(deep=copy)
obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level))
return obj
@property
def _is_single_block(self):
if self.ndim == 1:
return True
if len(self.blocks) != 1:
return False
blk = self.blocks[0]
return blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice == slice(
0, len(self), 1
)
def _rebuild_blknos_and_blklocs(self):
"""
Update mgr._blknos / mgr._blklocs.
"""
new_blknos = np.empty(self.shape[0], dtype=np.int64)
new_blklocs = np.empty(self.shape[0], dtype=np.int64)
new_blknos.fill(-1)
new_blklocs.fill(-1)
for blkno, blk in enumerate(self.blocks):
rl = blk.mgr_locs
new_blknos[rl.indexer] = blkno
new_blklocs[rl.indexer] = np.arange(len(rl))
if (new_blknos == -1).any():
raise AssertionError("Gaps in blk ref_locs")
self._blknos = new_blknos
self._blklocs = new_blklocs
@property
def items(self):
return self.axes[0]
def _get_counts(self, f):
""" return a dict of the counts of the function in BlockManager """
self._consolidate_inplace()
counts = dict()
for b in self.blocks:
v = f(b)
counts[v] = counts.get(v, 0) + b.shape[0]
return counts
def get_dtype_counts(self):
return self._get_counts(lambda b: b.dtype.name)
def get_ftype_counts(self):
return self._get_counts(lambda b: b.ftype)
def get_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return algos.take_1d(dtypes, self._blknos, allow_fill=False)
def get_ftypes(self):
ftypes = np.array([blk.ftype for blk in self.blocks])
return algos.take_1d(ftypes, self._blknos, allow_fill=False)
def __getstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
axes_array = list(self.axes)
extra_state = {
"0.14.1": {
"axes": axes_array,
"blocks": [
dict(values=b.values, mgr_locs=b.mgr_locs.indexer)
for b in self.blocks
],
}
}
# First three elements of the state are to maintain forward
# compatibility with 0.13.1.
return axes_array, block_values, block_items, extra_state
def __setstate__(self, state):
def unpickle_block(values, mgr_locs):
return make_block(values, placement=mgr_locs)
if isinstance(state, tuple) and len(state) >= 4 and "0.14.1" in state[3]:
state = state[3]["0.14.1"]
self.axes = [ensure_index(ax) for ax in state["axes"]]
self.blocks = tuple(
unpickle_block(b["values"], b["mgr_locs"]) for b in state["blocks"]
)
else:
# discard anything after 3rd, support beta pickling format for a
# little while longer
ax_arrays, bvalues, bitems = state[:3]
self.axes = [ensure_index(ax) for ax in ax_arrays]
if len(bitems) == 1 and self.axes[0].equals(bitems[0]):
# This is a workaround for pre-0.14.1 pickles that didn't
# support unpickling multi-block frames/panels with non-unique
# columns/items, because given a manager with items ["a", "b",
# "a"] there's no way of knowing which block's "a" is where.
#
# Single-block case can be supported under the assumption that
# block items corresponded to manager items 1-to-1.
all_mgr_locs = [slice(0, len(bitems[0]))]
else:
all_mgr_locs = [
self.axes[0].get_indexer(blk_items) for blk_items in bitems
]
self.blocks = tuple(
unpickle_block(values, mgr_locs)
for values, mgr_locs in zip(bvalues, all_mgr_locs)
)
self._post_setstate()
def _post_setstate(self):
self._is_consolidated = False
self._known_consolidated = False
self._rebuild_blknos_and_blklocs()
def __len__(self) -> int:
return len(self.items)
def __repr__(self) -> str:
output = pprint_thing(self.__class__.__name__)
for i, ax in enumerate(self.axes):
if i == 0:
output += "\nItems: {ax}".format(ax=ax)
else:
output += "\nAxis {i}: {ax}".format(i=i, ax=ax)
for block in self.blocks:
output += "\n{block}".format(block=pprint_thing(block))
return output
def _verify_integrity(self):
mgr_shape = self.shape
tot_items = sum(len(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
if block._verify_integrity and block.shape[1:] != mgr_shape[1:]:
construction_error(tot_items, block.shape[1:], self.axes)
if len(self.items) != tot_items:
raise AssertionError(
"Number of manager items must equal union of "
"block items\n# manager items: {0}, # "
"tot_items: {1}".format(len(self.items), tot_items)
)
def apply(
self,
f,
axes=None,
filter=None,
do_integrity_check=False,
consolidate=True,
**kwargs,
):
"""
iterate over the blocks, collect and create a new block manager
Parameters
----------
f : the callable or function name to operate on at the block level
axes : optional (if not supplied, use self.axes)
filter : list, if supplied, only call the block if the filter is in
the block
do_integrity_check : boolean, default False. Do the block manager
integrity check
consolidate: boolean, default True. Join together blocks having same
dtype
Returns
-------
Block Manager (new object)
"""
result_blocks = []
# filter kwarg is used in replace-* family of methods
if filter is not None:
filter_locs = set(self.items.get_indexer_for(filter))
if len(filter_locs) == len(self.items):
# All items are included, as if there were no filtering
filter = None
else:
kwargs["filter"] = filter_locs
if consolidate:
self._consolidate_inplace()
if f == "where":
align_copy = True
if kwargs.get("align", True):
align_keys = ["other", "cond"]
else:
align_keys = ["cond"]
elif f == "putmask":
align_copy = False
if kwargs.get("align", True):
align_keys = ["new", "mask"]
else:
align_keys = ["mask"]
elif f == "fillna":
# fillna internally does putmask, maybe it's better to do this
# at mgr, not block level?
align_copy = False
align_keys = ["value"]
else:
align_keys = []
# TODO(EA): may interfere with ExtensionBlock.setitem for blocks
# with a .values attribute.
aligned_args = {
k: kwargs[k]
for k in align_keys
if not isinstance(kwargs[k], ABCExtensionArray)
and hasattr(kwargs[k], "values")
}
for b in self.blocks:
if filter is not None:
if not b.mgr_locs.isin(filter_locs).any():
result_blocks.append(b)
continue
if aligned_args:
b_items = self.items[b.mgr_locs.indexer]
for k, obj in aligned_args.items():
axis = obj._info_axis_number
kwargs[k] = obj.reindex(b_items, axis=axis, copy=align_copy)
applied = getattr(b, f)(**kwargs)
result_blocks = _extend_blocks(applied, result_blocks)
if len(result_blocks) == 0:
return self.make_empty(axes or self.axes)
bm = self.__class__(
result_blocks, axes or self.axes, do_integrity_check=do_integrity_check
)
bm._consolidate_inplace()
return bm
def quantile(
self,
axis=0,
consolidate=True,
transposed=False,
interpolation="linear",
qs=None,
numeric_only=None,
):
"""
Iterate over blocks applying quantile reduction.
This routine is intended for reduction type operations and
will do inference on the generated blocks.
Parameters
----------
axis: reduction axis, default 0
consolidate: boolean, default True. Join together blocks having same
dtype
transposed: boolean, default False
we are holding transposed data
interpolation : type of interpolation, default 'linear'
qs : a scalar or list of the quantiles to be computed
numeric_only : ignored
Returns
-------
Block Manager (new object)
"""
# Series dispatches to DataFrame for quantile, which allows us to
# simplify some of the code here and in the blocks
assert self.ndim >= 2
if consolidate:
self._consolidate_inplace()
def get_axe(block, qs, axes):
# Because Series dispatches to DataFrame, we will always have
# block.ndim == 2
from pandas import Float64Index
if is_list_like(qs):
ax = Float64Index(qs)
else:
ax = axes[0]
return ax
axes, blocks = [], []
for b in self.blocks:
block = b.quantile(axis=axis, qs=qs, interpolation=interpolation)
axe = get_axe(b, qs, axes=self.axes)
axes.append(axe)
blocks.append(block)
# note that some DatetimeTZ, Categorical are always ndim==1
ndim = {b.ndim for b in blocks}
assert 0 not in ndim, ndim
if 2 in ndim:
new_axes = list(self.axes)
# multiple blocks that are reduced
if len(blocks) > 1:
new_axes[1] = axes[0]
# reset the placement to the original
for b, sb in zip(blocks, self.blocks):
b.mgr_locs = sb.mgr_locs
else:
new_axes[axis] = Index(np.concatenate([ax.values for ax in axes]))
if transposed:
new_axes = new_axes[::-1]
blocks = [
b.make_block(b.values.T, placement=np.arange(b.shape[1]))
for b in blocks
]
return self.__class__(blocks, new_axes)
# single block, i.e. ndim == {1}
values = concat_compat([b.values for b in blocks])
# compute the orderings of our original data
if len(self.blocks) > 1:
indexer = np.empty(len(self.axes[0]), dtype=np.intp)
i = 0
for b in self.blocks:
for j in b.mgr_locs:
indexer[j] = i
i = i + 1
values = values.take(indexer)
return SingleBlockManager(
[make_block(values, ndim=1, placement=np.arange(len(values)))], axes[0]
)
def isna(self, func, **kwargs):
return self.apply("apply", func=func, **kwargs)
def where(self, **kwargs):
return self.apply("where", **kwargs)
def setitem(self, **kwargs):
return self.apply("setitem", **kwargs)
def putmask(self, **kwargs):
return self.apply("putmask", **kwargs)
def diff(self, **kwargs):
return self.apply("diff", **kwargs)
def interpolate(self, **kwargs):
return self.apply("interpolate", **kwargs)
def shift(self, **kwargs):
return self.apply("shift", **kwargs)
def fillna(self, **kwargs):
return self.apply("fillna", **kwargs)
def downcast(self, **kwargs):
return self.apply("downcast", **kwargs)
def astype(self, dtype, **kwargs):
return self.apply("astype", dtype=dtype, **kwargs)
def convert(self, **kwargs):
return self.apply("convert", **kwargs)
def replace(self, value, **kwargs):
assert np.ndim(value) == 0, value
return self.apply("replace", value=value, **kwargs)
def replace_list(self, src_list, dest_list, inplace=False, regex=False):
""" do a list replace """
inplace = validate_bool_kwarg(inplace, "inplace")
# figure out our mask a-priori to avoid repeated replacements
values = self.as_array()
def comp(s, regex=False):
"""
Generate a bool array by performing an equality check, or by
performing an element-wise regular expression match
"""
if isna(s):
return isna(values)
if isinstance(s, (Timedelta, Timestamp)) and getattr(s, "tz", None) is None:
return _compare_or_regex_search(
maybe_convert_objects(values), s.asm8, regex
)
return _compare_or_regex_search(values, s, regex)
masks = [comp(s, regex) for i, s in enumerate(src_list)]
result_blocks = []
src_len = len(src_list) - 1
for blk in self.blocks:
# it's possible to get multiple result blocks here
# replace ALWAYS will return a list
rb = [blk if inplace else blk.copy()]
for i, (s, d) in enumerate(zip(src_list, dest_list)):
# TODO: assert/validate that `d` is always a scalar?
new_rb = []
for b in rb:
m = masks[i][b.mgr_locs.indexer]
convert = i == src_len
result = b._replace_coerce(
mask=m,
to_replace=s,
value=d,
inplace=inplace,
convert=convert,
regex=regex,
)
if m.any():
new_rb = _extend_blocks(result, new_rb)
else:
new_rb.append(b)
rb = new_rb
result_blocks.extend(rb)
bm = self.__class__(result_blocks, self.axes)
bm._consolidate_inplace()
return bm
def is_consolidated(self):
"""
Return True if the blocks are consolidated, i.e. there is at most one block per dtype
"""
if not self._known_consolidated:
self._consolidate_check()
return self._is_consolidated
def _consolidate_check(self):
ftypes = [blk.ftype for blk in self.blocks]
self._is_consolidated = len(ftypes) == len(set(ftypes))
self._known_consolidated = True
@property
def is_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return len(self.blocks) > 1
@property
def is_numeric_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return all(block.is_numeric for block in self.blocks)
@property
def is_datelike_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return any(block.is_datelike for block in self.blocks)
@property
def any_extension_types(self):
"""Whether any of the blocks in this manager are extension blocks"""
return any(block.is_extension for block in self.blocks)
@property
def is_view(self):
""" return a boolean if we are a single block and are a view """
if len(self.blocks) == 1:
return self.blocks[0].is_view
# It is technically possible to figure out which blocks are views
# e.g. [ b.values.base is not None for b in self.blocks ]
# but then we have the case of possibly some blocks being a view
# and some blocks not. setting in theory is possible on the non-view
# blocks w/o causing a SettingWithCopy raise/warn. But this is a bit
# complicated
return False
def get_bool_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_bool], copy)
def get_numeric_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_numeric], copy)
def combine(self, blocks, copy=True):
""" return a new manager with the blocks """
if len(blocks) == 0:
return self.make_empty()
# FIXME: optimization potential
indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks]))
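# inv_indexer maps each original item position to its position in the sorted `indexer`, so block placements can be re-labelled for the new items axis below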
inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])
new_blocks = []
for b in blocks:
b = b.copy(deep=copy)
b.mgr_locs = algos.take_1d(
inv_indexer, b.mgr_locs.as_array, axis=0, allow_fill=False
)
new_blocks.append(b)
axes = list(self.axes)
axes[0] = self.items.take(indexer)
return self.__class__(new_blocks, axes, do_integrity_check=False)
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(slobj)
else:
slicer = [slice(None)] * (axis + 1)
slicer[axis] = slobj
slicer = tuple(slicer)
new_blocks = [blk.getitem_block(slicer) for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
bm = self.__class__(new_blocks, new_axes, do_integrity_check=False)
bm._consolidate_inplace()
return bm
def __contains__(self, item):
return item in self.items
@property
def nblocks(self):
return len(self.blocks)
def copy(self, deep=True):
"""
Make deep or shallow copy of BlockManager
Parameters
----------
deep : boolean or string, default True
If False, return shallow copy (do not copy data)
If 'all', copy data and a deep copy of the index
Returns
-------
copy : BlockManager
"""
# this preserves the notion of view copying of axes
if deep:
if deep == "all":
copy = lambda ax: ax.copy(deep=True)
else:
copy = lambda ax: ax.view()
new_axes = [copy(ax) for ax in self.axes]
else:
new_axes = list(self.axes)
return self.apply("copy", axes=new_axes, deep=deep, do_integrity_check=False)
def as_array(self, transpose=False, items=None):
"""Convert the blockmanager data into an numpy array.
Parameters
----------
transpose : boolean, default False
If True, transpose the return array
items : list of strings or None
Names of block items that will be included in the returned
array. ``None`` means that all block items will be used
Returns
-------
arr : ndarray
"""
if len(self.blocks) == 0:
arr = np.empty(self.shape, dtype=float)
return arr.transpose() if transpose else arr
if items is not None:
mgr = self.reindex_axis(items, axis=0)
else:
mgr = self
if self._is_single_block and mgr.blocks[0].is_datetimetz:
# TODO(Block.get_values): Make DatetimeTZBlock.get_values
# always be object dtype. Some callers seem to want the
# DatetimeArray (previously DTI)
arr = mgr.blocks[0].get_values(dtype=object)
elif self._is_single_block or not self.is_mixed_type:
arr = np.asarray(mgr.blocks[0].get_values())
else:
arr = mgr._interleave()
return arr.transpose() if transpose else arr
def _interleave(self):
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
dtype = _interleaved_dtype(self.blocks)
# TODO: https://github.com/pandas-dev/pandas/issues/22791
# Give EAs some input on what happens here. Sparse needs this.
if is_sparse(dtype):
dtype = dtype.subtype
elif is_extension_array_dtype(dtype):
dtype = "object"
result = np.empty(self.shape, dtype=dtype)
itemmask = np.zeros(self.shape[0])
for blk in self.blocks:
rl = blk.mgr_locs
result[rl.indexer] = blk.get_values(dtype)
itemmask[rl.indexer] = 1
if not itemmask.all():
raise AssertionError("Some items were not contained in blocks")
return result
def to_dict(self, copy=True):
"""
Return a dict of str(dtype) -> BlockManager
Parameters
----------
copy : boolean, default True
Returns
-------
values : a dict of dtype -> BlockManager
Notes
-----
This consolidates based on str(dtype)
"""
self._consolidate_inplace()
bd = {}
for b in self.blocks:
bd.setdefault(str(b.dtype), []).append(b)
return {dtype: self.combine(blocks, copy=copy) for dtype, blocks in bd.items()}
def fast_xs(self, loc):
"""
get a cross section for a given location in the
items; handle duplicates
return the result; it *could* be a view in the case of a
single block
"""
if len(self.blocks) == 1:
return self.blocks[0].iget((slice(None), loc))
items = self.items
# non-unique (GH4726)
if not items.is_unique:
result = self._interleave()
if self.ndim == 2:
result = result.T
return result[loc]
# unique
dtype = _interleaved_dtype(self.blocks)
n = len(items)
if is_extension_array_dtype(dtype):
# we'll eventually construct an ExtensionArray.
result = np.empty(n, dtype=object)
else:
result = np.empty(n, dtype=dtype)
for blk in self.blocks:
# Such assignment may incorrectly coerce NaT to None
# result[blk.mgr_locs] = blk._slice((slice(None), loc))
for i, rl in enumerate(blk.mgr_locs):
result[rl] = blk.iget((i, loc))
if is_extension_array_dtype(dtype):
result = dtype.construct_array_type()._from_sequence(result, dtype=dtype)
return result
def consolidate(self):
"""
Join together blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
bm = self.__class__(self.blocks, self.axes)
bm._is_consolidated = False
bm._consolidate_inplace()
return bm
def _consolidate_inplace(self):
if not self.is_consolidated():
self.blocks = tuple(_consolidate(self.blocks))
self._is_consolidated = True
self._known_consolidated = True
self._rebuild_blknos_and_blklocs()
def get(self, item):
"""
Return values for selected item (ndarray or BlockManager).
"""
if self.items.is_unique:
if not isna(item):
loc = self.items.get_loc(item)
else:
indexer = np.arange(len(self.items))[isna(self.items)]
# allow a single nan location indexer
if not is_scalar(indexer):
if len(indexer) == 1:
loc = indexer.item()
else:
raise ValueError("cannot label index with a null key")
return self.iget(loc)
else:
if
|
isna(item)
|
pandas.core.dtypes.missing.isna
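For reference, this is the same missing-value check exposed publicly as pandas.isna; it works on scalars and array-likes alike.
import numpy as np
import pandas as pd

print(pd.isna(np.nan))   # True
print(pd.isna(None))     # True
print(pd.isna(pd.NaT))   # True
print(pd.isna(pd.Series([1.0, np.nan])).tolist())  # [False, True]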
|
# -*- coding: utf-8 -*-
"""
These test the public routines exposed in types/common.py related to
inference that are not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize(
"ll", [{}, {'A': 1}, Series([1])])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize(
"ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])])
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike(object):
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key):
return self.d.__contains__(key)
d = DictLike({1: 2})
result =
|
inference.is_dict_like(d)
|
pandas.core.dtypes.inference.is_dict_like
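A short sketch of the duck-typing rule exercised by the test above; the same helper is exposed publicly as pandas.api.types.is_dict_like (an object needs keys, __getitem__ and __contains__).
import pandas as pd
from pandas.api.types import is_dict_like

print(is_dict_like({"a": 1}))        # True
print(is_dict_like(pd.Series([1])))  # True
print(is_dict_like([1, 2, 3]))       # False
print(is_dict_like("abc"))           # False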
|
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas._libs.algos import Infinity, NegInfinity
import pandas.util._test_decorators as td
from pandas import DataFrame, Series
import pandas._testing as tm
class TestRank:
s = Series([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3])
df = DataFrame({"A": s, "B": s})
results = {
"average": np.array([1.5, 5.5, 7.0, 3.5, np.nan, 3.5, 1.5, 8.0, np.nan, 5.5]),
"min": np.array([1, 5, 7, 3, np.nan, 3, 1, 8, np.nan, 5]),
"max": np.array([2, 6, 7, 4, np.nan, 4, 2, 8, np.nan, 6]),
"first": np.array([1, 5, 7, 3, np.nan, 4, 2, 8, np.nan, 6]),
"dense": np.array([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3]),
}
@pytest.fixture(params=["average", "min", "max", "first", "dense"])
def method(self, request):
"""
Fixture for trying all rank methods
"""
return request.param
@td.skip_if_no_scipy
def test_rank(self, float_frame):
import scipy.stats # noqa:F401
from scipy.stats import rankdata
float_frame["A"][::2] = np.nan
float_frame["B"][::3] = np.nan
float_frame["C"][::4] = np.nan
float_frame["D"][::5] = np.nan
ranks0 = float_frame.rank()
ranks1 = float_frame.rank(1)
mask = np.isnan(float_frame.values)
fvals = float_frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fvals)
exp0[mask] = np.nan
exp1 = np.apply_along_axis(rankdata, 1, fvals)
exp1[mask] = np.nan
tm.assert_almost_equal(ranks0.values, exp0)
|
tm.assert_almost_equal(ranks1.values, exp1)
|
pandas._testing.assert_almost_equal
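For reference, assert_almost_equal compares scalars or array-likes within a tolerance and raises AssertionError on mismatch; NaNs in matching positions compare equal, which is why the ranked arrays above pass. In older pandas the same helper lives in pandas.util.testing.
import numpy as np
import pandas._testing as tm

tm.assert_almost_equal(1.0 + 1e-9, 1.0)  # within default tolerance
tm.assert_almost_equal(np.array([1.0, np.nan]), np.array([1.0, np.nan]))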
|
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
def plot_dataframe(data, x, y, category, title='confirmed cases by region'):
fig, ax = plt.subplots(1, 1, figsize=(10, 10), dpi=200)
sns.lineplot(x=x,
y=y,
hue=category,
data=data
).set_title(title)
plt.legend(ncol=4)
plt.xticks(rotation=90)
plt.show()
# for region in regions:
# region_data = data.loc[data['region'] == region]
# pass
def plot_spain(data, x, features, title='confirmed cases by region'):
fig, axes = plt.subplots(5, 4, figsize=(20, 25), dpi=200)
axes = [axes[i, j] for i in range(5) for j in range(4)]
y_max = 0
for feature in features:
y_max = max(y_max, int(np.max(data[feature]) * 1.1))
for i, region in enumerate(
|
pd.unique(data['region'])
|
pandas.unique
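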
|
###############
#
# Transform R to Python Copyright (c) 2019 <NAME> Released under the MIT license
#
###############
import os
import numpy as np
import pystan
import pandas
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.special import expit as logistic
germination_dat =
|
pandas.read_csv('3-9-1-germination.csv')
|
pandas.read_csv
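A hedged sketch of the call being completed; only the CSV path comes from the snippet above, and the inspection lines are illustrative.
import pandas

germination_dat = pandas.read_csv('3-9-1-germination.csv')
print(germination_dat.head())   # first few rows
print(germination_dat.shape)    # (n_rows, n_columns)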
|
from typing import Any, Dict, List, Tuple
import numpy as np
import pandas as pd
import streamlit as st
@st.cache(ttl=300)
def remove_empty_cols(df: pd.DataFrame) -> Tuple[pd.DataFrame, List[Any]]:
"""Remove columns with strictly less than 2 distinct values in input dataframe.
Parameters
----------
df : pd.DataFrame
Input dataframe whose columns will be checked and potentially removed.
Returns
-------
pd.DataFrame
Dataframe with empty columns removed.
list
List of columns that have been removed.
"""
count_cols = df.nunique(dropna=False)
empty_cols = list(count_cols[count_cols < 2].index)
return df.drop(empty_cols, axis=1), empty_cols
def print_empty_cols(empty_cols: List[Any]) -> None:
"""Displays a message in streamlit dashboard if the input list is not empty.
Parameters
----------
empty_cols : list
List of columns that have been removed.
"""
L = len(empty_cols)
if L > 0:
st.error(
f'The following column{"s" if L > 1 else ""} ha{"ve" if L > 1 else "s"} been removed because '
f'{"they have" if L > 1 else "it has"} <= 1 distinct values: {", ".join(empty_cols)}'
)
@st.cache(suppress_st_warning=True, ttl=300)
def format_date_and_target(
df_input: pd.DataFrame,
date_col: str,
target_col: str,
config: Dict[Any, Any],
load_options: Dict[Any, Any],
) -> pd.DataFrame:
"""Formats date and target columns of input dataframe.
Parameters
----------
df_input : pd.DataFrame
Input dataframe whose columns will be formatted.
date_col : str
Name of date column in input dataframe.
target_col : str
Name of target column in input dataframe.
config : Dict
Lib configuration dictionary.
load_options : Dict
Loading options selected by user.
Returns
-------
pd.DataFrame
Dataframe with columns formatted.
"""
df = df_input.copy() # To avoid CachedObjectMutationWarning
df = _format_date(df, date_col, load_options, config)
df = _format_target(df, target_col, config)
df = _rename_cols(df, date_col, target_col)
return df
def _format_date(
df: pd.DataFrame, date_col: str, load_options: Dict[Any, Any], config: Dict[Any, Any]
) -> pd.DataFrame:
"""Formats date column of input dataframe.
Parameters
----------
df : pd.DataFrame
Input dataframe whose columns will be formatted.
date_col : str
Name of date column in input dataframe.
load_options : Dict
Loading options selected by user.
config : Dict
Lib config dictionary containing information about default date format.
Returns
-------
pd.DataFrame
Dataframe with date column formatted.
"""
try:
date_series = pd.to_datetime(df[date_col])
if __check_date_format(date_series) | (
config["dataprep"]["date_format"] != load_options["date_format"]
):
date_series = pd.to_datetime(df[date_col], format=load_options["date_format"])
df[date_col] = date_series
days_range = (df[date_col].max() - df[date_col].min()).days
sec_range = (df[date_col].max() - df[date_col].min()).seconds
if ((days_range < 1) & (sec_range < 1)) | (np.isnan(days_range) & np.isnan(sec_range)):
st.error(
"Please select the correct date column (selected column has a time range < 1s)."
)
st.stop()
return df
except:
st.error(
"Please select a valid date format (selected column can't be converted into date)."
)
st.stop()
def __check_date_format(date_series: pd.Series) -> bool:
"""Checks whether the date column has been correctly converted to datetime.
Parameters
----------
date_series : pd.Series
Date column that has been converted.
Returns
-------
bool
True if the conversion looks incorrect (year, month and day all collapsed to a single value), False otherwise.
"""
test1 = date_series.map(lambda x: x.year).nunique() < 2
test2 = date_series.map(lambda x: x.month).nunique() < 2
test3 = date_series.map(lambda x: x.day).nunique() < 2
if test1 & test2 & test3:
return True
else:
return False
def _format_target(df: pd.DataFrame, target_col: str, config: Dict[Any, Any]) -> pd.DataFrame:
"""Formats target column of input dataframe.
Parameters
----------
df : pd.DataFrame
Input dataframe whose columns will be formatted.
target_col : str
Name of target column in input dataframe.
config : Dict
Lib configuration dictionary (used for target validity checks).
Returns
-------
pd.DataFrame
Dataframe with date column formatted.
"""
try:
df[target_col] = df[target_col].astype("float")
if df[target_col].nunique() < config["validity"]["min_target_cardinality"]:
st.error(
"Please select the correct target column (should be numerical, not categorical)."
)
st.stop()
return df
except:
st.error("Please select the correct target column (should be of type int or float).")
st.stop()
def _rename_cols(df: pd.DataFrame, date_col: str, target_col: str) -> pd.DataFrame:
"""Renames date and target columns of input dataframe.
Parameters
----------
df : pd.DataFrame
Input dataframe whose columns will be renamed.
date_col : str
Name of date column in input dataframe.
target_col : str
Name of target column in input dataframe.
Returns
-------
pd.DataFrame
Dataframe with columns renamed.
"""
if (target_col != "y") and ("y" in df.columns):
df = df.rename(columns={"y": "y_2"})
if (date_col != "ds") and ("ds" in df.columns):
df = df.rename(columns={"ds": "ds_2"})
df = df.rename(columns={date_col: "ds", target_col: "y"})
return df
# NB: date_col and target_col not used, only added to avoid unexpected caching when their values change
@st.cache(ttl=300)
def filter_and_aggregate_df(
df_input: pd.DataFrame,
dimensions: Dict[Any, Any],
config: Dict[Any, Any],
date_col: str,
target_col: str,
) -> Tuple[pd.DataFrame, List[Any]]:
"""Filters and aggregates input dataframe according to dimensions dictionary specifications.
Parameters
----------
df_input : pd.DataFrame
Input dataframe that will be filtered and/or aggregated.
dimensions : Dict
Filtering and aggregation specifications.
config : Dict
Lib configuration dictionary.
date_col : str
Name of date column in input dataframe.
target_col : str
Name of target column in input dataframe.
Returns
-------
pd.DataFrame
Dataframe filtered and/or aggregated.
list
List of columns removed from input dataframe.
"""
df = df_input.copy() # To avoid CachedObjectMutationWarning
df = _filter(df, dimensions)
df, cols_to_drop = _format_regressors(df, config)
df = _aggregate(df, dimensions)
return df, cols_to_drop
def _filter(df: pd.DataFrame, dimensions: Dict[Any, Any]) -> pd.DataFrame:
"""Filters input dataframe according to dimensions dictionary specifications.
Parameters
----------
df : pd.DataFrame
        Input dataframe that will be filtered.
dimensions : Dict
Filtering specifications.
Returns
-------
pd.DataFrame
Filtered dataframe.
"""
filter_cols = list(set(dimensions.keys()) - {"agg"})
for col in filter_cols:
df = df.loc[df[col].isin(dimensions[col])]
return df.drop(filter_cols, axis=1)
def _format_regressors(df: pd.DataFrame, config: Dict[Any, Any]) -> Tuple[pd.DataFrame, List[Any]]:
"""Format some columns in input dataframe.
Parameters
----------
df : pd.DataFrame
Input dataframe whose columns will be formatted.
config : Dict
Lib configuration dictionary.
Returns
-------
pd.DataFrame
Formatted dataframe.
list
List of columns removed from input dataframe.
"""
cols_to_drop = []
for col in set(df.columns) - {"ds", "y"}:
if df[col].nunique(dropna=False) < 2:
cols_to_drop.append(col)
elif df[col].nunique(dropna=False) == 2:
df[col] = df[col].map(dict(zip(df[col].unique(), [0, 1])))
elif df[col].nunique() <= config["validity"]["max_cat_reg_cardinality"]:
df = __one_hot_encoding(df, col)
else:
try:
df[col] = df[col].astype("float")
except:
cols_to_drop.append(col)
return df.drop(cols_to_drop, axis=1), cols_to_drop
def __one_hot_encoding(df: pd.DataFrame, col: str) -> pd.DataFrame:
"""Applies one-hot encoding to some columns of input dataframe.
Parameters
----------
df : pd.DataFrame
Input dataframe whose columns will be one-hot encoded.
    col : str
        Name of the column to one-hot encode.
Returns
-------
pd.DataFrame
One-hot encoded dataframe.
"""
df = pd.concat([df, pd.get_dummies(df[col], prefix=col)], axis=1)
return df.drop(col, axis=1)
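# Illustrative doctest-style sketch (not part of the original module): pd.get_dummies expands a
# categorical regressor into one indicator column per level, prefixed with the column name.
# >>> toy = pd.DataFrame({"ds": [1, 2, 3], "channel": ["web", "shop", "web"]})
# >>> list(__one_hot_encoding(toy, "channel").columns)
# ['ds', 'channel_shop', 'channel_web']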
def print_removed_cols(cols_removed: List[Any]) -> None:
"""Displays a message in streamlit dashboard if the input list is not empty.
Parameters
----------
cols_removed : list
List of columns that have been removed.
"""
L = len(cols_removed)
if L > 0:
st.error(
f'The following column{"s" if L > 1 else ""} ha{"ve" if L > 1 else "s"} been removed because '
f'{"they are" if L > 1 else "it is"} neither the target, '
f'nor a dimension, nor a potential regressor: {", ".join(cols_removed)}'
)
def _aggregate(df: pd.DataFrame, dimensions: Dict[Any, Any]) -> pd.DataFrame:
"""Aggregates input dataframe according to dimensions dictionary specifications.
Parameters
----------
    df : pd.DataFrame
        Input dataframe that will be aggregated.
    dimensions : Dict
        Aggregation specifications.
Returns
-------
pd.DataFrame
Aggregated dataframe.
"""
cols_to_agg = set(df.columns) - {"ds", "y"}
agg_dict = {col: "mean" if df[col].nunique() > 2 else "max" for col in cols_to_agg}
agg_dict["y"] = dimensions["agg"].lower()
return df.groupby("ds").agg(agg_dict).reset_index()
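# Illustrative sketch (not part of the original module): with a toy frame holding ds="2021-01-01"
# twice and "2021-01-02" once, y=[1, 2, 3], a binary regressor promo=[0, 1, 0] and a continuous
# regressor price=[9.5, 10.5, 11.0], and dimensions={"agg": "Sum"}, agg_dict becomes
# {"promo": "max", "price": "mean", "y": "sum"} (promo has only two distinct values, price more
# than two), so the result has one row per "ds" with y summed, promo maxed and price averaged.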
@st.cache(ttl=300)
def format_datetime(df_input: pd.DataFrame, resampling: Dict[Any, Any]) -> pd.DataFrame:
"""Formats date column to datetime in input dataframe.
Parameters
----------
df_input : pd.DataFrame
Input dataframe whose date column will be formatted to datetime.
resampling : Dict
Dictionary whose "freq" key contains the frequency of input dataframe.
Returns
-------
pd.DataFrame
Dataframe with date column formatted to datetime.
"""
df = df_input.copy() # To avoid CachedObjectMutationWarning
if resampling["freq"][-1] in ["H", "s"]:
df["ds"] = df["ds"].map(lambda x: x.strftime("%Y-%m-%d %H:%M:%S"))
df["ds"] =
|
pd.to_datetime(df["ds"])
|
pandas.to_datetime
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 20 14:08:35 2019
@author: Team BTC - <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
"""
# Sorry, the code isn't very efficient. Because of time constraints and the number of people working on the project, we couldn't do all the automation we would have liked to do.
# Code in block comments should not be run, as it will make changes to the cloud database
# %% Importing libraries
# You may need to install dnspython in order to work with cloud server
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
import json
import pandas as pd
import numpy as np
from tqdm import tqdm
from datetime import datetime as dt
import os
import time
import re
import copy
from textblob import TextBlob
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem.porter import PorterStemmer
porter = PorterStemmer()  # stemmer used by the dictionary-building functions below
from datetime import timedelta
from pymongo import MongoClient
import statsmodels.formula.api as smf
import statsmodels.api as sm
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.tsa.stattools import adfuller
import matplotlib.pyplot as plt
from statsmodels.tsa.api import VAR
#os.chdir('H:/Documents/Alternance/Project/')
# %% Function to scrape data from StockTwits and add it to the cloud server
# The function has 2 inputs:
# - Symbol of the asset, as a string
# - Rate limit: number of requests per execution, as an integer
def get_stwits_data(symbol,rate_limit):
client = MongoClient('mongodb+srv://Group_fintech:[email protected]/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
exist=0
for q in db['{}'.format(symbol)].aggregate([
{ "$group": {
"_id": None,
"min": { "$min": "$ID" }
}}
]):
exist=1
min_prev_id=q['min']
http = urllib3.PoolManager()
mid=[]
duplicates=0
for j in tqdm(range(rate_limit)):
if exist==0:
url = "https://api.stocktwits.com/api/2/streams/symbol/{}.json".format(symbol)
elif exist!=0 and len(mid)==0:
url = "https://api.stocktwits.com/api/2/streams/symbol/{}.json?max={}".format(symbol,min_prev_id)
else:
min_ID=min(mid)
url = "https://api.stocktwits.com/api/2/streams/symbol/{}.json?max={}".format(symbol,min_ID)
r = http.request('GET', url)
try:
data = json.loads(r.data)
except:
print('Decode error, retry again')
continue
if duplicates==1:
            print('\nThere are duplicates in the result. Someone else may be running the scraper. \nPlease try again later.')
break
if data["response"]["status"] != 200:
print("\nYour request was denied, retry in 1 hour")
time.sleep(3600)
continue
# insert_element=[]
# break
for element in data["messages"]:
mid.append(element["id"])
symbol_list=[]
for s in element['symbols']:
symbol_list.append(s['symbol'])
try:
insert_element = {"ID": element["id"], "TimeStamp": element["created_at"], "User": element["user"]["username"], "Content": element["body"],"Sentiment": (element["entities"]["sentiment"]["basic"]=="Bullish")*2-1,'Symbols':symbol_list}
except:
insert_element = {"ID": element["id"], "TimeStamp": element["created_at"], "User": element["user"]["username"], "Content": element["body"],"Sentiment": 0,'Symbols':symbol_list}
try:
result = db['{}'.format(symbol)].insert_one(insert_element)
except:
duplicates=1
break
return insert_element
# %% Execution of the function
symbol='BTC.X'
rate_limit=2000
last_ele=get_stwits_data(symbol,rate_limit)
# %% #Creating custom lexicon
#%% Finding the time interval of the database
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
#Getting the minimum id
for q in db['BTC.X'].aggregate([
{ "$group": {
"_id": None,
"min": { "$min": "$ID" }
}}
]):
minID=q['min']
#Getting the timestamp from the min ID
for post in db['BTC.X'].find({'ID':minID}):
start_time=post['TimeStamp']
#Getting the max id
for q in db['BTC.X'].aggregate([
{ "$group": {
"_id": None,
"max": { "$max": "$ID" }
}}
]):
maxID=q['max']
#Getting the timestamp from the max ID
for post in db['BTC.X'].find({'ID':maxID}):
end_time=post['TimeStamp']
start_time=dt.strptime(start_time,'%Y-%m-%dT%H:%M:%SZ')
end_time=dt.strptime(end_time,'%Y-%m-%dT%H:%M:%SZ')
period=np.arange(dt(start_time.year,start_time.month,start_time.day),dt(end_time.year,end_time.month,end_time.day),timedelta(days=1))
#%% Creating dictionary
#Creating functions to count word frequencies in positive and negative posts
def create_positive_dictionary_by_day(day):
dictionary=pd.DataFrame(columns=['Word','Frequency'])
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
sentimental=1
for documents in db['BTC.X'].find({'Sentiment':sentimental,"TimeStamp":{"$regex": u"{}-{:02d}-{:02d}".format(day.astype(object).year,day.astype(object).month,day.astype(object).day)}}):
word_list=re.findall(r"[\w']+|[.,!?;$]", documents['Content'])
word_list = [porter.stem(t) for t in word_list]
for word in word_list:
if word in dictionary['Word'].tolist():
frq=copy.copy(dictionary.iloc[dictionary.index[dictionary['Word']==word].tolist()[0]][1])+1
dictionary.at[dictionary.index[dictionary['Word']==word].tolist()[0],'Frequency']=frq
else:
dictionary=dictionary.append({'Word': word ,'Frequency':1}, ignore_index=True)
return dictionary
def create_negative_dictionary_by_day(day):
dictionary=pd.DataFrame(columns=['Word','Frequency'])
client = MongoClient('mongodb+srv://Group_fintech:<EMAIL>/test?retryWrites=true&w=majority')
db=client['SorbonneBigData']
sentimental=-1
for documents in db['BTC.X'].find({'Sentiment':sentimental,"TimeStamp":{"$regex": u"{}-{:02d}-{:02d}".format(day.astype(object).year,day.astype(object).month,day.astype(object).day)}}):
word_list=re.findall(r"[\w']+|[.,!?;$]", documents['Content'])
word_list = [porter.stem(t) for t in word_list]
for word in word_list:
if word in dictionary['Word'].tolist():
frq=copy.copy(dictionary.iloc[dictionary.index[dictionary['Word']==word].tolist()[0]][1])+1
dictionary.at[dictionary.index[dictionary['Word']==word].tolist()[0],'Frequency']=frq
else:
dictionary=dictionary.append({'Word': word ,'Frequency':1}, ignore_index=True)
return dictionary
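#A faster equivalent sketch (an assumption, not the approach used above; day_regex stands for the
#same "{}-{:02d}-{:02d}" pattern built above): collect all stemmed tokens of a day into one list
#and let pandas count them:
# tokens = []
# for doc in db['BTC.X'].find({'Sentiment': sentimental, "TimeStamp": {"$regex": day_regex}}):
#     tokens += [porter.stem(t) for t in re.findall(r"[\w']+|[.,!?;$]", doc['Content'])]
# dictionary = pd.Series(tokens).value_counts().rename_axis('Word').reset_index(name='Frequency')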
from multiprocessing import Pool
pool = Pool()
#creating positive dictionary
df=list(tqdm(pool.imap(create_positive_dictionary_by_day, period), total=len(period)))
positive_dictionary=df[0].set_index('Word')
for i in tqdm(range(1,len(df))):
positive_dictionary=positive_dictionary.add(df[i].set_index('Word'), fill_value=0)
#creating negative dictionary
df=list(tqdm(pool.imap(create_negative_dictionary_by_day, period), total=len(period)))
negative_dictionary=df[0].set_index('Word')
for i in tqdm(range(1,len(df))):
negative_dictionary=negative_dictionary.add(df[i].set_index('Word'), fill_value=0)
negative_dictionary=negative_dictionary.sort_values('Frequency',ascending=False)
positive_dictionary=positive_dictionary.sort_values('Frequency',ascending=False)
positive_dictionary.columns=['Positive Freq']
negative_dictionary.columns=['Negative Freq']
positive_dictionary=positive_dictionary/db['BTC.X'].count_documents({'Sentiment':1})
negative_dictionary=negative_dictionary/db['BTC.X'].count_documents({'Sentiment':-1})
#Combining both dictionary
final_dict=positive_dictionary.add(negative_dictionary, fill_value=0).sort_values('Positive Freq',ascending=False)
final_dict['Pos over Neg']=final_dict['Positive Freq']/final_dict['Negative Freq']
#Removing stopwords from the dictionary
nltk.download('stopwords')
stop_words = set(stopwords.words('english'))
final_dict=final_dict.reset_index()
for i in final_dict['Word']:
if i in stop_words:
final_dict=final_dict[final_dict['Word']!=i]
#Removing words below the threshold
final_dict=final_dict.fillna(value=0)
final_dict=final_dict[(final_dict['Negative Freq']>0.0005) | (final_dict['Positive Freq']>0.0005)]
final_dict.fillna(value=0).sort_values('Pos over Neg',ascending=False).to_csv('Simple_Dictionary2.csv')
#%% Creating positive and negative word list from the lexicon
os.chdir('H:/Documents/Alternance/Project/')
lexicon=
|
pd.read_csv('Simple_Dictionary2.csv')
|
pandas.read_csv
|
#----------------------------------------------------------------------------------------------
####################
# IMPORT LIBRARIES #
####################
import streamlit as st
import pandas as pd
import numpy as np
import plotly as dd
import plotly.express as px
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.font_manager
import plotly.graph_objects as go
import functions as fc
import os
import altair as alt
import statsmodels.api as sm
from scipy import stats
from sklearn.metrics import make_scorer, mean_squared_error, r2_score, mean_absolute_error, explained_variance_score, roc_auc_score, max_error, log_loss, average_precision_score, precision_recall_curve, auc, roc_curve, confusion_matrix, recall_score, precision_score, f1_score, accuracy_score, balanced_accuracy_score, cohen_kappa_score
from sklearn.model_selection import train_test_split
import scipy
import sys
import platform
import base64
from io import BytesIO
from linearmodels import PanelOLS
from linearmodels import RandomEffects
from linearmodels import PooledOLS
#----------------------------------------------------------------------------------------------
def app():
# Clear cache
st.legacy_caching.clear_cache()
# Hide traceback in error messages (comment out for de-bugging)
sys.tracebacklimit = 0
# Show altair tooltip when full screen
st.markdown('<style>#vg-tooltip-element{z-index: 1000051}</style>',unsafe_allow_html=True)
# workaround for Firefox bug- hide the scrollbar while keeping the scrolling functionality
st.markdown("""
<style>
.ReactVirtualized__Grid::-webkit-scrollbar {
display: none;
}
.ReactVirtualized__Grid {
-ms-overflow-style: none; /* IE and Edge */
scrollbar-width: none; /* Firefox */
}
</style>
""", unsafe_allow_html=True)
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++
# RESET INPUT
#Session state
if 'key' not in st.session_state:
st.session_state['key'] = 0
reset_clicked = st.sidebar.button("Reset all your input")
if reset_clicked:
st.session_state['key'] = st.session_state['key'] + 1
st.sidebar.markdown("")
#++++++++++++++++++++++++++++++++++++++++++++
# DATA IMPORT
# File upload section
df_dec = st.sidebar.radio("Get data", ["Use example dataset", "Upload data"], key = st.session_state['key'])
uploaded_data=None
if df_dec == "Upload data":
#st.subheader("Upload your data")
#uploaded_data = st.sidebar.file_uploader("Make sure that dot (.) is a decimal separator!", type=["csv", "txt"])
separator_expander=st.sidebar.expander('Upload settings')
with separator_expander:
a4,a5=st.columns(2)
with a4:
dec_sep=a4.selectbox("Decimal sep.",['.',','], key = st.session_state['key'])
with a5:
col_sep=a5.selectbox("Column sep.",[';', ',' , '|', '\s+', '\t','other'], key = st.session_state['key'])
if col_sep=='other':
col_sep=st.text_input('Specify your column separator', key = st.session_state['key'])
a4,a5=st.columns(2)
with a4:
thousands_sep=a4.selectbox("Thousands x sep.",[None,'.', ' ','\s+', 'other'], key = st.session_state['key'])
if thousands_sep=='other':
thousands_sep=st.text_input('Specify your thousands separator', key = st.session_state['key'])
with a5:
encoding_val=a5.selectbox("Encoding",[None,'utf_8','utf_8_sig','utf_16_le','cp1140','cp1250','cp1251','cp1252','cp1253','cp1254','other'], key = st.session_state['key'])
if encoding_val=='other':
encoding_val=st.text_input('Specify your encoding', key = st.session_state['key'])
# Error handling for separator selection:
if dec_sep==col_sep:
st.sidebar.error("Decimal and column separators cannot be identical!")
elif dec_sep==thousands_sep:
st.sidebar.error("Decimal and thousands separators cannot be identical!")
elif col_sep==thousands_sep:
st.sidebar.error("Column and thousands separators cannot be identical!")
uploaded_data = st.sidebar.file_uploader("Default separators: decimal '.' | column ';'", type=["csv", "txt"])
if uploaded_data is not None:
df = pd.read_csv(uploaded_data, decimal=dec_sep, sep = col_sep,thousands=thousands_sep,encoding=encoding_val, engine='python')
df_name=os.path.splitext(uploaded_data.name)[0]
st.sidebar.success('Loading data... done!')
elif uploaded_data is None:
df = pd.read_csv("default data/Grunfeld.csv", sep = ";|,|\t",engine='python')
df_name="Grunfeld"
else:
df = pd.read_csv("default data/Grunfeld.csv", sep = ";|,|\t",engine='python')
df_name="Grunfeld"
st.sidebar.markdown("")
#Basic data info
n_rows = df.shape[0]
n_cols = df.shape[1]
#++++++++++++++++++++++++++++++++++++++++++++
# SETTINGS
settings_expander=st.sidebar.expander('Settings')
with settings_expander:
st.caption("**Precision**")
user_precision=int(st.number_input('Number of digits after the decimal point',min_value=0,max_value=10,step=1,value=4, key = st.session_state['key']))
st.caption("**Help**")
sett_hints = st.checkbox('Show learning hints', value=False, key = st.session_state['key'])
st.caption("**Appearance**")
sett_wide_mode = st.checkbox('Wide mode', value=False, key = st.session_state['key'])
sett_theme = st.selectbox('Theme', ["Light", "Dark"], key = st.session_state['key'])
#sett_info = st.checkbox('Show methods info', value=False)
#sett_prec = st.number_input('Set the number of diggits for the output', min_value=0, max_value=8, value=2)
st.sidebar.markdown("")
# Check if wide mode
if sett_wide_mode:
fc.wide_mode_func()
# Check theme
if sett_theme == "Dark":
fc.theme_func_dark()
if sett_theme == "Light":
fc.theme_func_light()
fc.theme_func_dl_button()
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++
# DATA PREPROCESSING & VISUALIZATION
st.header("**Panel data**")
    st.markdown("Get your data ready for powerful methods! Let STATY do data cleaning, variable transformations, visualizations and deliver you the stats you need. Specify your data processing preferences and start exploring your data stories right below... ")
# Check if enough data is available
if n_cols >= 2 and n_rows > 0:
st.empty()
else:
st.error("ERROR: Not enough data!")
return
# Specify entity and time
st.markdown("**Panel data specification**")
col1, col2 = st.columns(2)
with col1:
entity_na_warn = False
entity_options = df.columns
entity = st.selectbox("Select variable for entity", entity_options, key = st.session_state['key'])
with col2:
time_na_warn = False
time_options = df.columns
time_options = list(time_options[time_options.isin(df.drop(entity, axis = 1).columns)])
time = st.selectbox("Select variable for time", time_options, key = st.session_state['key'])
if np.where(df[entity].isnull())[0].size > 0:
entity_na_warn = "ERROR: The variable selected for entity has NAs!"
st.error(entity_na_warn)
if np.where(df[time].isnull())[0].size > 0:
time_na_warn = "ERROR: The variable selected for time has NAs!"
st.error(time_na_warn)
if df[time].dtypes != "float64" and df[time].dtypes != "float32" and df[time].dtypes != "int64" and df[time].dtypes != "int32":
time_na_warn = "ERROR: Time variable must be numeric!"
st.error(time_na_warn)
run_models = False
if time_na_warn == False and entity_na_warn == False:
data_empty_container = st.container()
with data_empty_container:
st.empty()
st.empty()
st.empty()
st.empty()
st.empty()
st.empty()
st.empty()
st.empty()
# Make sure time is numeric
df[time] = pd.to_numeric(df[time])
data_exploration_container2 = st.container()
with data_exploration_container2:
st.header("**Data screening and processing**")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# DATA SUMMARY
# Main panel for data summary (pre)
#----------------------------------
dev_expander_dsPre = st.expander("Explore raw panel data info and stats", expanded = False)
st.empty()
with dev_expander_dsPre:
# Default data description:
if uploaded_data == None:
if st.checkbox("Show data description", value = False, key = st.session_state['key']):
st.markdown("**Data source:**")
                        st.markdown("This is the original 11-firm data set from Grunfeld’s Ph.D. thesis (*Grunfeld, 1958, The Determinants of Corporate Investment, Department of Economics, University of Chicago*). For more details see online complements for the article [The Grunfeld Data at 50](https://www.zeileis.org/grunfeld/).")
st.markdown("**Citation:**")
                        st.markdown("<NAME>, <NAME> (2010). “The Grunfeld Data at 50,” German Economic Review, 11(4), 404-417. [doi:10.1111/j.1468-0475.2010.00513.x](https://onlinelibrary.wiley.com/doi/abs/10.1111/j.1468-0475.2010.00513.x)")
st.markdown("**Variables in the dataset:**")
col1,col2=st.columns(2)
col1.write("invest")
col2.write("Gross investment, defined as additions to plant and equipment plus maintenance and repairs in millions of dollars deflated by the implicit price deflator of producers’ durable equipment (base 1947)")
col1,col2=st.columns(2)
col1.write("value")
col2.write("Market value of the firm, defined as the price of common shares at December 31 (or, for WH, IBM and CH, the average price of December 31 and January 31 of the following year) times the number of common shares outstanding plus price of preferred shares at December 31 (or average price of December 31 and January 31 of the following year) times number of preferred shares plus total book value of debt at December 31 in millions of dollars deflated by the implicit GNP price deflator (base 1947)")
col1,col2=st.columns(2)
col1.write("capital")
col2.write("Stock of plant and equipment, defined as the accumulated sum of net additions to plant and equipment deflated by the implicit price deflator for producers’ durable equipment (base 1947) minus depreciation allowance deflated by depreciation expense deflator (10 years moving average of wholesale price index of metals and metal products, base1947)")
col1,col2=st.columns(2)
col1.write("firm")
col2.write("General Motors (GM), US Steel (US), General Electric (GE), Chrysler (CH), Atlantic Refining (AR), IBM, Union Oil (UO), Westinghouse (WH), Goodyear (GY), Diamond Match (DM), American Steel (AS)")
col1,col2=st.columns(2)
col1.write("year")
col2.write("Year ranging from 1935 to 1954")
st.markdown("")
# Show raw data & data info
df_summary = fc.data_summary(df)
if st.checkbox("Show raw data", value = False, key = st.session_state['key']):
st.write(df)
#st.info("Data shape: "+ str(n_rows) + " rows and " + str(n_cols) + " columns")
st.write("Data shape: ", n_rows, " rows and ", n_cols, " columns")
if df[df.duplicated()].shape[0] > 0 or df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
check_nasAnddupl=st.checkbox("Show duplicates and NAs info", value = False, key = st.session_state['key'])
if check_nasAnddupl:
if df[df.duplicated()].shape[0] > 0:
st.write("Number of duplicates: ", df[df.duplicated()].shape[0])
st.write("Duplicate row index: ", ', '.join(map(str,list(df.index[df.duplicated()]))))
if df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
st.write("Number of rows with NAs: ", df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0])
st.write("Rows with NAs: ", ', '.join(map(str,list(pd.unique(np.where(df.isnull())[0])))))
# Show variable info
if st.checkbox('Show variable info', value = False, key = st.session_state['key']):
st.write(df_summary["Variable types"])
# Show summary statistics (raw data)
if st.checkbox('Show summary statistics (raw data)', value = False, key = st.session_state['key']):
st.write(df_summary["ALL"].style.set_precision(user_precision))
# Download link for summary statistics
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df_summary["Variable types"].to_excel(excel_file, sheet_name="variable_info")
df_summary["ALL"].to_excel(excel_file, sheet_name="summary_statistics")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Summary statistics__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download summary statistics</a>
""",
unsafe_allow_html=True)
st.write("")
if fc.get_mode(df).loc["n_unique"].any():
st.caption("** Mode is not unique.")
if sett_hints:
st.info(str(fc.learning_hints("de_summary_statistics")))
dev_expander_anovPre = st.expander("ANOVA for raw panel data", expanded = False)
with dev_expander_anovPre:
if df.shape[1] > 2:
# Target variable
target_var = st.selectbox('Select target variable ', df.drop([entity, time], axis = 1).columns, key = st.session_state['key'])
if df[target_var].dtypes == "int64" or df[target_var].dtypes == "float64":
class_var_options = df.columns
class_var_options = class_var_options[class_var_options.isin(df.drop(target_var, axis = 1).columns)]
clas_var = st.selectbox('Select classifier variable ', [entity, time], key = st.session_state['key'])
# Means and sd by entity
col1, col2 = st.columns(2)
with col1:
df_anova_woTime = df.drop([time], axis = 1)
df_grouped_ent = df_anova_woTime.groupby(entity)
st.write("Mean based on entity:")
st.write(df_grouped_ent.mean()[target_var])
st.write("")
with col2:
st.write("SD based on entity:")
st.write(df_grouped_ent.std()[target_var])
st.write("")
# Means and sd by time
col3, col4 = st.columns(2)
with col3:
df_anova_woEnt= df.drop([entity], axis = 1)
df_grouped_time = df_anova_woEnt.groupby(time)
counts_time = pd.DataFrame(df_grouped_time.count()[target_var])
counts_time.columns = ["count"]
st.write("Mean based on time:")
st.write(df_grouped_time.mean()[target_var])
st.write("")
with col4:
st.write("SD based on time:")
st.write(df_grouped_time.std()[target_var])
st.write("")
col9, col10 = st.columns(2)
with col9:
st.write("Boxplot grouped by entity:")
box_size1 = st.slider("Select box size", 1, 50, 5, key = st.session_state['key'])
# Grouped boxplot by entity
grouped_boxplot_data =
|
pd.DataFrame()
|
pandas.DataFrame
|
from distutils.version import LooseVersion
from warnings import catch_warnings
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
MultiIndex,
Series,
_testing as tm,
bdate_range,
concat,
date_range,
isna,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io.pytables import Term
pytestmark = pytest.mark.single
def test_select_columns_in_where(setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_select_with_dups(setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_select(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
        # all columns as data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
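# Note (illustrative, not part of the original test suite): the strings passed as `where` above
# use the HDFStore query mini-language; a row filter on a data column and a column filter can be
# combined in a single expression, e.g.
#   store.select("df", "A > 0 and columns=['A', 'B']")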
def test_select_dtypes(setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame({"A": np.random.rand(20), "B": np.random.rand(20)})
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
{
"A": np.random.rand(20),
"B": np.random.rand(20),
"index": np.arange(20, dtype="f8"),
}
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# not in first position float with NaN ok too
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
"B": range(300),
"users": ["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ [f"a{i:03d}" for i in range(100)],
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select("df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']")
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + [f"a{i:03d}" for i in range(60)]
result = store.select("df", "ts>=Timestamp('2012-02-01') and users=selector")
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
msg = "can only use an iterator or chunksize on a table"
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = f"index >= '{beg_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = f"index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = f"index > '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = 10_000
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
        # the whole selection falls within the first chunk, so a single frame is returned
assert len(results) == 1
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom I suggest this should
# return [] e.g. `for e in []: print True` never prints
# True.
where = f"index <= '{beg_dt}' & index >= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
def test_frame_select(setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
crit1 = Term("index>=date")
assert crit1.env.scope["date"] == date
crit2 = "columns=['A', 'D']"
crit3 = "columns=A"
result = store.select("frame", [crit1, crit2])
expected = df.loc[date:, ["A", "D"]]
tm.assert_frame_equal(result, expected)
result = store.select("frame", [crit3])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append("df_time", df)
msg = "could not convert string to Timestamp"
with pytest.raises(ValueError, match=msg):
store.select("df_time", "index>0")
# can't select if not written as table
# store['frame'] = df
# with pytest.raises(ValueError):
# store.select('frame', [crit1, crit2])
def test_frame_select_complex(setup_path):
# select via complex criteria
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", data_columns=["string"])
# empty
result = store.select("df", 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select("df", 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")]
tm.assert_frame_equal(result, expected)
# or
result = store.select("df", 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select(
"df", '(index>df.index[3] & index<=df.index[6]) | string="bar"'
)
expected = df.loc[
((df.index > df.index[3]) & (df.index <= df.index[6]))
| (df.string == "bar")
]
tm.assert_frame_equal(result, expected)
# invert
result = store.select("df", 'string!="bar"')
expected = df.loc[df.string != "bar"]
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
msg = "cannot use an invert condition when passing to numexpr"
with pytest.raises(NotImplementedError, match=msg):
store.select("df", '~(string="bar")')
# invert ok for filters
result = store.select("df", "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(["A", "B"])]
tm.assert_frame_equal(result, expected)
# in
result = store.select("df", "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(setup_path):
with ensure_clean_path(["parms.hdf", "hist.hdf"]) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({"A": [1, 1, 2, 2, 3]})
parms.to_hdf(pp, "df", mode="w", format="table", data_columns=["A"])
selection = read_hdf(pp, "df", where="A=[2,3]")
hist = DataFrame(
np.random.randn(25, 1),
columns=["data"],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5) for j in range(5)], names=["l1", "l2"]
),
)
hist.to_hdf(hh, "df", mode="w", format="table")
expected = read_hdf(hh, "df", where="l1=[2, 3, 4]")
# scope with list like
l = selection.index.tolist() # noqa
store = HDFStore(hh)
result = store.select("df", where="l1=l")
tm.assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, "df", where="l1=l")
tm.assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, "df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
# scope with index
store = HDFStore(hh)
result = store.select("df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(setup_path):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
msg = "unable to collapse Joint Filters"
# not implemented
with pytest.raises(NotImplementedError, match=msg):
store.select("df", "columns=['A'] | columns=['B']")
# in theory we could deal with this
with pytest.raises(NotImplementedError, match=msg):
store.select("df", "columns=['A','B'] & columns=['C']")
def test_string_select(setup_path):
# GH 2973
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df["x"] = "none"
df.loc[df.index[2:7], "x"] = ""
store.append("df", df, data_columns=["x"])
result = store.select("df", "x=none")
expected = df[df.x == "none"]
tm.assert_frame_equal(result, expected)
result = store.select("df", "x!=none")
expected = df[df.x != "none"]
tm.assert_frame_equal(result, expected)
df2 = df.copy()
df2.loc[df2.x == "", "x"] = np.nan
store.append("df2", df2, data_columns=["x"])
result = store.select("df2", "x!=none")
expected = df2[isna(df2.x)]
tm.assert_frame_equal(result, expected)
# int ==/!=
df["int"] = 1
df.loc[df.index[2:7], "int"] = 2
store.append("df3", df, data_columns=["int"])
result = store.select("df3", "int=2")
expected = df[df.int == 2]
tm.assert_frame_equal(result, expected)
result = store.select("df3", "int!=2")
expected = df[df.int != 2]
tm.assert_frame_equal(result, expected)
def test_select_as_multiple(setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
with ensure_clean_store(setup_path) as store:
msg = "keys must be a list/tuple"
# no tables stored
with pytest.raises(TypeError, match=msg):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
# exceptions
with pytest.raises(TypeError, match=msg):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
with pytest.raises(TypeError, match=msg):
store.select_as_multiple([None], where=["A>0", "B>0"], selector="df1")
msg = "'No object named df3 in the file'"
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(["df3"], where=["A>0", "B>0"], selector="df1")
with pytest.raises(KeyError, match="'No object named df4 in the file'"):
store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df4"
)
# default select
result = store.select("df1", ["A>0", "B>0"])
expected = store.select_as_multiple(
["df1"], where=["A>0", "B>0"], selector="df1"
)
tm.assert_frame_equal(result, expected)
expected = store.select_as_multiple("df1", where=["A>0", "B>0"], selector="df1")
tm.assert_frame_equal(result, expected)
# multiple
result = store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df1"
)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected, check_freq=False)
# FIXME: 2021-01-20 this is failing with freq None vs 4B on some builds
# multiple (diff selector)
result = store.select_as_multiple(
["df1", "df2"], where="index>df2.index[4]", selector="df2"
)
expected = concat([df1, df2], axis=1)
expected = expected[5:]
tm.assert_frame_equal(result, expected)
# test exception for diff rows
store.append("df3", tm.makeTimeDataFrame(nper=50))
msg = "all tables must have exactly the same nrows!"
with pytest.raises(ValueError, match=msg):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
@pytest.mark.skipif(
LooseVersion(tables.__version__) < LooseVersion("3.1.0"),
reason=("tables version does not support fix for nan selection bug: GH 4858"),
)
def test_nan_selection_bug_4858(setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame({"cols": range(6), "values": range(6)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[0] = np.nan
expected = DataFrame(
{"cols": ["13.0", "14.0", "15.0"], "values": [3.0, 4.0, 5.0]},
index=[3, 4, 5],
)
# write w/o the index on that particular column
store.append("df", df, data_columns=True, index=["cols"])
result = store.select("df", where="values>2.0")
tm.assert_frame_equal(result, expected)
def test_query_with_nested_special_character(setup_path):
df = DataFrame(
{
"a": ["a", "a", "c", "b", "test & test", "c", "b", "e"],
"b": [1, 2, 3, 4, 5, 6, 7, 8],
}
)
expected = df[df.a == "test & test"]
with ensure_clean_store(setup_path) as store:
store.append("test", df, format="table", data_columns=True)
result = store.select("test", 'a = "test & test"')
tm.assert_frame_equal(expected, result)
def test_query_long_float_literal(setup_path):
# GH 14241
df = DataFrame({"A": [1000000000.0009, 1000000000.0011, 1000000000.0015]})
with
|
ensure_clean_store(setup_path)
|
pandas.tests.io.pytables.common.ensure_clean_store
|
import os
import unittest
import pandas as pd
from context import technical as ti
# Change working directory
# This enables running tests from the repository root
if os.getcwd() != os.path.abspath(os.path.dirname(__file__)):
os.chdir('tests/')
# Test results and outputs
class ResultsMACD(unittest.TestCase):
# Input data
test_data = pd.read_csv('test_data/correct_series.csv')
test_data_df = pd.read_csv('test_data/correct_ohlc.csv').drop(columns=['Close'])
# Expected output results
results_line = pd.Series([0. , 0.00104169, 0.00270912, 0.00031245, 0.00247449,
0.00028395, 0.00386753, 0.00524738, 0.00287725, 0.00356132,
0.00101829, 0.00444326, 0.0068335 , 0.01014599, 0.01305172,
0.01167535, 0.01047763, 0.007073 , 0.00497697, 0.00034765,
0.00072977, 0.00086424, 0.00235727, 0.00545893, 0.00259956,
0.00035291, 0.0038989 , 0.01060325, 0.01634667, 0.02331259,
0.03048012, 0.03086871, 0.02802865, 0.02616832, 0.02788499,
0.02887595, 0.03280708, 0.0298124 , 0.02994925, 0.02826488,
0.03516188, 0.04295579, 0.04992572, 0.05473519, 0.05996441,
0.05896691, 0.0603559 , 0.05996981, 0.05965617, 0.06230883,
0.06651554, 0.07333986, 0.08139379, 0.08447225, 0.08655907,
0.08784908, 0.08850358, 0.08719152, 0.08216639, 0.07431071,
0.06214603, 0.05851695, 0.05276871, 0.04764448, 0.04233428,
0.03841456, 0.03268409, 0.03223513, 0.03148887, 0.02978424,
0.02809067])
results_signal = pd.Series([0. , 0.00057872, 0.00145183, 0.00106587, 0.0014849 ,
0.00115938, 0.00184474, 0.00266246, 0.00271207, 0.00290235,
0.00249013, 0.00290958, 0.00374002, 0.00508015, 0.00673261,
0.00774979, 0.00830792, 0.00805641, 0.00743152, 0.00599822,
0.00493472, 0.00411457, 0.00376102, 0.00410221, 0.00380054,
0.00310893, 0.0032673 , 0.00473734, 0.0070628 , 0.01031679,
0.01435345, 0.01765912, 0.01973434, 0.02102179, 0.02239499,
0.0236916 , 0.02551517, 0.02637479, 0.0270898 , 0.02732485,
0.02889242, 0.03170534, 0.03534966, 0.03922698, 0.04337464,
0.04649321, 0.04926582, 0.05140667, 0.0530566 , 0.05490707,
0.05722879, 0.06045103, 0.06463962, 0.06860617, 0.07219676,
0.07532724, 0.07796251, 0.07980832, 0.08027993, 0.07908609,
0.07569807, 0.07226184, 0.06836321, 0.06421946, 0.05984242,
0.05555685, 0.0509823 , 0.04723286, 0.04408406, 0.0412241 ,
0.03859741])
# Input parameters
slow = 24
fast = 12
ma = 9
# Tests
def test_result_macd_line(self):
'''macd function must return macd line with values equal to expected'''
result = ti.MACD(self.test_data, self.slow, self.fast, self.ma)
pd.testing.assert_series_equal(self.results_line, result['macd_line'], check_names=False)
def test_result_macd_signal(self):
'''macd function must return macd signal with values equal to expected'''
result = ti.MACD(self.test_data, self.slow, self.fast, self.ma)
pd.testing.assert_series_equal(self.results_signal, result['macd_signal'], check_names=False)
    def test_result_macd_line_length_size(self):
        '''macd result line must have the same length as input'''
result = ti.MACD(self.test_data, self.slow, self.fast, self.ma)
self.assertEqual(self.results_line.shape[0], result.shape[0])
    def test_result_macd_signal_length_size(self):
        '''macd result signal must have the same length as input'''
result = ti.MACD(self.test_data, self.slow, self.fast, self.ma)
self.assertEqual(self.results_signal.shape[0], result.shape[0])
def test_output_result_matrix_cols_number(self):
'''macd result df must have 2 columns'''
result = ti.MACD(self.test_data, self.slow, self.fast, self.ma)
self.assertEqual(2, result.shape[1])
def test_output_result_matrix_cols_number_full_if_input_series(self):
'''macd result df must have original series plus 4'''
result = ti.MACD(self.test_data, self.slow, self.fast, self.ma, full_output=True)
self.assertEqual((2+4), result.shape[1])
def test_output_result_matrix_cols_number_full_if_input_df(self):
'''macd result df must have original df columns plus 4'''
result = ti.MACD(self.test_data_df, self.slow, self.fast, self.ma, full_output=True)
self.assertEqual((self.test_data_df.shape[1] + 4), result.shape[1])
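# Reference sketch (an assumption about ti.MACD, not taken from its implementation): the
# expected series above look consistent with the usual EMA-based MACD definition,
#   macd_line   = EMA_fast(close) - EMA_slow(close)
#   macd_signal = EMA_ma(macd_line)
# which in pandas is roughly:
#   ema_fast = close.ewm(span=fast, adjust=False).mean()
#   ema_slow = close.ewm(span=slow, adjust=False).mean()
#   macd_line = ema_fast - ema_slow
#   macd_signal = macd_line.ewm(span=ma, adjust=False).mean()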
# Test input data
class BadInputMACD(unittest.TestCase):
# Input data
test_data =
|
pd.read_csv('test_data/correct_series.csv')
|
pandas.read_csv
|
import numpy as np
import pandas as pd
from cellphonedb.src.core.methods import cpdb_statistical_analysis_helper
from cellphonedb.src.core.core_logger import core_logger
from cellphonedb.src.core.models.interaction import interaction_filter
def call(meta: pd.DataFrame,
counts: pd.DataFrame,
interactions: pd.DataFrame,
iterations: int = 1000,
threshold: float = 0.1,
threads: int = 4,
debug_seed: int = -1,
result_precision: int = 3
) -> (pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame):
core_logger.info(
'[Cluster Statistical Analysis Simple] '
'Threshold:{} Iterations:{} Debug-seed:{} Threads:{} Precision:{}'.format(threshold,
iterations,
debug_seed,
threads,
result_precision))
if debug_seed >= 0:
        np.random.seed(debug_seed)
        core_logger.warning('Debug random seed enabled. Set to {}'.format(debug_seed))
interactions_filtered, counts_filtered = prefilters(counts, interactions)
if interactions_filtered.empty or counts_filtered.empty:
return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
clusters = cpdb_statistical_analysis_helper.build_clusters(meta, counts_filtered)
core_logger.info('Running Real Simple Analysis')
cluster_interactions = cpdb_statistical_analysis_helper.get_cluster_combinations(clusters['names'])
base_result = cpdb_statistical_analysis_helper.build_result_matrix(interactions_filtered, cluster_interactions)
real_mean_analysis = cpdb_statistical_analysis_helper.mean_analysis(interactions_filtered, clusters,
cluster_interactions, base_result,
suffixes=('_1', '_2'))
real_percent_analysis = cpdb_statistical_analysis_helper.percent_analysis(clusters, threshold,
interactions_filtered,
cluster_interactions, base_result,
suffixes=('_1', '_2'))
statistical_mean_analysis = cpdb_statistical_analysis_helper.shuffled_analysis(iterations, meta,
counts_filtered,
interactions_filtered,
cluster_interactions,
base_result,
threads,
suffixes=('_1', '_2'))
result_percent = cpdb_statistical_analysis_helper.build_percent_result(real_mean_analysis,
real_percent_analysis,
statistical_mean_analysis,
interactions_filtered,
cluster_interactions, base_result)
pvalues_result, means_result, significant_means, mean_pvalue_result, deconvoluted_result = build_results(
interactions_filtered,
real_mean_analysis,
result_percent,
clusters['means'],
result_precision)
return pvalues_result, means_result, significant_means, mean_pvalue_result, deconvoluted_result
def build_results(interactions: pd.DataFrame,
real_mean_analysis: pd.DataFrame,
result_percent: pd.DataFrame,
clusters_means: dict,
result_precision: int
) -> (pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame):
core_logger.info('Building Simple results')
interacting_pair = cpdb_statistical_analysis_helper.interacting_pair_build(interactions)
interactions_data_result = pd.DataFrame(interactions[['id_cp_interaction', 'name_1', 'name_2', 'ensembl_1',
'ensembl_2', 'source']].copy())
interactions_data_result = pd.concat([interacting_pair, interactions_data_result], axis=1, sort=False)
interactions_data_result['secreted'] = (interactions['secretion_1'] | interactions['secretion_2'])
interactions_data_result['is_integrin'] = (
interactions['integrin_interaction_1'] | interactions['integrin_interaction_2'])
interactions_data_result.rename(
columns={'name_1': 'partner_a', 'name_2': 'partner_b', 'ensembl_1': 'ensembl_a', 'ensembl_2': 'ensembl_b'},
inplace=True)
interactions_data_result['partner_a'] = interactions_data_result['partner_a'].apply(
lambda name: 'simple:{}'.format(name))
interactions_data_result['partner_b'] = interactions_data_result['partner_b'].apply(
lambda name: 'simple:{}'.format(name))
significant_mean_rank, significant_means = cpdb_statistical_analysis_helper.build_significant_means(
real_mean_analysis, result_percent)
result_percent = result_percent.round(result_precision)
real_mean_analysis = real_mean_analysis.round(result_precision)
significant_means = significant_means.round(result_precision)
for key, cluster_means in clusters_means.items():
clusters_means[key] = cluster_means.round(result_precision)
# Document 1
pvalues_result = pd.concat([interactions_data_result, result_percent], axis=1, join='inner', sort=False)
# Document 2
means_result = pd.concat([interactions_data_result, real_mean_analysis], axis=1, join='inner', sort=False)
# Document 3
significant_mean_result = pd.concat([interactions_data_result, significant_mean_rank, significant_means], axis=1,
join='inner', sort=False)
# Document 4
mean_pvalue_result = cpdb_statistical_analysis_helper.mean_pvalue_result_build(real_mean_analysis,
result_percent,
interactions_data_result)
# Document 5
deconvoluted_result = deconvoluted_result_build(clusters_means, interactions)
return pvalues_result, means_result, significant_mean_result, mean_pvalue_result, deconvoluted_result
def deconvoluted_result_build(clusters_means: dict, interactions: pd.DataFrame) -> pd.DataFrame:
deconvoluted_result_1 = pd.DataFrame()
deconvoluted_result_2 = pd.DataFrame()
deconvoluted_result_1[
['ensembl', 'entry_name', 'gene_name', 'name', 'is_complex', 'id_cp_interaction']] = \
interactions[
['ensembl_1', 'entry_name_1', 'gene_name_1', 'name_1', 'is_complex_1', 'id_cp_interaction']]
deconvoluted_result_2[
['ensembl', 'entry_name', 'gene_name', 'name', 'is_complex', 'id_cp_interaction']] = \
interactions[
['ensembl_2', 'entry_name_2', 'gene_name_2', 'name_2', 'is_complex_2', 'id_cp_interaction']]
deconvoluted_result = pd.concat([deconvoluted_result_1, deconvoluted_result_2])
deconvoluted_result.set_index('ensembl', inplace=True)
cluster_counts = pd.DataFrame(index=deconvoluted_result.index)  # api: pandas.DataFrame
import warnings
import numpy as np
import datetime as dt
import os
import json
import pandas as pd
from datetimerange import DateTimeRange
import dateparser
OPERAND_MAPPING_DICT = {
">": 5,
">=": 4,
"=": 3,
"<=": 2,
"<": 1
}
def check_valid_signal(x):
"""Check whether signal is valid, i.e. an array_like numeric, or raise errors.
Parameters
----------
x :
array_like, array of signal
Returns
-------
"""
if isinstance(x, dict) or isinstance(x, tuple):
raise ValueError("Expected array_like input, instead found {"
"0}:".format(type(x)))
if len(x) == 0:
raise ValueError("Empty signal")
types = []
for i in range(len(x)):
types.append(str(type(x[i])))
type_unique = np.unique(np.array(types))
if len(type_unique) != 1 and (type_unique[0].find("int") != -1 or
type_unique[0].find("float") != -1):
raise ValueError("Invalid signal: Expect numeric array, instead found "
"array with types {0}: ".format(type_unique))
if type_unique[0].find("int") == -1 and type_unique[0].find("float") == -1:
raise ValueError("Invalid signal: Expect numeric array, instead found "
"array with types {0}: ".format(type_unique))
return True
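# A minimal usage sketch, assuming check_valid_signal is defined as above;
# the sample arrays below are hypothetical.
_valid = np.array([0.1, 0.5, 0.3])
assert check_valid_signal(_valid) is True      # numeric array -> True
try:
    check_valid_signal(["a", "b"])             # non-numeric entries -> ValueError
except ValueError as _err:
    print(_err)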
def calculate_sampling_rate(timestamps):
"""
Parameters
----------
x : array_like of timestamps, float (unit second)
Returns
-------
float : sampling rate
"""
if isinstance(timestamps[0], float):
timestamps_second = timestamps
else:
try:
v_parse_datetime = np.vectorize(parse_datetime)
timestamps = v_parse_datetime(timestamps)
timestamps_second = []
timestamps_second.append(0)
for i in range(1, len(timestamps)):
timestamps_second.append((timestamps[i] - timestamps[
i - 1]).total_seconds())
except Exception:
sampling_rate = None
return sampling_rate
steps = np.diff(timestamps_second)
sampling_rate = round(1 / np.min(steps[steps != 0]))
return sampling_rate
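# A minimal sketch of the float branch with hypothetical values:
# samples spaced 10 ms apart should yield roughly 100 Hz.
_ts = [i * 0.01 for i in range(5)]
print(calculate_sampling_rate(_ts))   # expected: 100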
def generate_timestamp(start_datetime, sampling_rate, signal_length):
"""
Parameters
----------
start_datetime :
sampling_rate : float
signal_length : int
Returns
-------
list : list of timestamps with length equal to signal_length.
"""
number_of_seconds = (signal_length - 1) / sampling_rate
if start_datetime is None:
start_datetime = dt.datetime.now()
end_datetime = start_datetime + dt.timedelta(seconds=number_of_seconds)
time_range = DateTimeRange(start_datetime, end_datetime)
timestamps = []
for value in time_range.range(dt.timedelta(seconds=1 / sampling_rate)):
timestamps.append(value)
return timestamps
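# A minimal sketch assuming the datetimerange dependency imported above;
# the start time and rate are hypothetical: 5 samples at 1 Hz from a fixed start.
_stamps = generate_timestamp(dt.datetime(2020, 1, 1), 1, 5)
print(len(_stamps), _stamps[0], _stamps[-1])   # expected: 5 timestamps, 1 s apart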
def parse_datetime(string, type='datetime'):
"""
A simple dateparser that detects common datetime formats
Parameters
----------
string : str
a date string in format as denoted below.
Returns
-------
datetime.datetime
datetime object of a time.
"""
# some common formats.
date_formats = ['%Y-%m-%d',
'%d-%m-%Y',
'%d.%m.%Y',
'%Y.%m.%d',
'%d %b %Y',
'%Y/%m/%d',
'%d/%m/%Y']
datime_formats = ['%Y-%m-%d %H:%M:%S.%f',
'%d-%m-%Y %H:%M:%S.%f',
'%d.%m.%Y %H:%M:%S.%f',
'%Y.%m.%d %H:%M:%S.%f',
'%d %b %Y %H:%M:%S.%f',
'%Y/%m/%d %H:%M:%S.%f',
'%d/%m/%Y %H:%M:%S.%f',
'%Y-%m-%d %I:%M:%S.%f',
'%d-%m-%Y %I:%M:%S.%f',
'%d.%m.%Y %I:%M:%S.%f',
'%Y.%m.%d %I:%M:%S.%f',
'%d %b %Y %I:%M:%S.%f',
'%Y/%m/%d %I:%M:%S.%f',
'%d/%m/%Y %I:%M:%S.%f']
if type == 'date':
formats = date_formats
if type == 'datetime':
formats = datime_formats
for f in formats:
try:
return dt.datetime.strptime(string, f)
except:
pass
try:
return dateparser.parse(string)
except:
raise ValueError('Datetime string must be of standard Python format '
'(https://docs.python.org/3/library/time.html), '
'e.g., `%d-%m-%Y`, eg. `24-01-2020`')
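# A minimal sketch with hypothetical strings, one per supported branch:
# a plain date (type='date') and a full datetime with fractional seconds.
print(parse_datetime('24-01-2020', type='date'))       # -> 2020-01-24 00:00:00
print(parse_datetime('2020-01-24 13:45:10.500000'))    # -> 2020-01-24 13:45:10.500000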
def get_moving_average(q, w):
q_padded = np.pad(q, (w // 2, w - 1 - w // 2), mode='edge')
convole = np.convolve(q_padded, np.ones(w) / w, 'valid')
return convole
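# A minimal sketch with a hypothetical signal: edge padding keeps the moving
# average the same length as the input.
_q = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
print(get_moving_average(_q, 3))   # 5 smoothed values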
def parse_rule(name, source):
assert os.path.isfile(source) is True, 'Source file not found'
with open(source) as json_file:
all = json.load(json_file)
try:
sqi = all[name]
except:
raise Exception("SQI {0} not found".format(name))
rule_def, boundaries, label_list = update_rule(sqi['def'],
is_update=False)
return rule_def, \
boundaries, \
label_list
def update_rule(rule_def, threshold_list=[], is_update=True):
if rule_def is None or is_update:
all_rules = []
else:
all_rules = list(np.copy(rule_def))
for threshold in threshold_list:
all_rules.append(threshold)
df = sort_rule(all_rules)
df = decompose_operand(df.to_dict('records'))
boundaries = np.sort(df["value"].unique())
inteveral_label_list = get_inteveral_label_list(df, boundaries)
value_label_list = get_value_label_list(df, boundaries, inteveral_label_list)
label_list = []
for i in range(len(value_label_list)):
label_list.append(inteveral_label_list[i])
label_list.append(value_label_list[i])
label_list.append(inteveral_label_list[-1])
return all_rules, boundaries, label_list
def sort_rule(rule_def):
df = pd.DataFrame(rule_def)
df["value"] = pd.to_numeric(df["value"])
df['operand_order'] = df['op'].map(OPERAND_MAPPING_DICT)
df.sort_values(by=['value', 'operand_order'],
inplace=True,
ascending=[True, True],
ignore_index=True)
return df
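# A minimal sketch with hypothetical rules: sort_rule orders thresholds by
# numeric value first, then by the operand priority defined in
# OPERAND_MAPPING_DICT (so '>=' precedes '>' at the same value).
_rules = [
    {"op": ">", "value": "5", "label": "accept"},
    {"op": "<", "value": "3", "label": "reject"},
    {"op": ">=", "value": "5", "label": "accept"},
]
print(sort_rule(_rules)[["op", "value", "operand_order"]])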
def decompose_operand(rule_dict):
df = pd.DataFrame(rule_dict)  # api: pandas.DataFrame
#! /usr/bin/env python
from __future__ import print_function
from __future__ import division
from manual_preds import manual_train_and_predict
from info_reduc import random_error
from auto_preds import auto_train_and_predict
from sklearn.preprocessing import scale
import pickle
import math
import numpy as np
import pandas as pd
import glob
import os
class LearnSet(object):
"""
A set of parameters (i.e., features and labels) for a machine learning
algorithm, each in the format of a pandas dataframe
"""
def __init__(self, nuc_concs, burnup):#reactor, enrichment, burnup):
self.nuc_concs = nuc_concs
#self.reactor = reactor
#self.enrichment = enrichment
self.burnup = burnup
###################################################
# TODO: Leaving the following global for now; fix!#
###################################################
# Info for labeling the simulation values in the training set
pwrburn = (600, 1550, 2500, 3450, 4400, 5350, 6300, 7250, 8200, 9150, 10100,
11050, 12000, 12950, 13900, 14850, 15800, 16750, 17700
)
bwrburn = (600, 1290, 1980, 2670, 3360, 4050, 4740, 5430, 6120, 6810, 7500,
8190, 8880, 9570, 10260, 10950, 11640, 12330
)
phwrburn = (600, 1290, 1980, 2670, 3360, 4050, 4740, 5430, 6120, 6810, 7500,
8190, 8880, 9570, 10260, 10950, 11640, 12330
)
o_rxtrs = ('ce14x14', 'ce16x16', 'w14x14', 'w15x15', 'w17x17', 's14x14',
'vver440', 'vver440_3.82', 'vver440_4.25', 'vver440_4.38',
'vver1000', 'ge7x7-0', 'ge8x8-1', 'ge9x9-2', 'ge10x10-8',
'abb8x8-1', 'atrium9x9-9', 'svea64-1', 'svea100-0', 'candu28',
'candu37'
)
enrich = (2.8, 2.8, 2.8, 2.8, 2.8, 2.8, 3.6, 3.82, 4.25, 4.38, 2.8, 2.9,
2.9, 2.9, 2.9, 2.9, 2.9, 2.9, 2.9, 0.711, 0.711
)
train_label = {'ReactorType': ['pwr']*11 + ['bwr']*8 + ['phwr']*2,
'OrigenReactor': o_rxtrs,
'Enrichment': enrich,
'Burnup': [pwrburn]*11 + [bwrburn]*8 + [phwrburn]*2,
'CoolingInts': [(0.000694, 7, 30, 365.25)]*21
}
# Info for labeling the simulated/expected values in the testing set
t_burns = ((1400, 5000, 11000), (5000, 6120), (1700, 8700, 17000),
(8700, 9150), (8700, 9150), (2000, 7200, 10800),
(7200, 8800), (7200, 8800)
)
cool1 = (0.000694, 7, 30, 365.25) #1 min, 1 week, 1 month, 1 year in days
cool2 = (0.002082, 9, 730.5) #3 min, 9 days, 2 years in days
cool3 = (7, 9) #7 and 9 days
t_o_rxtrs = ('candu28_0', 'candu28_1', 'ce16x16_2', 'ce16x16_3', 'ce16x16_4',
'ge7x7-0_5','ge7x7-0_6', 'ge7x7-0_7'
)
t_enrich = (0.711, 0.711, 2.8, 2.8, 3.1, 2.9, 2.9, 3.2)
test_label = {'ReactorType': ['phwr']*2 + ['pwr']*3 + ['bwr']*3,
'OrigenReactor': t_o_rxtrs,
'Enrichment': t_enrich,
'Burnup': t_burns,
'CoolingInts': [cool1, cool2, cool1, cool2, cool3, cool1, cool2, cool3]
}
def format_df(filename):
"""
This takes a csv file and reads the data in as a dataframe.
Parameters
----------
filename : str of simulation output in a csv file
Returns
-------
data : pandas dataframe containing csv entries
"""
data = pd.read_csv(filename).T
data.columns = data.iloc[0]
data.drop(data.index[0], inplace=True)
return data
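# A minimal sketch using an in-memory CSV (hypothetical nuclide names and
# values): format_df transposes the table so each case becomes one row.
import io
_csv = "nuclide,case1,case2\nu235,1.0,0.9\npu239,0.1,0.2\n"
print(format_df(io.StringIO(_csv)))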
def get_labels(filename, rxtrs):
"""
This takes a filename and a dict with all simulation parameters, and
searches for the entries relevant to the given simulation (file).
Parameters
----------
filename : str of simulation output in a csv file
rxtrs : dict of a data set detailing simulation parameters in ORIGEN
Returns
-------
rxtr_info : dict of all the labels for a given simulation data set
"""
tail, _ = os.path.splitext(os.path.basename(filename))
i = rxtrs['OrigenReactor'].index(tail)
rxtr_info = {'ReactorType': rxtrs['ReactorType'][i],
'Enrichment': rxtrs['Enrichment'][i],
'Burnup': rxtrs['Burnup'][i],
'CoolingInts': rxtrs['CoolingInts'][i]
}
return rxtr_info
def label_data(label, data):
"""
Takes the labels for a simulation and a dataframe of the simulation results;
adds these labels as additional columns to the dataframe.
Parameters
----------
label : dict representing the labels for a simulation
data : dataframe of simulation results
Returns
-------
data : dataframe of simulation results + label entries in columns
"""
col = len(data.columns)
data.insert(loc = col, column = 'ReactorType', value = label['ReactorType'])
data.insert(loc = col+1, column = 'Enrichment', value = label['Enrichment'])
burnup = burnup_label(label['Burnup'], label['CoolingInts'])
data.insert(loc = col+2, column = 'Burnup', value = burnup)
return data
def burnup_label(burn_steps, cooling_ints):
"""
Takes the burnup steps and cooling intervals for each case within the
simulation and creates a list of the burnup of the irradiated and cooled/
decayed fuels; returns a list to be added as the burnup label to the main
dataframe.
Parameters
----------
burn_steps : list of the steps of burnup from the simulation parameters
cooling_ints : list of the cooling intervals from the simulation parameters
Returns
-------
burnup_list : list of burnups to be applied as a label for a given simulation
"""
num_cases = len(burn_steps)
steps_per_case = len(cooling_ints) + 2
burnup_list = [0, ]
for case in range(0, num_cases):
for step in range(0, steps_per_case):
if (case == 0 and step == 0):
continue
elif (case > 0 and step == 0):
burn_step = burn_steps[case-1]
burnup_list.append(burn_step)
else:
burn_step = burn_steps[case]
burnup_list.append(burn_step)
return burnup_list
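# A minimal sketch with hypothetical burnup steps and cooling intervals;
# the expected list below was traced by hand from the loop above.
print(burnup_label((600, 1290), (7, 30)))
# expected: [0, 600, 600, 600, 600, 1290, 1290, 1290]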
def dataframeXY(all_files, rxtr_label):
""""
Takes the glob of files in a directory as well as the dict of labels and
produces a dataframe that has both the data features (X) and labeled data (Y).
Parameters
----------
all_files : list of str holding all simulation file names in a directory
rxtr_label : dict holding all parameters for all simulations in a directory
Returns
-------
dfXY : dataframe that has all features and labels for all simulations in a
directory
"""
all_data = []
for f in all_files:
data = format_df(f)
labels = get_labels(f, rxtr_label)
labeled = label_data(labels, data)
all_data.append(labeled)
dfXY = pd.concat(all_data)
##FILTERING STUFFS##
# Delete sim columns
# Need better way to know when the nuclide columns start (6 for now)
# Prob will just search for column idx that starts with str(1)?
cols = len(dfXY.columns)
dfXY = dfXY.iloc[:, 6:cols]
# Filter out 0 burnups so MAPE can be calc'd
dfXY = dfXY.loc[dfXY.Burnup > 0, :]
return dfXY
def top_nucs(dfXY, top_n):
"""
loops through the rows of a dataframe and keeps the top_n nuclides
(by concentration) from each row
Parameters
----------
dfXY : dataframe of nuclide concentrations + labels
top_n : number of nuclides to sort and filter by
Returns
-------
nuc_set : set of the top_n nucs as determined
"""
x = len(dfXY.columns)-3
dfX = dfXY.iloc[:, 0:x]
# Get a set of top n nucs from each row (instance)
nuc_set = set()
for case, conc in dfX.iterrows():
top_n_series = conc.sort_values(ascending=False)[:top_n]
nuc_list = list(top_n_series.index.values)
nuc_set.update(nuc_list)
return nuc_set
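# A minimal sketch with a hypothetical labeled frame: the union of each row's
# top-2 nuclides is returned as a set.
_dfXY = pd.DataFrame({
    'u235': [5.0, 0.1], 'pu239': [1.0, 4.0], 'cs137': [0.2, 3.0], 'sr90': [0.1, 0.05],
    'ReactorType': ['pwr', 'bwr'], 'Enrichment': [2.8, 2.9], 'Burnup': [600, 1290],
})
print(top_nucs(_dfXY, top_n=2))   # expected: {'u235', 'pu239', 'cs137'}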
def filter_nucs(df, nuc_set, top_n):
"""
for each instance (row), keep only the top_n values (by concentration); replace the rest with 0
Parameters
----------
df : dataframe of nuclide concentrations
nuc_set : set of top_n nuclides
top_n : number of nuclides to sort and filter by
Returns
-------
top_n_df : dataframe that has values only for the top_n nuclides of the set
nuc_set in each row
"""
# To filter further, have to reconstruct the df into a new one
# Found success appending each row to a new df as a series
top_n_df = pd.DataFrame(columns=tuple(nuc_set))
for case, conc in df.iterrows():
top_n_series = conc.sort_values(ascending=False)[:top_n]
nucs = top_n_series.index.values
# some top values in test set aren't in nuc set, so need to delete those
del_list = list(set(nucs) - nuc_set)
top_n_series.drop(del_list, inplace=True)
filtered_row = conc.filter(items=top_n_series.index.values)
top_n_df = pd.concat([top_n_df, filtered_row.to_frame().T])
# replace NaNs with 0, bc scikit don't take no NaN
top_n_df.fillna(value=0, inplace=True)
return top_n_df
def splitXY(dfXY):
"""
Takes a dataframe with all X (features) and Y (labels) information and
produces four different dataframes: nuclide concentrations only (with
input-related columns deleted) + 1 dataframe for each label column.
Parameters
----------
dfXY : dataframe with nuclide concentrations and 3 labels: reactor type,
enrichment, and burnup
Returns
-------
dfX : dataframe with only nuclide concentrations for each instance
r_dfY : dataframe with reactor type for each instance
e_dfY : dataframe with fuel enrichment for each instance
b_dfY : dataframe with fuel burnup for each instance
"""
x = len(dfXY.columns)-3
dfX = dfXY.iloc[:, 0:x]
r_dfY = dfXY.iloc[:, x]
e_dfY = dfXY.iloc[:, x+1]
b_dfY = dfXY.iloc[:, x+2]
return dfX, r_dfY, e_dfY, b_dfY
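# A minimal sketch with a hypothetical one-row frame: the last three columns
# are peeled off as the reactor-type, enrichment, and burnup labels.
_dfXY = pd.DataFrame({'u235': [5.0], 'pu239': [1.0],
                      'ReactorType': ['pwr'], 'Enrichment': [2.8], 'Burnup': [600]})
_X, _r, _e, _b = splitXY(_dfXY)
print(_X.columns.tolist(), _r.iloc[0], _e.iloc[0], _b.iloc[0])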
def main():
"""
Takes all origen files and compiles them into the appropriate dataframes for
training and testing sets. Then splits those dataframes into the appropriate
X and Ys for prediction of reactor type, fuel enrichment, and burnup.
The training set is varied by number of features included in trainX to
create a learning curve.
"""
pkl_train = 'trainXY_2nov.pkl'
pkl_test = 'testXY_2nov.pkl'
print("scrips\n")
#print("Did you check your training and testing data paths?\n")
# Training Datasets
#trainpath = "../origen/origen-data/training/9may2017/csv/"
#trainpath = "../origen-data/training/2nov2017/csv/"
#trainpath = "../origen/origen-data/training/2nov2017/csv/"
#train_files = glob.glob(os.path.join(trainpath, "*.csv"))
#trainXY = dataframeXY(train_files, train_label)
#trainXY.reset_index(inplace = True)
#pickle.dump(trainXY, open(pkl_train, 'wb'))
# Get set of top 200 nucs from training set
# The filter_nuc func repeats stuff from top_nucs but it is needed because
# the nuc_set needs to be determined from the training set for the test set
# and the training set is filtered within each loop
trainXY = pd.read_pickle(pkl_train)
top_n = 200
nuc_set = top_nucs(trainXY, top_n)
trainX, trainYr, trainYe, trainYb = splitXY(trainXY)
trainX = filter_nucs(trainX, nuc_set, top_n)
######################################################
#trainX = scale(trainX) WILL DO SCALING AFTER DATA MANIP
######################################################
train_set = LearnSet(nuc_concs = trainX, burnup = trainYb)
# Testing Dataset (for now)
#testpath = "../origen/origen-data/testing/10may2017_2/csv/"
#testpath = "../origen-data/testing/2nov2017/csv/"
#testpath = "../origen/origen-data/testing/2nov2017/csv/"
#test_files = glob.glob(os.path.join(testpath, "*.csv"))
#testXY = dataframeXY(test_files, test_label)
#testXY.reset_index(inplace = True)
#pickle.dump(testXY, open(pkl_test, 'wb'))
testXY = pd.read_pickle(pkl_test)  # api: pandas.read_pickle
from unittest import TestCase
import pandas as pd
import numpy as np
import pandas_validator as pv
from pandas_validator.core.exceptions import ValidationError
class BaseSeriesValidatorTest(TestCase):
def setUp(self):
self.validator = pv.BaseSeriesValidator(series_type=np.int64)
def test_is_valid_when_given_int64_series(self):
series = pd.Series([0, 1])
self.assertTrue(self.validator.is_valid(series))
def test_is_invalid_when_given_float_series(self):
series = pd.Series([0., 1.])
self.assertFalse(self.validator.is_valid(series))
def test_should_return_true_when_given_int64_series(self):
series = pd.Series([0, 1])
self.assertIsNone(self.validator.validate(series))
def test_should_return_false_when_given_float_series(self):
series = pd.Series([0., 1.])
self.assertRaises(ValidationError, self.validator.validate, series)
class IntegerSeriesValidatorTest(TestCase):
def setUp(self):
self.validator = pv.IntegerSeriesValidator(min_value=0, max_value=2)
def test_is_valid(self):
series = pd.Series([0, 1, 2])
self.assertTrue(self.validator.is_valid(series))
def test_is_invalid_by_too_low_value(self):
series = pd.Series([-1, 0, 1, 2])
self.assertFalse(self.validator.is_valid(series))
def test_is_invalid_by_too_high_value(self):
series = pd.Series([0, 1, 2, 3])
self.assertFalse(self.validator.is_valid(series))
class FloatSeriesValidatorTest(TestCase):
def setUp(self):
self.validator = pv.FloatSeriesValidator(min_value=0, max_value=2)
def test_is_valid(self):
series = pd.Series([0., 1., 2.])
self.assertTrue(self.validator.is_valid(series))
def test_is_invalid_when_given_integer_series(self):
series = pd.Series([0, 1, 2])
self.assertFalse(self.validator.is_valid(series))
def test_is_invalid_by_too_low_value(self):
series = pd.Series([-0.1, 0., 1.])
self.assertFalse(self.validator.is_valid(series))
def test_is_invalid_by_too_high_value(self):
series = pd.Series([0., 1., 2.1])
self.assertFalse(self.validator.is_valid(series))
class CharSeriesValidatorTest(TestCase):
def setUp(self):
self.validator = pv.CharSeriesValidator(min_length=0, max_length=4)
def test_is_valid(self):
series = pd.Series(['', 'ab', 'abcd'])
self.assertTrue(self.validator.is_valid(series))
def test_is_invalid_when_given_integer_series(self):
series = pd.Series([0, 1, 2])
self.assertFalse(self.validator.is_valid(series))
def test_is_invalid_by_too_long_length(self):
series = pd.Series(['', 'ab', 'abcde'])  # api: pandas.Series
from sklearn.metrics import confusion_matrix
from sklearn.feature_selection import RFE
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
import time
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import scipy as sp
import scipy.fftpack
from scipy.signal import welch
from scipy import signal
import numpy as np
from sklearn.metrics import confusion_matrix, make_scorer, classification_report
import os, sys, argparse
from sklearn.svm import SVC
from xgboost import XGBClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LogisticRegression
from hmm_filter.hmm_filter import HMMFilter
# Constants for states
WAKE_STATE = 0
NREM_STATE = 1
REM_STATE = 2
def get_info(df):
x= df.groupby('Stage').apply(lambda x: x.shape[0])
return x
def get_balanced_data(df):
df=df.set_index('unique_epoch_id', drop=False)
to_sample = df.groupby(['Stage']).groups
num_to_sample = min(get_info(df))
samples = np.concatenate([np.random.choice(to_sample[1], num_to_sample, False),
np.random.choice(to_sample[2], num_to_sample, False),
np.random.choice(to_sample[0], num_to_sample, False)])
sampled_data = df[df['unique_epoch_id'].isin(samples)].reset_index(drop=True)
print(get_info(sampled_data))
return sampled_data
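# A minimal sketch with a hypothetical unbalanced frame: every stage is
# downsampled to the size of the rarest stage (here 2 epochs each).
_df = pd.DataFrame({'unique_epoch_id': range(9),
                    'Stage': [0, 0, 0, 0, 1, 1, 1, 2, 2]})
_balanced = get_balanced_data(_df)
print(get_info(_balanced))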
def svm(X_train, X_test, Y_train, Y_test):
clf = SVC(kernel='rbf')
clf.fit(X_train, Y_train)
y_pred=clf.predict(X_test)
toc = time.time()
training_accuracy = metrics.accuracy_score(Y_train, clf.predict(X_train))
test_accuracy = metrics.accuracy_score(Y_test, y_pred)
print("Training accuracy", training_accuracy)
print("Test Accuracy:", test_accuracy)
print(classification_report(Y_test, y_pred))
return clf, training_accuracy, test_accuracy
def random_forest(X_train, X_test, Y_train, Y_test):
clf = RandomForestClassifier(n_estimators=100)
clf.fit(X_train,Y_train)
y_pred=clf.predict(X_test)
toc = time.time()
training_accuracy = metrics.accuracy_score(Y_train, clf.predict(X_train))
test_accuracy = metrics.accuracy_score(Y_test, y_pred)
print("Training accuracy", training_accuracy)
print("Test Accuracy:", test_accuracy)
print(classification_report(Y_test, y_pred))
return clf, training_accuracy, test_accuracy
def XGBoost(X_train, X_test, Y_train, Y_test):
clf = XGBClassifier()
clf.fit(X_train, Y_train)
y_pred=clf.predict(X_test)
toc = time.time()
training_accuracy = metrics.accuracy_score(Y_train, clf.predict(X_train))
test_accuracy = metrics.accuracy_score(Y_test, y_pred)
print("Training accuracy", training_accuracy)
print("Test Accuracy:", test_accuracy)
print(classification_report(Y_test, y_pred))
return clf, training_accuracy, test_accuracy
def neural_network(X_train, X_test, Y_train, Y_test):
clf = MLPClassifier(random_state=1)
clf.fit(X_train, Y_train)
y_pred=clf.predict(X_test)
toc = time.time()
training_accuracy = metrics.accuracy_score(Y_train, clf.predict(X_train))
test_accuracy = metrics.accuracy_score(Y_test, y_pred)
print("Training accuracy", training_accuracy)
print("Test Accuracy:", test_accuracy)
print(classification_report(Y_test, y_pred))
return clf, training_accuracy, test_accuracy
def logistic_regression(X_train, X_test, Y_train, Y_test):
clf = LogisticRegression()
clf.fit(X_train, Y_train)
y_pred=clf.predict(X_test)
toc = time.time()
training_accuracy = metrics.accuracy_score(Y_train, clf.predict(X_train))
test_accuracy = metrics.accuracy_score(Y_test, y_pred)
print("Training accuracy", training_accuracy)
print("Test Accuracy:", test_accuracy)
print(classification_report(Y_test, y_pred))
return clf, training_accuracy, test_accuracy
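# A minimal sketch with synthetic, hypothetical features: any of the wrapper
# functions above can be driven the same way after a train/test split.
_rng = np.random.RandomState(0)
_X = pd.DataFrame(_rng.rand(60, 4), columns=['f1', 'f2', 'f3', 'f4'])
_Y = pd.Series(_rng.randint(0, 3, 60))
_Xtr, _Xte, _Ytr, _Yte = train_test_split(_X, _Y, test_size=0.2, random_state=0)
_clf, _train_acc, _test_acc = random_forest(_Xtr, _Xte, _Ytr, _Yte)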
def hmm_filter(clf, t, X_train, Y_train, X_test, Y_test, S_train, S_test):
y_pred_train=clf.predict(X_train)
train_data=pd.DataFrame(Y_train,columns=["Stage"])
train_data["predict_stage"]=y_pred_train
train_data['video'] = S_train
hmmfilter = HMMFilter()
hmmfilter.A = t
hmmfilter.fit(train_data, session_column="video", prediction_column="predict_stage")
d = pd.DataFrame.from_records(clf.predict_proba(X_test), columns=clf.classes_).to_dict(orient="records")
test_data=pd.DataFrame(Y_test,columns=["Stage"])
test_data["predict_stage"]=clf.predict(X_test)
test_data["probabs"] = [{ k:v for k,v in r.items() if v > 0} for r in d ]
test_data["index"] = np.arange(0, len(test_data))
test_data['video'] = S_test
df_hmm = hmmfilter.predict(test_data, session_column='video', probabs_column="probabs", prediction_column='predict_stage')
df_hmm=df_hmm.set_index("index")
df_hmm = df_hmm.sort_values(by=['index'])
classifiers_accuracy = len(test_data[test_data.Stage == test_data.predict_stage]) / len(test_data)
hmm_accuracy = len(df_hmm[df_hmm.Stage == df_hmm.predict_stage]) / len(df_hmm)
return [classifiers_accuracy, hmm_accuracy, df_hmm]
def get_t_matrix(df):
df_temp=df[["unique_epoch_id","video","Stage"]]
df_temp=df_temp.drop_duplicates()
df_temp["previous_stage"] = df_temp["Stage"].shift()
df_temp = df_temp.reset_index()
df_temp["previous_stage"].iloc[0] = df_temp["Stage"].iloc[0]
df_temp['previous_stage'] = df_temp['previous_stage'].astype(int)
w2w = len(df_temp[(df_temp["Stage"]==WAKE_STATE) & (df_temp["previous_stage"]==WAKE_STATE)])
w2n = len(df_temp[(df_temp["Stage"]==NREM_STATE) & (df_temp["previous_stage"]==WAKE_STATE)])
w2r = len(df_temp[(df_temp["Stage"]==REM_STATE) & (df_temp["previous_stage"]==WAKE_STATE)])
n2w = len(df_temp[(df_temp["Stage"]==WAKE_STATE) & (df_temp["previous_stage"]==NREM_STATE)])
n2n = len(df_temp[(df_temp["Stage"]==NREM_STATE) & (df_temp["previous_stage"]==NREM_STATE)])
n2r = len(df_temp[(df_temp["Stage"]==REM_STATE) & (df_temp["previous_stage"]==NREM_STATE)])
r2w = len(df_temp[(df_temp["Stage"]==WAKE_STATE) & (df_temp["previous_stage"]==REM_STATE)])
r2n = len(df_temp[(df_temp["Stage"]==NREM_STATE) & (df_temp["previous_stage"]==REM_STATE)])
r2r = len(df_temp[(df_temp["Stage"]==REM_STATE) & (df_temp["previous_stage"]==REM_STATE)])
from_w = w2w+w2n+w2r
from_n = n2w+n2n+n2r
from_r = r2w+r2n+r2r
t={(0,0):w2w/from_w,(0,1):w2n/from_w,(0,2):w2r/from_w,
(1,0):n2w/from_n,(1,1):n2n/from_n,(1,2):n2r/from_n,
(2,0):r2w/from_r,(2,1):r2n/from_r,(2,2):r2r/from_r}
transition_matrix = pd.DataFrame([[w2w, w2n, w2r], [n2w, n2n, n2r], [r2w, r2n, r2r]], columns=["wake", "nrem", "rem"], index=["wake", "nrem", "rem"])  # api: pandas.DataFrame
import pandas as pd
import mysql.connector as sql
from dataMining.utils.config import Config
class Data():
"""docstring for Data"""
def __init__(self):
super(Data, self).__init__()
def setPathID(self, pathID):
self.pathID = pathID
def getPathID(self):
return self.pathID
def infoMysql(self):
db_connection = sql.connect(host=Config.host,user=Config.user,passwd=Config.password, db=Config.database)
db_cursor = db_connection.cursor()
#db_cursor.execute('SELECT id, deviceid, valid, latitude, longitude, fixtime, speed, attributes FROM tc_positions WHERE deviceid BETWEEN 15 AND 20')
"""
SELECT COUNT(*) FROM tc_positions WHERE deviceid = 20 -> 124297
SELECT COUNT(*) FROM tc_positions WHERE deviceid = 20 GROUP BY attributes -> 15654
SELECT COUNT(*) FROM tc_positions WHERE deviceid = 20 GROUP BY longitude, latitude, fixtime -> 121308
SELECT COUNT(*) FROM tc_positions WHERE deviceid = 20 GROUP BY longitude, latitude -> 32996
SELECT COUNT(*) FROM tc_positions WHERE deviceid = 20 GROUP BY longitude, latitude, attributes -> 33744
"""
db_cursor.execute('SELECT id, deviceid, valid, latitude, longitude, fixtime, speed, attributes FROM tc_positions WHERE deviceid BETWEEN 15 AND 20 GROUP BY longitude, latitude, attributes')
table_rows = db_cursor.fetchall()
db_cursor.execute('SELECT DISTINCT(deviceid) FROM tc_positions')
id = db_cursor.fetchall()
columna = pd.DataFrame(id)
columna.columns = ["deviceid"]
df = pd.DataFrame(table_rows)  # api: pandas.DataFrame
import s3fs
import pandas as pd
from argparse import ArgumentParser
import joblib
from pathlib import Path
from sklearn.model_selection import train_test_split
from metasense import BOARD_CONFIGURATION as DATA
from metasense.data import load
from metasense.models import SplitModel
from deepx import nn
sensor_features = ['no2', 'o3', 'co']
env_features = ['temperature', 'absolute-humidity', 'pressure']
Y_features = ['epa-no2', 'epa-o3']
BUCKET_NAME = "metasense-paper-results"
def parse_args():
argparser = ArgumentParser()
argparser.add_argument('experiment')
argparser.add_argument('name')
argparser.add_argument('--seed', type=int, default=0)
argparser.add_argument('--location', default="", type=str)
argparser.add_argument('--round', default="", type=str)
argparser.add_argument('--board', default="", type=str)
argparser.add_argument('--dim', type=int, default=3)
argparser.add_argument('--batch-size', type=int, default=10)
argparser.add_argument('--hidden-size', type=int, default=100)
argparser.add_argument('--lr', type=float, default=1e-4)
argparser.add_argument('--load', default=None)
argparser.add_argument('--num-iters', type=int, default=2000000)
return argparser.parse_args()
def train(out_dir, dim, seed, load_model=None):
out_path = out_dir / 'models'
if not (out_path).exists():
out_path.mkdir()
boards = {}
for round in DATA:
for location in DATA[round]:
for board_id in DATA[round][location]:
if board_id not in boards:
boards[board_id] = set()
boards[board_id].add((round, location))
if load_model is None:
sensor_models = {
board_id: nn.Relu(100) >> nn.Relu(100) >> nn.Linear(dim) for board_id in boards
# board_id: nn.Linear(3, dim) for board_id in boards
}
calibration_model = nn.Relu(dim + 3, args.hidden_size) >> nn.Relu(args.hidden_size) >> nn.Linear(2)
split_model = SplitModel(sensor_models, calibration_model, log_dir=out_dir, lr=args.lr, batch_size=args.batch_size)
else:
split_model = joblib.load(load_model)
data = {}
print("Filtering: %s" % ignore)
for board_id in boards:
board_train = []
for round, location in boards[board_id]:
if (round, location, board_id) in ignore:
print("Removing: ", round, location, board_id)
continue
board_train.append(load(*(round, location, board_id))[0])
if len(board_train) > 0:
print("Loaded board[%u]: %u" % (board_id, len(board_train)))
board_train = pd.concat(board_train)
board_train['board'] = board_id
if board_id not in data:
data[board_id] = []
data[board_id].append(board_train)
data = [pd.concat(ds) for ds in data.values()]
max_size = max([d.shape[0] for d in data])
for i in range(len(data)):
d = data[i]
if d.shape[0] < max_size:
data[i] = pd.concat([d, d.sample(max_size - d.shape[0], replace=True)])
data = pd.concat(data)  # api: pandas.concat
import pandas as pd
import numpy as np
from hdfe.hdfe import make_dummies, get_all_dummies
def test_make_dummies_arr() -> None:
x = np.array([1, 0, 0])
results = make_dummies(x, False)
expected = np.array([[0, 1], [1, 0], [1, 0]], dtype=float)
np.testing.assert_almost_equal(results.A, expected)
def test_make_dummies_ser() -> None:
x = pd.Series([1, 0, 0])
results = make_dummies(x, False)
expected = np.array([[0, 1], [1, 0], [1, 0]], dtype=float)
np.testing.assert_almost_equal(results.A, expected)
def test_make_dummies_cat() -> None:
x = pd.Series(["horse", "cat", "cat"]).astype("category")
results = make_dummies(x, False)
expected = np.array([[0, 1], [1, 0], [1, 0]], dtype=float)
np.testing.assert_almost_equal(results.A, expected)
def test_make_dummies_arr_drop() -> None:
x = np.array([1, 0, 0])
results = make_dummies(x, True)
expected = np.array([[0], [1], [1]], dtype=float)
np.testing.assert_almost_equal(results.A, expected)
def test_make_dummies_ser_drop() -> None:
x = pd.Series([1, 0, 0])
results = make_dummies(x, True)
expected = np.array([[0], [1], [1]], dtype=float)
np.testing.assert_almost_equal(results.A, expected)
def test_make_dummies_cat_drop() -> None:
x = pd.Series(["horse", "cat", "cat"])  # api: pandas.Series
from __future__ import division
from functools import wraps
import pandas as pd
import numpy as np
import time
import csv, sys
import os.path
import logging
from .ted_functions import TedFunctions
from .ted_aggregate_methods import TedAggregateMethods
from base.uber_model import UberModel, ModelSharedInputs
class TedSpeciesProperties(object):
"""
Listing of species properties that will eventually be read in from a SQL db
"""
def __init__(self):
"""Class representing Species properties"""
super(TedSpeciesProperties, self).__init__()
self.sci_name = pd.Series([], dtype='object')
self.com_name = pd.Series([], dtype='object')
self.taxa = pd.Series([], dtype='object')
self.order = pd.Series([], dtype='object')
self.usfws_id = pd.Series([], dtype='object')
self.body_wgt = pd.Series([], dtype='object')
self.diet_item = pd.Series([], dtype='object')
self.h2o_cont = pd.Series([], dtype='float')
def read_species_properties(self):
# this is a temporary method to initiate the species/diet food items lists (this will be replaced with
# a method to access a SQL database containing the properties)
#filename = './ted/tests/TEDSpeciesProperties.csv'
filename = os.path.join(os.path.dirname(__file__),'tests/TEDSpeciesProperties.csv')
try:
with open(filename,'rt') as csvfile:
# pandas.read_csv uses the first line in the file for column headings by default
dr = pd.read_csv(csvfile) # comma is default delimiter
except csv.Error as e:
sys.exit('file: %s, %s' % (filename, e))
print(dr)
self.sci_name = dr.loc[:, 'Scientific Name']
self.com_name = dr.loc[:, 'Common Name']
self.taxa = dr.loc[:, 'Taxa']
self.order = dr.loc[:, 'Order']
self.usfws_id = dr.loc[:, 'USFWS Species ID (ENTITY_ID)']
self.body_wgt = dr.loc[:, 'BW (g)']
self.diet_item = dr.loc[:, 'Food item']
self.h2o_cont = dr.loc[:, 'Water content of diet']
class TedInputs(ModelSharedInputs):
"""
Required inputs class for Ted.
"""
def __init__(self):
"""Class representing the inputs for Ted"""
super(TedInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Pandas DataFrame
self.chemical_name = pd.Series([], dtype="object", name="chemical_name")
# application parameters for min/max application scenarios
self.crop_min = pd.Series([], dtype="object", name="crop")
self.app_method_min = pd.Series([], dtype="object", name="app_method_min")
self.app_rate_min = pd.Series([], dtype="float", name="app_rate_min")
self.num_apps_min = pd.Series([], dtype="int", name="num_apps_min")
self.app_interval_min = pd.Series([], dtype="int", name="app_interval_min")
self.droplet_spec_min = pd.Series([], dtype="object", name="droplet_spec_min")
self.boom_hgt_min = pd.Series([], dtype="object", name="boom_hgt_min")
self.pest_incorp_depth_min = pd.Series([], dtype="object", name="pest_incorp_depth")
self.crop_max = pd.Series([], dtype="object", name="crop")
self.app_method_max = pd.Series([], dtype="object", name="app_method_max")
self.app_rate_max = pd.Series([], dtype="float", name="app_rate_max")
self.num_apps_max = pd.Series([], dtype="int", name="num_apps_max")
self.app_interval_max = pd.Series([], dtype="int", name="app_interval_max")
self.droplet_spec_max = pd.Series([], dtype="object", name="droplet_spec_max")
self.boom_hgt_max = pd.Series([], dtype="object", name="boom_hgt_max")
self.pest_incorp_depth_max = pd.Series([], dtype="object", name="pest_incorp_depth")
# physical, chemical, and fate properties of pesticide
self.foliar_diss_hlife = pd.Series([], dtype="float", name="foliar_diss_hlife")
self.aerobic_soil_meta_hlife = pd.Series([], dtype="float", name="aerobic_soil_meta_hlife")
self.frac_retained_mamm = pd.Series([], dtype="float", name="frac_retained_mamm")
self.frac_retained_birds = pd.Series([], dtype="float", name="frac_retained_birds")
self.log_kow = pd.Series([], dtype="float", name="log_kow")
self.koc = pd.Series([], dtype="float", name="koc")
self.solubility = pd.Series([], dtype="float", name="solubility")
self.henry_law_const = pd.Series([], dtype="float", name="henry_law_const")
# bio concentration factors (ug active ing/kg-ww) / (ug active ing/liter)
self.aq_plant_algae_bcf_mean = pd.Series([], dtype="float", name="aq_plant_algae_bcf_mean")
self.aq_plant_algae_bcf_upper = pd.Series([], dtype="float", name="aq_plant_algae_bcf_upper")
self.inv_bcf_mean = pd.Series([], dtype="float", name="inv_bcf_mean")
self.inv_bcf_upper = pd.Series([], dtype="float", name="inv_bcf_upper")
self.fish_bcf_mean = pd.Series([], dtype="float", name="fish_bcf_mean")
self.fish_bcf_upper = pd.Series([], dtype="float", name="fish_bcf_upper")
# bounding water concentrations (ug active ing/liter)
self.water_conc_1 = pd.Series([], dtype="float", name="water_conc_1") # lower bound
self.water_conc_2 = pd.Series([], dtype="float", name="water_conc_2") # upper bound
# health value inputs
# naming convention (based on listing from OPP TED Excel spreadsheet 'inputs' worksheet):
# dbt: dose based toxicity
# cbt: concentration-based toxicity
# arbt: application rate-based toxicity
# 1inmill_mort: 1/million mortality (note initial character is numeral 1, not letter l)
# 1inten_mort: 10% mortality (note initial character is numeral 1, not letter l)
# others are self explanatory
# dose based toxicity(dbt): mammals (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort")
self.dbt_mamm_1inten_mort = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_low_ld50 = pd.Series([], dtype="float", name="dbt_mamm_low_ld50")
self.dbt_mamm_rat_oral_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50")
self.dbt_mamm_rat_derm_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50")
self.dbt_mamm_rat_inhal_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50")
self.dbt_mamm_sub_direct = pd.Series([], dtype="float", name="dbt_mamm_sub_direct")
self.dbt_mamm_sub_indirect = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect")
self.dbt_mamm_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort_wgt")
self.dbt_mamm_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
self.dbt_mamm_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_low_ld50_wgt")
self.dbt_mamm_rat_oral_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50_wgt")
self.dbt_mamm_rat_derm_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50_wgt")
self.dbt_mamm_rat_inhal_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50_wgt")
self.dbt_mamm_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_direct_wgt")
self.dbt_mamm_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect_wgt")
# dose based toxicity(dbt): birds (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_bird_1inmill_mort = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort")
self.dbt_bird_1inten_mort = pd.Series([], dtype="float", name="dbt_bird_1inten_mort")
self.dbt_bird_low_ld50 = pd.Series([], dtype="float", name="dbt_bird_low_ld50")
self.dbt_bird_hc05 = pd.Series([], dtype="float", name="dbt_bird_hc05")
self.dbt_bird_hc50 = pd.Series([], dtype="float", name="dbt_bird_hc50")
self.dbt_bird_hc95 = pd.Series([], dtype="float", name="dbt_bird_hc95")
self.dbt_bird_sub_direct = pd.Series([], dtype="float", name="dbt_bird_sub_direct")
self.dbt_bird_sub_indirect = pd.Series([], dtype="float", name="dbt_bird_sub_indirect")
self.mineau_sca_fact = pd.Series([], dtype="float", name="mineau_sca_fact")
self.dbt_bird_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort_wgt")
self.dbt_bird_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inten_mort_wgt")
self.dbt_bird_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_bird_low_ld50_wgt")
self.dbt_bird_hc05_wgt = pd.Series([], dtype="float", name="dbt_bird_hc05_wgt")
self.dbt_bird_hc50_wgt = pd.Series([], dtype="float", name="dbt_bird_hc50_wgt")
self.dbt_bird_hc95_wgt = pd.Series([], dtype="float", name="dbt_bird_hc95_wgt")
self.dbt_bird_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_direct_wgt")
self.dbt_bird_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_indirect_wgt")
self.mineau_sca_fact_wgt = pd.Series([], dtype="float", name="mineau_sca_fact_wgt")
# dose based toxicity(dbt): reptiles, terrestrial-phase amphibians (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_reptile_1inmill_mort = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort")
self.dbt_reptile_1inten_mort = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort")
self.dbt_reptile_low_ld50 = pd.Series([], dtype="float", name="dbt_reptile_low_ld50")
self.dbt_reptile_sub_direct = pd.Series([], dtype="float", name="dbt_reptile_sub_direct")
self.dbt_reptile_sub_indirect = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect")
self.dbt_reptile_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort_wgt")
self.dbt_reptile_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort_wgt")
self.dbt_reptile_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_reptile_low_ld50_wgt")
self.dbt_reptile_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_direct_wgt")
self.dbt_reptile_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect_wgt")
# concentration-based toxicity (cbt) : mammals (mg-pest/kg-diet food)
self.cbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="cbt_mamm_1inmill_mort")
self.cbt_mamm_1inten_mort = pd.Series([], dtype="float", name="cbt_mamm_1inten_mort")
self.cbt_mamm_low_lc50 = pd.Series([], dtype="float", name="cbt_mamm_low_lc50")
self.cbt_mamm_sub_direct = pd.Series([], dtype="float", name="cbt_mamm_sub_direct")
self.cbt_mamm_grow_noec = pd.Series([], dtype="float", name="cbt_mamm_grow_noec")
self.cbt_mamm_grow_loec = pd.Series([], dtype="float", name="cbt_mamm_grow_loec")
self.cbt_mamm_repro_noec = pd.Series([], dtype="float", name="cbt_mamm_repro_noec")
self.cbt_mamm_repro_loec = pd.Series([], dtype="float", name="cbt_mamm_repro_loec")
self.cbt_mamm_behav_noec = pd.Series([], dtype="float", name="cbt_mamm_behav_noec")
self.cbt_mamm_behav_loec = pd.Series([], dtype="float", name="cbt_mamm_behav_loec")
self.cbt_mamm_sensory_noec = pd.Series([], dtype="float", name="cbt_mamm_sensory_noec")
self.cbt_mamm_sensory_loec = pd.Series([], dtype="float", name="cbt_mamm_sensory_loec")
self.cbt_mamm_sub_indirect = pd.Series([], dtype="float", name="cbt_mamm_sub_indirect")
# concentration-based toxicity (cbt) : birds (mg-pest/kg-diet food)
self.cbt_bird_1inmill_mort = pd.Series([], dtype="float", name="cbt_bird_1inmill_mort")
self.cbt_bird_1inten_mort = pd.Series([], dtype="float", name="cbt_bird_1inten_mort")
self.cbt_bird_low_lc50 = pd.Series([], dtype="float", name="cbt_bird_low_lc50")
self.cbt_bird_sub_direct = pd.Series([], dtype="float", name="cbt_bird_sub_direct")
self.cbt_bird_grow_noec = pd.Series([], dtype="float", name="cbt_bird_grow_noec")
self.cbt_bird_grow_loec = pd.Series([], dtype="float", name="cbt_bird_grow_loec")
self.cbt_bird_repro_noec = pd.Series([], dtype="float", name="cbt_bird_repro_noec")
self.cbt_bird_repro_loec = pd.Series([], dtype="float", name="cbt_bird_repro_loec")
self.cbt_bird_behav_noec = pd.Series([], dtype="float", name="cbt_bird_behav_noec")
self.cbt_bird_behav_loec = pd.Series([], dtype="float", name="cbt_bird_behav_loec")
self.cbt_bird_sensory_noec = pd.Series([], dtype="float", name="cbt_bird_sensory_noec")
self.cbt_bird_sensory_loec = pd.Series([], dtype="float", name="cbt_bird_sensory_loec")
self.cbt_bird_sub_indirect = pd.Series([], dtype="float", name="cbt_bird_sub_indirect")
# concentration-based toxicity (cbt) : reptiles, terrestrial-phase amphibians (mg-pest/kg-diet food)
self.cbt_reptile_1inmill_mort = pd.Series([], dtype="float", name="cbt_reptile_1inmill_mort")
self.cbt_reptile_1inten_mort = pd.Series([], dtype="float", name="cbt_reptile_1inten_mort")
self.cbt_reptile_low_lc50 = pd.Series([], dtype="float", name="cbt_reptile_low_lc50")
self.cbt_reptile_sub_direct = pd.Series([], dtype="float", name="cbt_reptile_sub_direct")
self.cbt_reptile_grow_noec = pd.Series([], dtype="float", name="cbt_reptile_grow_noec")
self.cbt_reptile_grow_loec = pd.Series([], dtype="float", name="cbt_reptile_grow_loec")
self.cbt_reptile_repro_noec = pd.Series([], dtype="float", name="cbt_reptile_repro_noec")
self.cbt_reptile_repro_loec = pd.Series([], dtype="float", name="cbt_reptile_repro_loec")
self.cbt_reptile_behav_noec = pd.Series([], dtype="float", name="cbt_reptile_behav_noec")
self.cbt_reptile_behav_loec = pd.Series([], dtype="float", name="cbt_reptile_behav_loec")
self.cbt_reptile_sensory_noec = pd.Series([], dtype="float", name="cbt_reptile_sensory_noec")
self.cbt_reptile_sensory_loec = pd.Series([], dtype="float", name="cbt_reptile_sensory_loec")
self.cbt_reptile_sub_indirect = pd.Series([], dtype="float", name="cbt_reptile_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates body weight (mg-pest/kg-bw(ww))
self.cbt_inv_bw_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_bw_1inmill_mort")
self.cbt_inv_bw_1inten_mort = pd.Series([], dtype="float", name="cbt_inv_bw_1inten_mort")
self.cbt_inv_bw_low_lc50 = pd.Series([], dtype="float", name="cbt_inv_bw_low_lc50")
self.cbt_inv_bw_sub_direct = pd.Series([], dtype="float", name="cbt_inv_bw_sub_direct")
self.cbt_inv_bw_grow_noec = pd.Series([], dtype="float", name="cbt_inv_bw_grow_noec")
self.cbt_inv_bw_grow_loec = pd.Series([], dtype="float", name="cbt_inv_bw_grow_loec")
self.cbt_inv_bw_repro_noec = pd.Series([], dtype="float", name="cbt_inv_bw_repro_noec")
self.cbt_inv_bw_repro_loec = pd.Series([], dtype="float", name="cbt_inv_bw_repro_loec")
self.cbt_inv_bw_behav_noec = pd.Series([], dtype="float", name="cbt_inv_bw_behav_noec")
self.cbt_inv_bw_behav_loec = pd.Series([], dtype="float", name="cbt_inv_bw_behav_loec")
self.cbt_inv_bw_sensory_noec = pd.Series([], dtype="float", name="cbt_inv_bw_sensory_noec")
self.cbt_inv_bw_sensory_loec = pd.Series([], dtype="float", name="cbt_inv_bw_sensory_loec")
self.cbt_inv_bw_sub_indirect = pd.Series([], dtype="float", name="cbt_inv_bw_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates body diet (mg-pest/kg-food(ww))
self.cbt_inv_food_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_food_1inmill_mort")
self.cbt_inv_food_1inten_mort = pd.Series([], dtype="float", name="cbt_inv_food_1inten_mort")
self.cbt_inv_food_low_lc50 = pd.Series([], dtype="float", name="cbt_inv_food_low_lc50")
self.cbt_inv_food_sub_direct = pd.Series([], dtype="float", name="cbt_inv_food_sub_direct")
self.cbt_inv_food_grow_noec = pd.Series([], dtype="float", name="cbt_inv_food_grow_noec")
self.cbt_inv_food_grow_loec = pd.Series([], dtype="float", name="cbt_inv_food_grow_loec")
self.cbt_inv_food_repro_noec = pd.Series([], dtype="float", name="cbt_inv_food_repro_noec")
self.cbt_inv_food_repro_loec = pd.Series([], dtype="float", name="cbt_inv_food_repro_loec")
self.cbt_inv_food_behav_noec = pd.Series([], dtype="float", name="cbt_inv_food_behav_noec")  # api: pandas.Series
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
# the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
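# no warning here: both arguments are integer dtype and the resulting
# codes are not all -1, so neither of the two heuristics above applies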
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unordered = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unordered
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both directions, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparisons take the categories order into
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
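# with fastpath=True the first argument is taken as the codes, so -1
# marks missing values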
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect Categorical, as it doesn't care
# about the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assignments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
c = c.set_categories(
[4, 3, 2, 1
]) # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3])
) # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1])
) # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1])
) # output is the same
self.assertEqual(c.min(), 4)
self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# the new category is already included in the old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# the category to remove is not in the categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
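# unused categories are dropped and the remaining codes relabelled;
# missing values keep code -1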
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to the categories should make a subsequently assigned nan
# point to that nan category
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
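# the private _codes array, by contrast, is still writeable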
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
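# a newly assigned nan now maps to the explicit nan category (code 3),
# while the pre-existing missing value keeps code -1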
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
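# shifting fills the vacated positions with NaN; the categories
# themselves are unchanged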
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
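# nbytes is the shallow footprint: the codes array plus the categories' values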
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
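# with object categories, deep=True also measures the string payloads,
# so it exceeds nbytes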
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
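# ordered=True is used because searchsorted is only defined for ordered
# Categoricals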
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
# Categorical returns np.array like pd.Series, but different from
# np.array.searchsorted()
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
exp = np.array([3, 5]
) # eggs after donuts, after switching milk and donuts
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
# whichever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017,
# whichever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
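# the dtype of the codes grows with the number of categories:
# int8 -> int16 -> int32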
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name
) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
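# the categorical Series has a default integer index, so aligning it to
# the date index leaves all values NaN while the categories are preserved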
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list(
'abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
exp_cat = np.array(["a", "b", "c", "a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
# Nans are represented as -1 in codes
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0, 1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(
["a", "b", np.nan, "a"], categories=["a", "b", np.nan]))
self.assert_numpy_array_equal(s2.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0, 1, 2, 0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a", "b", "c", "a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a", "b", np.nan]
self.assert_numpy_array_equal(s3.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0, 1, 2, 0]))
def test_cat_accessor(self):
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
str(col)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assertRaisesRegexp(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda: Series([1, 2, 3]).cat)
self.assertRaises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
self.assertRaises(AttributeError, lambda: Series(np.arange(5.)).cat)
self.assertRaises(AttributeError,
lambda: Series([Timestamp('20130101')]).cat)
# Series should delegate calls to '.categories', '.codes', '.ordered'
# and the methods '.set_categories()' and '.remove_unused_categories()' to the
# categorical
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = np.array([1, 2, 3])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
self.assertEqual(s.cat.ordered, True)
s = s.cat.as_unordered()
self.assertEqual(s.cat.ordered, False)
s.cat.as_ordered(inplace=True)
self.assertEqual(s.cat.ordered, True)
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
s = s.cat.set_categories(["c", "b", "a"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"
]))
exp_categories = np.array(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"])
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# s.set_categories is easily confused with s.cat.set_categories, so test
# that calling it directly on the Series raises an error:
def f():
s.set_categories([4, 3, 2, 1])
self.assertRaises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
def test_assignment_to_dataframe(self):
# assignment
df = DataFrame({'value': np.array(
np.random.randint(0, 10000, 100), dtype='int32')})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), com.CategoricalDtype()], index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype(),
com.CategoricalDtype()],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
self.assertTrue(result1._data._block.values.equals(d))
# sorting
s.name = 'E'
self.assertTrue(result2.sort_index().equals(s.sort_index()))
cat = pd.Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
self.assertEqual(len(result.columns), 1)
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = pd.Series(pd.Categorical(["a", "b", "c", "c"]))
df3 = pd.DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
a = pd.Series(pd.Categorical(["a", "b"] * 25))
exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
"dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(
["a", "b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" + "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp, a.__unicode__())
def test_categorical_repr(self):
c = pd.Categorical([1, 2, 3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_ordered(self):
c = pd.Categorical([1, 2, 3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3],
ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
# TODO(wesm): exceeding 80 characters in the console is not good
# behavior
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]""")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]")
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
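# Series reprs of categorical data show the values, a "dtype: category"
# line and the Categories footer.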
def test_categorical_series_repr(self):
s = pd.Series(pd.Categorical([1, 2, 3]))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10)))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_ordered(self):
s = pd.Series(pd.Categorical([1, 2, 3], ordered=True))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
8 days 01:00:00, 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
8 days 01:00:00 < 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
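# CategoricalIndex reprs list the values, the categories, the ordered flag
# and dtype='category'.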
def test_categorical_index_repr(self):
idx = pd.CategoricalIndex(pd.Categorical([1, 2, 3]))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_ordered(self):
i = pd.CategoricalIndex(pd.Categorical([1, 2, 3], ordered=True))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period(self):
# test all lengths
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx)))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
'2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
'2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_frame(self):
# normal DataFrame
dt = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
p = pd.period_range('2011-01', freq='M', periods=5)
df = pd.DataFrame({'dt': dt, 'p': p})
exp = """ dt p
0 2011-01-01 09:00:00-05:00 2011-01
1 2011-01-01 10:00:00-05:00 2011-02
2 2011-01-01 11:00:00-05:00 2011-03
3 2011-01-01 12:00:00-05:00 2011-04
4 2011-01-01 13:00:00-05:00 2011-05"""
df = pd.DataFrame({'dt': pd.Categorical(dt), 'p': pd.Categorical(p)})
self.assertEqual(repr(df), exp)
def test_info(self):
# make sure it works
n = 2500
df = DataFrame({'int64': np.random.randint(100, size=n)})
df['category'] = Series(np.array(list('abcdefghij')).take(
np.random.randint(0, 10, size=n))).astype('category')
df.isnull()
df.info()
df2 = df[df['category'] == 'd']
df2.info()
def test_groupby_sort(self):
# http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
res = self.cat.groupby(['value_group'])['value_group'].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = pd.CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Series(Categorical(["a", "b", "c", "d"], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Series(Categorical(["a", "b", "c", "d"], categories=[
'd', 'c', 'b', 'a'], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Series(Categorical(
[np.nan, "b", "c", np.nan], categories=['d', 'c', 'b', 'a'
], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
cat = Series(Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
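# mode() keeps the categorical dtype and category order; ties return every
# modal value (e.g. [5, 1] below).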
def test_mode(self):
s = Series(Categorical([1, 1, 2, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 1, 1, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5, 1], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True))
res = s.mode()
exp = Series(Categorical([], categories=[5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
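# value_counts() reports unobserved categories with a count of 0 and
# returns a CategoricalIndex.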
def test_value_counts(self):
s = pd.Series(pd.Categorical(
["a", "b", "c", "c", "c", "b"], categories=["c", "a", "b", "d"]))
res = s.value_counts(sort=False)
exp = Series([3, 1, 2, 0],
index=pd.CategoricalIndex(["c", "a", "b", "d"]))
tm.assert_series_equal(res, exp)
res = s.value_counts(sort=True)
exp = Series([3, 2, 1, 0],
index=pd.CategoricalIndex(["c", "b", "a", "d"]))
tm.assert_series_equal(res, exp)
def test_value_counts_with_nan(self):
# https://github.com/pydata/pandas/issues/9443
s = pd.Series(["a", "b", "a"], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
s = pd.Series(["a", "b", None, "a", None, None], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1], index=pd.CategoricalIndex([np.nan, "a", "b"])))
# When we aren't sorting by counts, and np.nan isn't a
# category, it should be last.
tm.assert_series_equal(
s.value_counts(dropna=False, sort=False),
pd.Series([2, 1, 3],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", "a"], categories=["a", "b", np.nan]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1, 0],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", None, "a", None, None], categories=["a", "b", np.nan
]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1],
index=pd.CategoricalIndex([np.nan, "a", "b"])))
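# grouping on a categorical key keeps unobserved categories in the result
# index, with NaN for the aggregated values.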
def test_groupby(self):
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"
], categories=["a", "b", "c", "d"], ordered=True)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
expected = DataFrame({'a': Series(
[1, 2, 4, np.nan], index=pd.CategoricalIndex(
['a', 'b', 'c', 'd'], name='b'))})
result = data.groupby("b").mean()
tm.assert_frame_equal(result, expected)
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A")
exp_idx = pd.CategoricalIndex(['a', 'b', 'z'], name='A')
expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers
gb = df.groupby(['A', 'B'])
expected = DataFrame({'values': Series(
[1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan
], index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']], names=['A', 'B']))})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers with a non-cat
df = df.copy()
df['C'] = ['foo', 'bar'] * 2
gb = df.groupby(['A', 'B', 'C'])
expected = DataFrame({'values': Series(
np.nan, index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y'], ['foo', 'bar']
], names=['A', 'B', 'C']))}).sortlevel()
expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4]
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = pd.DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'],
[1, 'John P. Doe']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name)
g = x.groupby(['person_id'])
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[['person_name']])
result = x.drop_duplicates('person_name')
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates('person_name').iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name='person_id')
expected['person_name'] = expected['person_name'].astype('object')
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']])
# Filter
tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a'])
tm.assert_frame_equal(df.groupby(c).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']])
# GH 9603
df = pd.DataFrame({'a': [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4])
result = df.groupby(c).apply(len)
expected = pd.Series([1, 0, 0, 0],
index=pd.CategoricalIndex(c.values.categories))
expected.index.name = 'a'
tm.assert_series_equal(result, expected)
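# pivot_table over categorical keys builds the full category cross product,
# filling combinations without data with NaN.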
def test_pivot_table(self):
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'])
expected = Series([1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan],
index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']],
names=['A', 'B']),
name='values')
tm.assert_series_equal(result, expected)
def test_count(self):
s = Series(Categorical([np.nan, 1, 2, np.nan],
categories=[5, 4, 3, 2, 1], ordered=True))
result = s.count()
self.assertEqual(result, 2)
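# sort_values() follows the declared category order rather than lexical
# order; na_position controls where missing values end up.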
def test_sort(self):
c = Categorical(["a", "b", "b", "a"], ordered=False)
cat = Series(c)
# 9816 deprecated
with tm.assert_produces_warning(FutureWarning):
c.order()
# sort in the categories order
expected = Series(
Categorical(["a", "a", "b", "b"],
ordered=False), index=[0, 3, 1, 2])
result = cat.sort_values()
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "c", "b", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Series(Categorical(["a", "c", "b", "d"], categories=[
"a", "b", "c", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res.__array__(), exp)
raw_cat1 = Categorical(["a", "b", "c", "d"],
categories=["a", "b", "c", "d"], ordered=False)
raw_cat2 = Categorical(["a", "b", "c", "d"],
categories=["d", "c", "b", "a"], ordered=True)
s = ["a", "b", "c", "d"]
df = DataFrame({"unsort": raw_cat1,
"sort": raw_cat2,
"string": s,
"values": [1, 2, 3, 4]})
# Cats must be sorted in a dataframe
res = df.sort_values(by=["string"], ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
self.assertEqual(res["sort"].dtype, "category")
res = df.sort_values(by=["sort"], ascending=False)
exp = df.sort_values(by=["string"], ascending=True)
self.assert_numpy_array_equal(res["values"], exp["values"])
self.assertEqual(res["sort"].dtype, "category")
self.assertEqual(res["unsort"].dtype, "category")
# unordered cat, but we allow this
df.sort_values(by=["unsort"], ascending=False)
# multi-columns sort
# GH 7848
df = DataFrame({"id": [6, 5, 4, 3, 2, 1],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"], ordered=True)
df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a'])
# sorts 'grade' according to the order of the categories
result = df.sort_values(by=['grade'])
expected = df.iloc[[1, 2, 5, 0, 3, 4]]
tm.assert_frame_equal(result, expected)
# multi
result = df.sort_values(by=['grade', 'id'])
expected = df.iloc[[2, 1, 5, 4, 3, 0]]
tm.assert_frame_equal(result, expected)
# reverse
cat = Categorical(["a", "c", "c", "b", "d"], ordered=True)
res = cat.sort_values(ascending=False)
exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
# some NaN positions
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
def test_slicing(self):
cat = Series(Categorical([1, 2, 3, 4]))
reversed = cat[::-1]
exp = np.array([4, 3, 2, 1])
self.assert_numpy_array_equal(reversed.__array__(), exp)
df = DataFrame({'value': (np.arange(100) + 1).astype('int64')})
df['D'] = pd.cut(df.value, bins=[0, 25, 50, 75, 100])
expected = Series([11, '(0, 25]'], index=['value', 'D'], name=10)
result = df.iloc[10]
tm.assert_series_equal(result, expected)
expected = DataFrame({'value': np.arange(11, 21).astype('int64')},
index=np.arange(10, 20).astype('int64'))
expected['D'] = pd.cut(expected.value, bins=[0, 25, 50, 75, 100])
result = df.iloc[10:20]
tm.assert_frame_equal(result, expected)
expected = Series([9, '(0, 25]'], index=['value', 'D'], name=8)
result = df.loc[8]
tm.assert_series_equal(result, expected)
def test_slicing_and_getting_ops(self):
# systematically test the slicing operations:
# for all slicing ops:
# - returning a dataframe
# - returning a column
# - returning a row
# - returning a single value
cats = pd.Categorical(
["a", "c", "b", "c", "c", "c", "c"], categories=["a", "b", "c"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 2, 3, 4, 5, 6, 7]
df = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
cats2 = pd.Categorical(["b", "c"], categories=["a", "b", "c"])
idx2 = pd.Index(["j", "k"])
values2 = [3, 4]
# 2:4,: | "j":"k",:
exp_df = pd.DataFrame({"cats": cats2, "values": values2}, index=idx2)
# :,"cats" | :,0
exp_col = pd.Series(cats, index=idx, name='cats')
# "j",: | 2,:
exp_row = pd.Series(["b", 3], index=["cats", "values"], dtype="object",
name="j")
# "j","cats | 2,0
exp_val = "b"
# iloc
# frame
res_df = df.iloc[2:4, :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.iloc[2, :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.iloc[2, 0]
self.assertEqual(res_val, exp_val)
# loc
# frame
res_df = df.loc["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.loc["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.loc[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.loc["j", "cats"]
self.assertEqual(res_val, exp_val)
# ix
# frame
# res_df = df.ix["j":"k",[0,1]] # doesn't work?
res_df = df.ix["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.ix["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.ix[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.ix["j", 0]
self.assertEqual(res_val, exp_val)
# iat
res_val = df.iat[2, 0]
self.assertEqual(res_val, exp_val)
# at
res_val = df.at["j", "cats"]
self.assertEqual(res_val, exp_val)
# fancy indexing
exp_fancy = df.iloc[[2]]
res_fancy = df[df["cats"] == "b"]
tm.assert_frame_equal(res_fancy, exp_fancy)
res_fancy = df[df["values"] == 3]
tm.assert_frame_equal(res_fancy, exp_fancy)
# get_value
res_val = df.get_value("j", "cats")
self.assertEqual(res_val, exp_val)
# i : int, slice, or sequence of integers
res_row = df.iloc[2]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
res_df = df.iloc[slice(2, 4)]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[[2, 3]]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
res_df = df.iloc[:, slice(0, 2)]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[:, [0, 1]]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
def test_slicing_doc_examples(self):
# GH 7918
cats = Categorical(
["a", "b", "b", "b", "c", "c", "c"], categories=["a", "b", "c"])
idx = Index(["h", "i", "j", "k", "l", "m", "n", ])
values = [1, 2, 2, 2, 3, 4, 5]
df = DataFrame({"cats": cats, "values": values}, index=idx)
result = df.iloc[2:4, :]
expected = DataFrame(
{"cats": Categorical(
['b', 'b'], categories=['a', 'b', 'c']),
"values": [2, 2]}, index=['j', 'k'])
tm.assert_frame_equal(result, expected)
result = df.iloc[2:4, :].dtypes
expected = Series(['category', 'int64'], ['cats', 'values'])
tm.assert_series_equal(result, expected)
result = df.loc["h":"j", "cats"]
expected = Series(Categorical(['a', 'b', 'b'],
categories=['a', 'b', 'c']),
index=['h', 'i', 'j'], name='cats')
tm.assert_series_equal(result, expected)
result = df.ix["h":"j", 0:1]
expected = DataFrame({'cats': Series(
Categorical(
['a', 'b', 'b'], categories=['a', 'b', 'c']), index=['h', 'i',
'j'])})
tm.assert_frame_equal(result, expected)
def test_assigning_ops(self):
# systematically test the assigning operations:
# for all slicing ops:
# for value in categories and value not in categories:
# - assign a single value -> exp_single_cats_value
# - assign a complete row (mixed values) -> exp_single_row
# assign multiple rows (mixed values) (-> array) -> exp_multi_row
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
cats = pd.Categorical(
["a", "a", "a", "a", "a", "a", "a"], categories=["a", "b"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 1, 1, 1, 1, 1, 1]
orig = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
# changed single row
cats1 = pd.Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx1 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values1 = [1, 1, 2, 1, 1, 1, 1]
exp_single_row = pd.DataFrame(
{"cats": cats1,
"values": values1}, index=idx1)
# changed multiple rows
cats2 = pd.Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx2 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values2 = [1, 1, 2, 2, 1, 1, 1]
exp_multi_row = pd.DataFrame(
{"cats": cats2,
"values": values2}, index=idx2)
# changed part of the cats column
cats3 = pd.Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx3 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values3 = [1, 1, 1, 1, 1, 1, 1]
exp_parts_cats_col = pd.DataFrame(
{"cats": cats3,
"values": values3}, index=idx3)
# changed single value in cats col
cats4 = pd.Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx4 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values4 = [1, 1, 1, 1, 1, 1, 1]
exp_single_cats_value = pd.DataFrame(
{"cats": cats4,
"values": values4}, index=idx4)
# iloc
# ###############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.iloc[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.iloc[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iloc[2, 0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.iloc[2, :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.iloc[2, :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.iloc[2:4, :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.iloc[2:4, :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.iloc[2:4, 0] = ["c", "c"]
# loc
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.loc["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.loc[df.index == "j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.loc["j", "cats"] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.loc["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.loc["j", :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.loc["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.loc["j":"k", :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.loc["j":"k", "cats"] = ["c", "c"]
# ix
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.ix["j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.ix[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.ix["j", 0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.ix["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.ix["j", :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.ix["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.ix["j":"k", :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.ix["j":"k", 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.ix["j":"k", 0] = ["c", "c"]
# iat
df = orig.copy()
df.iat[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iat[2, 0] = "c"
self.assertRaises(ValueError, f)
# at
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.at["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.at["j", "cats"] = "c"
self.assertRaises(ValueError, f)
# fancy indexing
catsf = pd.Categorical(
["a", "a", "c", "c", "a", "a", "a"], categories=["a", "b", "c"])
idxf = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
valuesf = [1, 1, 3, 3, 1, 1, 1]
df = pd.DataFrame({"cats": catsf, "values": valuesf}, index=idxf)
exp_fancy = exp_multi_row.copy()
exp_fancy["cats"].cat.set_categories(["a", "b", "c"], inplace=True)
df[df["cats"] == "c"] = ["b", 2]
tm.assert_frame_equal(df, exp_fancy)
# set_value
df = orig.copy()
df.set_value("j", "cats", "b")
tm.assert_frame_equal(df, exp_single_cats_value)
def f():
df = orig.copy()
df.set_value("j", "cats", "c")
self.assertRaises(ValueError, f)
# Assigning a Category to parts of an int/... column uses the values of
# the Categorical
df = pd.DataFrame({"a": [1, 1, 1, 1, 1],
"b": ["a", "a", "a", "a", "a"]})
exp = pd.DataFrame({"a": [1, "b", "b", 1, 1],
"b": ["a", "a", "b", "b", "a"]})
df.loc[1:2, "a"] = pd.Categorical(["b", "b"], categories=["a", "b"])
df.loc[2:3, "b"] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp)
# Series
orig = Series(pd.Categorical(["b", "b"], categories=["a", "b"]))
s = orig.copy()
s[:] = "a"
exp = Series(pd.Categorical(["a", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[1] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[s.index > 0] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[[False, True]] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s.index = ["x", "y"]
s["y"] = "a"
exp = Series(
pd.Categorical(["b", "a"],
categories=["a", "b"]), index=["x", "y"])
tm.assert_series_equal(s, exp)
# ensure that one can set something to np.nan
s = Series(Categorical([1, 2, 3]))
exp = Series(Categorical([1, np.nan, 3]))
s[1] = np.nan
tm.assert_series_equal(s, exp)
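# ordered categoricals compare element-wise using the categories' order;
# comparing against a differently-ordered categorical, a plain Series or
# an ndarray raises TypeError.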
def test_comparisons(self):
tests_data = [(list("abc"), list("cba"), list("bbb")),
([1, 2, 3], [3, 2, 1], [2, 2, 2])]
for data, reverse, base in tests_data:
cat_rev = pd.Series(pd.Categorical(data, categories=reverse,
ordered=True))
cat_rev_base = pd.Series(pd.Categorical(base, categories=reverse,
ordered=True))
cat = pd.Series(pd.Categorical(data, ordered=True))
cat_base = pd.Series(pd.Categorical(
base, categories=cat.cat.categories, ordered=True))
s = Series(base)
a = np.array(base)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = Series([True, False, False])
tm.assert_series_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = Series([False, False, True])
tm.assert_series_equal(res_rev, exp_rev)
res = cat > cat_base
exp = Series([False, False, True])
tm.assert_series_equal(res, exp)
scalar = base[1]
res = cat > scalar
exp = Series([False, False, True])
exp2 = cat.values > scalar
tm.assert_series_equal(res, exp)
tm.assert_numpy_array_equal(res.values, exp2)
res_rev = cat_rev > scalar
exp_rev = Series([True, False, False])
exp_rev2 = cat_rev.values > scalar
tm.assert_series_equal(res_rev, exp_rev)
tm.assert_numpy_array_equal(res_rev.values, exp_rev2)
# Only categoricals with the same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
# categorical cannot be compared to Series or numpy array, and also
# not the other way around
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# unequal comparison should raise for unordered cats
cat = Series(Categorical(list("abc")))
def f():
cat > "b"
self.assertRaises(TypeError, f)
cat = Series(Categorical(list("abc"), ordered=False))
def f():
cat > "b"
self.assertRaises(TypeError, f)
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Series(Categorical(list("abc"), ordered=True))
self.assertRaises(TypeError, lambda: cat < "d")
self.assertRaises(TypeError, lambda: cat > "d")
self.assertRaises(TypeError, lambda: "d" < cat)
self.assertRaises(TypeError, lambda: "d" > cat)
self.assert_series_equal(cat == "d", Series([False, False, False]))
self.assert_series_equal(cat != "d", Series([True, True, True]))
# And test NaN handling...
cat = Series(Categorical(["a", "b", "c", np.nan]))
exp = Series([True, True, True, False])
res = (cat == cat)
tm.assert_series_equal(res, exp)
def test_cat_equality(self):
# GH 8938
# allow equality comparisons
a = Series(list('abc'), dtype="category")
b = Series(list('abc'), dtype="object")
c = Series(['a', 'b', 'cc'], dtype="object")
d = Series(list('acb'), dtype="object")
e = Categorical(list('abc'))
f = Categorical(list('acb'))
# vs scalar
self.assertFalse((a == 'a').all())
self.assertTrue(((a != 'a') == ~(a == 'a')).all())
self.assertFalse(('a' == a).all())
self.assertTrue((a == 'a')[0])
self.assertTrue(('a' == a)[0])
self.assertFalse(('a' != a)[0])
# vs list-like
self.assertTrue((a == a).all())
self.assertFalse((a != a).all())
self.assertTrue((a == list(a)).all())
self.assertTrue((a == b).all())
self.assertTrue((b == a).all())
self.assertTrue(((~(a == b)) == (a != b)).all())
self.assertTrue(((~(b == a)) == (b != a)).all())
self.assertFalse((a == c).all())
self.assertFalse((c == a).all())
self.assertFalse((a == d).all())
self.assertFalse((d == a).all())
# vs a cat-like
self.assertTrue((a == e).all())
self.assertTrue((e == a).all())
self.assertFalse((a == f).all())
self.assertFalse((f == a).all())
self.assertTrue(((~(a == e) == (a != e)).all()))
self.assertTrue(((~(e == a) == (e != a)).all()))
self.assertTrue(((~(a == f) == (a != f)).all()))
self.assertTrue(((~(f == a) == (f != a)).all()))
# non-equality is not comparable
self.assertRaises(TypeError, lambda: a < b)
self.assertRaises(TypeError, lambda: b < a)
self.assertRaises(TypeError, lambda: a > b)
self.assertRaises(TypeError, lambda: b > a)
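# concat preserves the categorical dtype when both frames share the same
# categories and raises ValueError when they differ.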
def test_concat(self):
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = pd.DataFrame({"cats": cat, "vals": vals})
cat2 = pd.Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = pd.DataFrame({"cats": cat2,
"vals": vals2}, index=pd.Index([0, 1, 0, 1]))
res = pd.concat([df, df])
tm.assert_frame_equal(exp, res)
# Concat should raise if the two categoricals do not have the same
# categories
cat3 = pd.Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_wrong_categories = pd.DataFrame({"cats": cat3, "vals": vals3})
def f():
pd.concat([df, df_wrong_categories])
self.assertRaises(ValueError, f)
# GH 7864
# make sure ordering is preserved
df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"])
df['grade'].cat.set_categories(['e', 'a', 'b'])
df1 = df[0:3]
df2 = df[3:]
self.assert_numpy_array_equal(df['grade'].cat.categories,
df1['grade'].cat.categories)
self.assert_numpy_array_equal(df['grade'].cat.categories,
df2['grade'].cat.categories)
dfx = pd.concat([df1, df2])
dfx['grade'].cat.categories
self.assert_numpy_array_equal(df['grade'].cat.categories,
dfx['grade'].cat.categories)
def test_concat_preserve(self):
# GH 8641
# series concat not preserving category dtype
s = Series(list('abc'), dtype='category')
s2 = Series(list('abd'), dtype='category')
def f():
pd.concat([s, s2])
self.assertRaises(ValueError, f)
result = pd.concat([s, s], ignore_index=True)
expected = Series(list('abcabc')).astype('category')
tm.assert_series_equal(result, expected)
result = pd.concat([s, s])
expected = Series(
list('abcabc'), index=[0, 1, 2, 0, 1, 2]).astype('category')
tm.assert_series_equal(result, expected)
a = Series(np.arange(6, dtype='int64'))
b = Series(list('aabbca'))
df2 = DataFrame({'A': a,
'B': b.astype('category', categories=list('cab'))})
result = pd.concat([df2, df2])
expected = DataFrame({'A': pd.concat([a, a]),
'B': pd.concat([b, b])})
import json
import orca
import pandas as pd
import pandas.util.testing as pdt
import pytest
from .. import server
@pytest.fixture
def tapp():
server.app.config['TESTING'] = True
return server.app.test_client()
@pytest.fixture(scope='module')
def dfa():
return pd.DataFrame(
{'a': [100, 200, 300, 200, 100]},
index=['v', 'w', 'x', 'y', 'z'])
@pytest.fixture(scope='module')
def dfb():
return pd.DataFrame(
{'b': [70, 80, 90],
'a_id': ['w', 'v', 'z']},
index=['a', 'b', 'b'])
@pytest.fixture(scope='module')
def dfa_col(dfa):
return pd.Series([2, 4, 6, 8, 10], index=dfa.index)
@pytest.fixture(scope='module')
def dfb_col(dfb):
return pd.Series([10, 20, 30], index=dfb.index)
@pytest.fixture(scope='module')
def dfa_factor():
return 0.5
@pytest.fixture(scope='module')
def dfb_factor():
return 2
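# module-scoped autouse fixture: registers the tables, computed columns,
# injectables, a broadcast and a no-op step with orca before any test runs.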
@pytest.fixture(scope='module', autouse=True)
def setup_orca(dfa, dfb, dfa_col, dfb_col, dfa_factor, dfb_factor):
orca.add_injectable('a_factor', dfa_factor)
@orca.injectable()
def b_factor():
return dfb_factor
orca.add_table('dfa', dfa)
@orca.table('dfb')
def dfb_table():
return dfb
orca.add_column('dfa', 'acol', dfa_col)
orca.add_column('dfb', 'bcol', dfb_col)
@orca.column('dfa')
def extra_acol(a_factor):
return dfa_col * a_factor
@orca.column('dfb')
def extra_bcol(b_factor):
return dfb_col * b_factor
orca.broadcast('dfb', 'dfa', cast_on='a_id', onto_index=True)
@orca.step()
def test_step(dfa, dfb):
pass
def test_schema(tapp):
rv = tapp.get('/schema')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert set(data['tables']) == {'dfa', 'dfb'}
assert set(data['columns']['dfa']) == {'extra_acol', 'acol', 'a'}
assert set(data['columns']['dfb']) == {'bcol', 'extra_bcol', 'a_id', 'b'}
assert data['steps'] == ['test_step']
assert set(data['injectables']) == {'a_factor', 'b_factor'}
assert data['broadcasts'] == [['dfb', 'dfa']]
def test_list_tables(tapp):
rv = tapp.get('/tables')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert set(data['tables']) == {'dfa', 'dfb'}
def test_table_info(tapp):
rv = tapp.get('/tables/dfa/info')
assert rv.status_code == 200
data = rv.data.decode('utf-8')
assert 'extra_acol' in data
def test_table_preview(tapp):
rv = tapp.get('/tables/dfa/preview')
assert rv.status_code == 200
data = rv.data.decode('utf-8')
assert data == orca.get_table('dfa').to_frame().to_json(orient='split')
def test_table_preview_404(tapp):
rv = tapp.get('/tables/not_a_table/preview')
assert rv.status_code == 404
def test_table_describe(tapp):
rv = tapp.get('/tables/dfa/describe')
assert rv.status_code == 200
data = rv.data.decode('utf-8')
assert data == (orca.get_table('dfa')
.to_frame()
.describe()
.to_json(orient='split'))
def test_table_definition_frame(tapp):
rv = tapp.get('/tables/dfa/definition')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data == {'type': 'dataframe'}
def test_table_definition_func(tapp):
rv = tapp.get('/tables/dfb/definition')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data['type'] == 'function'
assert data['filename'].endswith('test_server.py')
assert isinstance(data['lineno'], int)
assert data['text'] == (
" @orca.table('dfb')\n"
" def dfb_table():\n"
" return dfb\n")
assert 'dfb_table' in data['html']
def test_table_csv(tapp):
rv = tapp.get('/tables/dfb/csv')
assert rv.status_code == 200
data = rv.data.decode('utf-8')
assert rv.mimetype == 'text/csv'
assert data == orca.get_table('dfb').to_frame().to_csv()
def test_list_table_columns(tapp):
rv = tapp.get('/tables/dfb/columns')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert set(data['columns']) == {'a_id', 'b', 'bcol', 'extra_bcol'}
def test_column_definition_local(tapp):
rv = tapp.get('/tables/dfa/columns/a/definition')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data == {'type': 'local'}
def test_column_definition_series(tapp):
rv = tapp.get('/tables/dfa/columns/acol/definition')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data == {'type': 'series'}
def test_column_definition_func(tapp):
rv = tapp.get('/tables/dfa/columns/extra_acol/definition')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data['type'] == 'function'
assert data['filename'].endswith('test_server.py')
assert isinstance(data['lineno'], int)
assert data['text'] == (
" @orca.column('dfa')\n"
" def extra_acol(a_factor):\n"
" return dfa_col * a_factor\n")
assert 'extra_acol' in data['html']
def test_column_describe(tapp):
rv = tapp.get('/tables/dfa/columns/extra_acol/describe')
assert rv.status_code == 200
data = rv.data.decode('utf-8')
assert data == (orca.get_table('dfa')
.extra_acol.describe()
.to_json(orient='split'))
def test_column_csv(tapp, dfa):
rv = tapp.get('/tables/dfa/columns/a/csv')
assert rv.status_code == 200
data = rv.data.decode('utf-8')
assert data == dfa.a.to_csv(path=None)
def test_no_column_404(tapp):
rv = tapp.get('/tables/dfa/columns/not-a-column/csv')
assert rv.status_code == 404
def test_list_injectables(tapp):
rv = tapp.get('/injectables')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert set(data['injectables']) == {'a_factor', 'b_factor'}
def test_injectable_repr(tapp, dfb_factor):
rv = tapp.get('/injectables/b_factor/repr')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data == {'type': str(type(42)), 'repr': '2'}
def test_no_injectable_404(tapp):
rv = tapp.get('/injectables/nope/repr')
assert rv.status_code == 404
def test_injectable_definition_var(tapp):
rv = tapp.get('/injectables/a_factor/definition')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data == {'type': 'variable'}
def test_injectable_definition_func(tapp):
rv = tapp.get('/injectables/b_factor/definition')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data['type'] == 'function'
assert data['filename'].endswith('test_server.py')
assert isinstance(data['lineno'], int)
assert data['text'] == (
" @orca.injectable()\n"
" def b_factor():\n"
" return dfb_factor\n")
assert 'b_factor' in data['html']
def test_list_broadcasts(tapp):
rv = tapp.get('/broadcasts')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data == {'broadcasts': [{'cast': 'dfb', 'onto': 'dfa'}]}
def test_broadcast_definition(tapp):
rv = tapp.get('/broadcasts/dfb/dfa/definition')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data == {
'cast': 'dfb',
'onto': 'dfa',
'cast_on': 'a_id',
'onto_on': None,
'cast_index': False,
'onto_index': True}
def test_no_broadcast_404(tapp):
rv = tapp.get('/broadcasts/table1/table2/definition')
assert rv.status_code == 404
def test_list_steps(tapp):
rv = tapp.get('/steps')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data == {'steps': ['test_step']}
def test_no_step_404(tapp):
rv = tapp.get('/steps/not_a_step/definition')
assert rv.status_code == 404
def test_step_definition(tapp):
rv = tapp.get('/steps/test_step/definition')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data['filename'].endswith('test_server.py')
assert isinstance(data['lineno'], int)
assert data['text'] == (
" @orca.step()\n"
" def test_step(dfa, dfb):\n"
" pass\n")
assert 'test_step' in data['html']
def test_table_groupbyagg_errors(tapp):
# non-existent column
rv = tapp.get('/tables/dfa/groupbyagg?column=notacolumn')
assert rv.status_code == 400
# both by and level missing
rv = tapp.get('/tables/dfa/groupbyagg?column=a')
assert rv.status_code == 400
# bad or missing agg type
rv = tapp.get('/tables/dfa/groupbyagg?column=a&level=0&agg=notanagg')
assert rv.status_code == 400
def test_table_groupbyagg_by_size(tapp):
rv = tapp.get('/tables/dfa/groupbyagg?by=a&column=a&agg=size')
assert rv.status_code == 200
data = rv.data.decode('utf-8')
test = pd.read_json(data, orient='split', typ='series')
pdt.assert_series_equal(
test,
pd.Series([2, 2, 1], index=[100, 200, 300]),
check_names=False)
def test_table_groupbyagg_level_mean(tapp):
rv = tapp.get('/tables/dfb/groupbyagg?level=0&column=b&agg=mean')
assert rv.status_code == 200
data = rv.data.decode('utf-8')
test = pd.read_json(data, orient='split', typ='series')
pdt.assert_series_equal(
test,
pd.Series([70, 85], index=['a', 'b'], name='b'))
def test_table_groupbyagg_level_median(tapp):
rv = tapp.get('/tables/dfb/groupbyagg?level=0&column=b&agg=median')
assert rv.status_code == 200
data = rv.data.decode('utf-8')
test = pd.read_json(data, orient='split', typ='series')
pdt.assert_series_equal(
test,
pd.Series([70, 85], index=['a', 'b'], name='b'))
def test_table_groupbyagg_level_sum(tapp):
rv = tapp.get('/tables/dfb/groupbyagg?level=0&column=b&agg=sum')
assert rv.status_code == 200
data = rv.data.decode('utf-8')
test = pd.read_json(data, orient='split', typ='series')
pdt.assert_series_equal(
test,
pd.Series([70, 170], index=['a', 'b'], name='b'))
def test_table_groupbyagg_level_std(tapp):
rv = tapp.get('/tables/dfb/groupbyagg?level=0&column=b&agg=std')
assert rv.status_code == 200
data = rv.data.decode('utf-8')
test = pd.read_json(data, orient='split', typ='series')
pdt.assert_series_equal(
test,
pd.Series(
[pd.np.nan,
|
pd.Series([80, 90])
|
pandas.Series
|
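# Illustrative sketch of what the /groupbyagg endpoint tested above boils down
# to: a pandas groupby on the index level followed by an aggregation. The tiny
# frame mirrors the dfb fixture; nothing beyond that is assumed.
import pandas as pd
dfb = pd.DataFrame({"b": [70, 80, 90], "a_id": ["w", "v", "z"]},
                   index=["a", "b", "b"])
by_level = dfb.groupby(level=0)["b"]
by_level.mean()  # a -> 70.0, b -> 85.0
by_level.std()   # a -> NaN (only one row), b -> std of [80, 90]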
# This file defines the error table as a pandas object
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from dotmap import DotMap
from collections import defaultdict
from kmodes.kmodes import KModes
class error_table():
def __init__(self, space=None, table=None, column_type = None):
assert space is not None or table is not None
if space is not None:
self.space= space
self.column_names = []
self.column_type = {}
for i in range(space.fixedFlattenedDimension):
self.column_names.append(space.meaningOfFlatCoordinate(i))
self.column_type[space.meaningOfFlatCoordinate(i)] = \
space.coordinateIsNumerical(i)
self.column_names.append("rho")
self.column_type["rho"] = True # Set to numerical by default. Can be updated later.
self.table = pd.DataFrame(columns=self.column_names)
self.ignore_locs = []
else:
self.table = table
self.column_names = table.columns
if column_type is None:
self.column_type = {col:True for col in self.column_names}
else:
self.column_type = column_type
self.ignore_locs = []
def update_column_names(self, column_names):
assert len(self.table.columns) == len(column_names)
self.table.columns = column_names
self.column_names = column_names
def update_error_table(self, sample, rho):
sample = self.space.flatten(sample, fixedDimension=True)
sample_dict = {}
for k, v in zip(self.table.columns, list(sample)):
if np.any(np.array(sample) == None):
locs = np.where(np.array(sample) == None)
self.ignore_locs = self.ignore_locs + list(locs[0])
sample_dict[k] = float(v) if self.column_type[k] and v is not None else v
if isinstance(rho, (list, tuple)):
for i,r in enumerate(rho[:-1]):
if "rho_" + str(i) not in self.column_names:
self.column_names.append("rho_"+str(i))
if isinstance(r, bool):
self.column_type["rho_" + str(i)] = False
else:
self.column_type["rho_" + str(i)] = True
sample_dict["rho_"+str(i)] = r
sample_dict["rho"] = rho[-1]
if isinstance(rho[-1], bool) and self.column_type["rho"]:
print("Updating column type")
self.column_type["rho"] = False
else:
sample_dict["rho"] = rho
if isinstance(rho, bool) and self.column_type["rho"]:
print("Updating column type")
self.column_type["rho"] = False
self.ignore_locs = list(set(tuple(self.ignore_locs)))
self.table = self.table.append(sample_dict, ignore_index=True)
def get_column_by_index(self, index):
if isinstance(index, int):
index = list([index])
if len(index) < 1:
print("No indices provided: returning all samples")
elif max(index) >= len(self.table.columns):
for i in index:
if i >= len(self.table.columns):
index.remove(i)
print("Tried to access index not in error table")
if len(self.table) > 0:
names_index = self.table.columns[index]
return self.table[names_index]
else:
print("No entries in error table yet")
return None
def get_column_by_name(self, column_names):
index = []
if isinstance(column_names, str):
if column_names in self.table.columns:
index.append(column_names)
else:
for s in column_names:
if s in self.table.columns:
index.append(s)
return self.table[index]
def get_samples_by_index(self, index):
if isinstance(index, int):
index = list([index])
if max(index) >= len(self.table):
print("Trying to access samples not in the table")
for i in index:
if i >= len(self.table):
index.remove(i)
return self.table.iloc[index]
def split_table(self, column_names=None):
if column_names is None:
column_names = self.column_names
numerical, categorical = [], []
for c in column_names:
if self.column_type[c]:
numerical.append(c)
else:
categorical.append(c)
return self.get_column_by_name(numerical), self.get_column_by_name(categorical)
def get_random_samples(self, count=5):
if count > len(self.table):
return list(range(len(self.table)))
else:
sample_ids = set()
while len(sample_ids) < count:
i = np.random.randint(len(self.table))
sample_ids.add(i)
return list(sample_ids)
def build_normalized(self, column_names=None):
if len(self.table) < 1:
return pd.DataFrame(), pd.DataFrame()
if column_names is None:
column_names = self.column_names
numerical, categorical = self.split_table(column_names=column_names)
if len(categorical.columns) + len(numerical.columns) == 0:
return pd.DataFrame(), pd.DataFrame()
# Normalize tables (only for numerical table)
stats = numerical.describe()
normalized_dict = {r: (numerical[r] - stats[r]['min']) / (stats[r]['max'] - stats[r]['min'])
for r in numerical.columns}
normalized_table = pd.DataFrame(normalized_dict)
return normalized_table, categorical, \
np.array([stats[r]['min'] for r in numerical.columns]),\
np.array([stats[r]['max'] for r in numerical.columns])
def build_standardized(self, column_names=None):
if len(self.table) < 1:
return pd.DataFrame(), pd.DataFrame()
if column_names is None:
column_names = self.column_names
numerical, categorical = self.split_table(column_names=column_names)
if len(categorical.columns) + len(numerical.columns) == 0:
return pd.DataFrame(), pd.DataFrame()
# Normalize tables (only for numerical table)
stats = numerical.describe()
standardized_dict = {r: (numerical[r] - stats[r]['mean']) / stats[r]['std']
for r in numerical.columns}
standardized_table =
|
pd.DataFrame(standardized_dict)
|
pandas.DataFrame
|
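# Illustrative sketch of the scaling done in build_normalized /
# build_standardized above, on a toy numerical frame (the column name "rho" is
# only borrowed from the class for illustration).
import pandas as pd
num = pd.DataFrame({"rho": [1.0, 2.0, 4.0]})
stats = num.describe()
normalized = (num - stats.loc["min"]) / (stats.loc["max"] - stats.loc["min"])
standardized = (num - stats.loc["mean"]) / stats.loc["std"]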
import string
import warnings
import numpy as np
from pandas import (
DataFrame,
MultiIndex,
NaT,
Series,
date_range,
isnull,
period_range,
timedelta_range,
)
from .pandas_vb_common import tm
class GetNumericData:
def setup(self):
self.df = DataFrame(np.random.randn(10000, 25))
self.df["foo"] = "bar"
self.df["bar"] = "baz"
self.df = self.df._consolidate()
def time_frame_get_numeric_data(self):
self.df._get_numeric_data()
class Lookup:
def setup(self):
self.df = DataFrame(np.random.randn(10000, 8), columns=list("abcdefgh"))
self.df["foo"] = "bar"
self.row_labels = list(self.df.index[::10])[:900]
self.col_labels = list(self.df.columns) * 100
self.row_labels_all = np.array(
list(self.df.index) * len(self.df.columns), dtype="object"
)
self.col_labels_all = np.array(
list(self.df.columns) * len(self.df.index), dtype="object"
)
def time_frame_fancy_lookup(self):
self.df.lookup(self.row_labels, self.col_labels)
def time_frame_fancy_lookup_all(self):
self.df.lookup(self.row_labels_all, self.col_labels_all)
class Reindex:
def setup(self):
N = 10 ** 3
self.df = DataFrame(np.random.randn(N * 10, N))
self.idx = np.arange(4 * N, 7 * N)
self.idx_cols = np.random.randint(0, N, N)
self.df2 = DataFrame(
{
c: {
0: np.random.randint(0, 2, N).astype(np.bool_),
1: np.random.randint(0, N, N).astype(np.int16),
2: np.random.randint(0, N, N).astype(np.int32),
3: np.random.randint(0, N, N).astype(np.int64),
}[np.random.randint(0, 4)]
for c in range(N)
}
)
def time_reindex_axis0(self):
self.df.reindex(self.idx)
def time_reindex_axis1(self):
self.df.reindex(columns=self.idx_cols)
def time_reindex_axis1_missing(self):
self.df.reindex(columns=self.idx)
def time_reindex_both_axes(self):
self.df.reindex(index=self.idx, columns=self.idx_cols)
def time_reindex_upcast(self):
self.df2.reindex(np.random.permutation(range(1200)))
class Rename:
def setup(self):
N = 10 ** 3
self.df = DataFrame(np.random.randn(N * 10, N))
self.idx = np.arange(4 * N, 7 * N)
self.dict_idx = {k: k for k in self.idx}
self.df2 = DataFrame(
{
c: {
0: np.random.randint(0, 2, N).astype(np.bool_),
1: np.random.randint(0, N, N).astype(np.int16),
2: np.random.randint(0, N, N).astype(np.int32),
3: np.random.randint(0, N, N).astype(np.int64),
}[np.random.randint(0, 4)]
for c in range(N)
}
)
def time_rename_single(self):
self.df.rename({0: 0})
def time_rename_axis0(self):
self.df.rename(self.dict_idx)
def time_rename_axis1(self):
self.df.rename(columns=self.dict_idx)
def time_rename_both_axes(self):
self.df.rename(index=self.dict_idx, columns=self.dict_idx)
def time_dict_rename_both_axes(self):
self.df.rename(index=self.dict_idx, columns=self.dict_idx)
class Iteration:
# mem_itertuples_* benchmarks are slow
timeout = 120
def setup(self):
N = 1000
self.df = DataFrame(np.random.randn(N * 10, N))
self.df2 = DataFrame(np.random.randn(N * 50, 10))
self.df3 = DataFrame(
np.random.randn(N, 5 * N), columns=["C" + str(c) for c in range(N * 5)]
)
self.df4 = DataFrame(np.random.randn(N * 1000, 10))
def time_items(self):
# (monitor no-copying behaviour)
if hasattr(self.df, "_item_cache"):
self.df._item_cache.clear()
for name, col in self.df.items():
pass
def time_items_cached(self):
for name, col in self.df.items():
pass
def time_iteritems_indexing(self):
for col in self.df3:
self.df3[col]
def time_itertuples_start(self):
self.df4.itertuples()
def time_itertuples_read_first(self):
next(self.df4.itertuples())
def time_itertuples(self):
for row in self.df4.itertuples():
pass
def time_itertuples_to_list(self):
list(self.df4.itertuples())
def mem_itertuples_start(self):
return self.df4.itertuples()
def peakmem_itertuples_start(self):
self.df4.itertuples()
def mem_itertuples_read_first(self):
return next(self.df4.itertuples())
def peakmem_itertuples(self):
for row in self.df4.itertuples():
pass
def mem_itertuples_to_list(self):
return list(self.df4.itertuples())
def peakmem_itertuples_to_list(self):
list(self.df4.itertuples())
def time_itertuples_raw_start(self):
self.df4.itertuples(index=False, name=None)
def time_itertuples_raw_read_first(self):
next(self.df4.itertuples(index=False, name=None))
def time_itertuples_raw_tuples(self):
for row in self.df4.itertuples(index=False, name=None):
pass
def time_itertuples_raw_tuples_to_list(self):
list(self.df4.itertuples(index=False, name=None))
def mem_itertuples_raw_start(self):
return self.df4.itertuples(index=False, name=None)
def peakmem_itertuples_raw_start(self):
self.df4.itertuples(index=False, name=None)
def peakmem_itertuples_raw_read_first(self):
next(self.df4.itertuples(index=False, name=None))
def peakmem_itertuples_raw(self):
for row in self.df4.itertuples(index=False, name=None):
pass
def mem_itertuples_raw_to_list(self):
return list(self.df4.itertuples(index=False, name=None))
def peakmem_itertuples_raw_to_list(self):
list(self.df4.itertuples(index=False, name=None))
def time_iterrows(self):
for row in self.df.iterrows():
pass
class ToString:
def setup(self):
self.df = DataFrame(np.random.randn(100, 10))
def time_to_string_floats(self):
self.df.to_string()
class ToHTML:
def setup(self):
nrows = 500
self.df2 = DataFrame(np.random.randn(nrows, 10))
self.df2[0] = period_range("2000", periods=nrows)
self.df2[1] = range(nrows)
def time_to_html_mixed(self):
self.df2.to_html()
class ToDict:
params = [["dict", "list", "series", "split", "records", "index"]]
param_names = ["orient"]
def setup(self, orient):
data = np.random.randint(0, 1000, size=(10000, 4))
self.int_df =
|
DataFrame(data)
|
pandas.DataFrame
|
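# Illustrative sketch of the orient values parametrised in the ToDict
# benchmark above; the two-row frame is made up purely to show the shapes.
import pandas as pd
df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
df.to_dict(orient="dict")     # {'a': {0: 1, 1: 2}, 'b': {0: 3, 1: 4}}
df.to_dict(orient="list")     # {'a': [1, 2], 'b': [3, 4]}
df.to_dict(orient="records")  # [{'a': 1, 'b': 3}, {'a': 2, 'b': 4}]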
import pymongo
from pymongo import MongoClient
from tkinter import *
import time
import datetime
import random
from tkinter import messagebox
import numpy as np
import pandas as pd
from tkinter import simpledialog
#GLOBAL VALUES
d_c = []
x = pd.DataFrame()
y = pd.DataFrame()
X_train = pd.DataFrame()
X_test = pd.DataFrame()
y_train = pd.DataFrame()
y_test =
|
pd.DataFrame()
|
pandas.DataFrame
|
import unittest
from random import random
from craft_ai.pandas import CRAFTAI_PANDAS_ENABLED
if CRAFTAI_PANDAS_ENABLED:
import copy
import pandas as pd
from numpy.random import randn
import craft_ai.pandas
from .data import pandas_valid_data, valid_data
from .utils import generate_entity_id
from . import settings
AGENT_ID_1_BASE = "test_pandas_1"
AGENT_ID_2_BASE = "test_pandas_2"
GENERATOR_ID_BASE = "test_pandas_generator"
SIMPLE_AGENT_CONFIGURATION = pandas_valid_data.SIMPLE_AGENT_CONFIGURATION
SIMPLE_AGENT_BOOSTING_CONFIGURATION = (
pandas_valid_data.SIMPLE_AGENT_BOOSTING_CONFIGURATION
)
SIMPLE_AGENT_BOOSTING_CONFIGURATION_WITH_GEN_TYPE = (
pandas_valid_data.SIMPLE_AGENT_BOOSTING_CONFIGURATION_WITH_GEN_TYPE
)
AGENT_BOOSTING_CONFIGURATION_WITHOUT_TIMEZONE = (
pandas_valid_data.AGENT_BOOSTING_CONFIGURATION_WITHOUT_TIMEZONE
)
SIMPLE_AGENT_DATA = pandas_valid_data.SIMPLE_AGENT_DATA
SIMPLE_AGENT_BOOSTING_DATA = pandas_valid_data.SIMPLE_AGENT_BOOSTING_DATA
SIMPLE_AGENT_BOOSTING_MANY_DATA = pandas_valid_data.SIMPLE_AGENT_BOOSTING_MANY_DATA
AGENT_BOOSTING_WITHOUT_TIMEZONE_DATA = (
pandas_valid_data.AGENT_BOOSTING_WITHOUT_TIMEZONE_DATA
)
SIMPLE_AGENT_MANY_DATA = pandas_valid_data.SIMPLE_AGENT_MANY_DATA
COMPLEX_AGENT_CONFIGURATION = pandas_valid_data.COMPLEX_AGENT_CONFIGURATION
COMPLEX_AGENT_CONFIGURATION_2 = pandas_valid_data.COMPLEX_AGENT_CONFIGURATION_2
COMPLEX_AGENT_DATA = pandas_valid_data.COMPLEX_AGENT_DATA
COMPLEX_AGENT_DATA_2 = pandas_valid_data.COMPLEX_AGENT_DATA_2
DATETIME_AGENT_CONFIGURATION = pandas_valid_data.DATETIME_AGENT_CONFIGURATION
DATETIME_AGENT_DATA = pandas_valid_data.DATETIME_AGENT_DATA
MISSING_AGENT_CONFIGURATION = pandas_valid_data.MISSING_AGENT_CONFIGURATION
MISSING_AGENT_DATA = pandas_valid_data.MISSING_AGENT_DATA
MISSING_AGENT_DATA_DECISION = pandas_valid_data.MISSING_AGENT_DATA_DECISION
INVALID_PYTHON_IDENTIFIER_CONFIGURATION = (
pandas_valid_data.INVALID_PYTHON_IDENTIFIER_CONFIGURATION
)
INVALID_PYTHON_IDENTIFIER_DATA = pandas_valid_data.INVALID_PYTHON_IDENTIFIER_DATA
INVALID_PYTHON_IDENTIFIER_DECISION = (
pandas_valid_data.INVALID_PYTHON_IDENTIFIER_DECISION
)
EMPTY_TREE = pandas_valid_data.EMPTY_TREE
CLIENT = craft_ai.pandas.Client(settings.CRAFT_CFG)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasSimpleAgent(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "SimpleAgent")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(SIMPLE_AGENT_CONFIGURATION, self.agent_id)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_add_agent_operations_df_bad_index(self):
df = pd.DataFrame(randn(10, 5), columns=["a", "b", "c", "d", "e"])
self.assertRaises(
craft_ai.pandas.errors.CraftAiBadRequestError,
CLIENT.add_agent_operations,
self.agent_id,
df,
)
def test_add_agent_operations_df(self):
CLIENT.add_agent_operations(self.agent_id, SIMPLE_AGENT_DATA)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
SIMPLE_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
SIMPLE_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
def test_add_agent_operations_df_websocket(self):
CLIENT.add_agent_operations(self.agent_id, SIMPLE_AGENT_DATA, True)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
SIMPLE_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
SIMPLE_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
def test_add_agent_operations_df_unexpected_property(self):
df = pd.DataFrame(
randn(300, 6),
columns=["a", "b", "c", "d", "e", "f"],
index=pd.date_range("20200101", periods=300, freq="T").tz_localize(
"Europe/Paris"
),
)
self.assertRaises(
craft_ai.pandas.errors.CraftAiBadRequestError,
CLIENT.add_agent_operations,
self.agent_id,
df,
)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasComplexAgent(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "ComplexAgent")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(COMPLEX_AGENT_CONFIGURATION, self.agent_id)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_add_agent_operations_df_complex_agent(self):
CLIENT.add_agent_operations(self.agent_id, COMPLEX_AGENT_DATA)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
COMPLEX_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
def test_add_agent_operations_df_complex_agent_websocket(self):
CLIENT.add_agent_operations(self.agent_id, COMPLEX_AGENT_DATA, True)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
COMPLEX_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
def test_add_agent_operations_df_without_tz(self):
test_df = COMPLEX_AGENT_DATA.drop(columns="tz")
CLIENT.add_agent_operations(self.agent_id, test_df)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
COMPLEX_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
def test_add_agent_operations_df_without_tz_websocket(self):
test_df = COMPLEX_AGENT_DATA.drop(columns="tz")
CLIENT.add_agent_operations(self.agent_id, test_df, True)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
COMPLEX_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
COMPLEX_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasMissingAgent(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "MissingAgent")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(MISSING_AGENT_CONFIGURATION, self.agent_id)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_add_agent_operations_df_missing_agent(self):
CLIENT.add_agent_operations(self.agent_id, MISSING_AGENT_DATA)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
MISSING_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
MISSING_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
def test_add_agent_operations_df_missing_agent_websocket(self):
CLIENT.add_agent_operations(self.agent_id, MISSING_AGENT_DATA, True)
agent = CLIENT.get_agent(self.agent_id)
self.assertEqual(
agent["firstTimestamp"],
MISSING_AGENT_DATA.first_valid_index().value // 10 ** 9,
)
self.assertEqual(
agent["lastTimestamp"],
MISSING_AGENT_DATA.last_valid_index().value // 10 ** 9,
)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasSimpleAgentWithData(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "SimpleAgentWData")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(SIMPLE_AGENT_CONFIGURATION, self.agent_id)
CLIENT.add_agent_operations(self.agent_id, SIMPLE_AGENT_DATA)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_get_agent_operations_df(self):
df = CLIENT.get_agent_operations(self.agent_id)
self.assertEqual(len(df), 300)
self.assertEqual(len(df.dtypes), 5)
self.assertEqual(
df.first_valid_index(),
pd.Timestamp("2020-01-01 00:00:00", tz="Europe/Paris"),
)
self.assertEqual(
df.last_valid_index(),
pd.Timestamp("2020-01-01 04:59:00", tz="Europe/Paris"),
)
def test_get_agent_states_df(self):
df = CLIENT.get_agent_states(self.agent_id)
self.assertEqual(len(df), 180)
self.assertEqual(len(df.dtypes), 5)
self.assertEqual(
df.first_valid_index(),
pd.Timestamp("2020-01-01 00:00:00", tz="Europe/Paris"),
)
self.assertEqual(
df.last_valid_index(),
pd.Timestamp("2020-01-01 04:58:20", tz="Europe/Paris"),
)
def test_tree_visualization(self):
tree1 = CLIENT.get_agent_decision_tree(
self.agent_id, DATETIME_AGENT_DATA.last_valid_index().value // 10 ** 9
)
craft_ai.pandas.utils.create_tree_html(tree1, "", "constant", None, 500)
def test_display_tree_raised_error(self):
tree1 = CLIENT.get_agent_decision_tree(
self.agent_id, DATETIME_AGENT_DATA.last_valid_index().value // 10 ** 9
)
self.assertRaises(
craft_ai.pandas.errors.CraftAiError,
craft_ai.pandas.utils.display_tree,
tree1,
)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasSimpleAgentWithOperations(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "SimpleAgentWOp")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(valid_data.VALID_CONFIGURATION, self.agent_id)
CLIENT.add_agent_operations(self.agent_id, valid_data.VALID_OPERATIONS_SET)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_get_decision_tree_with_pdtimestamp(self):
# test if we get the same decision tree
decision_tree = CLIENT.get_agent_decision_tree(
self.agent_id, pd.Timestamp(valid_data.VALID_TIMESTAMP, unit="s", tz="UTC")
)
ground_truth_decision_tree = CLIENT.get_agent_decision_tree(
self.agent_id, valid_data.VALID_TIMESTAMP
)
self.assertIsInstance(decision_tree, dict)
self.assertNotEqual(decision_tree.get("_version"), None)
self.assertNotEqual(decision_tree.get("configuration"), None)
self.assertNotEqual(decision_tree.get("trees"), None)
self.assertEqual(decision_tree, ground_truth_decision_tree)
@unittest.skipIf(CRAFTAI_PANDAS_ENABLED is False, "pandas is not enabled")
class TestPandasComplexAgentWithData(unittest.TestCase):
def setUp(self):
self.agent_id = generate_entity_id(AGENT_ID_1_BASE + "ComplexAgentWData")
CLIENT.delete_agent(self.agent_id)
CLIENT.create_agent(COMPLEX_AGENT_CONFIGURATION, self.agent_id)
CLIENT.add_agent_operations(self.agent_id, COMPLEX_AGENT_DATA)
def tearDown(self):
CLIENT.delete_agent(self.agent_id)
def test_get_agent_operations_df_complex_agent(self):
df = CLIENT.get_agent_operations(self.agent_id)
self.assertEqual(len(df), 10)
self.assertEqual(len(df.dtypes), 3)
self.assertEqual(
df.first_valid_index(),
pd.Timestamp("2020-01-01 00:00:00", tz="Europe/Paris"),
)
self.assertEqual(
df.last_valid_index(),
|
pd.Timestamp("2020-01-10 00:00:00", tz="Europe/Paris")
|
pandas.Timestamp
|
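# Illustrative sketch of the timestamp arithmetic used throughout the tests
# above: .value is nanoseconds since the Unix epoch (UTC for a tz-aware
# Timestamp), so integer division by 10 ** 9 gives Unix seconds.
import pandas as pd
ts = pd.Timestamp("2020-01-01 00:00:00", tz="Europe/Paris")
unix_seconds = ts.value // 10 ** 9  # 1577833200, i.e. 2019-12-31 23:00:00 UTC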
import numpy as np
import pandas as pd
from sklearn.base import clone, BaseEstimator, ClassifierMixin
from sklearn.model_selection import GroupKFold, KFold
from sklearn.exceptions import NotFittedError
from sklearn.utils import check_random_state
from collections import Counter, defaultdict
from itertools import chain
__all__ = [
'StratifiedGroupKFold',
'RepeatedKFold',
'RepeatedGroupKFold',
'RepeatedStratifiedGroupKFold',
'AdversarialValidation',
'make_adversarial_validation',
]
def shuffle_labels(labels, random_state=None):
rstate = np.random.RandomState(random_state)
unique_labels = np.unique(labels)
random_labels = rstate.permutation(unique_labels)
mapper = dict(zip(unique_labels, random_labels))
return labels.map(mapper)
class RepeatedGroupKFold():
"""Repeated Group KFold
Same as RepeatedKFold, but each group appears in only one fold on each repeat.
Parameters
----------
n_splits : int, default=5
Number of splits. Must be at least 2.
n_repeats : int, optional
Number of times cross-validator needs to be repeated.
random_state : int, RandomState instance or None, optional, default=None
If None, the random number generator is the RandomState instance used by np.random.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
"""
def __init__(self, n_splits=5, n_repeats=3, random_state=0):
self.n_splits = n_splits
self.n_repeats = n_repeats
self.random_state = random_state
def split(self, X, y, groups):
splits = []
for i in range(self.n_repeats):
groups_i = shuffle_labels(groups, self.random_state + i)
cv = GroupKFold(self.n_splits)
split = cv.split(X, y, groups_i)
splits.append(split)
return chain(*splits)
def get_n_splits(self):
return self.n_repeats * self.n_splits
class RepeatedKFold():
"""Repeated KFold (first split DO shuffled)
Same as RepeatedKFold but each group presents only in one fold on each repeat.
Parameters
----------
n_splits : int, default=5
Number of splits. Must be at least 2.
n_repeats : int, optional
Number of times cross-validator needs to be repeated.
random_state : int, RandomState instance or None, optional, default=None
If None, the random number generator is the RandomState instance used by np.random.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
"""
def __init__(self, n_splits=5, n_repeats=3, random_state=None):
self.n_splits = n_splits
self.n_repeats = n_repeats
self.random_state = random_state
def split(self, X, y, groups=None):
rstate = check_random_state(self.random_state)
for _ in range(self.n_repeats):
seed = rstate.randint(2**32-1)
cv = KFold(self.n_splits, shuffle=True, random_state=seed)
for trn, oof in cv.split(X, y):
yield trn, oof
def get_n_splits(self):
return self.n_repeats * self.n_splits
class StratifiedGroupKFold():
"""Stratified Group KFold
Same as StratifiedKFold, but each group appears in only one fold.
Parameters
----------
n_splits : int, default=5
Number of splits. Must be at least 2.
n_batches : int, default=1024
Split groups into min(n_batches, n_groups) parts.
Must be greater than n_splits.
shuffle : boolean, optional
Whether to shuffle the groups before splitting them into batches.
random_state : int, RandomState instance or None, optional, default=0
If None, the random number generator is the RandomState instance used by np.random.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
"""
def __init__(self, n_splits=5, n_batches=1024, shuffle=False, random_state=0):
self.n_splits = n_splits
self.n_batches = n_batches
self.shuffle = shuffle
self.random_state = random_state
def get_n_splits(self, X=None, y=None, groups=None):
return self.n_splits
def split(self, X, y, groups):
# Global stats
groups_unique = set(groups.unique())
labels = np.sort(y.unique())
counts = [groups[y == label].value_counts(sort=False) for label in labels]
counts = pd.concat(counts, axis=1).fillna(0).astype(int)
counts.columns = labels
labels_total = counts.sum()
if self.shuffle:
counts = counts.sample(frac=1, random_state=self.random_state)
# Mini-Batches
n = len(groups_unique)
batch_size = max(n // self.n_batches, 1)
batches = [counts.iloc[k:k+batch_size] for k in range(0, n, batch_size)]
batches.sort(key=lambda batch: -batch.sum().std())
# Local stats (per fold)
fold_labels = pd.DataFrame(0, columns=labels, index=range(self.n_splits))
fold_groups = defaultdict(set)
for batch in batches:
batch_groups = batch.index
batch_labels = batch.sum()
best_idx = None
best_std = None
for i in range(self.n_splits):
fold_labels.loc[i] += batch_labels
fold_std = fold_labels.std().mean()
if best_std is None or fold_std < best_std:
best_std = fold_std
best_idx = i
fold_labels.loc[i] -= batch_labels
fold_labels.loc[best_idx] += batch_labels
fold_groups[best_idx].update(batch_groups)
# Yield indices
for oof_groups in fold_groups.values():
trn_groups = groups_unique - oof_groups
trn = groups[groups.isin(trn_groups)].index
oof = groups[groups.isin(oof_groups)].index
yield trn, oof
class RepeatedStratifiedGroupKFold():
"""Repeated Stratified Group KFold
Same as RepeatedStratifiedKFold, but each group appears in only one fold on each repeat.
Parameters
----------
n_splits : int, default=5
Number of splits. Must be at least 2.
n_repeats : int, optional
Number of times cross-validator needs to be repeated.
n_batches : int, default=1024
Split groups into min(n_batches, n_groups) parts.
Must be greater than n_splits.
random_state : int, RandomState instance or None, optional, default=0
If None, the random number generator is the RandomState instance used by np.random.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
"""
def __init__(self, n_splits=5, n_repeats=3, n_batches=1024, random_state=None):
self.n_splits = n_splits
self.n_repeats = n_repeats
self.n_batches = n_batches
self.random_state = random_state
def get_n_splits(self, X=None, y=None, groups=None):
return self.n_splits * self.n_repeats
def split(self, X, y, groups):
# Global stats
groups_unique = set(groups.unique())
labels = np.sort(y.unique())
counts = [groups[y == label].value_counts(sort=False) for label in labels]
counts =
|
pd.concat(counts, axis=1)
|
pandas.concat
|
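# Illustrative sketch of the counts table built in StratifiedGroupKFold.split
# above: one value_counts per label, concatenated column-wise so that rows are
# groups and columns are labels (toy y/groups made up for the example).
import numpy as np
import pandas as pd
y = pd.Series([0, 0, 1, 1, 1])
groups = pd.Series(["g1", "g1", "g2", "g3", "g3"])
labels = np.sort(y.unique())
counts = pd.concat(
    [groups[y == label].value_counts(sort=False) for label in labels], axis=1
).fillna(0).astype(int)
counts.columns = labels  # rows: g1, g2, g3; columns: 0, 1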
import sys
import hypercomparison.utils
import hypercomparison.networks
import hypercomparison.correlation_and_greedy_routing
from main import *
import torch
import numpy as np
import pandas as pd
import networkx as nx
logger = hypercomparison.utils.get_logger(__name__)
network_name = sys.argv[1]
poincare_coordinates_path = sys.argv[2]
out_path = sys.argv[-1]
result_list = []
def convert_embeddings(network, poincare_coord):
network.index_nodes()
network_size = len(network.G.nodes())
result = []
for i in range(network_size):
node = network.id2node[i]
result.append([
node, poincare_coord[i][0], poincare_coord[i][1]])
result_df = pd.DataFrame(result, columns=['node', 'x', 'y'])
embeddings = result_df.set_index('node').T.to_dict('list')
embeddings = {str(k):v for k,v in embeddings.items()}
return embeddings
logger.info("Working on network {}, poincare maps".format(network_name))
network = hypercomparison.networks.RealNetwork(network_name)
features = torch.DoubleTensor(nx.to_numpy_matrix(network.G))
poincare_coord, _ = compute_poincare_maps(
features, None, poincare_coordinates_path,
mode = 'KNN', epochs=1000, earlystop=0.00001, cuda=0)
poincare_coordinates =
|
pd.read_csv(poincare_coordinates_path + '.csv', header=None)
|
pandas.read_csv
|
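# Illustrative sketch of reading a headerless coordinates file as done above
# (the real path comes from sys.argv); io.StringIO stands in for the file and
# the numbers are made up.
import io
import pandas as pd
csv_text = "0.12,0.34\n-0.56,0.78\n"
coords = pd.read_csv(io.StringIO(csv_text), header=None)
# with header=None the columns are auto-numbered 0 and 1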
'''
pandas demo - creating a DataFrame (pandas-1.1.2)
'''
import numpy as np
import pandas as pd
from pandas import DataFrame
def create_from_file():
data = pd.DataFrame([['99', '80'],
['67', '98'],
['69', '98'],
['70', '97']],
index=['a', 'b', 'c', 'd'],
columns=['语文', '数学'])
data.index.names = ['姓名']
print(data.index.names)
data2 = data.reset_index()
data2.columns = ['姓名', '语文', '数学']
data.to_excel("exams.xls", index_label='姓名')
print('*' * 30, 'create from xls')
df = pd.read_excel("exams.xls", header=0, index_col=0)
print(df)
data.to_excel("exams.xlsx", index_label='姓名')
print('*' * 30, 'create from xlsx')
df =
|
pd.read_excel("exams.xlsx", header=0, index_col=0, engine='openpyxl')
|
pandas.read_excel
|
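# Illustrative sketch of the index handling in the demo above, without the
# Excel round trip (English column names substituted here for readability).
import pandas as pd
data = pd.DataFrame([[99, 80], [67, 98]], index=["a", "b"],
                    columns=["chinese", "math"])
data.index.names = ["name"]
flat = data.reset_index()  # the named index becomes an ordinary "name" column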
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
_testing as tm,
)
def test_split(any_string_dtype):
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.split("_")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"], dtype=any_string_dtype)
result = values.str.split("__")
tm.assert_series_equal(result, exp)
result = values.str.split("__", expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype)
result = values.str.split("[,_]")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
def test_split_object_mixed():
mixed = Series(["a_b_c", np.nan, "d_e_f", True, datetime.today(), None, 1, 2.0])
result = mixed.str.split("_")
exp = Series(
[
["a", "b", "c"],
np.nan,
["d", "e", "f"],
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
]
)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split("_", expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
@pytest.mark.parametrize("method", ["split", "rsplit"])
def test_split_n(any_string_dtype, method):
s = Series(["a b", pd.NA, "b c"], dtype=any_string_dtype)
expected = Series([["a", "b"], pd.NA, ["b", "c"]])
result = getattr(s.str, method)(" ", n=None)
tm.assert_series_equal(result, expected)
result = getattr(s.str, method)(" ", n=0)
tm.assert_series_equal(result, expected)
def test_rsplit(any_string_dtype):
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.rsplit("_")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"], dtype=any_string_dtype)
result = values.str.rsplit("__")
tm.assert_series_equal(result, exp)
result = values.str.rsplit("__", expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype)
result = values.str.rsplit("[,_]")
exp = Series([["a,b_c"], ["c_d,e"], np.nan, ["f,g,h"]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.rsplit("_", n=1)
exp = Series([["a_b", "c"], ["c_d", "e"], np.nan, ["f_g", "h"]])
tm.assert_series_equal(result, exp)
def test_rsplit_object_mixed():
# mixed
mixed = Series(["a_b_c", np.nan, "d_e_f", True, datetime.today(), None, 1, 2.0])
result = mixed.str.rsplit("_")
exp = Series(
[
["a", "b", "c"],
np.nan,
["d", "e", "f"],
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
]
)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit("_", expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
def test_split_blank_string(any_string_dtype):
# expand blank split GH 20067
values = Series([""], name="test", dtype=any_string_dtype)
result = values.str.split(expand=True)
exp = DataFrame([[]], dtype=any_string_dtype) # NOTE: this is NOT an empty df
tm.assert_frame_equal(result, exp)
values = Series(["a b c", "a b", "", " "], name="test", dtype=any_string_dtype)
result = values.str.split(expand=True)
exp = DataFrame(
[
["a", "b", "c"],
["a", "b", np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
def test_split_noargs(any_string_dtype):
# #1859
s = Series(["<NAME>", "Travis Oliphant"], dtype=any_string_dtype)
result = s.str.split()
expected = ["Travis", "Oliphant"]
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
@pytest.mark.parametrize(
"data, pat",
[
(["bd asdf jfg", "kjasdflqw asdfnfk"], None),
(["bd asdf jfg", "kjasdflqw asdfnfk"], "asdf"),
(["bd_asdf_jfg", "kjasdflqw_asdfnfk"], "_"),
],
)
def test_split_maxsplit(data, pat, any_string_dtype):
# re.split 0, str.split -1
s = Series(data, dtype=any_string_dtype)
result = s.str.split(pat=pat, n=-1)
xp = s.str.split(pat=pat)
tm.assert_series_equal(result, xp)
result = s.str.split(pat=pat, n=0)
tm.assert_series_equal(result, xp)
@pytest.mark.parametrize(
"data, pat, expected",
[
(
["split once", "split once too!"],
None,
Series({0: ["split", "once"], 1: ["split", "once too!"]}),
),
(
["split_once", "split_once_too!"],
"_",
Series({0: ["split", "once"], 1: ["split", "once_too!"]}),
),
],
)
def test_split_no_pat_with_nonzero_n(data, pat, expected, any_string_dtype):
s = Series(data, dtype=any_string_dtype)
result = s.str.split(pat=pat, n=1)
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(any_string_dtype):
s = Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)
result = s.str.split("_", expand=True)
exp = DataFrame({0: Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)})
tm.assert_frame_equal(result, exp)
s = Series(["some_equal_splits", "with_no_nans"], dtype=any_string_dtype)
result = s.str.split("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
s = Series(
["some_unequal_splits", "one_of_these_things_is_not"], dtype=any_string_dtype
)
result = s.str.split("_", expand=True)
exp = DataFrame(
{
0: ["some", "one"],
1: ["unequal", "of"],
2: ["splits", "these"],
3: [np.nan, "things"],
4: [np.nan, "is"],
5: [np.nan, "not"],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
s = Series(
["some_splits", "with_index"], index=["preserve", "me"], dtype=any_string_dtype
)
result = s.str.split("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["splits", "index"]},
index=["preserve", "me"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
with pytest.raises(ValueError, match="expand must be"):
s.str.split("_", expand="not_a_boolean")
def test_split_to_multiindex_expand():
# https://github.com/pandas-dev/pandas/issues/23677
idx = Index(["nosplit", "alsonosplit", np.nan])
result = idx.str.split("_", expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(["some_equal_splits", "with_no_nans", np.nan, None])
result = idx.str.split("_", expand=True)
exp = MultiIndex.from_tuples(
[
("some", "equal", "splits"),
("with", "no", "nans"),
[np.nan, np.nan, np.nan],
[None, None, None],
]
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(["some_unequal_splits", "one_of_these_things_is_not", np.nan, None])
result = idx.str.split("_", expand=True)
exp = MultiIndex.from_tuples(
[
("some", "unequal", "splits", np.nan, np.nan, np.nan),
("one", "of", "these", "things", "is", "not"),
(np.nan, np.nan, np.nan, np.nan, np.nan, np.nan),
(None, None, None, None, None, None),
]
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with pytest.raises(ValueError, match="expand must be"):
idx.str.split("_", expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(any_string_dtype):
s = Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)
result = s.str.rsplit("_", expand=True)
exp = DataFrame({0: Series(["nosplit", "alsonosplit"])}, dtype=any_string_dtype)
tm.assert_frame_equal(result, exp)
s = Series(["some_equal_splits", "with_no_nans"], dtype=any_string_dtype)
result = s.str.rsplit("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
result = s.str.rsplit("_", expand=True, n=2)
exp = DataFrame(
{0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
result = s.str.rsplit("_", expand=True, n=1)
exp = DataFrame(
{0: ["some_equal", "with_no"], 1: ["splits", "nans"]}, dtype=any_string_dtype
)
tm.assert_frame_equal(result, exp)
s = Series(
["some_splits", "with_index"], index=["preserve", "me"], dtype=any_string_dtype
)
result = s.str.rsplit("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["splits", "index"]},
index=["preserve", "me"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand():
idx = Index(["nosplit", "alsonosplit"])
result = idx.str.rsplit("_", expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(["some_equal_splits", "with_no_nans"])
result = idx.str.rsplit("_", expand=True)
exp = MultiIndex.from_tuples([("some", "equal", "splits"), ("with", "no", "nans")])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(["some_equal_splits", "with_no_nans"])
result = idx.str.rsplit("_", expand=True, n=1)
exp = MultiIndex.from_tuples([("some_equal", "splits"), ("with_no", "nans")])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_nan_expand(any_string_dtype):
# gh-18450
s = Series(["foo,bar,baz", np.nan], dtype=any_string_dtype)
result = s.str.split(",", expand=True)
exp = DataFrame(
[["foo", "bar", "baz"], [np.nan, np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, exp)
# check that these are actually np.nan/pd.NA and not None
# TODO see GH 18463
# tm.assert_frame_equal does not differentiate
if any_string_dtype == "object":
assert all(np.isnan(x) for x in result.iloc[1])
else:
assert all(x is pd.NA for x in result.iloc[1])
def test_split_with_name(any_string_dtype):
# GH 12617
# should preserve name
s = Series(["a,b", "c,d"], name="xxx", dtype=any_string_dtype)
res = s.str.split(",")
exp = Series([["a", "b"], ["c", "d"]], name="xxx")
tm.assert_series_equal(res, exp)
res = s.str.split(",", expand=True)
exp = DataFrame([["a", "b"], ["c", "d"]], dtype=any_string_dtype)
tm.assert_frame_equal(res, exp)
idx = Index(["a,b", "c,d"], name="xxx")
res = idx.str.split(",")
exp = Index([["a", "b"], ["c", "d"]], name="xxx")
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(",", expand=True)
exp = MultiIndex.from_tuples([("a", "b"), ("c", "d")])
assert res.nlevels == 2
tm.assert_index_equal(res, exp)
def test_partition_series(any_string_dtype):
# https://github.com/pandas-dev/pandas/issues/23558
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype)
result = s.str.partition("_", expand=False)
expected = Series(
[("a", "_", "b_c"), ("c", "_", "d_e"), np.nan, ("f", "_", "g_h"), None]
)
tm.assert_series_equal(result, expected)
result = s.str.rpartition("_", expand=False)
expected = Series(
[("a_b", "_", "c"), ("c_d", "_", "e"), np.nan, ("f_g", "_", "h"), None]
)
tm.assert_series_equal(result, expected)
# more than one char
s = Series(["a__b__c", "c__d__e", np.nan, "f__g__h", None])
result = s.str.partition("__", expand=False)
expected = Series(
[
("a", "__", "b__c"),
("c", "__", "d__e"),
np.nan,
("f", "__", "g__h"),
None,
],
)
tm.assert_series_equal(result, expected)
result = s.str.rpartition("__", expand=False)
expected = Series(
[
("a__b", "__", "c"),
("c__d", "__", "e"),
np.nan,
("f__g", "__", "h"),
None,
],
)
tm.assert_series_equal(result, expected)
# None
s = Series(["a b c", "c d e", np.nan, "f g h", None], dtype=any_string_dtype)
result = s.str.partition(expand=False)
expected = Series(
[("a", " ", "b c"), ("c", " ", "d e"), np.nan, ("f", " ", "g h"), None]
)
tm.assert_series_equal(result, expected)
result = s.str.rpartition(expand=False)
expected = Series(
[("a b", " ", "c"), ("c d", " ", "e"), np.nan, ("f g", " ", "h"), None]
)
tm.assert_series_equal(result, expected)
# Not split
s = Series(["abc", "cde", np.nan, "fgh", None], dtype=any_string_dtype)
result = s.str.partition("_", expand=False)
expected = Series([("abc", "", ""), ("cde", "", ""), np.nan, ("fgh", "", ""), None])
tm.assert_series_equal(result, expected)
result = s.str.rpartition("_", expand=False)
expected = Series([("", "", "abc"), ("", "", "cde"), np.nan, ("", "", "fgh"), None])
tm.assert_series_equal(result, expected)
# unicode
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = s.str.partition("_", expand=False)
expected = Series([("a", "_", "b_c"), ("c", "_", "d_e"), np.nan, ("f", "_", "g_h")])
tm.assert_series_equal(result, expected)
result = s.str.rpartition("_", expand=False)
expected = Series([("a_b", "_", "c"), ("c_d", "_", "e"), np.nan, ("f_g", "_", "h")])
tm.assert_series_equal(result, expected)
# compare to standard lib
s = Series(["A_B_C", "B_C_D", "E_F_G", "EFGHEF"], dtype=any_string_dtype)
result = s.str.partition("_", expand=False).tolist()
assert result == [v.partition("_") for v in s]
result = s.str.rpartition("_", expand=False).tolist()
assert result == [v.rpartition("_") for v in s]
def test_partition_index():
# https://github.com/pandas-dev/pandas/issues/23558
values = Index(["a_b_c", "c_d_e", "f_g_h", np.nan, None])
result = values.str.partition("_", expand=False)
exp = Index(
np.array(
[("a", "_", "b_c"), ("c", "_", "d_e"), ("f", "_", "g_h"), np.nan, None],
dtype=object,
)
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.rpartition("_", expand=False)
exp = Index(
np.array(
[("a_b", "_", "c"), ("c_d", "_", "e"), ("f_g", "_", "h"), np.nan, None],
dtype=object,
)
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.partition("_")
exp = Index(
[
("a", "_", "b_c"),
("c", "_", "d_e"),
("f", "_", "g_h"),
(np.nan, np.nan, np.nan),
(None, None, None),
]
)
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
result = values.str.rpartition("_")
exp = Index(
[
("a_b", "_", "c"),
("c_d", "_", "e"),
("f_g", "_", "h"),
(np.nan, np.nan, np.nan),
(None, None, None),
]
)
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
def test_partition_to_dataframe(any_string_dtype):
# https://github.com/pandas-dev/pandas/issues/23558
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype)
result = s.str.partition("_")
expected = DataFrame(
{
0: ["a", "c", np.nan, "f", None],
1: ["_", "_", np.nan, "_", None],
2: ["b_c", "d_e", np.nan, "g_h", None],
},
dtype=any_string_dtype,
)
|
tm.assert_frame_equal(result, expected)
|
pandas._testing.assert_frame_equal
|
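# Illustrative sketch of the partition behaviour checked above: with
# expand=True (the default) str.partition returns a three-column frame of
# head, separator and tail; expand=False returns tuples instead.
import pandas as pd
s = pd.Series(["a_b_c", "f_g_h"])
s.str.partition("_")                # columns 0, 1, 2 -> "a", "_", "b_c", ...
s.str.partition("_", expand=False)  # ("a", "_", "b_c"), ("f", "_", "g_h")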
import os
import pickle
from typing import List
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from make_pie_chart import make_pie_chart, get_winners
from src.models.aic_bic import calc_aic_bic_individuals, get_best_model_individual
import pdb
def collect_run_percentages(dirname: str, run_type="ll") -> List:
exemplar = []
for filename in os.listdir(dirname):
if filename.endswith(".p"):
models = pickle.load(open(os.path.join(dirname, filename), "rb"))
if run_type == "ll":
percentages = make_pie_chart(get_winners(models, p_vals=False, include_null=True, len_included=True), include_null=True, filename=None)
exemplar.append(percentages["exemplar"])
else:
aic, bic = calc_aic_bic_individuals(models)
aic_individual = get_best_model_individual(aic)
bic_individual = get_best_model_individual(bic)
if run_type == "aic":
percentages = make_pie_chart(aic_individual, include_null=True, len_included=False, filename=None)
if run_type == "bic":
percentages = make_pie_chart(bic_individual, include_null=True, len_included=False, filename=None)
exemplar.append(percentages["1NN"])
return exemplar
def aggregate_run_percentages(dirname: str, run_type: str = "ll") -> None:
percentages = {}
for subdir, dirs, files in os.walk(dirname):
if subdir == dirname:
continue
print(subdir)
name_ind = subdir.rfind("/")
percent = collect_run_percentages(subdir, run_type=run_type)
percentages[subdir[name_ind + 1:]] = percent
percentage_df =
|
pd.DataFrame.from_dict(percentages, orient="index")
|
pandas.DataFrame.from_dict
|
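# Illustrative sketch of the aggregation step above: from_dict with
# orient="index" makes one row per run directory and pads shorter percentage
# lists with NaN (keys and values here are made up).
import pandas as pd
percentages = {"run_a": [0.1, 0.2, 0.3], "run_b": [0.4, 0.5]}
percentage_df = pd.DataFrame.from_dict(percentages, orient="index")
# run_a -> 0.1  0.2  0.3   run_b -> 0.4  0.5  NaN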
import tensorflow as tf
import os
import time
import math
import pandas as pd
import numpy as np
from ast import literal_eval
from sklearn.utils import shuffle
from sklearn.metrics import *
from tensorflow.keras import Sequential
from tensorflow.keras.layers import *
from tensorflow.keras import backend as K
import sqlite3
iteration = 889100
conn = sqlite3.connect("cmb_export_iteration_"+str(iteration)+".db")
#############################################
def tolist(x):
return np.array(literal_eval(x.replace(" ", ",")))
def tolog(x):
return math.log(x)
def exploding_rows(cmb, chunksize=1000, iteration = 889100):
"""
Split cmb into chunks of chunksize rows (default 1,000), explode each chunk, and reassemble the result via SQLite.
"""
exploded_dfs = [cmb[int((i-1)*chunksize):int(i*chunksize)].explode(cmb.columns[0]) for i in range(1, round(cmb.shape[0]/chunksize))]
patch = [cmb[int((i-1)*chunksize):int(i*chunksize)].iloc[:, 1:].explode(cmb.iloc[:, 1:].columns[0])[cmb.iloc[:, 1:].columns[0]] for i in range(1, round(cmb.shape[0]/chunksize))]
exploded_dfs_patched = []
for i in range(len(patch)):
exploded_dfs[i]['k'] = patch[i]
exploded_dfs_patched.append(exploded_dfs[i])
exploded_dfs_patched[0].to_sql("iteration_"+str(iteration), conn, if_exists="append")
for i in range(1, len(exploded_dfs_patched)):
exploded_dfs_patched[i].to_sql("iteration_"+str(iteration), conn, if_exists="append")
faster = pd.read_sql_query("select * from "+str("iteration_"+str(iteration))+";", conn, chunksize=1000)
concat_df = pd.concat(faster, ignore_index=True)
return concat_df
def generation(iteration = 889100):
"""
Returns raw, non-arrayed, exploded data
"""
faster = pd.read_csv("cmb_export_"+str(iteration)+".csv", iterator=True, chunksize=1000, usecols = ['pk_true', 'k', 'Omega_c', 'h', 'Omega_b', 'sigma8', 'n_s', 't'])
cmb =
|
pd.concat(faster, ignore_index=True)
|
pandas.concat
|
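# Illustrative sketch of the chunked-read pattern used in generation() above:
# read_csv with chunksize yields an iterator of frames that pd.concat stitches
# back together (io.StringIO stands in for the real CSV export).
import io
import pandas as pd
csv_text = "k,t\n1,0.1\n2,0.2\n3,0.3\n"
chunks = pd.read_csv(io.StringIO(csv_text), chunksize=2)
cmb = pd.concat(chunks, ignore_index=True)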
import os
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
# from sklearn.preprocessing import StandardScaler
from utils.tools import StandardScaler
from utils.timefeatures import time_features
import warnings
warnings.filterwarnings('ignore')
class Dataset_ETT_ms(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='S', data_path='ETTh1.csv',
target='OT', scale=True, inverse=False, timeenc=0, freq='h', cols=None,
train_ratio=0.8, dev_ratio=0.1, test_ratio=0.1, debug=False):
# size [seq_len, label_len, pred_len]
# info
#import ipdb; ipdb.set_trace()
self.debug = debug
if size == None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0] # 1152
self.label_len = size[1] # 1152
self.pred_len = size[2] # 576, e.g., 2 slices to predict 1 slice
# init
assert flag in ['train', 'test', 'val']
type_map = {'train':0, 'val':1, 'test':2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.root_path = root_path # './data/ETT/'
self.data_path = data_path # 'archxixia_ms.csv'
self.train_ratio = train_ratio
self.dev_ratio = dev_ratio
self.test_ratio = test_ratio
self.__read_data__()
#import ipdb; ipdb.set_trace()
def __read_data__(self):
#import ipdb; ipdb.set_trace()
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path, # (124416, 3), rows = total number of "ms points", 3 columns: "date, slice.num, value"
self.data_path))
# TODO need to change this!
total_line_num = df_raw.shape[0]
point1 = int(total_line_num * self.train_ratio)
point2 = int(total_line_num * (self.train_ratio + self.test_ratio))
point3 = total_line_num
#border1s = [0, 12*30*24 - self.seq_len, 12*30*24+4*30*24 - self.seq_len] # [0, 8304, 11184]
#border2s = [12*30*24, 12*30*24+4*30*24, 12*30*24+8*30*24] # [8640, 11520, 14400]
border1s = [0, point1 - self.seq_len, point2 - self.seq_len] # [0, 98380, 110822]
border2s = [point1, point2, point3] # [99532, 111974, 124416]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features=='M' or self.features=='MS' or self.features=='ms':
cols_data = df_raw.columns[2:] # Index(['OT'], dtype='object')
df_data = df_raw[cols_data] # (124416, 1)
elif self.features=='S':
df_data = df_raw[[self.target]]
if self.scale:
train_data = df_data[border1s[0]:border2s[0]] # df_data[0:99532],
self.scaler.fit(train_data.values) # [99532, 1], train data
data = self.scaler.transform(df_data.values) # TODO changed! (x - mean.of.train.data)/std.of.train.data
else:
data = df_data.values
df_stamp = df_raw[['date']][border1:border2] # train=(99532, 1); dev=(13594, 1); test=(13594, 1). Date information, precise down to yyyy-mm-dd hh:mm:ss.575000
df_stamp['date'] = pd.to_datetime(df_stamp.date)
data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq) # train=[99532, 7]; dev=(13594, 7); test=(13594, 7); each time point is represented by 7 feature values (down to ms)
self.data_x = data[border1:border2] # data: [124416, 1], border1=0, border2=99532, so self.data_x=[99532, 1] for train. | (13594,1) for dev | (13594, 1) for test
if self.inverse: # False
self.data_y = df_data.values[border1:border2]
else:
self.data_y = data[border1:border2] # train=(99532, 1); dev=(13594, 1); test=(13594,1)
self.data_stamp = data_stamp # train=(99532, 7); dev=(13594,7); test=(13594, 7)
#import ipdb; ipdb.set_trace()
def __getitem__(self, index):
#import ipdb; ipdb.set_trace()
s_begin = index
s_end = s_begin + self.seq_len # self.seq_len=336=24*14
r_begin = s_end - self.label_len # self.label_len=336=24*14
r_end = r_begin + self.label_len + self.pred_len
seq_x = self.data_x[s_begin:s_end]
if self.inverse:
seq_y = np.concatenate([self.data_x[r_begin:r_begin+self.label_len], self.data_y[r_begin+self.label_len:r_end]], 0)
else:
seq_y = self.data_y[r_begin:r_end]
seq_x_mark = self.data_stamp[s_begin:s_end]
seq_y_mark = self.data_stamp[r_begin:r_end]
#import ipdb; ipdb.set_trace()
return seq_x, seq_y, seq_x_mark, seq_y_mark # seq_x:[336,7], seq_y:[336+168,7] and seq_x=seq_y[:336]; seq_x_mark:[336,4], seq_y_mark:[504,4]
def __len__(self):
return len(self.data_x) - self.seq_len- self.pred_len + 1 # 8640 - 336 - 168 + 1 = 8137
def inverse_transform(self, data):
return self.scaler.inverse_transform(data)
def inverse_transform_tar(self, data):
return self.scaler.inverse_transform_tar(data)
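# Usage sketch (assumed paths and sizes, following the inline comments above): build the
# millisecond-level ETT dataset and wrap it in a torch DataLoader for training.
if __name__ == "__main__":
    train_set = Dataset_ETT_ms(root_path='./data/ETT/', flag='train',
                               size=[1152, 1152, 576], features='S',
                               data_path='archxixia_ms.csv', target='OT', freq='h')
    train_loader = DataLoader(train_set, batch_size=32, shuffle=True, drop_last=True)
    seq_x, seq_y, seq_x_mark, seq_y_mark = next(iter(train_loader))
    print(seq_x.shape, seq_y.shape, seq_x_mark.shape, seq_y_mark.shape)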
class Dataset_ETT_hour2(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='S', data_path='ETTh1.csv',
target='OT', scale=True, inverse=False, timeenc=0, freq='h', cols=None,
train_ratio=0.8, dev_ratio=0.1, test_ratio=0.1, debug=False):
# size [seq_len, label_len, pred_len]
# info
self.debug = debug
if self.debug:
import ipdb; ipdb.set_trace()
if size == None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0] # 1152
self.label_len = size[1] # 1152
self.pred_len = size[2] # 576, e.g., 2 slices to predict 1 slice
# init
assert flag in ['train', 'test', 'val']
type_map = {'train':0, 'val':1, 'test':2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.root_path = root_path # './data/ETT/'
self.data_path = data_path # 'archxixia_ms.csv'
self.train_ratio = train_ratio
self.dev_ratio = dev_ratio
self.test_ratio = test_ratio
self.__read_data__()
if self.debug:
import ipdb; ipdb.set_trace()
def __read_data__(self):
if self.debug:
import ipdb; ipdb.set_trace()
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path, # (124416, 3), rows = total number of "ms points", 3 columns: "date, slice.num, value"
self.data_path))
# TODO need to change this!
total_line_num = df_raw.shape[0]
point1 = int(total_line_num * self.train_ratio)
point2 = int(total_line_num * (self.train_ratio + self.test_ratio))
point3 = total_line_num
#border1s = [0, 12*30*24 - self.seq_len, 12*30*24+4*30*24 - self.seq_len] # [0, 8304, 11184]
#border2s = [12*30*24, 12*30*24+4*30*24, 12*30*24+8*30*24] # [8640, 11520, 14400]
border1s = [0, point1 - self.seq_len, point2 - self.seq_len] # [0, 98380, 110822]
border2s = [point1, point2, point3] # [99532, 111974, 124416]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features=='M' or self.features=='MS' or self.features=='ms':
cols_data = df_raw.columns[1:] # Index(['OT'], dtype='object')
df_data = df_raw[cols_data] # (124416, 1)
elif self.features=='S':
df_data = df_raw[[self.target]]
if self.scale:
train_data = df_data[border1s[0]:border2s[0]] # df_data[0:99532],
self.scaler.fit(train_data.values) # [99532, 1], train data
data = self.scaler.transform(df_data.values) # TODO changed! (x - mean.of.train.data)/std.of.train.data
else:
data = df_data.values
df_stamp = df_raw[['date']][border1:border2] # train=(99532, 1); dev=(13594, 1); test=(13594, 1). Date information, precise down to yyyy-mm-dd hh:mm:ss.575000
df_stamp['date'] =
|
pd.to_datetime(df_stamp.date)
|
pandas.to_datetime
|
"""Create violin plots of CpG methylation in different regions."""
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import os
def extract_sample_info(sample_str):
"""Extract kit, sample, and technical replicate from sample_str.
Inputs -
sample_str - string from sample name
Returns -
tuple (kit, biological sample name, technical replicate)
"""
s = sample_str.replace('Ftube', '')
# The biological sample is now the first character in the name
bio = s[0]
# Extract what kit is in sample
kit = ''
if 'kapabc' in s.lower():
kit = 'Kapa'
elif 'pbat' in s.lower():
kit = 'PBAT'
elif 'neb' in s.lower():
kit = 'NEB'
elif 'swift' in s.lower():
kit = 'Swift'
# Determine if low or high input
if '10ng' in s:
kit = 'Low ' + kit
# Determine technical replicate
rep = '1'
if 'rep2' in s.lower():
rep = '2'
if (bio not in ['A', 'B']) or (kit == ''):
print('[extract_sample_info] ERROR: Incorrect entry')
return ('', '', '')
return (kit, bio, rep)
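# Quick check of the parser on two assumed sample names (the 'Ftube...' naming convention
# is implied by the replace('Ftube', '') call above); expected outputs shown in comments.
if __name__ == "__main__":
    print(extract_sample_info('FtubeAkapabcrep2'))  # ('Kapa', 'A', '2')
    print(extract_sample_info('FtubeBneb10ng'))     # ('Low NEB', 'B', '1')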
def import_files(tag, samp):
"""Import CpG islands, shores, shelves, and open seas BED files into a
DataFrame
Inputs -
tag - sample name to load (include any paths needed)
samp - only sample name (no paths)
Returns -
DataFrame with beta values, sea_group, sample, and replicate columns
"""
names = ['chr','start','end','beta','covg']
cols = [3]
isld = pd.read_csv(
tag+'.island.bed.gz', sep='\t', header=None, usecols=cols
)
isld.rename(columns={3: 'beta'}, inplace=True)
isld['sea_group'] = 'islands'
shor = pd.read_csv(
tag+'.shores.bed.gz', sep='\t', header=None, usecols=cols
)
shor.rename(columns={3: 'beta'}, inplace=True)
shor['sea_group'] = 'shores'
shlv = pd.read_csv(
tag+'.shelves.bed.gz', sep='\t', header=None, usecols=cols
)
shlv.rename(columns={3: 'beta'}, inplace=True)
shlv['sea_group'] = 'shelves'
seas = pd.read_csv(
tag+'.open_seas.bed.gz', sep='\t', header=None, usecols=cols
)
seas.rename(columns={3: 'beta'}, inplace=True)
seas['sea_group'] = 'open_seas'
comb =
|
pd.concat([isld, shor, shlv, seas])
|
pandas.concat
|
import os
import sys
import numpy as np
import pandas as pd
from scipy import sparse
from tensorflow import keras
class ContentVaeDataGenerator(keras.utils.Sequence):
'''
Generate the training and validation data
for the content part of vae model.
'''
def __init__(self,
data_root,
batch_size,
batch_num=None,
prev_layers=[],
noise_type=None,
joint=False,
shuffle=True):
feature_path = os.path.join(data_root, "item_features.npz")
self.features = sparse.load_npz(feature_path)
self.num_items = self.features.shape[0]
self.batch_size = batch_size
self.batch_num = batch_num
if prev_layers != []:
self.apply_prev_layers(self.features, prev_layers)
### Whether to add noise and, if so, which type.
self.noise_type = noise_type
### Shuffle the items if necessary.
self.indexes = np.arange(self.num_items)
self.shuffle = shuffle
if self.shuffle:
self.on_epoch_end()
### Train jointly with the collaborative part
self.joint = joint
def __len__(self):
'''
The total number of batches.
'''
if self.batch_num is None:
batch_num = self.num_items//self.batch_size
if self.num_items%self.batch_size != 0:
batch_num+=1
else:
batch_num = self.batch_num
return batch_num
def __getitem__(self, i):
'''
Return the batch indexed by i.
'''
batch_idxes = self.indexes[i*self.batch_size:(i+1)*self.batch_size]
batch_target = self.features[batch_idxes].toarray()
if self.noise_type is None:
batch_input = batch_target
else:
batch_input = self.add_noise(self.noise_type, batch_target)
if self.joint:
batch_input = [batch_input, self.z_b[batch_idxes]]
batch_target = batch_target
return batch_input, batch_target
def apply_prev_layers(self, features, prev_layers):
'''
Apply the previous pretrained layers on the feature
'''
batch_num = self.__len__()
ori_features = features.toarray()
for prev_layer in prev_layers:
new_dims = prev_layer.outputs[0].shape.as_list()[-1]
new_features = np.zeros((self.num_items, new_dims), dtype=np.float32)
for i in range(batch_num):
new_features[i*self.batch_size:(i+1)*self.batch_size] = prev_layer(
ori_features[i*self.batch_size:(i+1)*self.batch_size]
)
ori_features = new_features
self.features = sparse.csr_matrix(new_features)
def on_epoch_end(self):
'''
Shuffle the item index after each epoch.
'''
if self.shuffle:
np.random.shuffle(self.indexes)
def add_noise(self, noise_type, contents):
'''
Corrupt the inputs so the model is trained in SDAE (stacked denoising autoencoder) style.
'''
if 'Mask' in noise_type:
frac = float(noise_type.split('-')[1])
masked_contents = np.copy(contents)
for item in masked_contents:
zero_pos = np.random.choice(len(item), int(round(
frac*len(item))), replace=False)
item[zero_pos] = 0
return masked_contents
else:
raise NotImplementedError
def update_previous_bstep(self, z_b):
self.z_b = z_b
@property
def feature_dim(self):
return self.features.shape[-1]
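# Usage sketch (data_root is an assumed path containing the item_features.npz expected by
# __init__): draw one masked batch to sanity-check shapes before pre-training the content VAE.
if __name__ == "__main__":
    content_gen = ContentVaeDataGenerator(data_root="data/", batch_size=256,
                                          noise_type="Mask-0.3")
    batch_input, batch_target = content_gen[0]
    print(batch_input.shape, batch_target.shape, content_gen.feature_dim)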
class CollaborativeVAEDataGenerator(keras.utils.Sequence):
def __init__(self,
data_root,
phase,
batch_size,
batch_num=None,
ragged_x=False,
reuse=True,
joint=True,
shuffle=True):
'''
Generate the training and validation data
for the collaborative part of vbae model.
'''
assert phase in ["train", "val", "test"], "Phase must be [train, val, test]"
self.phase = phase
self.batch_size = batch_size
self.batch_num = batch_num
self.ragged_x = ragged_x
self.data_root = data_root
self._load_data(data_root, reuse=reuse, ragged_x=ragged_x)
self.shuffle = shuffle
if self.shuffle:
self.on_epoch_end()
### Train jointly with the content part
self.joint = joint
def _load_data(self, data_root, reuse, ragged_x):
### Load the dataset
meta_table = pd.read_csv(os.path.join(data_root, "meta.csv"))
self.num_items = meta_table["num_items"][0]
if self.phase == "train":
obs_path = os.path.join(data_root, "train.csv")
obs_records = pd.read_csv(obs_path)
obs_group = obs_records.groupby("uid")
unk_group = obs_group
else:
obs_path = os.path.join(data_root, "{}_obs.csv".format(self.phase))
unk_path = os.path.join(data_root, "{}_unk.csv".format(self.phase))
obs_records = pd.read_csv(obs_path)
unk_records = pd.read_csv(unk_path)
obs_group = obs_records.groupby("uid")
unk_group = unk_records.groupby("uid")
### IDs and corresponding indexes
self.user_ids = np.array(
|
pd.unique(obs_records["uid"])
|
pandas.unique
|
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserWarning
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import Categorical, DataFrame, Index, MultiIndex, Series, Timestamp, concat
import pandas._testing as tm
@pytest.mark.parametrize("dtype", [str, object])
@pytest.mark.parametrize("check_orig", [True, False])
def test_dtype_all_columns(all_parsers, dtype, check_orig):
# see gh-3795, gh-6607
parser = all_parsers
df = DataFrame(
np.random.rand(5, 2).round(4),
columns=list("AB"),
index=["1A", "1B", "1C", "1D", "1E"],
)
with tm.ensure_clean("__passing_str_as_dtype__.csv") as path:
df.to_csv(path)
result = parser.read_csv(path, dtype=dtype, index_col=0)
if check_orig:
expected = df.copy()
result = result.astype(float)
else:
expected = df.astype(str)
tm.assert_frame_equal(result, expected)
def test_dtype_all_columns_empty(all_parsers):
# see gh-12048
parser = all_parsers
result = parser.read_csv(StringIO("A,B"), dtype=str)
expected =
|
DataFrame({"A": [], "B": []}, index=[], dtype=str)
|
pandas.DataFrame
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270:
|
pd.Timestamp("2013-01-27 00:00:00")
|
pandas.Timestamp
|
# import all the required files i.e. numpy , pandas and math library
from graphlib.financialGraph import Data
import numpy as np
import pandas as pd
from pandas import DataFrame , Series
import math
# All the indicators are defined and arranged in Alphabetical order
# ------------------> A <------------------------
# [0] __ Average True Range (ATR)
# Moving Average of True Range(TR)
def atr(data: DataFrame, period: int = 14) -> Series:
TR = tr(data)
return pd.Series(
TR.rolling(center=False, window=period,
min_periods=1).mean(),
name=f'{period} ATR'
)
# [0] __ Adaptive Price Zone (APZ)
# TODO
def apz(data: DataFrame,period: int = 21,dev_factor: int = 2,
MA: Series = None,adjust: bool = True,) -> DataFrame:
if not isinstance(MA, pd.Series):
MA = dema(data, period)
price_range = pd.Series(
(data["high"] - data["low"]).ewm(span=period, adjust=adjust).mean()
)
volatility_value = pd.Series(
price_range.ewm(span=period, adjust=adjust).mean(), name="vol_val"
)
upper_band = pd.Series((volatility_value * dev_factor) + MA, name="UPPER")
lower_band = pd.Series(MA - (volatility_value * dev_factor), name="LOWER")
return pd.concat([upper_band, lower_band], axis=1)
# ------------------> B <------------------------
# [0] __ Bollinger Bands (BBANDS)
# TODO
def bbands(data: DataFrame,period: int = 20,MA: Series = None,
column: str = "close",std_multiplier: float = 2,) -> DataFrame:
std = data[column].rolling(window=period).std()
if not isinstance(MA, pd.core.series.Series):
middle_band = pd.Series(sma(data, period), name="BB_MIDDLE")
else:
middle_band = pd.Series(MA, name="BB_MIDDLE")
upper_bb = pd.Series(middle_band + (std_multiplier * std), name="BB_UPPER")
lower_bb = pd.Series(middle_band - (std_multiplier * std), name="BB_LOWER")
return pd.concat([upper_bb, middle_band, lower_bb], axis=1)
# [0] __ Bollinger Bands Width (BBWidth)
# TODO
def bbwidth(
data: DataFrame, period: int = 20, MA: Series = None, column: str = "close"
) -> Series:
BB = bbands(data, period, MA, column)
return pd.Series(
(BB["BB_UPPER"] - BB["BB_LOWER"]) / BB["BB_MIDDLE"],
name="{0} period BBWITH".format(period),
)
# ------------------> D <------------------------
# [0] __ Double Exponential Moving Average (DEMA)
# 2 * EWMA - ewm(EWMA)
def dema(data : DataFrame,period: int = 10,column: str ='close',adjust: bool = True) -> Series:
DEMA = (
2*ema(data,period) - ema(data,period).ewm(span=period , adjust=adjust).mean()
)
return pd.Series(
DEMA ,
name = f'{period}_DEMA'
)
# [0] __ Directional Movement Index (DMI)
# TODO
def dmi(data: DataFrame, column: str = "close", adjust: bool = True) -> Series:
def _get_time(close):
sd = close.rolling(5).std()
asd = sd.rolling(10).mean()
v = sd / asd
t = 14 / v.round()
t[t.isna()] = 0
t = t.map(lambda x: int(min(max(x, 5), 30)))
return t
def _dmi(index):
time = t.iloc[index]
if (index - time) < 0:
subset = data.iloc[0:index]
else:
subset = data.iloc[(index - time) : index]
return rsi(subset, period=time, adjust=adjust).values[-1]
dates = Series(data.index)
periods = Series(range(14, len(dates)), index=dates.index[14:].values)
t = _get_time(data[column])
return periods.map(lambda x: _dmi(x))
# ------------------> E <------------------------
# [0] __ Exponential Weighted Moving Average (EWMA) or Exponential Moving Average (EMA)
# Exponential average of the previous n days' prices
def ema(data : DataFrame,period: int = 10,column: str ='close',adjust: bool = True) -> Series:
return pd.Series(
data[column].ewm(span=period, adjust=adjust).mean(),
name = f'{period}_EMA'
)
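# Worked example (illustrative only): a 3-period EMA over a tiny synthetic close series,
# showing the expected input (a DataFrame with a 'close' column) and output (a named Series).
if __name__ == "__main__":
    _demo = DataFrame({"close": [10.0, 10.5, 10.2, 10.8, 11.0]})
    print(ema(_demo, period=3))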
# [0] __ Kaufman Efficiency indicator (KER) or (ER)
# change in price / volatility, where both the change and the volatility are absolute values
def er(data : DataFrame,period: int = 10,column: str ='close') -> Series:
change = data[column].diff(period).abs()
volatility = data[column].diff().abs().rolling(window=period,min_periods=1).sum()
return pd.Series(change / volatility,
name=f'{period}_ER'
)
# [0] __ TODO (EVSTC)
# TODO
def evstc(data: DataFrame,period_fast: int = 12,period_slow: int = 30,
k_period: int = 10,d_period: int = 3,adjust: bool = True) -> Series:
ema_slow = evwma(data, period_slow)
ema_fast = evwma(data, period_fast)
macd = ema_fast - ema_slow
STOK = pd.Series((
(macd - macd.rolling(window=k_period).min())
/ (macd.rolling(window=k_period).max() - macd.rolling(window=k_period).min())
) * 100)
STOD = STOK.rolling(window=d_period).mean()
STOD_DoubleSmooth = STOD.rolling(window=d_period).mean()
return pd.Series(STOD_DoubleSmooth, name="{0} period EVSTC".format(k_period))
# [0] __ Elastic Volume Weighted Moving Average (EVWMA)
# x is ((volume sum for n period) - volume ) divided by (volume sum for n period)
# y is volume * close / (volume sum for n period)
def evwma(data, period: int = 20) -> Series:
vol_sum = (data["volume"].rolling(window=period,min_periods=1).sum())
x = (vol_sum - data["volume"]) / vol_sum
y = (data["volume"] * data["close"]) / vol_sum
evwma = [0]
for x, y in zip(x.fillna(0).iteritems(), y.iteritems()):
if x[1] == 0 or y[1] == 0:
evwma.append(0)
else:
evwma.append(evwma[-1] * x[1] + y[1])
return pd.Series(
evwma[1:], index=data.index,
name=f'{period}_EVWMA'
)
# [0] __ Elastic Volume Weighted Moving average convergence divergence (EV_MACD)
# MACD calculation on basis of Elastic Volume Weighted Moving average (EVWMA)
def ev_macd(data: DataFrame,period_fast: int = 20,period_slow: int = 40,
signal: int = 9,adjust: bool = True,) -> DataFrame:
evwma_slow = evwma(data, period_slow)
evwma_fast = evwma(data, period_fast)
MACD = pd.Series(evwma_fast - evwma_slow, name="EV MACD")
MACD_signal = pd.Series(
MACD.ewm(ignore_na=False, span=signal, adjust=adjust).mean(), name="SIGNAL"
)
return pd.concat([MACD, MACD_signal], axis=1)
# ------------------> F <------------------------
# [0] __ Fisher Transform
# TODO
def fish(data: DataFrame, period: int = 10, adjust: bool = True) -> Series:
from numpy import log, seterr
seterr(divide="ignore")
med = (data["high"] + data["low"]) / 2
ndaylow = med.rolling(window=period).min()
ndayhigh = med.rolling(window=period).max()
raw = (2 * ((med - ndaylow) / (ndayhigh - ndaylow))) - 1
smooth = raw.ewm(span=5, adjust=adjust).mean()
_smooth = smooth.fillna(0)
return pd.Series(
(log((1 + _smooth) / (1 - _smooth))).ewm(span=3, adjust=adjust).mean(),
name="{0} period FISH.".format(period),
)
# [0] __ Fractal Adaptive Moving Average (FRAMA)
# TODO
def FRAMA(data: DataFrame, period: int = 16, batch: int=10) -> Series:
assert period % 2 == 0, "FRAMA period must be even"
c = data.close.copy()
window = batch * 2
hh = c.rolling(batch).max()
ll = c.rolling(batch).min()
n1 = (hh - ll) / batch
n2 = n1.shift(batch)
hh2 = c.rolling(window).max()
ll2 = c.rolling(window).min()
n3 = (hh2 - ll2) / window
# calculate fractal dimension
D = (np.log(n1 + n2) - np.log(n3)) / np.log(2)
alp = np.exp(-4.6 * (D - 1))
alp = np.clip(alp, .01, 1).values
filt = c.values
for i, x in enumerate(alp):
cl = c.values[i]
if i < window:
continue
filt[i] = cl * x + (1 - x) * filt[i - 1]
return pd.Series(filt, index=data.index,
name= f'{period} FRAMA'
)
# [0] __ Finite Volume Element (FVE)
# TODO
def fve(data: DataFrame, period: int = 22, factor: float = 0.3) -> Series:
hl2 = (data["high"] + data["low"]) / 2
tp_ = tp(data)
smav = data["volume"].rolling(window=period).mean()
mf = pd.Series((data["close"] - hl2 + tp_.diff()), name="mf")
_mf = pd.concat([data["close"], data["volume"], mf], axis=1)
def vol_shift(row):
if row["mf"] > factor * row["close"] / 100:
return row["volume"]
elif row["mf"] < -factor * row["close"] / 100:
return -row["volume"]
else:
return 0
_mf["vol_shift"] = _mf.apply(vol_shift, axis=1)
_sum = _mf["vol_shift"].rolling(window=period).sum()
return pd.Series((_sum / smav) / period * 100)
# ------------------> H <------------------------
# [0] __ Hull Moving Average (HMA)
# WMA of the "delta WMA", where delta WMA = 2 * WMA(half period) - WMA(full period)
def hma(data, period: int = 16) -> Series:
half_length = int(period / 2)
sqrt_length = int(math.sqrt(period))
wmaf = wma(data, period=half_length)
wmas = wma(data, period=period)
data["deltawma"] = 2 * wmaf - wmas
hma = wma(data, column="deltawma", period=sqrt_length)
return pd.Series(hma, name=f'{period}_HMA')
# ------------------> I <------------------------
# [0] __ Ichimoku Cloud
# TODO
def ichimoku(data: DataFrame,tenkan_period: int = 9,kijun_period: int = 26,
senkou_period: int = 52,chikou_period: int = 26,) -> DataFrame:
tenkan_sen = pd.Series(
(
data["high"].rolling(window=tenkan_period).max()
+ data["low"].rolling(window=tenkan_period).min()
)
/ 2,
name="TENKAN",
) ## conversion line
kijun_sen = pd.Series(
(
data["high"].rolling(window=kijun_period).max()
+ data["low"].rolling(window=kijun_period).min()
)
/ 2,
name="KIJUN",
) ## base line
senkou_span_a = pd.Series(
((tenkan_sen + kijun_sen) / 2), name="senkou_span_a"
) .shift(kijun_period) ## Leading span
senkou_span_b = pd.Series(
(
(
data["high"].rolling(window=senkou_period).max()
+ data["low"].rolling(window=senkou_period).min()
)
/ 2
),
name="SENKOU",
).shift(kijun_period)
chikou_span = pd.Series(
data["close"].shift(-chikou_period),
name="CHIKOU",
)
return pd.concat(
[tenkan_sen, kijun_sen, senkou_span_a, senkou_span_b, chikou_span], axis=1
)
# [0] __ Inverse Fisher Transform (IFTRSI)
# TODO
def ift_rsi(data: DataFrame,column: str = "close",rsi_period: int = 5,
wma_period: int = 9,) -> Series:
v1 = pd.Series(0.1 * (rsi(data, rsi_period) - 50), name="v1")
d = (wma_period * (wma_period + 1)) / 2
weights = np.arange(1, wma_period + 1)
def linear(w):
def _compute(x):
return (w * x).sum() / d
return _compute
_wma = v1.rolling(wma_period, min_periods=wma_period)
v2 = _wma.apply(linear(weights), raw=True)
return pd.Series(
((v2 ** 2 - 1) / (v2 ** 2 + 1)),
name="IFT_RSI"
)
# ------------------> K <------------------------
# [0] __ Kaufman's Adaptive Moving Average (KAMA)
# first KAMA is SMA
# Current KAMA = Previous KAMA + smoothing_constant * (Price - Previous KAMA)
def kama(data,er_: int = 10,ema_fast: int = 2,
ema_slow: int = 30,period: int = 20,
column: str ='close') -> Series:
er_ = er(data)
fast_alpha = 2 / (ema_fast + 1)
slow_alpha = 2 / (ema_slow + 1)
sc = pd.Series(
(er_ * (fast_alpha - slow_alpha) + slow_alpha) ** 2,
name="smoothing_constant",
)
sma = pd.Series(
data[column].rolling(period).mean(), name="SMA"
)
kama = []
for s, ma, price in zip(
sc.iteritems(), sma.shift().iteritems(), data[column].iteritems()
):
try:
kama.append(kama[-1] + s[1] * (price[1] - kama[-1]))
except (IndexError, TypeError):
if pd.notnull(ma[1]):
kama.append(ma[1] + s[1] * (price[1] - ma[1]))
else:
kama.append(None)
sma["KAMA"] = pd.Series(
kama, index=sma.index, name=f'{period}_KAMA')
return sma['KAMA']
# [0] __ Keltner Channels (KC)
# TODO
def kc(ohlc: DataFrame,period: int = 20,atr_period: int = 10,
MA: Series = None,kc_mult: float = 2,) -> DataFrame:
if not isinstance(MA, pd.core.series.Series):
middle = pd.Series(ema(ohlc, period), name="KC_MIDDLE")
else:
middle = pd.Series(MA, name="KC_MIDDLE")
up = pd.Series(middle + (kc_mult * atr(ohlc, atr_period)), name="KC_UPPER")
down = pd.Series(
middle - (kc_mult * atr(ohlc, atr_period)), name="KC_LOWER"
)
return pd.concat([up, down], axis=1)
# ------------------> M <------------------------
# [0] __ Moving average convergence divergence (MACD)
# MACD is the difference between the fast EMA and the slow EMA
# Here the fast period is 12 and the slow period is 26
# The MACD signal line is an EWM of the MACD
def macd(data,period_fast: int = 12,period_slow: int = 26,
signal: int = 9,column: str = "close",adjust: bool = True
) -> DataFrame:
EMA_fast = pd.Series(
data[column].ewm(ignore_na=False, span=period_fast, adjust=adjust).mean(),
name=f'{period_fast}_EMA_fast')
EMA_slow = pd.Series(
data[column].ewm(ignore_na=False, span=period_slow, adjust=adjust).mean(),
name=f'{period_slow}_EMA_slow')
MACD = pd.Series(EMA_fast - EMA_slow,name='MACD')
MACD_signal = pd.Series(
MACD.ewm(ignore_na=False, span=signal, adjust=adjust).mean(),name=f'{signal}_SIGNAL'
)
DIFF = pd.Series(
MACD - MACD_signal,
name="diff MACD_MSIGNAL"
)
return pd.concat(
[DIFF, MACD, MACD_signal ],
axis=1
)
# [0] __ Moving Standard Deviation (MSD)
# Standard deviation over a given period for the column passed as argument
def msd(data: DataFrame, period: int = 21, column: str = "close") -> Series:
return pd.Series(data[column].rolling(period).std(), name="MSD")
# Momentum Breakout Bands (MOBO)
# TODO
def mobo(data: DataFrame,period: int = 10,std_multiplier: float = 0.8,
column: str = "close",) -> DataFrame:
BB = bbands(data, period=10, std_multiplier=0.8, column=column)
return BB
# [0] __ Market momentum (MOM)
def mom(data: DataFrame, period: int = 10, column: str = "close") -> Series:
return pd.Series(data[column].diff(period),
name=f'{period}_MOM'
)
# [0] __ Moving Volume Weighted Average Price (MVWAP)
# SMA of (close * volume ) divided by SMA of volume
def mvwap(data: DataFrame, period:int = 9) -> Series:
data["cv"] =(data["close"] * data["volume"])
return pd.Series(
(sma(data,period = period,column = "cv")/sma(data,period=period,column="volume")),
name="MVWAP."
)
# ------------------> P <------------------------
# ------------|| Pivot ||------------------------
# [0] __ Pivot Camarilla
# TODO
def pivot_camarilla(data: DataFrame) -> DataFrame:
df_ = data.shift()
pivot = pd.Series(tp(df_), name="pivot")
# Camarilla supports subtract the scaled prior range from the prior close; resistances add it.
s1 = df_['close'] - (1.1 * (df_['high'] - df_['low']) / 12)
s2 = df_['close'] - (1.1 * (df_['high'] - df_['low']) / 6)
s3 = df_['close'] - (1.1 * (df_['high'] - df_['low']) / 4)
s4 = df_['close'] - (1.1 * (df_['high'] - df_['low']) / 2)
r1 = df_['close'] + (1.1 * (df_['high'] - df_['low']) / 12)
r2 = df_['close'] + (1.1 * (df_['high'] - df_['low']) / 6)
r3 = df_['close'] + (1.1 * (df_['high'] - df_['low']) / 4)
r4 = df_['close'] + (1.1 * (df_['high'] - df_['low']) / 2)
return pd.concat(
[
pivot,
pd.Series(s1, name="s1"),
pd.Series(s2, name="s2"),
pd.Series(s3, name="s3"),
pd.Series(s4, name="s4"),
pd.Series(r1, name="r1"),
pd.Series(r2, name="r2"),
pd.Series(r3, name="r3"),
pd.Series(r4, name="r4"),
],
axis=1,
)
# [0] __ Pivot Classic
# TODO
def pivot_classic(data: DataFrame) -> DataFrame:
df_ = data.shift()
pivot = pd.Series(tp(df_), name="pivot")
s1 = (pivot * 2) - df_["high"]
s2 = pivot - (df_["high"] - df_["low"])
s3 = pivot - 2*(df_["high"] - df_["low"])
s4 = pivot - 3*(df_["high"] - df_["low"])
r1 = (pivot * 2) - df_["low"]
r2 = pivot + (df_["high"] - df_["low"])
r3 = pivot + 2*(df_["high"] - df_["low"])
r4 = pivot + 3*(df_["high"] - df_["low"])
return pd.concat(
[
pivot,
pd.Series(s1, name="s1"),
pd.Series(s2, name="s2"),
pd.Series(s3, name="s3"),
pd.Series(s4, name="s4"),
pd.Series(r1, name="r1"),
pd.Series(r2, name="r2"),
pd.Series(r3, name="r3"),
pd.Series(r4, name="r4"),
],
axis=1,
)
# [0] __ Pivot Demark
# TODO
def pivot_demark(data: DataFrame) -> DataFrame:
df_ = data.shift()
pivot,s1,r1=[],[],[]
for i in range(len(df_)):
if df_['open'][i]==df_['close'][i]:
x=df_['high'][i]+df_['low'][i]+2*df_['close'][i]
elif df_['close'][i]>df_['open'][i]:
x=2*df_['high'][i]+df_['low'][i]+df_['close'][i]
else:
x=df_['high'][i]+2*df_['low'][i]+df_['close'][i]
pivot.append(x/4)
s1.append(x/2 - df_["high"][i])
r1.append(x/2 - df_["low"][i])
data_ = pd.DataFrame(pivot,columns=['pivot'])
data_['s1']=s1
data_['r1']=r1
return data_
# [0] __ Pivot Fibonacci
# TODO
def pivot_fibonacci(data: DataFrame) -> DataFrame:
df_ = data.shift()
pivot = pd.Series(tp(df_), name="pivot")
s1 = pivot - ((df_["high"] - df_["low"])*0.382)
s2 = pivot - ((df_["high"] - df_["low"])*0.618)
s3 = pivot - (df_["high"] - df_["low"])
s4 = pivot + ((df_["high"] - df_["low"])*1.382)
r1 = pivot + ((df_["high"] - df_["low"])*0.382)
r2 = pivot + ((df_["high"] - df_["low"])*0.618)
r3 =pivot + (df_["high"] - df_["low"])
r4 = pivot + (df_["high"] - df_["low"])*1.382
return pd.concat(
[
pivot,
pd.Series(s1, name="s1"),
pd.Series(s2, name="s2"),
pd.Series(s3, name="s3"),
pd.Series(s4, name="s4"),
pd.Series(r1, name="r1"),
pd.Series(r2, name="r2"),
pd.Series(r3, name="r3"),
pd.Series(r4, name="r4"),
],
axis=1,
)
# [0] __ Pivot Traditional
# TODO
def pivot_traditional(data: DataFrame) -> DataFrame:
df_ = data.shift()
pivot = pd.Series(tp(df_), name="pivot")
s1 = (pivot * 2) - df_["high"]
s2 = pivot - (df_["high"] - df_["low"])
s3 = df_["low"] - (2 * (df_["high"] - pivot))
s4 = df_["low"] - (3 * (df_["high"] - pivot))
s5 = df_["low"] - (4 * (df_["high"] - pivot))
r1 = (pivot * 2) - df_["low"]
r2 = pivot + (df_["high"] - df_["low"])
r3 = df_["high"] + (2 * (pivot - df_["low"]))
r4 = df_["high"] + (3 * (pivot - df_["low"]))
r5 = df_["high"] + (4 * (pivot - df_["low"]))
return pd.concat(
[
pivot,
pd.Series(s1, name="s1"),
pd.Series(s2, name="s2"),
pd.Series(s3, name="s3"),
pd.Series(s4, name="s4"),
pd.Series(s5, name="s5"),
pd.Series(r1, name="r1"),
pd.Series(r2, name="r2"),
pd.Series(r3, name="r3"),
pd.Series(r4, name="r4"),
pd.Series(r5, name="r5"),
],
axis=1,
)
# [0] __ Pivot Woodie
# TODO
def pivot_woodie(data: DataFrame) -> DataFrame:
df_ = data.shift()
pivot = pd.Series((df_['high']+df_['low']+2*data['open'])/4, name="pivot")
s1 = 2*pivot-df_['high']
s2 = pivot - (df_["high"] - df_["low"])
s3 = df_["low"] - (2 * (pivot - df_["high"]))
s4 = s3 - (df_["high"] - df_["low"])
r1 = 2*pivot-df_['low']
r2 = pivot + (df_["high"] - df_["low"])
r3 =df_["high"] + (2 * (pivot - df_["low"]))
r4 = r3 + (df_["high"] - df_["low"])
return pd.concat(
[
pivot,
|
pd.Series(s1, name="s1")
|
pandas.Series
|
# Data files are too large to include. Download from Kaggle: https://www.kaggle.com/c/home-credit-default-risk/data
# Code source: https://www.kaggle.com/jsaguiar/lightgbm-with-simple-features
import argparse
import pickle
import time
import warnings
from contextlib import contextmanager
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from credit_utils import *
from willump.evaluation.willump_executor import willump_execute
warnings.simplefilter(action='ignore', category=FutureWarning)
base_folder = "tests/test_resources/home_credit_default_risk/"
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--disable", help="Disable Willump", action="store_true")
parser.add_argument("-c", "--cascades", action="store_true", help="Cascades?")
parser.add_argument("-k", "--top_k", type=int, help="Top-K to return", required=True)
parser.add_argument("-b", "--debug", help="Debug Mode", action="store_true")
args = parser.parse_args()
if args.cascades:
cascades = pickle.load(open(base_folder + "training_cascades.pk", "rb"))
else:
cascades = None
top_K = args.top_k
@contextmanager
def timer(title):
t0 = time.time()
yield
print("{} - done in {:.5f}s".format(title, time.time() - t0))
# One-hot encoding for categorical columns with get_dummies
def one_hot_encoder(df, nan_as_category=True):
original_columns = list(df.columns)
categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
df = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category)
new_columns = [c for c in df.columns if c not in original_columns]
return df, new_columns
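# Hedged usage sketch (illustrative only): demonstrates what one_hot_encoder returns for a
# tiny frame with one object-dtype column; the column names below are made up for the example.
def _one_hot_encoder_example():
    toy = pd.DataFrame({"NAME_CONTRACT_TYPE": ["Cash loans", "Revolving loans", None],
                        "AMT_CREDIT": [200000.0, 150000.0, 100000.0]})
    encoded, new_cols = one_hot_encoder(toy, nan_as_category=True)
    # new_cols lists the dummy columns added by get_dummies, e.g.
    # ['NAME_CONTRACT_TYPE_Cash loans', 'NAME_CONTRACT_TYPE_Revolving loans', 'NAME_CONTRACT_TYPE_nan']
    return encoded, new_cols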
# Preprocess application_train.csv and application_test.csv
def application_train_test(num_rows=None, nan_as_category=False):
# Read data and merge
df = pd.read_csv(base_folder + 'application_train.csv', nrows=num_rows)
print("Train samples: {}".format(len(df)))
# Optional: Remove 4 applications with XNA CODE_GENDER (train set)
df = df[df['CODE_GENDER'] != 'XNA']
# Binary-encode categorical features that have exactly two categories (0 or 1)
for bin_feature in ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']:
df[bin_feature], uniques =
|
pd.factorize(df[bin_feature])
|
pandas.factorize
|
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
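# skipinitialspace=True strips the blanks that follow each comma, so the numeric fields parse cleanly and the '-9999.0' sentinel matches na_values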
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
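# supplying names without a header row should behave as header=None; with header=0 the names replace the existing header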
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = pd.DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
result = next(
iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
def test_eof_states(self):
# GH 10728 and 10548
# With skip_blank_lines = True
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# GH 10728
# WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# GH 10548
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
with tm.assertRaisesRegexp(ValueError,
'skip footer cannot be negative'):
df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_table(*args, **kwds)
def test_sniff_delimiter(self):
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')
s = BytesIO(text)
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
encoding='utf-8')
tm.assert_frame_equal(data, data4)
def test_regex_separator(self):
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep='\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
self.assertIsNone(expected.index.name)
tm.assert_frame_equal(df, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = [[1, 2334., 5],
[10, 13, 10]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = [[1, 2., 4],
[5, np.nan, 10.]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = self.read_csv(StringIO(data_expected), header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From <NAME>: apparently some non-space filler characters can
# appear; this is handled by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assertRaisesRegexp(ValueError, "must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assertRaisesRegexp(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_fwf_colspecs_is_list_or_tuple(self):
with tm.assertRaisesRegexp(TypeError,
'column specifications must be a list or '
'tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(self.data1),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
with tm.assertRaisesRegexp(TypeError,
'Each column specification must be.+'):
read_fwf(StringIO(self.data1), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
self.assertTrue(len(res))
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71"""
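# the hex fields (pgn/src/dst) are converted with int(x, 16); the trailing data column is reduced to its byte count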
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn", "dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_BytesIO_input(self):
if not compat.PY3:
raise nose.SkipTest(
"Bytes-related test - only needs to work on Python 3")
result = pd.read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = pd.DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
data = BytesIO("שלום::1234\n562::123".encode('cp1255'))
result = pd.read_table(data, sep="::", engine='python',
encoding='cp1255')
expected = pd.DataFrame([[562, 123]], columns=["שלום", "1234"])
tm.assert_frame_equal(result, expected)
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
buf = StringIO()
sys.stdout = buf
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True)
self.assertEqual(
buf.getvalue(), 'Filled 3 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
buf = StringIO()
sys.stdout = buf
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True, index_col=0)
self.assertEqual(
buf.getvalue(), 'Filled 1 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
def test_float_precision_specified(self):
# Should raise an error if float_precision (C parser option) is
# specified
with tm.assertRaisesRegexp(ValueError, "The 'float_precision' option "
"is not supported with the 'python' engine"):
self.read_csv(StringIO('a,b,c\n1,2,3'), float_precision='high')
def test_iteration_open_handle(self):
if PY3:
raise nose.SkipTest(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
try:
read_table(f, squeeze=True, header=None, engine='c')
except Exception:
pass
else:
raise ValueError('this should not happen')
result = read_table(f, squeeze=True, header=None,
engine='python')
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_iterator(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when on chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_single_line(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_malformed(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_skip_footer(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = self.read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_decompression_regex_sep(self):
# GH 6607
# This is a copy which should eventually be moved to ParserTests
# when the issue with the C parser is fixed
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
data = data.replace(b',', b'::')
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='bz2')
tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with multi-level index is fixed in the C parser.
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
# GH 6893
data = ' A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9'
expected = DataFrame.from_records([(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],
columns=list('abcABC'), index=list('abc'))
actual = self.read_table(StringIO(data), sep='\s+')
tm.assert_frame_equal(actual, expected)
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
tm.assert_almost_equal(df.values, expected)
expected = [[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_almost_equal(list(df.values), list(expected))
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = [[1, 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
class TestFwfColspaceSniffing(tm.TestCase):
def test_full_file(self):
# File with all values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
2000-01-05T00:00:00 0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0.487094399463 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
2000-01-11T00:00:00 0.157160753327 34 foo'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_missing(self):
# File with missing values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
34'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces(self):
# File with spaces in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 <NAME> 9315.45 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 <NAME> 0 17000.00 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65 5000.00 2/5/2007
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces_and_missing(self):
# File with spaces and missing values in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 5/25/1985
761 <NAME>-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_messed_up_data(self):
# Completely messed up file
test = '''
Account Name Balance Credit Limit Account Created
101 10000.00 1/17/1998
312 <NAME> 90.00 1000.00
761 <NAME> 49654.87 100000.00 12/5/2006
317 <NAME> 789.65
'''.strip('\r\n')
colspecs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_multiple_delimiters(self):
test = r'''
col1~~~~~col2 col3++++++++++++++++++col4
~~22.....11.0+++foo~~~~~~~~~~<NAME>
33+++122.33\\\bar.........<NAME>
++44~~~~12.01 baz~~<NAME>
~~55 11+++foo++++<NAME>-Smith
..66++++++.03~~~bar <NAME>
'''.strip('\r\n')
colspecs = ((0, 4), (7, 13), (15, 19), (21, 41))
expected = read_fwf(StringIO(test), colspecs=colspecs,
delimiter=' +~.\\')
tm.assert_frame_equal(expected, read_fwf(StringIO(test),
delimiter=' +~.\\'))
def test_variable_width_unicode(self):
if not compat.PY3:
raise nose.SkipTest(
'Bytes-related test - only needs to work on Python 3')
test = '''
שלום שלום
ום שלל
של ום
'''.strip('\r\n')
expected = pd.read_fwf(BytesIO(test.encode('utf8')),
colspecs=[(0, 4), (5, 9)], header=None, encoding='utf8')
tm.assert_frame_equal(expected, read_fwf(BytesIO(test.encode('utf8')),
header=None, encoding='utf8'))
class CParserTests(ParserTests):
""" base class for CParser Testsing """
def test_buffer_overflow(self):
# GH9205
# test certain malformed input files that cause buffer overflows in
# tokenizer.c
malfw = "1\r1\r1\r 1\r 1\r" # buffer overflow in words pointer
malfs = "1\r1\r1\r 1\r 1\r11\r" # buffer overflow in stream pointer
malfl = "1\r1\r1\r 1\r 1\r11\r1\r" # buffer overflow in lines pointer
for malf in (malfw, malfs, malfl):
try:
df = self.read_table(StringIO(malf))
except Exception as cperr:
self.assertIn(
'Buffer overflow caught - possible malformed input file.', str(cperr))
def test_buffer_rd_bytes(self):
# GH 12098
# src->buffer can be freed twice leading to a segfault if a corrupt
# gzip file is read with read_csv and the buffer is filled more than
# once before gzip throws an exception
data = '\x1F\x8B\x08\x00\x00\x00\x00\x00\x00\x03\xED\xC3\x41\x09' \
'\x00\x00\x08\x00\xB1\xB7\xB6\xBA\xFE\xA5\xCC\x21\x6C\xB0' \
'\xA6\x4D' + '\x55' * 267 + \
'\x7D\xF7\x00\x91\xE0\x47\x97\x14\x38\x04\x00' \
'\x1f\x8b\x08\x00VT\x97V\x00\x03\xed]\xefO'
for i in range(100):
try:
_ = self.read_csv(StringIO(data),
compression='gzip',
delim_whitespace=True)
except Exception as e:
pass
class TestCParserHighMemory(CParserTests, tm.TestCase):
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = False
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = False
return read_table(*args, **kwds)
def test_compact_ints(self):
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
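# compact_ints downcasts each integer column to the smallest dtype that fits (i1 here, u1 with use_unsigned=True); as_recarray returns a structured array so the dtype can be checked directly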
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
def test_parse_dates_empty_string(self):
# #2263
s = StringIO("Date, test\n2012-01-01, 1\n,2")
result = self.read_csv(s, parse_dates=["Date"], na_filter=False)
self.assertTrue(result['Date'].isnull()[1])
def test_usecols(self):
raise nose.SkipTest(
"Usecols is not supported in C High Memory engine.")
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
# check with delim_whitespace=True
df = self.read_csv(StringIO(data.replace(',', ' ')), comment='#',
delim_whitespace=True)
tm.assert_almost_equal(df.values, expected)
# check with custom line terminator
df = self.read_csv(StringIO(data.replace('\n', '*')), comment='#',
lineterminator='*')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_skiprows_lineterminator(self):
# GH #9079
data = '\n'.join(['SMOSMANIA ThetaProbe-ML2X ',
'2007/01/01 01:00 0.2140 U M ',
'2007/01/01 02:00 0.2141 M O ',
'2007/01/01 04:00 0.2142 D M '])
expected = pd.DataFrame([['2007/01/01', '01:00', 0.2140, 'U', 'M'],
['2007/01/01', '02:00', 0.2141, 'M', 'O'],
['2007/01/01', '04:00', 0.2142, 'D', 'M']],
columns=['date', 'time', 'var', 'flag',
'oflag'])
# test with the three default lineterminators LF, CR and CRLF
df = self.read_csv(StringIO(data), skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data.replace('\n', '\r')),
skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data.replace('\n', '\r\n')),
skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
def test_trailing_spaces(self):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n"
expected = pd.DataFrame([[1., 2., 4.],
[5.1, np.nan, 10.]])
# this should ignore six lines including lines with trailing
# whitespace and blank lines. issues 8661, 8679
df = self.read_csv(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
# test skipping set of rows after a row with trailing spaces, issue
# #8983
expected = pd.DataFrame({"A": [1., 5.1], "B": [2., np.nan],
"C": [4., 10]})
df = self.read_table(StringIO(data.replace(',', ' ')),
delim_whitespace=True,
skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
tm.assert_almost_equal(df.values, expected)
expected = [[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_almost_equal(list(df.values), list(expected))
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = [[1, 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_passing_dtype(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the dtype argument is supported by all engines.
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
# empty frame
# GH12048
actual = self.read_csv(StringIO('A,B'), dtype=str)
expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str)
tm.assert_frame_equal(actual, expected)
def test_dtype_and_names_error(self):
# GH 8833
# passing both dtype and names resulting in an error reporting issue
data = """
1.0 1
2.0 2
3.0 3
"""
# base cases
result = self.read_csv(StringIO(data), sep='\s+', header=None)
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]])
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), sep='\s+',
header=None, names=['a', 'b'])
expected = DataFrame(
[[1.0, 1], [2.0, 2], [3.0, 3]], columns=['a', 'b'])
tm.assert_frame_equal(result, expected)
# fallback casting
result = self.read_csv(StringIO(
data), sep='\s+', header=None, names=['a', 'b'], dtype={'a': np.int32})
expected = DataFrame([[1, 1], [2, 2], [3, 3]], columns=['a', 'b'])
expected['a'] = expected['a'].astype(np.int32)
tm.assert_frame_equal(result, expected)
data = """
1.0 1
nan 2
3.0 3
"""
# fallback casting, but not castable
with tm.assertRaisesRegexp(ValueError, 'cannot safely convert'):
self.read_csv(StringIO(data), sep='\s+', header=None,
names=['a', 'b'], dtype={'a': np.int32})
def test_fallback_to_python(self):
# GH 6607
data = 'a b c\n1 2 3'
# specify C engine with unsupported options (raise)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep=None,
delim_whitespace=False)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep='\s')
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', skip_footer=1)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), delim_whitespace=True,
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), lineterminator='\n',
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
class TestCParserLowMemory(CParserTests, tm.TestCase):
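# low_memory=True exercises the C parser's chunked code path; buffer_lines=2 is intended to keep those internal chunks tiny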
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = True
kwds['buffer_lines'] = 2
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = True
kwds['buffer_lines'] = 2
return read_table(*args, **kwds)
def test_compact_ints(self):
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.to_records(index=False).dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.to_records(index=False).dtype, ex_dtype)
def test_compact_ints_as_recarray(self):
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
def test_precise_conversion(self):
# GH #8002
tm._skip_if_32bit()
from decimal import Decimal
normal_errors = []
precise_errors = []
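# compare the default conversion, float_precision='high', and float_precision='round_trip' against a Decimal reference value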
for num in np.linspace(1., 2., num=500): # test numbers between 1 and 2
text = 'a\n{0:.25}'.format(num) # 25 decimal digits of precision
normal_val = float(self.read_csv(StringIO(text))['a'][0])
precise_val = float(self.read_csv(
StringIO(text), float_precision='high')['a'][0])
roundtrip_val = float(self.read_csv(
StringIO(text), float_precision='round_trip')['a'][0])
actual_val = Decimal(text[2:])
def error(val):
return abs(Decimal('{0:.100}'.format(val)) - actual_val)
normal_errors.append(error(normal_val))
precise_errors.append(error(precise_val))
# round-trip should match float()
self.assertEqual(roundtrip_val, float(text[2:]))
self.assertTrue(sum(precise_errors) <= sum(normal_errors))
self.assertTrue(max(precise_errors) <= max(normal_errors))
def test_pass_dtype(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})
self.assertEqual(result['one'].dtype, 'u1')
self.assertEqual(result['two'].dtype, 'object')
def test_pass_dtype_as_recarray(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'},
as_recarray=True)
self.assertEqual(result['one'].dtype, 'u1')
self.assertEqual(result['two'].dtype, 'S1')
def test_empty_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), dtype={'one': 'u1'})
expected = DataFrame({'one': np.empty(0, dtype='u1'),
'two': np.empty(0, dtype=np.object)})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_index_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), index_col=['one'],
dtype={'one': 'u1', 1: 'f'})
expected = DataFrame({'two': np.empty(0, dtype='f')},
index=Index([], dtype='u1', name='one'))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_multiindex_pass_dtype(self):
data = 'one,two,three'
result = self.read_csv(StringIO(data), index_col=['one', 'two'],
dtype={'one': 'u1', 1: 'f8'})
exp_idx = MultiIndex.from_arrays([np.empty(0, dtype='u1'), np.empty(0, dtype='O')],
names=['one', 'two'])
expected = DataFrame(
{'three': np.empty(0, dtype=np.object)}, index=exp_idx)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_mangled_column_pass_dtype_by_names(self):
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={
'one': 'u1', 'one.1': 'f'})
expected = DataFrame(
{'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_mangled_column_pass_dtype_by_indexes(self):
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})
expected = DataFrame(
{'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_dup_column_pass_dtype_by_names(self):
data = 'one,one'
result = self.read_csv(
StringIO(data), mangle_dupe_cols=False, dtype={'one': 'u1'})
expected = pd.concat([Series([], name='one', dtype='u1')] * 2, axis=1)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_dup_column_pass_dtype_by_indexes(self):
### FIXME in GH9424
raise nose.SkipTest(
"GH 9424; known failure read_csv with duplicate columns")
data = 'one,one'
result = self.read_csv(
StringIO(data), mangle_dupe_cols=False, dtype={0: 'u1', 1: 'f'})
expected = pd.concat([Series([], name='one', dtype='u1'),
Series([], name='one', dtype='f')], axis=1)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_usecols_dtypes(self):
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(0, 1, 2),
names=('a', 'b', 'c'),
header=None,
converters={'a': str},
dtype={'b': int, 'c': float},
)
result2 = self.read_csv(StringIO(data), usecols=(0, 2),
names=('a', 'b', 'c'),
header=None,
converters={'a': str},
dtype={'b': int, 'c': float},
)
self.assertTrue((result.dtypes == [object, np.int, np.float]).all())
self.assertTrue((result2.dtypes == [object, np.float]).all())
def test_usecols_implicit_index_col(self):
# #2654
data = 'a,b,c\n4,apple,bat,5.7\n8,orange,cow,10'
result = self.read_csv(StringIO(data), usecols=['a', 'b'])
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_with_whitespace(self):
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
result = self.read_csv(StringIO(data), delim_whitespace=True,
usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_regex_sep(self):
# #2733
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
df = self.read_csv(StringIO(data), sep='\s+', usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(df, expected)
def test_pure_python_failover(self):
data = "a,b,c\n1,2,3#ignore this!\n4,5,6#ignorethistoo"
result = self.read_csv(
|
StringIO(data)
|
pandas.compat.StringIO
|
# -*- coding: utf-8 -*-
from datetime import timedelta
from distutils.version import LooseVersion
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas import (
DatetimeIndex, Int64Index, Series, Timedelta, TimedeltaIndex, Timestamp,
date_range, timedelta_range
)
from pandas.errors import NullFrequencyError
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(params=['B', 'D'])
def freq(request):
return request.param
class TestTimedeltaIndexArithmetic(object):
# Addition and Subtraction Operations
# -------------------------------------------------------------
# TimedeltaIndex.shift is used by __add__/__sub__
def test_tdi_shift_empty(self):
# GH#9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
def test_tdi_shift_hours(self):
# GH#9903
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_tdi_shift_minutes(self):
# GH#9903
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_tdi_shift_int(self):
# GH#8083
trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
result = trange.shift(1)
expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00',
'4 days 01:00:00', '5 days 01:00:00'],
freq='D')
tm.assert_index_equal(result, expected)
def test_tdi_shift_nonstandard_freq(self):
# GH#8083
trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
result = trange.shift(3, freq='2D 1s')
expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03',
'8 days 01:00:03', '9 days 01:00:03',
'10 days 01:00:03'], freq='D')
tm.assert_index_equal(result, expected)
def test_shift_no_freq(self):
# GH#19147
tdi = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00'], freq=None)
with pytest.raises(NullFrequencyError):
tdi.shift(2)
# -------------------------------------------------------------
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and integer
def test_tdi_add_int(self, one):
# Variants of `one` for #19012
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + one
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
def test_tdi_iadd_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
rng += one
tm.assert_index_equal(rng, expected)
def test_tdi_sub_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - one
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
def test_tdi_isub_int(self, one):
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_add_integer_array(self, box):
# GH#19959
rng = timedelta_range('1 days 09:00:00', freq='H', periods=3)
other = box([4, 3, 2])
expected = TimedeltaIndex(['1 day 13:00:00'] * 3)
result = rng + other
tm.assert_index_equal(result, expected)
result = other + rng
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_sub_integer_array(self, box):
# GH#19959
rng = timedelta_range('9H', freq='H', periods=3)
other = box([4, 3, 2])
expected = TimedeltaIndex(['5H', '7H', '9H'])
result = rng - other
tm.assert_index_equal(result, expected)
result = other - rng
tm.assert_index_equal(result, -expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_tdi_addsub_integer_array_no_freq(self, box):
# GH#19959
tdi = TimedeltaIndex(['1 Day', 'NaT', '3 Hours'])
other = box([14, -1, 16])
with pytest.raises(NullFrequencyError):
tdi + other
with pytest.raises(NullFrequencyError):
other + tdi
with pytest.raises(NullFrequencyError):
tdi - other
with pytest.raises(NullFrequencyError):
other - tdi
# -------------------------------------------------------------
# Binary operations TimedeltaIndex and timedelta-like
# Note: add and sub are tested in tests.test_arithmetic
def test_tdi_iadd_timedeltalike(self, delta):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng += delta
tm.assert_index_equal(rng, expected)
def test_tdi_isub_timedeltalike(self, delta):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
pytest.raises(TypeError, lambda: tdi - dt)
pytest.raises(TypeError, lambda: tdi - dti)
pytest.raises(TypeError, lambda: td - dt)
pytest.raises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
pytest.raises(TypeError, lambda: dt_tz - ts)
pytest.raises(TypeError, lambda: dt_tz - dt)
pytest.raises(TypeError, lambda: dt_tz - ts_tz2)
pytest.raises(TypeError, lambda: dt - dt_tz)
pytest.raises(TypeError, lambda: ts - dt_tz)
pytest.raises(TypeError, lambda: ts_tz2 - ts)
pytest.raises(TypeError, lambda: ts_tz2 - dt)
pytest.raises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
pytest.raises(TypeError, lambda: dti - ts_tz)
pytest.raises(TypeError, lambda: dti_tz - ts)
pytest.raises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
pytest.raises(ValueError, lambda: tdi + dti[0:1])
pytest.raises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
pytest.raises(NullFrequencyError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
tm.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other + td, expected)
pytest.raises(TypeError, lambda: td + np.array([1]))
pytest.raises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
tm.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(-other + td, expected)
pytest.raises(TypeError, lambda: td - np.array([1]))
pytest.raises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
tm.assert_numpy_array_equal(td * np.array([2]), expected)
tm.assert_numpy_array_equal(np.array([2]) * td, expected)
pytest.raises(TypeError, lambda: td * other)
pytest.raises(TypeError, lambda: other * td)
tm.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= LooseVersion('1.8'):
tm.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other =
|
pd.to_datetime(['2000-01-01'])
|
pandas.to_datetime
|
import os
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from catboost import Pool
from catboost import CatBoostClassifier
import scipy.io.wavfile as wavfile
import python_speech_features.base as speech
class pohui:
def __init__(self):
if not os.path.exists('data/ours.csv'):
raise Exception("No base data to train on (data/ours.csv missing)")
if not os.path.exists('data/random.csv'):
raise Exception("No base data to train on (data/random.csv missing)")
self.ourdata = pd.read_csv('data/ours.csv')
self.ourdata = self.ourdata.sample(frac=1)
randoms = pd.read_csv('data/random.csv')
self.upd = pd.concat([randoms, self.ourdata], ignore_index = True).sample(frac = 1)
if not os.path.exists('models'):
os.makedirs('models')
self.trainModels()
else:
self.cbc0 = CatBoostClassifier()
self.cbc1 = CatBoostClassifier()
self.cbc2 = CatBoostClassifier()
self.cbc3 = CatBoostClassifier()
self.cbc4 = CatBoostClassifier()
try:
self.cbc0.load_model('models/cbc0.cbm')
self.cbc1.load_model('models/cbc1.cbm')
self.cbc2.load_model('models/cbc2.cbm')
self.cbc3.load_model('models/cbc3.cbm')
self.cbc4.load_model('models/cbc4.cbm')
except:
self.trainModels()
self.saveModels()
self.le = preprocessing.LabelEncoder()
self.le.fit(list(self.upd.person.unique()))
def saveModels(self):
self.cbc0.save_model('models/cbc0.cbm')
self.cbc1.save_model('models/cbc1.cbm')
self.cbc2.save_model('models/cbc2.cbm')
self.cbc3.save_model('models/cbc3.cbm')
self.cbc4.save_model('models/cbc4.cbm')
def Features(self, data, rate, dim):
        # Spectral statistics: treat the normalised FFT magnitude as a probability
        # distribution over frequency bins
        spec = np.abs(np.fft.rfft(data))
        freq = np.fft.rfftfreq(len(data), d=1 / dim)
        a = spec / spec.sum()
        meaN = (freq * a).sum()
        std = np.sqrt(np.sum(a * ((freq - meaN) ** 2)))
        # quantiles of the spectral distribution via its cumulative sum
        a_cumsum = np.cumsum(a)
        mediaN = freq[len(a_cumsum[a_cumsum <= 0.5])]
        modE = freq[a.argmax()]
        q25 = freq[len(a_cumsum[a_cumsum <= 0.25])]
        q75 = freq[len(a_cumsum[a_cumsum <= 0.75])]
        IQR = q75 - q25
        # skewness and kurtosis of the normalised spectrum
        z = a - a.mean()
        w = a.std()
        skewnesS = ((z ** 3).sum() / (len(spec) - 1)) / w ** 3
        kurtosiS = ((z ** 4).sum() / (len(spec) - 1)) / w ** 4
m = speech.mfcc(data,rate)
f = speech.fbank(data,rate)
l = speech.logfbank(data,rate)
s = speech.ssc(data,rate)
data = pd.DataFrame(data)
desc = data.describe()
mean = desc.loc["mean"].get(0)
mad = data.mad().get(0)
sd = desc.loc["std"].get(0)
median = data.median().get(0)
minimum = desc.loc["min"].get(0)
maximum = desc.loc["max"].get(0)
Q25 = desc.loc["25%"].get(0)
Q75 = desc.loc["75%"].get(0)
interquartileR = Q75 - Q25
skewness = data.skew().get(0)
kurtosis = data.kurtosis().get(0)
result = {
"Mean": mean, "Mad": mad, "deviation": sd, "Median": median, "Min": minimum, "Max": maximum,
"interquartileR": interquartileR, "Skewness": skewness, "Q25": Q25, "Q75": Q75, "Kurtosis": kurtosis,
"mfcc_mean": np.mean(m), "mfcc_max": np.max(m), "mfcc_min": np.min(m),
"fbank_mean": np.mean(f[0]), "fbank_max": np.max(f[0]), "fbank_min": np.min(f[0]),
"energy_mean": np.mean(f[1]), "energy_max": np.max(f[1]), "energy_min": np.min(f[1]),
"lfbank_mean": np.mean(l), "lfbank_max": np.max(l), "lfbank_min": np.min(l),
"ssc_mean": np.mean(s), "ssc_max": np.max(s), "ssc_min": np.min(s),
"meaN": meaN, "deviatioN": std, "mediaN": mediaN, "modE": modE, "IQR": IQR,
"skewnesS": skewnesS, "q25": q25, "q75": q75, "kurtosiS": kurtosiS}
return result
def StereoToMono(self, data):
newdata = []
for i in range(len(data)):
d = (data[i][0] + data[i][1])/2
newdata.append(d)
return(np.array(newdata, dtype='int16'))
def trainModels(self):
x = self.upd.drop(columns = ["person"]).values
y = self.upd["person"].values
x_train, x_valid, y_train, y_valid = train_test_split(x, y, test_size=0.42)
self.le = preprocessing.LabelEncoder()
self.le.fit(list(self.upd.person.unique()))
y_train = self.le.transform(y_train)
y_valid = self.le.transform(y_valid)
train = Pool(x_train, y_train)
valid = Pool(x_valid, y_valid)
self.cbc0 = CatBoostClassifier(iterations=100, learning_rate=0.01, depth=5)
self.cbc0.fit(train, eval_set=valid, use_best_model=True, verbose=False)
self.cbc1 = CatBoostClassifier(iterations=100, learning_rate=0.001, depth=4)
self.cbc1.fit(train, eval_set=valid, use_best_model=True, verbose=False)
self.cbc2 = CatBoostClassifier(iterations=100, learning_rate=0.0001, depth=3)
self.cbc2.fit(train, eval_set=valid, use_best_model=True, verbose=False)
self.cbc3 = CatBoostClassifier(iterations=100, learning_rate=0.00001, depth=2)
self.cbc3.fit(train, eval_set=valid, use_best_model=True, verbose=False)
self.cbc4 = CatBoostClassifier(iterations=100, learning_rate=0.000001, depth=1)
self.cbc4.fit(train, eval_set=valid, use_best_model=True, verbose=False)
def registerUser(self, name, age, gender, pathToWav):
if not os.path.exists(pathToWav):
raise Exception('No such file exists (can\'t register user)')
if gender not in [0, 1]:
raise Exception('Invalid gender entered')
if age >= 0 and age < 25:
age = 20
elif age >= 25 and age < 35:
age = 30
elif age >= 35 and age < 45:
age = 40
elif age >= 45 and age < 101:
age = 50
else:
raise Exception('Invalid age entered')
rate, newdata = wavfile.read(pathToWav)
try:
newdata = self.StereoToMono(newdata)
except:
pass
wew = []
chunk_size = len(newdata)//10
for start in range(0, len(newdata), chunk_size):
temp = []
temp.append(gender)
temp.append(age)
f = self.Features(newdata[start:start+chunk_size],rate,1000)
if start+chunk_size >= len(newdata):
print("clearing", f)
break
for feature in f:
temp.append(f[feature])
temp.append(name)
if temp[6]!=0 and 'nan' not in ','.join(list(map(str,temp))):
wew.append(','.join(list(map(str,temp))))
self.upd = self.upd.reset_index(drop=True)
self.upd.loc[self.upd.shape[0]] = temp
self.ourdata.loc[self.ourdata.shape[0]] = temp
t = open("data/ours.csv",'r').read()
open("data/ours.csv",'w').write(t+'\n'+'\n'.join(wew))
self.upd = self.upd.sample(frac=1)
self.trainModels()
def predict(self, age, gender, pathToWav):
if not os.path.exists(pathToWav):
raise Exception('No such file exists (can\'t predict)')
if gender not in [0, 1]:
raise Exception('Invalid gender entered')
if age >= 0 and age < 25:
age = 20
elif age >= 25 and age < 35:
age = 30
elif age >= 35 and age < 45:
age = 40
elif age >= 45 and age < 101:
age = 50
else:
raise Exception('Invalid age entered')
rate, newdata = wavfile.read(pathToWav)
try: newdata = self.StereoToMono(newdata)
except: pass
temp = []
temp.append(gender)
temp.append(age)
f = self.Features(newdata,rate,1000)
for feature in f:
temp.append(f[feature])
new =
|
pd.DataFrame(temp)
|
pandas.DataFrame
|
import tensorflow as tf
from tensorflow.keras.datasets import cifar10
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import time
import pathlib
import timeit
import seaborn as sns
test_dir = "/home/jetson/Desktop/America/data/archive/seg_test/seg_test"
train_dir = "/home/jetson/Desktop/America/data/archive/seg_train/seg_train"
from keras.preprocessing.image import ImageDataGenerator
train_data = ImageDataGenerator(rescale = 1./255)
train_imagess = train_data.flow_from_directory(train_dir,
batch_size = 32,
target_size = (32,32),
class_mode = 'categorical')
test_data = ImageDataGenerator(rescale = 1./255)
test_imagess = test_data.flow_from_directory(test_dir,
batch_size = 100,
target_size = (32,32),
class_mode = 'categorical')
# Pull one batch from each generator, keeping images and their matching labels together
for img, lbl in train_imagess:
    train_images = img
    train_labels = lbl
    break
for img, lbl in test_imagess:
    test_images = img
    image_labels = lbl
    break
test_labels = np.argmax(image_labels, axis=1)
train_labels = np.argmax(train_labels, axis=1)
testcode = '''
def test():
model = keras.models.load_model('/home/jetson/Desktop/greedyhpo-main/Main_test_case/intel/greedy_approch_Intel_vgg16')
model.predict(test_images[0])
'''
time = timeit.repeat(stmt=testcode, repeat=100)
#time = np.array(time)
time = np.reshape(time, (100, 1))
#print(time)
pd.DataFrame(time).to_csv("NQ_greedy_approch_Intel_vgg16.csv")
print("Latency saved...")
testcode = '''
def test():
model = keras.models.load_model('/home/jetson/Desktop/greedyhpo-main/Main_test_case/intel/Bayesian_Search_Intel_vgg16')
model.predict(test_images[0])
'''
time = timeit.repeat(stmt=testcode, repeat=100)
#time = np.array(time)
time = np.reshape(time, (100, 1))
#print(time)
|
pd.DataFrame(time)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import sys, os
import pandas as pd
import numpy as np
from data_factory.temperature_spider import getTemperatureData
def loadNTL(path):
lineloss = pd.read_csv(path)
lineloss['Date'] = pd.to_datetime(lineloss['Date'])
lineloss = lineloss.sort_values(['AreaID', 'Date'])
    # convert datetime64[ns] values to UNIX timestamps in seconds
    lineloss['Date'] = lineloss['Date'].astype(int)
    lineloss['Date'] = (lineloss['Date'] / 1e9).astype(int)
return lineloss
def loadUser(path):
if os.path.isdir(path):
files = os.listdir(path)
userdata = []
for file in files:
temp = pd.read_csv(os.path.join(path, file))
userdata.append(temp)
userdata = pd.concat(userdata, ignore_index=True)
else:
userdata = pd.read_csv(path)
userdata['Date'] = pd.to_datetime(userdata['Date'])
userdata = userdata.sort_values(['AreaID', 'UserID', 'Date'])
userdata['Date'] = userdata['Date'].astype(int)
userdata['Date'] = (userdata['Date'] / 1e9).astype(int)
return userdata
def loadTemperature(path, need_spider=False, **kwargs):
if need_spider:
city_ids = kwargs['cities']
years = kwargs['years']
months = kwargs['months']
if not os.path.isdir(path):
storepath = os.path.dirname(path)
else:
storepath = path
getTemperatureData(city_ids, years, months, storepath)
temperaturedata = pd.read_csv(path)
temperaturedata['date'] = pd.to_datetime(temperaturedata['Date'], format='%Y年%m月%d日')
temperaturedata['high'] = temperaturedata['temperature_high'].map(lambda x: x.split('℃')[0])
temperaturedata['low'] = temperaturedata['temperature_low'].map(lambda x: x.split('℃')[0])
temperaturedata = temperaturedata[['date', 'high', 'low']].astype(int)
temperaturedata['date'] = (temperaturedata['date'] / 1e9).astype(int)
return temperaturedata
# abstract data
def abstractData(userdata, ntldata, tempdata):
datax = []
datainfo = []
tq_ids = pd.unique(ntldata['AreaID'])
num = 0
length = len(tq_ids)
bar_length = 20
percent = 1.0
for tq_id in tq_ids:
        # progress bar
hashes = '#' * int(percent / length * bar_length)
spaces = ' ' * (bar_length - len(hashes))
sys.stdout.write("\rPercent: [%s] %d%%" % (hashes + spaces, int(percent * 100 / length)))
sys.stdout.flush()
percent += 1
tdata = ntldata[ntldata['AreaID'] == tq_id][['Date', 'NTL']].values
tempuser = userdata[userdata['AreaID'] == tq_id]
if tempuser.shape[0] == 0:
num += 1
user_ids = pd.unique(tempuser['UserID'])
for user_id in user_ids:
udata = tempuser[tempuser['UserID'] == user_id][['Date', 'Total', 'Top', 'Peak', 'Flat', 'Valley']].values
datax.append([udata, tdata, tempdata.values])
datainfo.append([tq_id, user_id])
sys.stdout.write('\n')
print("Num_area: {}, Num_Outlier: {}".format(length, num))
return datax, datainfo
# establish dataset
def establishDataset(datax, starttime, endtime):
    def getdata(d, starttime):
        # keep the records falling inside the 24h window starting at starttime
        tmpd = d[(d[:, 0] < starttime + 24 * 60 * 60) & (d[:, 0] >= starttime)]
        if tmpd.shape[0] == 0:
            # no record for that day: pad with zeros, keeping the timestamp in column 0
            tmpd = [starttime] + [0] * (tmpd.shape[1]-1)
        else:
            # otherwise take the column-wise maximum of that day's records
            tmpd = np.max(tmpd, axis=0)
        return tmpd
tempx = []
length = len(datax)
bar_length = 20
percent = 1.0
for i in range(length):
d = datax[i]
        # progress bar
hashes = '#' * int(percent / length * bar_length)
spaces = ' ' * (bar_length - len(hashes))
sys.stdout.write("\rPercent: [%s] %d%%" % (hashes + spaces, int(percent * 100 / length)))
sys.stdout.flush()
percent += 1
ele = np.array(d[0], dtype=float, copy=True)
ntl = np.array(d[1], dtype=float, copy=True)
temperature = np.array(d[2], dtype=float, copy=True)
startstamp = (pd.DataFrame([pd.to_datetime(starttime)]).astype(int) / 1e9).astype(int).values[0, 0]
endstamp = (pd.DataFrame([
|
pd.to_datetime(endtime)
|
pandas.to_datetime
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 27 10:23:59 2021
@author: alber
"""
import re
import os
import pandas as pd
import numpy as np
import spacy
import pickle
import lightgbm as lgb
import imblearn
from sklearn import preprocessing
from sklearn.semi_supervised import (
LabelPropagation,
LabelSpreading,
SelfTrainingClassifier,
)
from sklearn import metrics
from sklearn.dummy import DummyClassifier
from sklearn.metrics import classification_report
# from nltk.corpus import stopwords
# from nltk import ngrams
from nltk.stem.snowball import SnowballStemmer
# from sentence_transformers import SentenceTransformer, util
from imblearn.over_sampling import SMOTE, BorderlineSMOTE, ADASYN
from statsmodels.stats.inter_rater import cohens_kappa
from common.tools import get_files, file_presistance
from common.config import (
PATH_POEMS, PATH_RESULTS, PATH_AFF_LEXICON, PATH_GROUND_TRUTH
)
nlp = spacy.load("es_core_news_md")
stemmer = SnowballStemmer("spanish")
def _getReport(
y_test, y_pred, y_pred_proba, target_names, using_affective = "yes",
semantic_model = "", classification_model = ""
):
"""
TODO
Parameters
----------
y_test : TYPE
DESCRIPTION.
y_pred : TYPE
DESCRIPTION.
target_names : TYPE
DESCRIPTION.
using_affective : TYPE, optional
DESCRIPTION. The default is "yes".
semantic_model : TYPE, optional
DESCRIPTION. The default is "".
classification_model : TYPE, optional
DESCRIPTION. The default is "".
Returns
-------
df_metrics_iter : TYPE
DESCRIPTION.
"""
### 1. Standard Metrics
report = classification_report(
y_test, y_pred, target_names = target_names, output_dict = True
)
df_metrics_iter = pd.DataFrame(
{
'category': [category],
'using_affective': [using_affective],
'semantic_model': [semantic_model],
'classification_model': [classification_model],
'n_class_0': [report[f'{category}_0']['support']],
'n_class_1': [report[f'{category}_1']['support']],
'precision_class_0': [report[f'{category}_0']['precision']],
'precision_class_1': [report[f'{category}_1']['precision']],
'recall_class_0': [report[f'{category}_0']['recall']],
'recall_class_1': [report[f'{category}_1']['recall']],
'f1_class_0': [report[f'{category}_0']['f1-score']],
'f1_class_1': [report[f'{category}_1']['f1-score']],
'precision_weighted': [report['weighted avg']['precision']],
'recall_weighted': [report['weighted avg']['recall']],
'f1_weighted': [report['weighted avg']['f1-score']]
}
)
### 2. Cohen's Kappa
# Make Dataframe
df = pd.DataFrame({"A": y_test, "B": y_pred})
# Switch it to three columns A's answer, B's answer and count of that combination
df = df.value_counts().reset_index()
# Check compliance
if len(df) < 4:
df_aux = pd.DataFrame({'A': [0.0, 1.0, 0.0, 1.0],
'B': [0.0, 0.0, 1.0, 1.0]
})
df = df.merge(df_aux, how="outer").fillna(0)
# Make square
square = df.pivot(columns="A",index="B").values
# Get Kappa
dct_kappa = cohens_kappa(square)
kappa_max = dct_kappa['kappa_max']
kappa = dct_kappa['kappa']
df_metrics_iter['kappa'] = [kappa]
df_metrics_iter['kappa_max'] = [kappa_max]
### 3. AUC
y_pred_proba = np.asarray([x if str(x) != 'nan' else 0.0 for x in y_pred_proba])
fpr, tpr, thresholds = metrics.roc_curve(
y_test, y_pred_proba, pos_label=1
)
auc = metrics.auc(fpr, tpr)
df_metrics_iter['auc'] = [auc]
return df_metrics_iter
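# Illustrative (not executed) usage sketch for _getReport; y_test, y_pred and
# y_pred_proba stand for the outputs of one classification run and `category`
# is the globally defined category under evaluation:
# df_metrics = _getReport(
#     y_test, y_pred, y_pred_proba,
#     target_names=[f"{category}_0", f"{category}_1"],
#     using_affective="yes",
#     semantic_model="enc_text_model1",
#     classification_model="lightgbm",
# )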
# =============================================================================
# 1. Prepare Data
# =============================================================================
### Load Sonnets Features
# Load Data
file_to_read = open(f"{PATH_RESULTS}/dct_sonnets_input_v5", "rb")
dct_sonnets = pickle.load(file_to_read)
file_to_read.close()
# Only DISCO
if False:
dct_sonnets = {x:y for x,y in dct_sonnets.items() if x <= 4085}
# Sonnet Matrix
list_original_sentence = [
'enc_text_model1',
'enc_text_model2',
'enc_text_model3',
'enc_text_model4',
'enc_text_model5'
]
list_semantic_models = [
'enc_text_model1',
'enc_text_model2',
'enc_text_model3',
'enc_text_model4',
'enc_text_model5',
# 'enc_text_model_hg_bert_max',
# 'enc_text_model_hg_bert_span',
# 'enc_text_model_hg_bert_median',
'enc_text_model_hg_bert_avg_w',
# 'enc_text_model_hg_bert_sp_max',
# 'enc_text_model_hg_bert_sp_span',
# 'enc_text_model_hg_bert_sp_median',
'enc_text_model_hg_bert_sp_avg_w',
# 'enc_text_model_hg_ro_max',
# 'enc_text_model_hg_ro_span',
# 'enc_text_model_hg_ro_median',
# 'enc_text_model_hg_ro_avg_w'
]
# General Variables
dct_metrics_all_models = {}
df_meta = pd.concat(
[
pd.DataFrame({"index": [item["index"]], "text": [item["text"]]})
for key, item in dct_sonnets.items()
]
)
df_affective = pd.concat([item["aff_features"] for key, item in dct_sonnets.items()]).fillna(0)
# Load psycho names
df_names = pd.read_csv(f"{PATH_GROUND_TRUTH}/variable_names_en.csv", encoding="latin-1")
list_names = list(df_names["es_name"].values)
list_aff = [
"concreteness",
"context availability",
"anger",
"arousal",
"disgust",
"fear",
"happinness",
"imageability",
"sadness",
"valence",
]
### Load Ground Truth
if False:
df_gt =
|
pd.read_csv(f"{PATH_GROUND_TRUTH}/poems_corpus_all.csv")
|
pandas.read_csv
|
# Main Program
# 29.04.20 DGM: Program's opening
'''
ATTENTION to use this program:
- you might want to overwrite in Globals Initialization:
+ client_id = <your ID>
+ client_secret = <your ID Key>
YOU CAN ALSO: create a json file named 'credentials.json' in the same folder containing:
{
    "client_id": "<your ID>",
    "client_secret": "<your ID Key>"
}
- you may also want to refresh the cities DataFrame by uncommenting the rows near the beginning of this program
- AFTER running, the program should have created three maps in html format:
('popupmap.html')
('chorobusiness.html')
('choroschool.html')
GOAL:
- Retrieve the current companies' locations and plot them on a map to find out which companies in your sector are in your surroundings,
which city you should move to for this, etc. Feel free to change these queries and places to match your needs.
- Cluster this information per state and generate choropleths to visualize which region of the country is the most suitable for you
##################
PROCEDURE
##################
Retrieving data from Foursquare's API:
- needs to pass:
+ Class Link
- Read credentials for the Foursquare API from the external json file:
+ Load data for the cities in Germany and match them with queries
- Request to the Foursquare API about:
+ Data Science
+ Data Mining
+ Data Analysis
Generation of a map popping up companies' names
Generation of clusters
Generation of maps showing clusters (choropleths)
Generation of statistics and graphs for visualization
##################
'''
from pathlib import Path
# working with json and tranforming json file into a pandas dataframe library
import json
from pandas.io.json import json_normalize
import requests # library to handle requests
import pandas as pd # library for data analsysis
import numpy as np # library to handle data in a vectorized manner
import random # library for random number generation
from geopy.geocoders import Nominatim # module to convert an address into latitude and longitude values
# libraries for displaying images
from IPython.display import Image
from IPython.core.display import HTML
import folium # plotting library
# plot
import matplotlib as mpl
import matplotlib.pyplot as plt
# regex
import re
'''
Uncomment the block below to refresh the cities DataFrame from Wikipedia
'''
# url = 'https://en.wikipedia.org/wiki/List_of_cities_in_Germany_by_population'
# series_df = pd.read_html(url, encoding='utf_16')
# # series_df = pd.read_html(url)
# df = series_df[0]
# df = df.drop(labels={'2015rank', '2015estimate', 'Change', '2011census', '2015land area', '2015populationdensity'}, axis=1)
# location_columns = df['Location'].str.split(pat="/", expand=True).rename(columns={0: 'cardLocation', 1: 'dLocation'})
# location_columns2 = location_columns['dLocation'].str.split(pat="°N", expand=True)
# location_columns2 = location_columns2.rename(columns={0: 'Latitude', 1: 'Longitude'})
# try:
# location_columns2['Latitude'] = location_columns2['Latitude'].astype(float)
# print ('converted to float')
# except Exception as e:
# print (e)
# print ('Trying to decode...')
# location_columns2['Latitude'] = location_columns2['Latitude'].str.replace(u' \ufeff', u'')
# location_columns2['Latitude'] = location_columns2['Latitude'].astype(float)
# print('final data type from {} is {}'.format(location_columns2['Latitude'].name, location_columns2['Latitude'].dtypes))
# try:
# location_columns2['Longitude'] = location_columns2['Longitude'].astype(float)
# print ('converted to float')
# except Exception as e:
# print (e)
# print ('Trying to decode...')
# location_columns2['Longitude'] = location_columns2['Longitude'].str.replace(u' \ufeff', u'')
# location_columns2['Longitude'] = location_columns2['Longitude'].str.replace(u'°E', u'')
# location_columns2['Longitude'] = location_columns2['Longitude'].astype(float)
# print('final data type from {} is {}'.format(location_columns2['Longitude'].name, location_columns2['Longitude'].dtypes))
# df = pd.concat([df, location_columns2], axis=1)
# df = df.drop(labels={'Location'}, axis=1)
# position = pd.DataFrame(columns={'Position'})
# my_dict = {
# 'Position': ''
# }
# for idx in df.index:
# string = str(round(df['Latitude'].iloc[idx], ndigits=3)) + ',' + str(round(df['Longitude'].iloc[idx], ndigits=3))
# my_dict = {
# 'Position': string
# }
# position = position.append(my_dict, ignore_index=True)
# df = pd.concat([df, position], axis=1)
# df.to_csv('./data/loc_cities_germany.csv')
'''
Commented out after saving dataframe!
'''
# Find geocenter of Germany
geolocator = Nominatim(user_agent='aiFinder')
location = geolocator.geocode('Germany')
latGE = location.latitude # --> Center
lonGE = location.longitude # --> Center
locationGE = str(str(latGE) + ',' + str(lonGE))
# generate map centered on the middle of Germany:
#################################################################
mapGE = folium.Map(location=[latGE, lonGE], zoom_start=7)
choro_businessGE = folium.Map(location=[latGE, lonGE], zoom_start=7)
choro_studyGE = folium.Map(location=[latGE, lonGE], zoom_start=7)
# Get credentials from json file
#########################################################################
wksPath = Path(__file__).parents[0]
# Open json file and return content of it
def readFile(nameFile):
path = wksPath / nameFile
with open(path, 'r') as file:
content = file.read()
return content
def writeFile(nameFile, data):
with open(nameFile, 'w') as file:
file.write(data)
# Classes
##########################################################################
'''
class Link() -> generates a link to query the json file out of foursquare's API:
method url_from_venue -> generates this link for venues and needs of:
option, venues
location, as: latitud, longitud
query as: coffee, chinese food, windsurf...
'''
class Link:
global main_URL, client_id, client_secret, version
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def venue(self, **kwargs):
self.option = '/' + self.option + '?'
self.location = 'll=' + self.location
self.query = '&query=' + self.query
url = str(main_URL+'venues'+self.option+self.location+self.query+'&client_id='+client_id+'&client_secret='+client_secret+'&v='+version)
# kwargs should have the same name as in Foursquare
for key, value in kwargs.items():
append = '&' + key + '=' + str(value)
url = url + append
return (url)
# TODO: Finish it...
def explore(self):
self.option = '/' + self.option + '?'
self.location = 'll=' + self.location
self.query = '&query=' + self.query
url = str(main_URL+'venues'+self.option+self.location+self.query+'&client_id='+client_id+'&client_secret='+client_secret+'&v='+version)
return (url)
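# Illustrative (not executed) sketch of how Link is used further below, once the
# client_id/client_secret globals have been initialized; extra keyword arguments
# are appended verbatim as Foursquare query parameters:
# link = Link(option='search', location='52.520,13.405', query='data science')
# url = link.venue(radius=50000, limit=80)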
# Functions:
#################################################################
# function to avoid generating testing variables of json indenxed files (only for test):
def view_json(var):
# r=requests.get("http://www.example.com/", headers={"content-type":"text"})
var = requests.get(var).json()
var = json.dumps(var, sort_keys=True, indent=2)
return print (var)
# function that extracts the category of the venue
def get_category_type(row):
try:
categories_list = row['categories']
except:
categories_list = row['venue.categories']
if len(categories_list) == 0:
return None
else:
return categories_list[0]['name']
# function to pass a link and get a request and a json data out of it
def get_df(link):
results = requests.get(link).json()
# print (results)
# Pretty printing:
json_string = json.dumps(results, sort_keys=True, indent=2)
# print(json_string)
# generate a python variable to access data:
jdata = json.loads(json_string)
jdata = jdata['response']['venues']
# jdata = jdata['response']['groups']
# view_json(jdata) # object of a self-made function which prints json beautified!
# lets import this data into pandas
dataframe = json_normalize(jdata)
# print (df.columns)
# print (df.head())
return dataframe
# keep only columns that include venue name, and anything that is associated with location
def filter_df(dataframe):
filtered_columns = ['name', 'categories'] + [col for col in dataframe.columns if col.startswith('location.')] + ['id']
dataframe = dataframe.loc[:, filtered_columns]
dataframe = dataframe.rename(columns=lambda x: re.sub('location.','',x))
return dataframe
def readJson(path):
with open(path, 'r') as file:
data = json.load(file)
return data
# Globals initialization
#########################################################################
# read json file and save it
id_key = readFile('credentials.json')
id_key = json.loads(id_key)
# inizializate id credentials
client_id = id_key["client_id"]
client_secret = id_key["client_secret"]
version = str(20180602)
# object for URL from foursquare's API
main_URL = 'https://api.foursquare.com/v2/'
# TODO: limit the data frame 'df' to a distance with 'me' and drop the rest
#Read Dataframe from CSV
df = pd.read_csv('./data/loc_cities_germany.csv', usecols={'City', 'State', 'Latitude', 'Longitude', 'Position'})
col_use = [
'id', 'name', 'categories', 'address', 'cc', 'city', 'country',
'formattedAddress', 'labeledLatLngs', 'lat', 'lng', 'state',
'postalCode']
i=0
categories = {}
cl_DSGE = pd.DataFrame()
cl_DMGE = pd.DataFrame()
cl_DAGE = pd.DataFrame()
address = ['city', 'state', 'country', 'lat', 'lng', 'postalCode']
bundeslaender_EN = [
"Baden-Württemberg",
"Bavaria",
"Berlin",
"Brandenburg",
"Bremen",
"Hamburg",
"Hessen",
"Lower Saxony",
"Mecklenburg-Vorpommern",
"North Rhine-Westphalia",
"Rhineland-Palatinate",
"Saarland",
"Saxony",
"Saxony-Anhalt",
"Schleswig-Holstein",
"Thuringia",
]
bundeslaender_DE = [
"Baden-Württemberg",
"Bayern",
"Berlin",
"Brandenburg",
"Bremen",
"Hamburg",
"Hessen",
"Niedersachsen",
"Mecklenburg-Vorpommern",
"Nordrhein-Westfalen",
"Rheinland-Pfalz",
"Saarland",
"Sachsen",
"Sachsen-Anhalt",
"Schleswig-Holstein",
"Thüringen",
]
geojson = readJson('data/test.json')
total = pd.DataFrame()
for pos in (df['Position']):
# Generate links for different query types out of Foursquare
datascienceGE = Link(option='search', location=pos, query='\"data science\"').venue(radius=50000,limit=80)
dataminingGE = Link(option='search', location=pos, query='\"data mining\"').venue(radius=50000,limit=80)
dataanalysisGE = Link(option='search', location=pos, query='\"data analysis\"').venue(radius=50000,limit=80)
try:
# convert first to json and secondly to pandas DataFrame
df_DSGE = get_df(datascienceGE)
# filter the category for each row
df_DSGE = filter_df(df_DSGE)
df_DSGE['categories'] = df_DSGE.apply(get_category_type, axis=1)
# drop nulls
df_DSGE = df_DSGE[ ((df_DSGE['city'].notnull()) & (df_DSGE['state'].notnull())) ]
# clusters:
'''
1) count values:
+ matches for every city
+ categories (need bins)
2) get the position of this city with Nominatim
3) save it in another dataframe (can be 'i'clustDSGE.csv)
'''
dummy = pd.get_dummies(df_DSGE['categories'])
if cl_DSGE.empty:
cl_DSGE = pd.concat([df_DSGE, dummy], axis=1)
else:
a = pd.concat([df_DSGE, dummy], axis=1)
cl_DSGE = a.append(cl_DSGE)
# instantiate a feature group for matches in the dataframe
dataSci = folium.map.FeatureGroup()
for latitude, longitude, companyname in zip(df_DSGE.lat, df_DSGE.lng, df_DSGE.name):
dataSci.add_child(
# Add markers to the map for every match
folium.CircleMarker(
[latitude, longitude],
radius=7, # define how big you want the circle markers to be
color='yellow',
fill=True,
fill_color='red',
fill_opacity=0.6
)
)
dataSci.add_child(
# add simple popup with the name of the company when clicked
folium.Marker(
location=[latitude, longitude],
icon=None,
popup=companyname,
)
)
mapGE.add_child(dataSci)
print ('{}: This city has {} results for data science'.format(df['City'].iloc[i], df_DSGE.shape[0]))
except Exception as e:
print ('E1000: {}, {}: This city has no results for data science'.format(e, df['City'].iloc[i]))
pass
try:
df_DMGE = get_df(dataminingGE)
df_DMGE = filter_df(df_DMGE)
df_DMGE['categories'] = df_DMGE.apply(get_category_type, axis=1)
df_DMGE = df_DMGE[ ((df_DMGE['city'].notnull()) & (df_DMGE['state'].notnull())) ]
dummy = pd.get_dummies(df_DMGE['categories'])
if cl_DMGE.empty:
cl_DMGE = pd.concat([df_DMGE, dummy], axis=1)
else:
b = pd.concat([df_DMGE, dummy], axis=1)
cl_DMGE = b.append(cl_DMGE)
dataMin = folium.map.FeatureGroup()
for latitude, longitude, companyname in zip(df_DMGE.lat, df_DMGE.lng, df_DMGE.name):
dataMin.add_child(
folium.CircleMarker(
[latitude, longitude],
radius=7,
color='yellow',
fill=True,
fill_color='green',
fill_opacity=0.6
)
)
dataMin.add_child(
folium.Marker(
location=[latitude, longitude],
icon=None,
popup=companyname,
)
)
mapGE.add_child(dataMin)
print ('{}: This city has {} results for data mining'.format(df['City'].iloc[i], df_DMGE.shape[0]))
except Exception as e:
print ('E1000: {}, {}: This city has no results for data mining'.format(e, df['City'].iloc[i]))
pass
try:
df_DAGE = get_df(dataanalysisGE)
df_DAGE = filter_df(df_DAGE)
df_DAGE['categories'] = df_DAGE.apply(get_category_type, axis=1)
df_DAGE = df_DAGE[ ((df_DAGE['city'].notnull()) & (df_DAGE['state'].notnull())) ]
dummy = pd.get_dummies(df_DAGE['categories'])
if cl_DAGE.empty:
cl_DAGE = pd.concat([df_DAGE, dummy], axis=1)
else:
c =
|
pd.concat([df_DAGE, dummy], axis=1)
|
pandas.concat
|
# random forest regression tutorial at:
# https://github.com/WillKoehrsen/Data-Analysis/blob/master/random_forest_explained/Random%20Forest%20Explained.ipynb
import argparse
import os
import sys
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.tree import export_graphviz
import numpy as np
import pandas as pd
import pydot
# args
parser = argparse.ArgumentParser()
parser.add_argument("filename", help="CSV file")
args = parser.parse_args()
# data
df = pd.read_csv(args.filename)
print(df)
print()
# separate the output column
y_name = df.columns[-1]
y_df = df[y_name]
X_df = df.drop(y_name, axis=1)
# one-hot encode categorical features
X_df =
|
pd.get_dummies(X_df)
|
pandas.get_dummies
|
####
# Read in LSHTM results and perform inference on
####
import matplotlib
matplotlib.use('Agg')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import norm
from scipy.special import expit
from sys import argv
from datetime import timedelta, datetime
import pystan
import pickle
import os, glob
from Reff_functions import *
from Reff_constants import *
iterations=5000
chains=2
### Read in md surveys
surveys = pd.DataFrame()
##Read every "Barometer wave*.csv" file found by the glob below
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
surveys = surveys.append(pd.read_csv(file,parse_dates=['date']))
surveys = surveys.sort_values(by='date')
print("Latest Microdistancing survey is {}".format(surveys.date.values[-1]))
surveys.loc[surveys.state!='ACT','state'] = surveys.loc[surveys.state!='ACT','state'].map(states_initials).fillna(surveys.loc[surveys.state!='ACT','state'])
surveys['proportion'] = surveys['count']/surveys.respondents
surveys.date = pd.to_datetime(surveys.date)
always =surveys.loc[surveys.response=='Always'].set_index(["state",'date'])
always = always.unstack(['state'])
idx = pd.date_range('2020-03-01',pd.to_datetime("today"))
always = always.reindex(idx, fill_value=np.nan)
always.index.name = 'date'
#fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
always =always.fillna(method='bfill')
#assume values continue forward if survey hasn't completed
always = always.fillna(method='ffill')
always = always.stack(['state'])
#Zero out before first survey 20th March
always = always.reset_index().set_index('date')
always.loc[:'2020-03-20','count'] =0
always.loc[:'2020-03-20','respondents'] =0
always.loc[:'2020-03-20','proportion'] =0
always = always.reset_index().set_index(['state','date'])
survey_X = pd.pivot_table(data=always,
index='date',columns='state',values='proportion')
survey_counts_base =pd.pivot_table(data=always,
index='date',columns='state',values='count').drop(['Australia','Other'],axis=1).astype(int)
survey_respond_base = pd.pivot_table(data=always,
index='date',columns='state',values='respondents').drop(['Australia','Other'],axis=1).astype(int)
## Define the pystan model inline (a pre-compiled version saved on disk can be loaded instead, see the commented pickle.load below)
rho_model_gamma = """
data {
int N; //data length num days
int K; //Number of mobility indices
int j; //Number of states
matrix[N,j] Reff; //response
matrix[N,K] Mob[j]; //Mobility indices
matrix[N,K] Mob_std[j]; ///std of mobility
matrix[N,j] sigma2; //Variances of R_eff from previous study
vector[N] policy; //Indicators for post policy or not
matrix[N,j] local; //local number of cases
matrix[N,j] imported; //imported number of cases
int N_v; //length of VIC days
int j_v; //second wave states
matrix[N_v,j_v] Reff_v; //Reff for VIC in June
matrix[N_v,K] Mob_v[j_v]; //Mob for VIC June
matrix[N_v,K] Mob_v_std[j_v];// std of mobility
matrix[N_v,j_v] sigma2_v;// variance of R_eff from previous study
vector[N_v] policy_v;// micro distancing compliance
matrix[N_v,j_v] local_v; //local cases in VIC
matrix[N_v,j_v] imported_v; //imported cases in VIC
vector[N] count_md[j]; //count of always
vector[N] respond_md[j]; // num respondants
vector[N_v] count_md_v[j_v]; //count of always
vector[N_v] respond_md_v[j_v]; // num respondants
int map_to_state_index[j_v];// indices of second wave to map to first
int total_N_pv; //total number of data in sec wave, entire state first
vector[N_v] include_in_sec_wave[j_v]; // dates include in sec_wave
int pos_starts[j_v];//starting positions for each state
}
parameters {
vector[K] bet; //coefficients
real<lower=0> R_I; //base level imports,
real<lower=0> R_L; //base level local
vector<lower=0>[j] R_Li; //state level estimates
real<lower=0> sig; //state level variance
real<lower=0> theta_md; // md weighting
matrix<lower=0,upper=1>[N,j] prop_md; // proportion who are md'ing
vector<lower=0,upper=1>[total_N_pv] prop_md_v;
matrix<lower=0,upper=1>[N,j] brho; //estimate of proportion of imported cases
matrix<lower=0,upper=1>[N,K] noise[j];
//real<lower=0> R_temp;
vector<lower=0,upper=1>[total_N_pv] brho_v; //estimate of proportion of imported cases
//matrix<lower=0,upper=1>[N_v,K] noise_v[j_v];
}
transformed parameters {
matrix<lower=0>[N,j] mu_hat;
vector<lower=0>[total_N_pv] mu_hat_v;
matrix<lower=0>[N,j] md; //micro distancing
vector<lower=0>[total_N_pv] md_v;
for (i in 1:j) {
for (n in 1:N){
md[n,i] = pow(1+theta_md , -1*prop_md[n,i]);
mu_hat[n,i] = brho[n,i]*R_I + (1-brho[n,i])*2*R_Li[i]*(
(1-policy[n]) + md[n,i]*policy[n] )*inv_logit(
Mob[i][n,:]*(bet)); //mean estimate
}
}
for (i in 1:j_v){
int pos;
if (i==1){
pos=1;
}
else {
//Add 1 to get to start of new group, not end of old group
pos =pos_starts[i-1]+1;
}
for (n in 1:N_v){
if (include_in_sec_wave[i][n]==1){
md_v[pos] = pow(1+theta_md ,-1*prop_md_v[pos]);
if (map_to_state_index[i] == 5) {
mu_hat_v[pos] = brho_v[pos]*R_I + (1-brho_v[pos])*(2*R_Li[
map_to_state_index[i]
])*(
(1-policy_v[n]) + md_v[pos]*policy_v[n] )*inv_logit(
Mob_v[i][n,:]*(bet)
); //mean estimate
}
else {
mu_hat_v[pos] = brho_v[pos]*R_I + (1-brho_v[pos])*2*R_Li[
map_to_state_index[i]
]*(
(1-policy_v[n]) + md_v[pos]*policy_v[n] )*inv_logit(
Mob_v[i][n,:]*(bet)); //mean estimate
}
pos += 1;
}
}
}
}
model {
int pos2;
bet ~ normal(0,1);
theta_md ~ lognormal(0,0.5);
//md ~ beta(7,3);
R_L ~ gamma(1.8*1.8/0.05,1.8/0.05); //hyper-prior
R_I ~ gamma(0.5*0.5/.2,0.5/.2);
sig ~ exponential(200); //mean is 1/50=0.02
R_Li ~ gamma( R_L*R_L/sig, R_L/sig); //partial pooling of state level estimates
for (i in 1:j) {
for (n in 1:N){
prop_md[n,i] ~ beta(1 + count_md[i][n], 1+ respond_md[i][n] - count_md[i][n]);
brho[n,i] ~ beta( 1+ imported[n,i], 1+ local[n,i]); //ratio imported/ (imported + local)
//noise[i][n,:] ~ normal( Mob[i][n,:] , Mob_std[i][n,:]);
mu_hat[n,i] ~ gamma( Reff[n,i]*Reff[n,i]/(sigma2[n,i]), Reff[n,i]/sigma2[n,i]); //Stan uses shape/inverse scale
}
}
for (i in 1:j_v){
if (i==1){
pos2=1;
}
else {
//Add 1 to get to start of new group, not end of old group
pos2 =pos_starts[i-1]+1;
}
for (n in 1:N_v){
if (include_in_sec_wave[i][n]==1){
prop_md_v[pos2] ~ beta(1 + count_md_v[i][n], 1+ respond_md_v[i][n] - count_md_v[i][n]);
brho_v[pos2] ~ beta( 1+ imported_v[n,i], 1+ local_v[n,i]); //ratio imported/ (imported + local)
//noise_v[i][n,:] ~ normal( Mob_v[i][n,:] , Mob_v_std[i][n,:]);
mu_hat_v[pos2] ~ gamma( Reff_v[n,i]*Reff_v[n,i]/(sigma2_v[n,i]), Reff_v[n,i]/sigma2_v[n,i]);
pos2+=1;
}
}
}
}
"""
sm_pol_gamma = pystan.StanModel(
model_code = rho_model_gamma,
model_name ='gamma_pol_state'
)
#sm_pol_gamma = pickle.load(open('model/sm_pol_gamma.pkl','rb'))
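# Optional caching sketch (assumes a 'model/' directory exists): compiling the Stan
# model is slow, so it could be pickled once and the pickle.load line above used on
# later runs, e.g.:
# with open('model/sm_pol_gamma.pkl', 'wb') as f:
#     pickle.dump(sm_pol_gamma, f)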
###Create dates
try:
cprs_start_date = pd.to_datetime(argv[1])#2020-04-01')
cprs_end_date = pd.to_datetime(argv[1])#'2020-07-22')
except:
print("Running full validation dates")
cprs_start_date = pd.to_datetime('2020-04-01')
cprs_end_date = pd.to_datetime('2020-10-07')
cprs_all_dates = pd.date_range(cprs_start_date, cprs_end_date, freq='7D')
cprs_dates = cprs_all_dates[cprs_all_dates!='2020-09-09']
#if argv[1]=='2020-09-09':
# print("This won't run due to cprs date definitions, please comment out line 215.")
for data_date in cprs_dates:
print(data_date)
print(data_date.strftime('%d%b%Y'))
#data_date = pd.to_datetime('2020-08-17')
## also filter Reff by 10 days!
## need to truncate most recent days of Reff
#########
### here is where I can loop over to perform inference##
#######
# Reff estimates from Price et al 2020
#df_Reff = read_in_Reff() #estimates up to 14th April
#df_Reff = read_in_LSHTM()#read_in_Reff()
#df_Reff = df_Reff.loc[df_Reff.date_of_analysis==data_date.strftime("%Y-%m-%d")]
df_Reff = pd.read_csv("results/EpyReff/Reff"+
data_date.strftime("%Y-%m-%d")+"tau_4.csv",parse_dates=['INFECTION_DATES'])
df_Reff['date'] = df_Reff.INFECTION_DATES
df_Reff['state'] = df_Reff.STATE
print('data loaded')
if data_date <
|
pd.to_datetime('2020-06-02')
|
pandas.to_datetime
|
import pandas as pd
import warnings
import numpy as np
from matplotlib import pyplot as plt
warnings.simplefilter("ignore")
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
online = False # if True: download xml files from github URL
# be careful: online version will not work if requirements from requirements.txt are not satisfied!
if online:
url_link_302_19 = 'https://github.com/Hidancloud/risk_management_debt_forecast/' \
'blob/main/data_folder/302-19.xlsx?raw=true'
url_link_01_13_F_Debt_sme_subj = 'https://github.com/Hidancloud/risk_management_debt_forecast/' \
'blob/main/data_folder/01_13_F_Debt_sme_subj.xlsx?raw=true'
url_link_Interpolationexp2 = 'https://github.com/Hidancloud/risk_management_debt_forecast/' \
'blob/main/data_folder/Interpolationexp2.xlsx?raw=true'
def extract_data_before_2019y():
"""
Extracts data from the 302-19.xlsx file
:return: pandas dataframe with columns 'Дата', 'Задолженность', 'Просроченная задолженность'
"""
if online:
return pd.read_excel(url_link_302_19, usecols=[0, 5, 11], skiprows=list(range(7)),
names=['Дата', 'Задолженность', 'Просроченная задолженность'])
return pd.read_excel('data_folder/302-19.xlsx', usecols=[0, 5, 11], skiprows=list(range(7)),
names=['Дата', 'Задолженность', 'Просроченная задолженность'])
def extract_data_after_2018():
"""
Extracts data from the 01_13_F_Debt_sme_subj.xlsx file
:return: pandas dataframe with columns 'Дата', 'Задолженность', 'Просроченная задолженность'
"""
# read Задолженность from the page МСП Итого
# .T to make rows for entities and columns for properties
if online:
after_19y_debt = pd.read_excel(url_link_01_13_F_Debt_sme_subj, skiprows=1, nrows=1,
sheet_name='МСП Итого ').T
else:
after_19y_debt = pd.read_excel('data_folder/01_13_F_Debt_sme_subj.xlsx',
skiprows=1, nrows=1, sheet_name='МСП Итого ').T
after_19y_debt.reset_index(inplace=True)
# remove an odd row after transpose
after_19y_debt.drop(labels=0, axis=0, inplace=True)
after_19y_debt.columns = before_19y.columns[:2]
# change types of the columns for convenience
after_19y_debt[after_19y_debt.columns[0]] = pd.to_datetime(after_19y_debt[after_19y_debt.columns[0]])
after_19y_debt = after_19y_debt.astype({after_19y_debt.columns[1]: 'int32'}, copy=False)
# read Просроченная задолженность from the page МСП в т.ч. просроч.
if online:
after_19y_prosro4eno = pd.read_excel(url_link_01_13_F_Debt_sme_subj, skiprows=2, nrows=0,
sheet_name='МСП в т.ч. просроч.').T
else:
after_19y_prosro4eno = pd.read_excel('data_folder/01_13_F_Debt_sme_subj.xlsx', skiprows=2, nrows=0,
sheet_name='МСП в т.ч. просроч.').T
after_19y_prosro4eno.reset_index(inplace=True)
# remove an odd row after the transpose
after_19y_prosro4eno.drop(labels=0, axis=0, inplace=True)
# name the column
after_19y_prosro4eno.columns = ['Просроченная задолженность']
# concatenate Задолженность and Просроченная задолженность in one table and return it
return pd.concat([after_19y_debt, after_19y_prosro4eno], axis=1)
def extract_macro_parameters():
if online:
return pd.read_excel(url_link_Interpolationexp2, index_col=0, parse_dates=True)
return pd.read_excel('data_folder/Interpolationexp2.xlsx', index_col=0, parse_dates=True)
def transform_to_quarters_format(custom_table, date_column_name='Дата',
already_3month_correct_step=False):
"""
Transforms table from month format to quarters taking the last month element for each quarter
:param custom_table: Pandas dataframe
:param date_column_name: name of a column with dates
:param already_3month_correct_step: if the time step between custom_table rows is a 3 month instead of month
and correspond to 3, 6, 9, 12 months
:return: table in quarter format, where each column value is taken from the last month of its quarter
"""
if not already_3month_correct_step:
# quarter of the first month in the data
first_quarter = (custom_table[date_column_name].dt.month[0] - 1) // 3 + 1
# creates array [1, 1, 1, 2, 2, 2, 3, 3, 3, ...], so i-th month will be from corresponding quarter
# in case when each row corresponds to a month
correct_quarters = np.ones((custom_table.shape[0] // 3 + 3, 3), dtype=int).cumsum(axis=0).flatten()
# assumption: the data is not missing a single month
# then quarters are from correct_quarters continuous part
custom_table['Квартал'] = correct_quarters[3*(first_quarter-1): custom_table.shape[0] + 3*(first_quarter-1)]
else:
# in case when each row corresponds to either 3, 6, 9 or 12 month (file with macro data)
debt_table_quarters = custom_table.copy()
debt_table_quarters.reset_index(inplace=True)
debt_table_quarters['Квартал'] = custom_table.index.month // 3
return debt_table_quarters
# take the last value (last month value) inside each quarter and assign those values to the resulting table
group = custom_table.groupby('Квартал')
debt_table_quaters_features = dict()
for feature in custom_table.columns:
if feature != date_column_name and feature != 'Квартал':
debt_table_quaters_features[feature] = group[feature].nth(2)
debt_table_quarters =
|
pd.concat(debt_table_quaters_features, axis=1)
|
pandas.concat
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 15:21:55 2019
@author: raryapratama
"""
#%%
#Step (1): Import Python libraries, set land conversion scenarios general parameters
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
import seaborn as sns
import pandas as pd
#RIL Scenario
##Set parameters
#Parameters for primary forest
initAGB = 233 #source: van Beijma et al. (2018)
initAGB_min = 233-72
initAGB_max = 233 + 72
tf = 201
#Parameters for residue decomposition (Source: De Rosa et al., 2017)
a = 0.082
b = 2.53
#%%
#Step (2_1): C loss from the harvesting/clear cut
#df1 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_S1')
df2 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_S2')
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_E')
t = range(0,tf,1)
#c_loss_S1 = df1['C_loss'].values
c_firewood_energy_S2 = df2['Firewood_other_energy_use'].values
c_firewood_energy_E = dfE['Firewood_other_energy_use'].values
#%%
#Step (2_2): C loss from the harvesting/clear cut as wood pellets
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_E')
c_pellets_E = dfE['Wood_pellets'].values
#%%
#Step (3): Aboveground biomass (AGB) decomposition
#S2
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_S2')
tf = 201
t = np.arange(tf)
def decomp_S2(t,remainAGB_S2):
return (1-(1-np.exp(-a*t))**b)*remainAGB_S2
#set zero matrix
output_decomp_S2 = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_S2 in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_S2[i:,i] = decomp_S2(t[:len(t)-i],remain_part_S2)
print(output_decomp_S2[:,:4])
#find the yearly emissions from decomposition by taking the differences between consecutive elements in each column of 'output_decomp_S2'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S2 = np.zeros((len(t)-1,len(df['C_remainAGB'].values-1)))
i = 0
while i < tf:
subs_matrix_S2[:,i] = np.diff(output_decomp_S2[:,i])
i = i + 1
print(subs_matrix_S2[:,:4])
print(len(subs_matrix_S2))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S2 = subs_matrix_S2.clip(max=0)
print(subs_matrix_S2[:,:4])
#make the results as absolute values
subs_matrix_S2 = abs(subs_matrix_S2)
print(subs_matrix_S2[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S2 = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_S2)
subs_matrix_S2 = np.vstack((zero_matrix_S2, subs_matrix_S2))
print(subs_matrix_S2[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S2 = (tf,1)
decomp_tot_S2 = np.zeros(matrix_tot_S2)
i = 0
while i < tf:
decomp_tot_S2[:,0] = decomp_tot_S2[:,0] + subs_matrix_S2[:,i]
i = i + 1
print(decomp_tot_S2[:,0])
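# The same decompose -> diff -> clip -> re-accumulate block is repeated below for
# S2_C, E and E_C. A minimal sketch (not used by the original script) of how it
# could be wrapped in one helper; it assumes the same 'C_remainAGB' column layout
# and the decay parameters a, b and horizon tf defined in Step (1).
def decomp_total_sketch(df_scenario, a=a, b=b, tf=tf):
    t_local = np.arange(tf)
    remain = df_scenario['C_remainAGB'].values
    out = np.zeros((tf, len(remain)))
    for i, part in enumerate(remain):
        # remaining AGB of cohort i, decomposing from year i onward
        out[i:, i] = (1 - (1 - np.exp(-a * t_local[:tf - i])) ** b) * part
    # yearly emissions = absolute value of the negative year-to-year differences
    subs = np.abs(np.diff(out, axis=0).clip(max=0))
    subs = np.vstack((np.zeros((1, len(remain))), subs))
    return subs.sum(axis=1)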
#S2_C
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_C_S2')
tf = 201
t = np.arange(tf)
def decomp_S2_C(t,remainAGB_S2_C):
return (1-(1-np.exp(-a*t))**b)*remainAGB_S2_C
#set zero matrix
output_decomp_S2_C = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_S2_C in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_S2_C[i:,i] = decomp_S2_C(t[:len(t)-i],remain_part_S2_C)
print(output_decomp_S2_C[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_S2_C = np.zeros((len(t)-1, len(df['C_remainAGB'].values)))
i = 0
while i < tf:
subs_matrix_S2_C[:,i] = np.diff(output_decomp_S2_C[:,i])
i = i + 1
print(subs_matrix_S2_C[:,:4])
print(len(subs_matrix_S2_C))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S2_C = subs_matrix_S2_C.clip(max=0)
print(subs_matrix_S2_C[:,:4])
#make the results as absolute values
subs_matrix_S2_C = abs(subs_matrix_S2_C)
print(subs_matrix_S2_C[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S2_C = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_S2_C)
subs_matrix_S2_C = np.vstack((zero_matrix_S2_C, subs_matrix_S2_C))
print(subs_matrix_S2_C[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S2_C = (tf,1)
decomp_tot_S2_C = np.zeros(matrix_tot_S2_C)
i = 0
while i < tf:
decomp_tot_S2_C[:,0] = decomp_tot_S2_C[:,0] + subs_matrix_S2_C[:,i]
i = i + 1
print(decomp_tot_S2_C[:,0])
#E
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_E')
tf = 201
t = np.arange(tf)
def decomp_E(t,remainAGB_E):
return (1-(1-np.exp(-a*t))**b)*remainAGB_E
#set zero matrix
output_decomp_E = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_E in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_E[i:,i] = decomp_E(t[:len(t)-i],remain_part_E)
print(output_decomp_E[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_E = np.zeros((len(t)-1, len(df['C_remainAGB'].values)))
i = 0
while i < tf:
subs_matrix_E[:,i] = np.diff(output_decomp_E[:,i])
i = i + 1
print(subs_matrix_E[:,:4])
print(len(subs_matrix_E))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_E = subs_matrix_E.clip(max=0)
print(subs_matrix_E[:,:4])
#make the results as absolute values
subs_matrix_E = abs(subs_matrix_E)
print(subs_matrix_E[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_E = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_E)
subs_matrix_E = np.vstack((zero_matrix_E, subs_matrix_E))
print(subs_matrix_E[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_E = (tf,1)
decomp_tot_E = np.zeros(matrix_tot_E)
i = 0
while i < tf:
decomp_tot_E[:,0] = decomp_tot_E[:,0] + subs_matrix_E[:,i]
i = i + 1
print(decomp_tot_E[:,0])
#E_C
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_C_E')
tf = 201
t = np.arange(tf)
def decomp_E_C(t,remainAGB_E_C):
return (1-(1-np.exp(-a*t))**b)*remainAGB_E_C
#set zero matrix
output_decomp_E_C = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_E_C in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_E_C[i:,i] = decomp_E_C(t[:len(t)-i],remain_part_E_C)
print(output_decomp_E_C[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_E_C = np.zeros((len(t)-1, len(df['C_remainAGB'].values)))
i = 0
while i < tf:
subs_matrix_E_C[:,i] = np.diff(output_decomp_E_C[:,i])
i = i + 1
print(subs_matrix_E_C[:,:4])
print(len(subs_matrix_E_C))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_E_C = subs_matrix_E_C.clip(max=0)
print(subs_matrix_E_C[:,:4])
#make the results as absolute values
subs_matrix_E_C = abs(subs_matrix_E_C)
print(subs_matrix_E_C[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_E_C = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_E_C)
subs_matrix_E_C = np.vstack((zero_matrix_E_C, subs_matrix_E_C))
print(subs_matrix_E_C[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_E_C = (tf,1)
decomp_tot_E_C = np.zeros(matrix_tot_E_C)
i = 0
while i < tf:
decomp_tot_E_C[:,0] = decomp_tot_E_C[:,0] + subs_matrix_E_C[:,i]
i = i + 1
print(decomp_tot_E_C[:,0])
#plotting
t = np.arange(0,tf)
#plt.plot(t,decomp_tot_S1,label='S1')
plt.plot(t,decomp_tot_S2,label='S2')
plt.plot(t,decomp_tot_E,label='E')
plt.plot(t,decomp_tot_S2_C,label='S2_C')
plt.plot(t,decomp_tot_E_C,label='E_C')
plt.xlim(0,200)
plt.legend(loc='best', frameon=False)
plt.show()
#%%
#Step (4): Dynamic stock model of in-use wood materials
from dynamic_stock_model import DynamicStockModel
df2 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_S2')
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_E')
#product lifetime
#building materials
B = 35
TestDSM2 = DynamicStockModel(t = df2['Year'].values, i = df2['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSME = DynamicStockModel(t = dfE['Year'].values, i = dfE['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
CheckStr2, ExitFlag2 = TestDSM2.dimension_check()
CheckStrE, ExitFlagE = TestDSME.dimension_check()
Stock_by_cohort2, ExitFlag2 = TestDSM2.compute_s_c_inflow_driven()
Stock_by_cohortE, ExitFlagE = TestDSME.compute_s_c_inflow_driven()
S2, ExitFlag2 = TestDSM2.compute_stock_total()
SE, ExitFlagE = TestDSME.compute_stock_total()
O_C2, ExitFlag2 = TestDSM2.compute_o_c_from_s_c()
O_CE, ExitFlagE = TestDSME.compute_o_c_from_s_c()
O2, ExitFlag2 = TestDSM2.compute_outflow_total()
OE, ExitFlagE = TestDSME.compute_outflow_total()
DS2, ExitFlag2 = TestDSM2.compute_stock_change()
DSE, ExitFlagE = TestDSME.compute_stock_change()
Bal2, ExitFlag2 = TestDSM2.check_stock_balance()
BalE, ExitFlagE = TestDSME.check_stock_balance()
#print output flow
print(TestDSM2.o)
print(TestDSME.o)
plt.xlim(0,100)
plt.show()
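# A minimal sketch (not used by the original script) that wraps the
# DynamicStockModel call sequence above for an arbitrary scenario sheet; it only
# uses the methods and the 'Year' / 'Input_PF' columns already shown, and assumes
# the same 35-year normal lifetime distribution.
def run_dsm_sketch(df_scenario, lifetime=35):
    dsm = DynamicStockModel(t=df_scenario['Year'].values,
                            i=df_scenario['Input_PF'].values,
                            lt={'Type': 'Normal', 'Mean': np.array([lifetime]),
                                'StdDev': np.array([0.3 * lifetime])})
    dsm.dimension_check()
    dsm.compute_s_c_inflow_driven()
    dsm.compute_stock_total()
    dsm.compute_o_c_from_s_c()
    dsm.compute_outflow_total()
    dsm.compute_stock_change()
    dsm.check_stock_balance()
    return dsm.o  # outflow of wood products per year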
#%%
#Step (5): Biomass growth
# RIL_Scenario biomass growth, following RIL disturbance
#recovery time, follow the one by Alice-guier
#H = [M, E, C_M, C_E]
#LD0 = [M, E, C_M, C_E]
H = [5.78, 7.71, 5.78, 7.71]
LD0 = [53.46-5.78, 53.46-7.71, 29.29-5.78, 29.29-7.71]
s = 1.106
#RIL
RT = ((H[0] + LD0[0])*100/initAGB)**s
print(RT)
#growth per year (Mg C/ha.yr)
gpy = (H[0] + LD0[0])/RT
print(gpy)
tf_RIL_S1 = 36
A1 = range(0,tf_RIL_S1,1)
#calculate the disturbed natural forest recovery carbon regrowth over time following RIL
def Y_RIL_S1(A1):
return 44/12*1000*gpy*A1
seq_RIL = np.array([Y_RIL_S1(A1i) for A1i in A1])
print(len(seq_RIL))
print(seq_RIL)
##3 times 35-year cycle of new AGB following logging (RIL)
counter_35y = range(0,6,1)
y_RIL = []
for i in counter_35y:
y_RIL.append(seq_RIL)
flat_list_RIL = []
for sublist in y_RIL:
for item in sublist:
flat_list_RIL.append(item)
#the length of the list is now 216, so we remove the last 15 elements of the list to make the len=tf
flat_list_RIL = flat_list_RIL[:len(flat_list_RIL)-15]
print(flat_list_RIL)
#plotting
t = np.arange(0,tf,1)
plt.xlim([0, 200])
plt.plot(t, flat_list_RIL, color='darkviolet')
#yearly sequestration
## RIL (35-year cycle)
#find the yearly sequestration by calculating the differences between elements in list 'flat_list_RIL (https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_RIL = [p - q for q, p in zip(flat_list_RIL, flat_list_RIL[1:])]
#since there is no sequestration between the replanting year (e.g., year 35 to 36), we have to replace negative numbers in 'flat_list_RIL' with 0 values
flat_list_RIL = [0 if i < 0 else i for i in flat_list_RIL]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_RIL.insert(0,var)
#make 'flat_list_RIL' elements negative numbers to denote sequestration
flat_list_RIL = [ -x for x in flat_list_RIL]
print(flat_list_RIL)
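# The 35-year regrowth cycle and the yearly-sequestration conversion are repeated
# verbatim for RIL_C below. A minimal sketch (not used by the original script) of
# the same steps as one helper; the growth rate, 6-cycle repetition and trim-to-tf
# convention are taken from the code above.
def yearly_sequestration_sketch(growth_per_year, cycle_years=36, n_cycles=6, horizon=tf):
    one_cycle = [44 / 12 * 1000 * growth_per_year * a_i for a_i in range(cycle_years)]
    cumulative = (one_cycle * n_cycles)[:horizon]
    yearly = [p - q for q, p in zip(cumulative, cumulative[1:])]
    yearly = [0 if v < 0 else v for v in yearly]   # no uptake in replanting years
    yearly.insert(0, 0)                            # no sequestration in year 0
    return [-v for v in yearly]                    # negative values denote sequestration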
#RIL_C
RT_C = ((H[2] + LD0[2])*100/initAGB)**s
print(RT_C)
#growth per year (Mg C/ha.yr)
gpy_C = (H[2] + LD0[2])/RT_C
print(gpy_C)
tf_RIL_C = 36
A1 = range(0,tf_RIL_C,1)
#calculate the disturbed natural forest recovery carbon regrowth over time following RIL
def Y_RIL_C(A1):
return 44/12*1000*gpy_C*A1
seq_RIL_C = np.array([Y_RIL_C(A1i) for A1i in A1])
print(len(seq_RIL_C))
print(seq_RIL_C)
##3 times 35-year cycle of new AGB following logging (RIL)
counter_35y = range(0,6,1)
y_RIL_C = []
for i in counter_35y:
y_RIL_C.append(seq_RIL_C)
flat_list_RIL_C = []
for sublist_C in y_RIL_C:
for item in sublist_C:
flat_list_RIL_C.append(item)
#the length of the list is now 216, so we remove the last 15 elements of the list to make the len=tf
flat_list_RIL_C = flat_list_RIL_C[:len(flat_list_RIL_C)-15]
#plotting
t = np.arange(0,tf,1)
plt.xlim([0, 200])
plt.plot(t, flat_list_RIL_C, color='darkviolet')
#yearly sequestration
## RIL (35-year cycle)
#find the yearly sequestration by calculating the differences between elements in list 'flat_list_RIL (https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_RIL_C = [p - q for q, p in zip(flat_list_RIL_C, flat_list_RIL_C[1:])]
#since there is no sequestration between the replanting year (e.g., year 35 to 36), we have to replace negative numbers in 'flat_list_RIL' with 0 values
flat_list_RIL_C = [0 if i < 0 else i for i in flat_list_RIL_C]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_RIL_C.insert(0,var)
#make 'flat_list_RIL' elements negative numbers to denote sequestration
flat_list_RIL_C = [ -x for x in flat_list_RIL_C]
print(flat_list_RIL_C)
#%%
#Step (5_1): Biomass C sequestration of the remaining unharvested block
df2 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_S2')
df2_C = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_C_S2')
dfE= pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_E')
dfE_C = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_C_E')
t = range(0,tf,1)
RIL_seq_S2= df2['RIL_seq'].values
RIL_seq_C_S2= df2_C['RIL_seq'].values
RIL_seq_E = dfE['RIL_seq'].values
RIL_seq_C_E = dfE_C['RIL_seq'].values
#%%
#Step (6): post-harvest processing of wood
#post-harvest wood processing
df2 =
|
pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_StLF.xlsx', 'RIL_S2')
|
pandas.read_excel
|
from modules.Neo4jDataAccess import Neo4jDataAccess
from neo4j import GraphDatabase, basic_auth
import pandas as pd
import pytest
import os
import sys
from pathlib import Path
class TestNeo4jDataAccess:
@classmethod
def setup_class(cls):
cls.creds = [
{
"type": "writer",
"creds": {
"host": "localhost",
"port": "7687",
"user": "writer",
"password": "<PASSWORD>"
}
},
{
"type": "reader",
"creds": {
"host": "localhost",
"port": "7687",
"user": "reader",
"password": "<PASSWORD>"
}
}
]
data = [{'tweet_id': 1, 'text': 'Tweet 1', 'hydrated': 'FULL'},
{'tweet_id': 2, 'text': 'Tweet 2', 'hydrated': 'FULL'},
{'tweet_id': 3, 'text': 'Tweet 3'},
{'tweet_id': 4, 'text': 'Tweet 4', 'hydrated': 'PARTIAL'},
{'tweet_id': 5, 'text': 'Tweet 5', 'hydrated': 'PARTIAL'},
]
traversal = '''UNWIND $tweets AS t
MERGE (tweet:Tweet {id:t.tweet_id})
ON CREATE SET
tweet.text = t.text,
tweet.hydrated = t.hydrated
'''
res = list(filter(lambda c: c["type"] == 'writer', cls.creds))
creds = res[0]["creds"]
uri = f'bolt://{creds["host"]}:{creds["port"]}'
graph = GraphDatabase.driver(
uri, auth=basic_auth(creds['user'], creds['password']), encrypted=False)
try:
with graph.session() as session:
session.run(traversal, tweets=data)
cls.ids =
|
pd.DataFrame({'id': [1, 2, 3, 4, 5]})
|
pandas.DataFrame
|
import platform
import sys
import pandas as pd
from datetime import date, timedelta
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
REPORTS_URL = 'https://safety2.oit.ncsu.edu//newblotter.asp'
CHROME_URL = 'http://127.0.0.1:4444/wd/hub'
def make_driver():
"""Returns a selenium webdriver.
Returns:
A webdriver for either a Windows Chrome install
or a remote instance of Chrome on localhost:4444.
"""
options = Options()
options.headless = True
os = platform.system()
if os == 'Linux':
return webdriver.Remote(CHROME_URL, DesiredCapabilities.CHROME, options=options)
elif os == 'Windows':
return webdriver.Chrome('./chromedriver', options=options)
else:
print(f'scraper.py does not support {os}.')
sys.exit()
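# A hypothetical usage sketch (not part of the original module), assuming the
# get_day() helper defined below: collect reports for the last few days and
# combine them into a single dataframe.
def get_recent_reports(num_days=3):
    driver = make_driver()
    days = [date.today() - timedelta(days=n) for n in range(num_days)]
    frames = [get_day(d, driver) for d in days]
    driver.quit()
    return pd.concat(frames, ignore_index=True)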
def get_day(day, driver):
"""Returns a dataframe of reports for a given date.
Args:
day: The Date to get reports for.
driver: The webdriver to use.
Returns:
A dataframe of police reports.
"""
close_driver = False
    if driver is None:
driver = make_driver()
close_driver = True
driver.get(REPORTS_URL)
driver.set_page_load_timeout(20)
search_box = driver.find_element_by_xpath('//*[@id="NOTDATE"]')
submit = driver.find_element_by_xpath('/html/body/form/p[1]/input[2]')
search_box.clear()
search_box.send_keys(str(day.strftime("%x")))
submit.click()
table = pd.read_html(driver.current_url)
df =
|
pd.concat(table)
|
pandas.concat
|
import rdflib
from datetime import datetime
from nanopub import Nanopublication
import logging
import sys
import pandas as pd
import configparser
import hashlib
from .autonomic.update_change_service import UpdateChangeService
from whyis.namespace import whyis, prov, sio
class Interpreter(UpdateChangeService):
kb = ":"
cb_fn = None
timeline_fn = None
data_fn = None
prefix_fn = "prefixes.txt"
prefixes = {}
studyRef = None
unit_code_list = []
unit_uri_list = []
unit_label_list = []
explicit_entry_list = []
virtual_entry_list = []
explicit_entry_tuples = []
virtual_entry_tuples = []
cb_tuple = {}
timeline_tuple = {}
config = configparser.ConfigParser()
def __init__(self, config_fn=None): # prefixes should be
if config_fn is not None:
try:
self.config.read(config_fn)
except Exception as e:
logging.exception("Error: Unable to open configuration file: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
if self.config.has_option('Prefixes', 'prefixes'):
self.prefix_fn = self.config.get('Prefixes', 'prefixes')
# prefix_file = open(self.prefix_fn,"r")
# self.prefixes = prefix_file.readlines()
prefix_file = pd.read_csv(self.prefix_fn, dtype=object)
try:
for row in prefix_file.itertuples():
self.prefixes[row.prefix] = row.url
except Exception as e:
logging.exception("Error: Something went wrong when trying to read the Prefix File: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
if self.config.has_option('Prefixes', 'base_uri'):
self.kb = self.config.get('Prefixes', 'base_uri')
if self.config.has_option('Source Files', 'dictionary'):
dm_fn = self.config.get('Source Files', 'dictionary')
try:
dm_file = pd.read_csv(dm_fn, dtype=object)
try: # Populate virtual and explicit entry lists
for row in dm_file.itertuples():
if pd.isnull(row.Column):
logging.exception("Error: The SDD must have a column named 'Column'")
sys.exit(1)
if row.Column.startswith("??"):
self.virtual_entry_list.append(row)
else:
self.explicit_entry_list.append(row)
except Exception as e:
logging.exception(
"Error: Something went wrong when trying to read the Dictionary Mapping File: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
except Exception as e:
logging.exception("Error: The specified Dictionary Mapping file does not exist: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
if self.config.has_option('Source Files', 'codebook'):
self.cb_fn = self.config.get('Source Files', 'codebook')
if self.cb_fn is not None:
try:
cb_file = pd.read_csv(self.cb_fn, dtype=object)
try:
inner_tuple_list = []
for row in cb_file.itertuples():
if (pd.notnull(row.Column) and row.Column not in self.cb_tuple):
inner_tuple_list = []
inner_tuple = {}
inner_tuple["Code"] = row.Code
if pd.notnull(row.Label):
inner_tuple["Label"] = row.Label
if pd.notnull(row.Class):
inner_tuple["Class"] = row.Class
if "Resource" in row and pd.notnull(row.Resource):
inner_tuple["Resource"] = row.Resource
inner_tuple_list.append(inner_tuple)
self.cb_tuple[row.Column] = inner_tuple_list
except Exception as e:
logging.warning("Warning: Unable to process Codebook file: ")
if hasattr(e, 'message'):
logging.warning(e.message)
else:
logging.warning(e)
except Exception as e:
logging.exception("Error: The specified Codebook file does not exist: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
if self.config.has_option('Source Files', 'timeline'):
self.timeline_fn = self.config.get('Source Files', 'timeline')
if self.timeline_fn is not None:
try:
timeline_file = pd.read_csv(self.timeline_fn, dtype=object)
try:
inner_tuple_list = []
for row in timeline_file.itertuples():
if pd.notnull(row.Name) and row.Name not in self.timeline_tuple:
inner_tuple_list = []
inner_tuple = {}
inner_tuple["Type"] = row.Type
if pd.notnull(row.Label):
inner_tuple["Label"] = row.Label
if pd.notnull(row.Start):
inner_tuple["Start"] = row.Start
if pd.notnull(row.End):
inner_tuple["End"] = row.End
if pd.notnull(row.Unit):
inner_tuple["Unit"] = row.Unit
if pd.notnull(row.inRelationTo):
inner_tuple["inRelationTo"] = row.inRelationTo
inner_tuple_list.append(inner_tuple)
self.timeline_tuple[row.Name] = inner_tuple_list
except Exception as e:
logging.warning("Warning: Unable to process Timeline file: ")
if hasattr(e, 'message'):
logging.warning(e.message)
else:
logging.warning(e)
except Exception as e:
logging.exception("Error: The specified Timeline file does not exist: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
if self.config.has_option('Source Files', 'code_mappings'):
cmap_fn = self.config.get('Source Files', 'code_mappings')
code_mappings_reader = pd.read_csv(cmap_fn)
for code_row in code_mappings_reader.itertuples():
if pd.notnull(code_row.code):
self.unit_code_list.append(code_row.code)
if pd.notnull(code_row.uri):
self.unit_uri_list.append(code_row.uri)
if pd.notnull(code_row.label):
self.unit_label_list.append(code_row.label)
if self.config.has_option('Source Files', 'data_file'):
self.data_fn = self.config.get('Source Files', 'data_file')
def getInputClass(self):
return whyis.SemanticDataDictionary
def getOutputClass(self):
return whyis.SemanticDataDictionaryInterpretation
def get_query(self):
return '''SELECT ?s WHERE { ?s ?p ?o .} LIMIT 1\n'''
def process(self, i, o):
print("Processing SDD...")
self.app.db.store.nsBindings = {}
npub = Nanopublication(store=o.graph.store)
# prefixes={}
# prefixes.update(self.prefixes)
# prefixes.update(self.app.NS.prefixes)
self.writeVirtualEntryNano(npub)
self.writeExplicitEntryNano(npub)
self.interpretData(npub)
def parseString(self, input_string, delim):
my_list = input_string.split(delim)
my_list = [element.strip() for element in my_list]
return my_list
def rdflibConverter(self, input_word):
if "http" in input_word:
return rdflib.term.URIRef(input_word)
if ':' in input_word:
word_list = input_word.split(":")
term = self.prefixes[word_list[0]] + word_list[1]
return rdflib.term.URIRef(term)
return rdflib.Literal(input_word, datatype=rdflib.XSD.string)
def codeMapper(self, input_word):
unitVal = input_word
for unit_label in self.unit_label_list:
if unit_label == input_word:
unit_index = self.unit_label_list.index(unit_label)
unitVal = self.unit_uri_list[unit_index]
for unit_code in self.unit_code_list:
if unit_code == input_word:
unit_index = self.unit_code_list.index(unit_code)
unitVal = self.unit_uri_list[unit_index]
return unitVal
def convertVirtualToKGEntry(self, *args):
if args[0][:2] == "??":
if self.studyRef is not None:
if args[0] == self.studyRef:
return self.prefixes[self.kb] + args[0][2:]
if len(args) == 2:
return self.prefixes[self.kb] + args[0][2:] + "-" + args[1]
return self.prefixes[self.kb] + args[0][2:]
if ':' not in args[0]:
# Check for entry in column list
for item in self.explicit_entry_list:
if args[0] == item.Column:
if len(args) == 2:
return self.prefixes[self.kb] + args[0].replace(" ", "_").replace(",", "").replace("(",
"").replace(
")", "").replace("/", "-").replace("\\", "-") + "-" + args[1]
return self.prefixes[self.kb] + args[0].replace(" ", "_").replace(",", "").replace("(", "").replace(
")", "").replace("/", "-").replace("\\", "-")
return '"' + args[0] + "\"^^xsd:string"
return args[0]
def checkVirtual(self, input_word):
try:
if input_word[:2] == "??":
return True
return False
except Exception as e:
logging.exception("Something went wrong in Interpreter.checkVirtual(): ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
def isfloat(self, value):
try:
float(value)
return True
except ValueError:
return False
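    # The provenance sections below build the same xsd:dateTime literal inline
    # several times. A minimal helper sketch (not called by the original code)
    # producing an equivalent literal from a single datetime.utcnow() call.
    def utcNowLiteral(self):
        now = datetime.utcnow()
        return rdflib.Literal(now.strftime("%Y-%m-%dT%H:%M:%SZ"),
                              datatype=rdflib.XSD.dateTime)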
def writeVirtualEntryNano(self, nanopub):
for item in self.virtual_entry_list:
virtual_tuple = {}
term = rdflib.term.URIRef(self.prefixes[self.kb] + str(item.Column[2:]))
nanopub.assertion.add((term, rdflib.RDF.type, rdflib.OWL.Class))
nanopub.assertion.add(
(term, rdflib.RDFS.label, rdflib.Literal(str(item.Column[2:]), datatype=rdflib.XSD.string)))
# Set the rdf:type of the virtual row to either the Attribute or Entity value (or else owl:Individual)
if (pd.notnull(item.Entity)) and (pd.isnull(item.Attribute)):
if ',' in item.Entity:
entities = self.parseString(item.Entity, ',')
for entity in entities:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(entity))))
else:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(item.Entity))))
virtual_tuple["Column"] = item.Column
virtual_tuple["Entity"] = self.codeMapper(item.Entity)
if virtual_tuple["Entity"] == "hasco:Study":
self.studyRef = item.Column
virtual_tuple["Study"] = item.Column
elif (pd.isnull(item.Entity)) and (pd.notnull(item.Attribute)):
if ',' in item.Attribute:
attributes = self.parseString(item.Attribute, ',')
for attribute in attributes:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(attribute))))
else:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(item.Attribute))))
virtual_tuple["Column"] = item.Column
virtual_tuple["Attribute"] = self.codeMapper(item.Attribute)
else:
logging.warning(
"Warning: Virtual entry not assigned an Entity or Attribute value, or was assigned both.")
virtual_tuple["Column"] = item.Column
# If there is a value in the inRelationTo column ...
if pd.notnull(item.inRelationTo):
virtual_tuple["inRelationTo"] = item.inRelationTo
# If there is a value in the Relation column but not the Role column ...
if (pd.notnull(item.Relation)) and (pd.isnull(item.Role)):
nanopub.assertion.add((term, self.rdflibConverter(item.Relation),
self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo))))
virtual_tuple["Relation"] = item.Relation
# If there is a value in the Role column but not the Relation column ...
elif (pd.isnull(item.Relation)) and (pd.notnull(item.Role)):
role = rdflib.BNode()
nanopub.assertion.add(
(role, rdflib.RDF.type, self.rdflibConverter(self.convertVirtualToKGEntry(item.Role))))
nanopub.assertion.add(
(role, sio.inRelationTo, self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo))))
nanopub.assertion.add((term, sio.hasRole, role))
virtual_tuple["Role"] = item.Role
# If there is a value in the Role and Relation columns ...
elif (pd.notnull(item.Relation)) and (pd.notnull(item.Role)):
virtual_tuple["Relation"] = item.Relation
virtual_tuple["Role"] = item.Role
nanopub.assertion.add(
(term, sio.hasRole, self.rdflibConverter(self.convertVirtualToKGEntry(item.Role))))
nanopub.assertion.add((term, self.rdflibConverter(item.Relation),
self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo))))
nanopub.provenance.add((term, prov.generatedAtTime, rdflib.Literal(
"{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year, datetime.utcnow().month,
datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(
datetime.utcnow().hour, datetime.utcnow().minute, datetime.utcnow().second) + "Z",
datatype=rdflib.XSD.dateTime)))
if pd.notnull(item.wasDerivedFrom):
if ',' in item.wasDerivedFrom:
derivedFromTerms = self.parseString(item.wasDerivedFrom, ',')
for derivedFromTerm in derivedFromTerms:
nanopub.provenance.add((term, prov.wasDerivedFrom,
self.rdflibConverter(self.convertVirtualToKGEntry(derivedFromTerm))))
else:
nanopub.provenance.add((term, prov.wasDerivedFrom,
self.rdflibConverter(self.convertVirtualToKGEntry(item.wasDerivedFrom))))
virtual_tuple["wasDerivedFrom"] = item.wasDerivedFrom
if pd.notnull(item.wasGeneratedBy):
if ',' in item.wasGeneratedBy:
generatedByTerms = self.parseString(item.wasGeneratedBy, ',')
for generatedByTerm in generatedByTerms:
nanopub.provenance.add((term, prov.wasGeneratedBy,
self.rdflibConverter(self.convertVirtualToKGEntry(generatedByTerm))))
else:
nanopub.provenance.add((term, prov.wasGeneratedBy,
self.rdflibConverter(self.convertVirtualToKGEntry(item.wasGeneratedBy))))
virtual_tuple["wasGeneratedBy"] = item.wasGeneratedBy
self.virtual_entry_tuples.append(virtual_tuple)
if self.timeline_fn is not None:
for key in self.timeline_tuple:
tl_term = self.rdflibConverter(self.convertVirtualToKGEntry(key))
nanopub.assertion.add((tl_term, rdflib.RDF.type, rdflib.OWL.Class))
for timeEntry in self.timeline_tuple[key]:
if 'Type' in timeEntry:
nanopub.assertion.add(
(tl_term, rdflib.RDFS.subClassOf, self.rdflibConverter(timeEntry['Type'])))
if 'Label' in timeEntry:
nanopub.assertion.add((tl_term, rdflib.RDFS.label,
rdflib.Literal(str(timeEntry['Label']), datatype=rdflib.XSD.string)))
if 'Start' in timeEntry and 'End' in timeEntry and timeEntry['Start'] == timeEntry['End']:
nanopub.assertion.add((tl_term, sio.hasValue, self.rdflibConverter(str(timeEntry['Start']))))
if 'Start' in timeEntry:
start_time = rdflib.BNode()
nanopub.assertion.add((start_time, sio.hasValue, self.rdflibConverter(str(timeEntry['Start']))))
nanopub.assertion.add((tl_term, sio.hasStartTime, start_time))
if 'End' in timeEntry:
end_time = rdflib.BNode()
nanopub.assertion.add((end_time, sio.hasValue, self.rdflibConverter(str(timeEntry['End']))))
nanopub.assertion.add((tl_term, sio.hasEndTime, end_time))
if 'Unit' in timeEntry:
nanopub.assertion.add(
(tl_term, sio.hasUnit, self.rdflibConverter(self.codeMapper(timeEntry['Unit']))))
if 'inRelationTo' in timeEntry:
nanopub.assertion.add((tl_term, sio.inRelationTo, self.rdflibConverter(
self.convertVirtualToKGEntry(timeEntry['inRelationTo']))))
nanopub.provenance.add((tl_term, prov.generatedAtTime, rdflib.Literal(
"{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year, datetime.utcnow().month,
datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(
datetime.utcnow().hour, datetime.utcnow().minute, datetime.utcnow().second) + "Z",
datatype=rdflib.XSD.dateTime)))
def writeExplicitEntryNano(self, nanopub):
for item in self.explicit_entry_list:
explicit_entry_tuple = {}
term = rdflib.term.URIRef(self.prefixes[self.kb] + str(
item.Column.replace(" ", "_").replace(",", "").replace("(", "").replace(")", "").replace("/",
"-").replace(
"\\", "-")))
nanopub.assertion.add((term, rdflib.RDF.type, rdflib.OWL.Class))
if pd.notnull(item.Attribute):
if ',' in item.Attribute:
attributes = self.parseString(item.Attribute, ',')
for attribute in attributes:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(attribute))))
else:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(item.Attribute))))
explicit_entry_tuple["Column"] = item.Column
explicit_entry_tuple["Attribute"] = self.codeMapper(item.Attribute)
elif pd.notnull(item.Entity):
if ',' in item.Entity:
entities = self.parseString(item.Entity, ',')
for entity in entities:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(entity))))
else:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(item.Entity))))
explicit_entry_tuple["Column"] = item.Column
explicit_entry_tuple["Entity"] = self.codeMapper(item.Entity)
else:
nanopub.assertion.add((term, rdflib.RDFS.subClassOf, sio.Attribute))
explicit_entry_tuple["Column"] = item.Column
explicit_entry_tuple["Attribute"] = self.codeMapper("sio:Attribute")
logging.warning("Warning: Explicit entry not assigned an Attribute or Entity value.")
if pd.notnull(item.attributeOf):
nanopub.assertion.add(
(term, sio.isAttributeOf, self.rdflibConverter(self.convertVirtualToKGEntry(item.attributeOf))))
explicit_entry_tuple["isAttributeOf"] = self.convertVirtualToKGEntry(item.attributeOf)
else:
logging.warning("Warning: Explicit entry not assigned an isAttributeOf value.")
if pd.notnull(item.Unit):
nanopub.assertion.add(
(term, sio.hasUnit, self.rdflibConverter(self.convertVirtualToKGEntry(self.codeMapper(item.Unit)))))
explicit_entry_tuple["Unit"] = self.convertVirtualToKGEntry(self.codeMapper(item.Unit))
if pd.notnull(item.Time):
nanopub.assertion.add(
(term, sio.existsAt, self.rdflibConverter(self.convertVirtualToKGEntry(item.Time))))
explicit_entry_tuple["Time"] = item.Time
if pd.notnull(item.inRelationTo):
explicit_entry_tuple["inRelationTo"] = item.inRelationTo
# If there is a value in the Relation column but not the Role column ...
if (pd.notnull(item.Relation)) and (pd.isnull(item.Role)):
nanopub.assertion.add((term, self.rdflibConverter(item.Relation),
self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo))))
explicit_entry_tuple["Relation"] = item.Relation
# If there is a value in the Role column but not the Relation column ...
elif (pd.isnull(item.Relation)) and (pd.notnull(item.Role)):
role = rdflib.BNode()
nanopub.assertion.add(
(role, rdflib.RDF.type, self.rdflibConverter(self.convertVirtualToKGEntry(item.Role))))
nanopub.assertion.add(
(role, sio.inRelationTo, self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo))))
nanopub.assertion.add((term, sio.hasRole, role))
explicit_entry_tuple["Role"] = item.Role
# If there is a value in the Role and Relation columns ...
elif (pd.notnull(item.Relation)) and (pd.notnull(item.Role)):
nanopub.assertion.add(
(term, sio.hasRole, self.rdflibConverter(self.convertVirtualToKGEntry(item.Role))))
nanopub.assertion.add((term, self.rdflibConverter(item.Relation),
self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo))))
explicit_entry_tuple["Relation"] = item.Relation
explicit_entry_tuple["Role"] = item.Role
if ("Label" in item and pd.notnull(item.Label)):
nanopub.assertion.add((term, rdflib.RDFS.label, self.rdflibConverter(item.Label)))
explicit_entry_tuple["Label"] = item.Label
if ("Comment" in item and pd.notnull(item.Comment)):
nanopub.assertion.add((term, rdflib.RDFS.comment, self.rdflibConverter(item.Comment)))
explicit_entry_tuple["Comment"] = item.Comment
nanopub.provenance.add((term, prov.generatedAtTime, rdflib.Literal(
"{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year, datetime.utcnow().month,
datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(
datetime.utcnow().hour, datetime.utcnow().minute, datetime.utcnow().second) + "Z",
datatype=rdflib.XSD.dateTime)))
if
|
pd.notnull(item.wasDerivedFrom)
|
pandas.notnull
|
#!/usr/bin/env python
# coding: utf-8
# # ResCenterNet Trial
#
# I am very new to these concepts, so I am experimenting by modifying this amazing
# public kernel by Ruslan (probably the only public 3D-model-related one):
# https://www.kaggle.com/hocop1/centernet-baseline
#
# Most of the code is borrowed from there. Some other code comes from an OFT
# implementation on GitHub, but I don't know what OFT is, so I have not implemented it yet.
#
# My current score is not from this kernel (there are some errors in it), but from a
# simple architecture modification of the original public kernel.
#
# In[1]:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import cv2
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
from functools import reduce
import os
from sklearn.model_selection import train_test_split
from scipy.optimize import minimize
from tqdm.auto import tqdm as tq
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import Dataset, DataLoader
from torchvision import models
from torchvision import transforms, utils
from albumentations import ( Compose, OneOf, RandomBrightnessContrast,
RandomGamma, HueSaturationValue, RGBShift, MotionBlur, Blur,
GaussNoise, ChannelShuffle, MultiplicativeNoise, GaussNoise, ISONoise
)
PATH = '/home/hy/pkuad/'
#os.listdir(PATH)
# # Load data
# In[2]:
train =
|
pd.read_csv(PATH + 'train.csv')
|
pandas.read_csv
|
import numpy as np
import pandas as pd
# Auxiliary functions
def get_dummies(data):
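    """
    Factorizes categorical values into integer codes. Accepts either a single
    pandas Series or a DataFrame (every column of a copy is factorized in place).
    """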
data = data.copy()
if isinstance(data, pd.Series):
data = pd.factorize(data)[0]
return data
for col in data.columns:
data.loc[:, col] = pd.factorize(data[col])[0]
return data
def learncats(data, classcol=None, continuous_ids=[]):
"""
    Learns the number of categories in each variable.
Parameters
----------
data: numpy n x m
Numpy array comprising n realisations (instances) of m variables.
classcol: int
The column index of the class variables (if any).
continuous_ids: list of ints
List containing the indices of known continuous variables. Useful for
discrete data like age, which is better modeled as continuous.
Returns
-------
ncat: numpy m
The number of categories of each variable. One if the variable is
continuous.
"""
data = data.copy()
ncat = np.ones(data.shape[1])
if not classcol:
classcol = data.shape[1]-1
for i in range(data.shape[1]):
if i != classcol and (i in continuous_ids or is_continuous(data[:, i])):
continue
else:
data[:, i] = data[:, i].astype(int)
ncat[i] = max(data[:, i]) + 1
return ncat
def get_stats(data, ncat=None):
"""
    Compute univariate statistics for continuous variables and min-max normalize them.
Parameters
----------
data: numpy n x m
Numpy array comprising n realisations (instances) of m variables.
Returns
-------
data: numpy n x m
The normalized data.
maxv, minv: numpy m
The maximum and minimum values of each variable. One and zero, resp.
if the variable is categorical.
mean, std: numpy m
The mean and standard deviation of the variable. Zero and one, resp.
if the variable is categorical.
"""
data = data.copy()
maxv = np.ones(data.shape[1])
minv = np.zeros(data.shape[1])
mean = np.zeros(data.shape[1])
std = np.zeros(data.shape[1])
if ncat is not None:
for i in range(data.shape[1]):
if ncat[i] == 1:
maxv[i] = np.max(data[:, i])
minv[i] = np.min(data[:, i])
mean[i] = np.mean(data[:, i])
std[i] = np.std(data[:, i])
assert maxv[i] != minv[i], 'Cannot have constant continuous variable in the data'
data[:, i] = (data[:, i] - minv[i])/(maxv[i] - minv[i])
else:
for i in range(data.shape[1]):
if is_continuous(data[:, i]):
maxv[i] = np.max(data[:, i])
minv[i] = np.min(data[:, i])
mean[i] = np.mean(data[:, i])
std[i] = np.std(data[:, i])
assert maxv[i] != minv[i], 'Cannot have constant continuous variable in the data'
data[:, i] = (data[:, i] - minv[i])/(maxv[i] - minv[i])
return data, maxv, minv, mean, std
def normalize_data(data, maxv, minv):
"""
Normalizes the data given the maximum and minimum values of each variable.
Parameters
----------
data: numpy n x m
Numpy array comprising n realisations (instances) of m variables.
maxv, minv: numpy m
The maximum and minimum values of each variable. One and zero, resp.
if the variable is categorical.
Returns
-------
data: numpy n x m
The normalized data.
"""
data = data.copy()
for v in range(data.shape[1]):
if maxv[v] != minv[v]:
data[:, v] = (data[:, v] - minv[v])/(maxv[v] - minv[v])
return data
def standardize_data(data, mean, std):
"""
Standardizes the data given the mean and standard deviations values of
each variable.
Parameters
----------
data: numpy n x m
Numpy array comprising n realisations (instances) of m variables.
mean, std: numpy m
The mean and standard deviation of the variable. Zero and one, resp.
if the variable is categorical.
Returns
-------
data: numpy n x m
The standardized data.
"""
data = data.copy()
for v in range(data.shape[1]):
if std[v] > 0:
data[:, v] = (data[:, v] - mean[v])/(std[v])
# Clip values more than 6 standard deviations from the mean
data[:, v] = np.clip(data[:, v], -6, 6)
return data
def is_continuous(data):
"""
    Returns True if the data appear to have been sampled from a continuous
    variable, and False otherwise.
Parameters
----------
data: numpy
One dimensional array containing the values of one variable.
"""
    observed = data[~np.isnan(data)]  # do not consider missing values here
rules = [np.min(observed) < 0,
np.sum((observed) != np.round(observed)) > 0,
len(np.unique(observed)) > min(30, len(observed)/3)]
if any(rules):
return True
else:
return False
def train_test_split(data, ncat, train_ratio=0.7, prep='std'):
assert train_ratio >= 0
assert train_ratio <= 1
shuffle = np.random.choice(range(data.shape[0]), data.shape[0], replace=False)
data_train = data[shuffle[:int(train_ratio*data.shape[0])], :]
data_test = data[shuffle[int(train_ratio*data.shape[0]):], :]
if prep=='norm':
data_train, maxv, minv, _, _, = get_stats(data_train, ncat)
data_test = normalize_data(data_test, maxv, minv)
elif prep=='std':
_, maxv, minv, mean, std = get_stats(data_train, ncat)
data_train = standardize_data(data_train, mean, std)
data_test = standardize_data(data_test, mean, std)
X_train, y_train = data_train[:, :-1], data_train[:, -1]
X_test, y_test = data_test[:, :-1], data_test[:, -1]
return X_train, X_test, y_train, y_test, data_train, data_test
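# A hypothetical end-to-end example (not part of the original module) showing the
# auxiliary functions above on a small synthetic dataset: learn the category
# counts, then split and standardize.
def _toy_split_example():
    rng = np.random.RandomState(0)
    toy = np.column_stack([rng.normal(size=100),          # continuous feature
                           rng.randint(0, 3, size=100),   # categorical feature
                           rng.randint(0, 2, size=100)])  # binary class column
    ncat_toy = learncats(toy, classcol=2)
    X_tr, X_te, y_tr, y_te, _, _ = train_test_split(toy, ncat_toy, train_ratio=0.7, prep='std')
    return ncat_toy, X_tr.shape, X_te.shape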
# Preprocessing functions
def adult(data):
cat_cols = ['workclass', 'education', 'education-num', 'marital-status', 'occupation',
'relationship', 'race', 'sex', 'native-country', 'y']
cont_cols = ['age', 'workclass', 'fnlwgt', 'education', 'education-num', 'capital-gain',
'capital-loss', 'hours-per-week']
data.loc[:, cat_cols] = get_dummies(data[cat_cols])
ncat = learncats(data.values, classcol=-1, continuous_ids=[data.columns.get_loc(c) for c in cont_cols])
return data.values.astype(float), ncat
def australia(data):
cat_cols = ['A1', 'A4', 'A5', 'A6', 'A7', 'A9', 'A10', 'A12', 'A13', 'class']
cont_cols = ['A2', 'A3', 'A8', 'A11', 'A14', 'A15']
data.loc[:, cat_cols] = get_dummies(data[cat_cols])
data = data.replace('?', np.nan)
ncat = learncats(data.values.astype(float), classcol=-1, continuous_ids=[data.columns.get_loc(c) for c in cont_cols])
return data.values.astype(float), ncat
def bank(data):
cat_cols = ['job', 'marital', 'education', 'default', 'housing', 'loan',
'contact', 'month', 'day_of_week', 'poutcome', 'y']
cont_cols = ['age', 'duration', 'campaign', 'previous', 'emp.var.rate',
'cons.price.idx','cons.conf.idx', 'euribor3m', 'nr.employed']
data.loc[:, cat_cols] = get_dummies(data[cat_cols])
data.loc[:, 'pdays'] = np.where(data['pdays']==999, 0, 1)
ncat = learncats(data.values, classcol=-1, continuous_ids=[data.columns.get_loc(c) for c in cont_cols])
return data.values.astype(float), ncat
def credit(data):
cat_cols = ['SEX', 'EDUCATION', 'MARRIAGE', 'default payment next month']
cont_cols = ['LIMIT_BAL', 'AGE', 'PAY_0', 'PAY_2',
'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6', 'BILL_AMT1', 'BILL_AMT2',
'BILL_AMT3', 'BILL_AMT4', 'BILL_AMT5', 'BILL_AMT6', 'PAY_AMT1',
'PAY_AMT2', 'PAY_AMT3', 'PAY_AMT4', 'PAY_AMT5', 'PAY_AMT6']
data.loc[:, cat_cols] = get_dummies(data[cat_cols])
ncat = learncats(data.values, classcol=-1, continuous_ids=[data.columns.get_loc(c) for c in cont_cols])
return data.values.astype(float), ncat
def electricity(data):
cat_cols = ['day', 'class']
cont_cols = ['date', 'period', 'nswprice', 'nswdemand', 'vicprice',
'vicdemand', 'transfer']
data.loc[:, cat_cols] = get_dummies(data[cat_cols])
ncat = learncats(data.values, classcol=-1, continuous_ids=[data.columns.get_loc(c) for c in cont_cols])
return data.values.astype(float), ncat
def segment(data):
data = data.drop(columns=['region.centroid.col', 'region.pixel.count'])
cat_cols = ['short.line.density.5', 'short.line.density.2', 'class']
cont_cols = ['region.centroid.row', 'vedge.mean', 'vegde.sd', 'hedge.mean', 'hedge.sd',
'intensity.mean', 'rawred.mean', 'rawblue.mean', 'rawgreen.mean', 'exred.mean', 'exblue.mean' ,
'exgreen.mean', 'value.mean', 'saturation.mean', 'hue.mean']
data.loc[:, cat_cols] = get_dummies(data[cat_cols])
ncat = learncats(data.values, classcol=-1, continuous_ids=[data.columns.get_loc(c) for c in cont_cols])
return data.values.astype(float), ncat
def german(data):
cat_cols = [0, 2, 3, 5, 6, 8, 9, 11, 13, 14, 16, 18, 19, 20]
cont_cols = [1, 4, 7, 10, 12, 15, 17]
data.iloc[:, cat_cols] = get_dummies(data[cat_cols])
ncat = learncats(data.values, classcol=-1, continuous_ids=cont_cols)
return data.values.astype(float), ncat
def vowel(data):
cat_cols = ['Speaker_Number', 'Sex', 'Class']
data.loc[:, cat_cols] = get_dummies(data[cat_cols])
ncat = learncats(data.values, classcol=data.shape[1]-1)
return data.values.astype(float), ncat
def cmc(data):
cat_cols = ['Wifes_education', 'Husbands_education', 'Wifes_religion', 'Wifes_now_working%3F',
'Husbands_occupation', 'Standard-of-living_index', 'Media_exposure', 'Contraceptive_method_used']
cont_cols = ['Wifes_age', 'Number_of_children_ever_born']
data.loc[:, cat_cols] = get_dummies(data[cat_cols])
ncat = learncats(data.values, classcol=data.shape[1]-1)
return data.values.astype(float), ncat
def get_data(name):
if 'wine' in name:
data_red = pd.read_csv('../data/winequality_red.csv')
data_white = pd.read_csv('../data/winequality_white.csv')
data = pd.concat([data_red, data_white]).values
data[:, -1] = np.where(data[:, -1] <= 6, 0, 1)
ncat = learncats(data, classcol=data.shape[1]-1)
elif 'bank' in name:
data = pd.read_csv('../data/bank-additional-full.csv', sep=';')
data, ncat = bank(data)
elif 'segment' in name:
data = pd.read_csv('../data/segment.csv')
data, ncat = segment(data)
elif 'german' in name:
data = pd.read_csv('../data/german.csv', sep=' ', header=None)
data, ncat = german(data)
elif 'vehicle' in name:
data = pd.read_csv('../data/vehicle.csv')
data['Class'] = get_dummies(data['Class'])
ncat = np.ones(data.shape[1])
ncat[-1] = len(np.unique(data['Class']))
data = data.values.astype(float)
elif 'vowel' in name:
data = pd.read_csv('../data/vowel.csv')
data, ncat = vowel(data)
elif 'authent' in name:
data = pd.read_csv('../data/authent.csv')
data['Class'] = get_dummies(data['Class'])
ncat = learncats(data.values).astype(int)
data = data.values.astype(float)
elif 'diabetes' in name:
data = pd.read_csv('../data/diabetes.csv')
data['class'] = get_dummies(data['class'])
ncat = learncats(data.values,
continuous_ids=[0] # Force first variable to be continuous
).astype(int)
data = data.values.astype(float)
elif 'cmc' in name:
data =
|
pd.read_csv('../data/cmc.csv')
|
pandas.read_csv
|
import pandas as pd
import numpy as np
a=input()
b=input()
a=a.split()
b=b.split()
d={0:pd.Series(a),1:
|
pd.Series(b)
|
pandas.Series
|
'''
yw (water-level) prediction model using a GRU
Author:<NAME>
'''
import os
import joblib
from torch import optim
from collections import namedtuple
from torch.utils.data import Dataset
from alive_progress import alive_bar, config_handler
from interval import Interval
from time import ctime
from gen_context import *
from duration_model import get_input, linear_reg
import torch
import pickle
import torch.nn as nn
import torch.nn.functional as F
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas as pd
import warnings
import matplotlib.pyplot as plt
warnings.filterwarnings('ignore')
## Global parameter definitions
input_size = 7  # dimension of the model input features
# -------------
# Model architecture definition: GRU model
# -------------
class BaseModel(nn.Module):
def __init__(self, input_size=36, hidden_units_1=32, hidden_units_2=8, layer_num=1, type='tank', cell='GRU'):
super(BaseModel, self).__init__()
self.hidden_units_1 = hidden_units_1
self.hidden_units_2 = hidden_units_2
self.input_size = input_size
self.layer_num = layer_num
self.type = type
if cell == 'LSTM':
            self.cell = nn.LSTM(input_size=self.input_size, hidden_size=self.hidden_units_1,
num_layers=self.layer_num, dropout=0.0, batch_first=True)
if cell == 'GRU':
self.cell = nn.GRU(input_size=self.input_size, hidden_size=self.hidden_units_1,
num_layers=self.layer_num, dropout=0.0, batch_first=True)
if self.type == 'water_level':
            # input dimension of the sub-network
sec_size = 1
elif self.type == 'pressure':
            # input dimension of the sub-network
sec_size = 5
else:
raise ValueError
self.deep_cell = nn.GRU(input_size=sec_size, hidden_size=self.hidden_units_2,
num_layers=self.layer_num, dropout=0.0, batch_first=True)
self.fc1 = nn.Linear(self.hidden_units_1 + self.hidden_units_2, 4)
self.fc2 = nn.Linear(4, 1)
# ---------------
# Gated recurrent unit network
# ---------------
class GRU_Model(BaseModel):
def __init__(self, device, input_size, hidden_units_1, hidden_units_2, layer_num=1, type='water_level', cell='GRU'):
super(GRU_Model, self).__init__(input_size, hidden_units_1, hidden_units_2, layer_num, type, cell)
self.length = 36
self.steps = 5
self.device = device
self.reshape = lambda x, hn, units: hn.view(x.size(0), units)
def forward(self, x):
        # inlet pressure, monitoring-point pressure and time-encoding features (36*3*1)
x1 = x[:, :self.length * self.input_size].view(-1, self.length, self.input_size)
        # maximum water level at the same time of day over the previous five days (5*1*1)
if self.type == 'water_level':
x2 = torch.max(x[:, self.input_size * self.length:].view(-1, self.steps, 5), axis=2)[0].view(
-1, self.steps, 1)
        # pressure at the same time of day over the previous five days (5*5*1)
elif self.type == 'pressure':
x2 = x[:, self.input_size * self.length:].view(-1, self.steps, 5)
else:
raise ValueError
hn1 = self.cell(x1)[-1]
hn2 = self.deep_cell(x2)[-1]
hn1 = self.reshape(x1, hn1, self.hidden_units_1)
hn2 = self.reshape(x1, hn2, self.hidden_units_2)
hn = torch.cat((hn1, hn2), dim=1)
fcOutput = F.relu(self.fc1(hn))
fcOutput = self.fc2(fcOutput)
return fcOutput
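    # Shape walkthrough of forward() above (descriptive only, inferred from the code):
    #   x:  (batch, 36 * input_size + 5 * 5)  flattened history plus same-time features
    #   x1: (batch, 36, input_size)           previous-3-hour window fed to self.cell
    #   x2: (batch, 5, 1) for water_level or (batch, 5, 5) for pressure, fed to self.deep_cell
    #   hn1/hn2 -> concatenated hidden states -> fc1 (4 units) -> fc2 -> (batch, 1) prediction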
'''
Data preprocessing module: generation of time-series samples, splitting into
training and test sets, and wrapping them in the PyTorch Dataset format.
'''
# ----------------------
# Define the Dataset
# ----------------------
class Time_Series_Data(Dataset):
def __init__(self, train_x, train_y):
self.X = train_x
self.y = train_y
def __getitem__(self, item):
x_t = self.X[item]
y_t = self.y[item]
return x_t, y_t
def __len__(self):
return len(self.X)
# ----------------------
# Data preprocessing module
# ----------------------
class Data_Preparement:
'''
input: DataFrame which contains all features model needs
output: the train Dataset normalized by Minmaxscalar
'''
def __init__(self, station, type, data, size, n_out=1, trans=None, denose=False):
self.station = station
self.type = type
self.denose = denose
        self.data = data.copy()  # copy to avoid mutating the original data
        self.size = size  # input window length
        self.n_out = n_out  # prediction horizon
        self.day_step = 288  # number of 5-minute steps per day
        self.data['time_encode'] = [i.hour * 60 + i.minute for i in self.data.index.time]  # time-encoding feature
        if trans:
            self.data[self.data.columns] = trans.fit_transform(self.data)  # normalize the data
        self.sample = self._CreateSample()
        self.dataSet = namedtuple('dataSet', ['x', 'y'])
        self.valnum = self.day_step * 3  # use 3 days as the validation set
        self.train = self._DivideTrainTest()[0]  # training set
        self.val = self._DivideTrainTest()[1]  # validation set
    # reshape the datasets to match the model input requirements; axis is fixed to 2 here
def _unsqeeze(self, axis, *data_group):
res = list()
for data in data_group:
size = [data.x.shape[0], data.x.shape[1]]
size.insert(axis, 1)
temp = data.x.reshape(size)
data = data._replace(x=temp)
res.append(data)
return res
    # create samples
    def _CreateSample(self):
        cols = list()
        ## extract individual feature series
        # time-encoding data
        time_encode = self.data[['time_encode']]
        # pressure at the monitoring point
        bottom_pressure = self.data[['bottom_pressure']]
        # water-plant flow (demand)
        water_demand = self.data[['water_demand']]
        # water-plant pressure
        xj_pressure = self.data[['xj_pressure']]
        # select outlet pressure and water-level data for the station being predicted
if self.station == 'hx':
            # outlet pressure data
pump_pressure = self.data[['hx_pressure']]
alt_pump_pressure = self.data[['xfx_pressure']]
            # water-level data
water_level = self.data[['hx_water_level']]
alt_water_level = self.data[['xfx_water_level']]
elif self.station == 'xfx':
            # outlet pressure data
pump_pressure = self.data[['xfx_pressure']]
alt_pump_pressure = self.data[['hx_pressure']]
            # water-level data
water_level = self.data[['xfx_water_level']]
alt_water_level = self.data[['hx_water_level']]
else:
raise ValueError
        # take the previous three hours of data
for i in range(self.size - 1, -1, -1):
cols.append(bottom_pressure.shift(i))
for i in range(self.size - 1, -1, -1):
cols.append(time_encode.shift(i))
if self.type == 'water_level':
            # add the pump-station outlet-pressure features
for i in range(self.size - 1, -1, -1):
cols.append(pump_pressure.shift(i))
for i in range(self.size - 1, -1, -1):
cols.append(alt_pump_pressure.shift(i))
for i in range(self.size - 1, -1, -1):
cols.append(water_demand.shift(i))
for i in range(self.size - 1, -1, -1):
cols.append(xj_pressure.shift(i))
for i in range(self.size - 1, -1, -1):
cols.append(alt_water_level.shift(i))
            # add the same-time-of-day features from the preceding days
for i in list(range(5 * self.day_step + 2, 5 * self.day_step - 3, -1)) + \
list(range(4 * self.day_step + 2, 4 * self.day_step - 3, -1)) + \
list(range(3 * self.day_step + 2, 3 * self.day_step - 3, -1)) + \
list(range(2 * self.day_step + 2, 2 * self.day_step - 3, -1)) + \
list(range(self.day_step + 2, self.day_step - 3, -1)):
cols.append(water_level.shift(i))
cols.append(water_level.shift(-self.n_out))
elif self.type == 'pressure':
            # add the pump-station water-level features
for i in range(self.size - 1, -1, -1):
cols.append(water_level.shift(i))
for i in range(self.size - 1, -1, -1):
cols.append(alt_water_level.shift(i))
for i in range(self.size - 1, -1, -1):
cols.append(water_demand.shift(i))
for i in range(self.size - 1, -1, -1):
cols.append(xj_pressure.shift(i))
for i in range(self.size - 1, -1, -1):
cols.append(alt_pump_pressure.shift(i))
            # add the same-time-of-day features from the preceding days
for i in list(range(5 * self.day_step + 2, 5 * self.day_step - 3, -1)) + \
list(range(4 * self.day_step + 2, 4 * self.day_step - 3, -1)) + \
list(range(3 * self.day_step + 2, 3 * self.day_step - 3, -1)) + \
list(range(2 * self.day_step + 2, 2 * self.day_step - 3, -1)) + \
list(range(self.day_step + 2, self.day_step - 3, -1)):
cols.append(pump_pressure.shift(i))
cols.append(pump_pressure.shift(-self.n_out))
sample = pd.concat(cols, axis=1)
sample.dropna(inplace=True)
return sample.values
    # split into samples and labels, training and validation sets
def _DivideTrainTest(self):
split = lambda x: self.dataSet(x[:, :-1], np.squeeze(x[:, -1:]))
train, val = self.sample[:-self.valnum], self.sample[-self.valnum:]
train, val = map(split, [train, val])
train, val = self._unsqeeze(2, train, val)
train = Time_Series_Data(train.x, train.y)
val = Time_Series_Data(val.x, val.y)
return train, val
'''
Neural-network training module.
input: training data as a DataFrame (including water level, Qingdong monitoring-point
       pressure and Huaxiang inlet pressure)
output: stores the model parameters with the lowest validation error to ./yw_model and
        the normalization scaler to ./yw_scalar
'''
# ----------------------
# Early-stopping class
# ----------------------
class EarlyStopping():
def __init__(self, patience=15, verbose=False, delta=0):
self.patience = patience
self.verbose = verbose
self.counter = 0
self.best_score = None
self.early_stop = False
self.val_loss_min = np.Inf
self.delta = delta
def __call__(self, val_loss, model, path, bar, label):
# print("val_loss={}".format(val_loss))
score = -val_loss
if self.best_score is None:
self.best_score = score
self.save_checkpoint(val_loss, model, path, bar, label)
elif score < self.best_score + self.delta:
self.counter += 1
bar.text(f'EarlyStopping counter:{self.counter} out of {self.patience}')
bar()
# print(f'EarlyStopping counter:{self.counter} out of {self.patience}')
if self.counter >= self.patience:
self.early_stop = True
else:
self.best_score = score
self.save_checkpoint(val_loss, model, path, bar, label)
self.counter = 0
def save_checkpoint(self, val_loss, model, path, bar, label):
if self.verbose:
bar.text(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}).')
bar()
# print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}).')
torch.save(model.state_dict(), os.path.join(path, 'model_best_{}.pth'.format(label)))
self.val_loss_min = val_loss
# ----------------------
# Model training class
# ----------------------
class pump_model_train:
    # load model parameters
def __init__(self, station_name, obj_type):
        self.station_name = station_name  # station name
        self.obj_type = obj_type  # prediction target
        self.input_size = 36  # sample (window) size
        self.batch_size = 128  # mini-batch size
        self.max_epoch = 100  # maximum number of training epochs
        self.lr = 0.0005  # initial learning rate
        self.trans = MinMaxScaler()
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # device used for training
        self.optim_p = optim.Adam  # optimizer
        self.loss_f = nn.MSELoss()  # loss function
        self.scalar_path = './scalar_pump'
        self.model_path = './{}'.format('_'.join([self.station_name, self.obj_type]))  # model save path
        if not os.path.exists(self.model_path):  # create the directory if it does not exist
os.mkdir(self.model_path)
        # build the network
self.net = GRU_Model(device=self.device, input_size=input_size, hidden_units_1=32, hidden_units_2=8,
type=self.obj_type)
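    # epoches_train() below calls self._setup_seed(), which the original file does
    # not define. A minimal sketch under the assumption that it simply fixes the
    # random seeds for reproducibility.
    def _setup_seed(self, seed=0):
        torch.manual_seed(seed)
        np.random.seed(seed)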
    # training procedure for a single epoch
    def _train(self, train_loader, optimizer):
        self.net.train()  # switch to training mode
        epochloss = 0.0  # accumulates the loss over the epoch
        for batch_idx, (inp, label) in enumerate(train_loader):
            inp, label = inp.to(device=self.device, dtype=torch.float), label.to(device=self.device,
                                                                                 dtype=torch.float)  # move inputs to the device
            optimizer.zero_grad()  # zero the gradients
            out = self.net.forward(inp)  # forward pass
            out = out.squeeze()
            loss = self.loss_f(out, label)  # compute the loss
            epochloss += loss.item()  # record the loss
            loss.backward()  # backpropagate
            optimizer.step()  # update the parameters
return epochloss / len(train_loader.dataset)
    # validation procedure for a single epoch
    def _val(self, val_loader):
        self.net.eval()  # switch to evaluation mode
        val_loss = 0.0
        # validate batch by batch
        with torch.no_grad():
            for inp, label in val_loader:
                inp, label = inp.to(device=self.device, dtype=torch.float), label.to(device=self.device,
                                                                                     dtype=torch.float)
                out = self.net.forward(inp)
                out = out.squeeze()
                val_loss += self.loss_f(out, label).item()  # accumulate the validation loss
return val_loss / len(val_loader.dataset)
def epoches_train(self, train_data):
'''
        :param train_data: training data
:return:
'''
self._setup_seed()
self.net = self.net.to(self.device)
time_label = train_data.index[-1].strftime("%Y_%m_%d")
Dataset = Data_Preparement(self.station_name, self.obj_type, train_data, size=self.input_size, trans=self.trans)
        # load the datasets
train_loader = torch.utils.data.DataLoader(Dataset.train, batch_size=self.batch_size, shuffle=True,
drop_last=True)
val_loader = torch.utils.data.DataLoader(Dataset.val, batch_size=self.batch_size, shuffle=False,
drop_last=True)
        # save the scaler
if not os.path.exists(self.scalar_path):
os.mkdir(self.scalar_path)
joblib.dump(self.trans, '{}/scalar_{}'.format(self.scalar_path, time_label))
        # count the number of trainable model parameters
trainable_param_n = sum(p.numel() for p in self.net.parameters() if p.requires_grad)
print('Number of learnable model parameters: %d' % trainable_param_n)
# set up the optimizer
optimizer = self.optim_p(self.net.parameters(), lr=self.lr)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.98)
early_stopping = EarlyStopping(patience=15, verbose=True)
# initialize the loss records
trainLossList = []
valLossList = []
# train for up to max_epoch epochs
config_handler.set_global(length=25, spinner='waves2')
with alive_bar(self.max_epoch, bar='bubbles') as bar:
for t in range(self.max_epoch):
train_loss = self._train(train_loader, optimizer)
val_loss = self._val(val_loader)
trainLossList.append(train_loss)
valLossList.append(val_loss)
# early_stopping(valLossList[-1]*1000, model=self.net, path=self.model_path)
early_stopping(valLossList[-1] * 1000, model=self.net, path=self.model_path, bar=bar, label=time_label)
if early_stopping.early_stop:
bar.text('Early stopping')
bar()
# print('Early stopping')
break
scheduler.step()
return valLossList
'''
Neural-network prediction module
input: the previous 3 days of history and the previous 3 hours of real-time data as DataFrames (including water level, Qingdong monitoring-point pressure and Huaxiang inlet pressure)
output: predicted water level for the next five minutes
'''
class pump_model_pred:
def __init__(self, station_name, obj_type, selected_date=None, denose=False):
self.denose = denose
self.station_name = station_name
self.obj_type = obj_type
self.selected_date = selected_date
self.target_name = '_'.join([self.station_name, self.obj_type])
self.path = './{}'.format(self.target_name)
self.scalar_path = './scalar_pump'
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # device used for inference
self.net = self._get_model()
self.trans = self._get_scalar()
def _update_paras(self):
self.net = self._get_model()
self.trans = self._get_scalar()
# find the most recent model file
def _get_model_weights(self):
if self.selected_date:
model_name = f'model_best_{self.selected_date[:4]}_{self.selected_date[4:6]}_{self.selected_date[-2:]}.pth'
return os.path.join(self.path, model_name)
files = os.listdir(self.path)
if not files:
return None
else:
files = sorted(files, key=lambda x: os.path.getmtime(
os.path.join(self.path, x))) # sort files by modification time; x is a bare filename, so it must be joined with the directory path
return os.path.join(self.path, files[-1])
# load the model weights
def _get_model(self):
file_path = self._get_model_weights()
net = GRU_Model(device=self.device, input_size=input_size, hidden_units_1=32, hidden_units_2=8,
type=self.obj_type)
net.load_state_dict(torch.load(file_path, map_location='cpu'))
net = net.to(self.device)
return net
# load the scaler
def _get_scalar(self):
file_path = self._get_model_weights()
filename = os.path.basename(file_path)
scalar_name = 'scalar_' + '_'.join(os.path.splitext(filename)[0].split('_')[2:])
scalar = joblib.load(os.path.join(self.scalar_path, scalar_name))
return scalar
# extract same-period (cyclical) features
def _get_CycleAttr(self, data, time_index):
# target feature
cycle_fea = data[self.target_name]
att_cycle = pd.Series()
for i in range(1, 6):
# time index at the same time of day i days earlier
timelabel_di = time_index - pd.Timedelta(days=i)
# offset of 2 time steps on either side
time_bias = 2 * pd.Timedelta(minutes=5)
# water-level data around the same time on that day
df_di = cycle_fea[timelabel_di - time_bias:timelabel_di + time_bias]
att_cycle = att_cycle.append(df_di)
return att_cycle.values
# extract the previous three hours of critical-point pressure, inlet pressure and time-encoding features
def _get_DriveAttr(self, data):
if self.obj_type == 'water_level':
vb_column = self.station_name + '_pressure'
if self.station_name == 'hx':
alt_vb_column = 'xfx_pressure'
alt_ovb_volumn = 'xfx_water_level'
elif self.station_name == 'xfx':
alt_vb_column = 'hx_pressure'
alt_ovb_volumn = 'hx_water_level'
else:
raise ValueError
elif self.obj_type == 'pressure':
vb_column = self.station_name + '_water_level'
if self.station_name == 'hx':
alt_vb_column = 'xfx_water_level'
alt_ovb_volumn = 'xfx_pressure'
elif self.station_name == 'xfx':
alt_vb_column = 'hx_water_level'
alt_ovb_volumn = 'hx_pressure'
else:
raise ValueError
# fea_columns = [vb_column, 'bottom_pressure', 'time_encode','water_demand','xj_pressure']
fea_columns = ['bottom_pressure', 'time_encode', vb_column, alt_vb_column, 'water_demand',
'xj_pressure', alt_ovb_volumn]
data = data[fea_columns]
att_drive = data.values.T.ravel()
return att_drive
def _inverse_transform(self, inp, out):
pos_dict = {'hx_water_level': 0, 'xfx_water_level': 1, 'hx_pressure': 3, 'xfx_pressure': 4}
format_array = np.zeros((1, inp.shape[1]))
format_array[0, pos_dict[self.target_name]] = out
return self.trans.inverse_transform(format_array)[0, pos_dict[self.target_name]]
def _get_treat(self, data):
data['time_encode'] = [i.hour * 60 + i.minute for i in data.index.time]
data[data.columns] = self.trans.transform(data.values)
return data
def pred(self, realtime_data, history_data):
realtime_data = self._get_treat(realtime_data.copy())
history_data = self._get_treat(history_data.copy())
next_time = realtime_data.index[-1] + pd.Timedelta(minutes=5)
att_drive = self._get_DriveAttr(realtime_data)
att_cycle = self._get_CycleAttr(history_data, next_time)
input = np.concatenate([att_drive, att_cycle])
self.net.eval() # switch to evaluation mode
input_torch = torch.tensor(input)
input_torch = input_torch.to(device=self.device, dtype=torch.float)
input_torch = input_torch.view(1, -1, 1)
out = self.net.forward(input_torch)
out = out.squeeze()
out = out.cpu().detach().numpy()
output = self._inverse_transform(realtime_data, out) # rescale back to the original units
return output
'''
Generate dispatch orders
Instance initialization: init_time is the start time
Entry point: signal_cal (input: current water level and the predicted water level at the next time step; output: dispatch orders)
'''
## Used by the online code to generate dispatch orders for all pump stations; not used by the offline simulation system
# class Union_Order_Gen:
# def __init__(self, init_time, init_status):
# self.init_time = init_time
# self.init_status = init_status
# hx_yl_status = self.init_status[:3]
# hx_yw_status = self.init_status[3:5]
# xfx_yl_status = self.init_status[5:8]
# xfx_yw_status = self.init_status[8:10]
# self.hx_yw = Order_Gen('hx_water_level', self.init_time, hx_yw_status)
# self.hx_yl = Order_Gen('hx_pressure', self.init_time, hx_yl_status)
# self.xfx_yw = Order_Gen('xfx_water_level', self.init_time, xfx_yw_status)
# self.xfx_yl = Order_Gen('xfx_pressure', self.init_time, xfx_yl_status)
#
# def signal_cal(self, next_status, current_status, bottom_pressure, real_hx_yw, real_xfx_yw):
# ## read the current pressure and water-level data and the next-step predictions
# current_hx_yw, current_xfx_yw, current_hx_yl, current_xfx_yl = current_status
# next_hx_yw, next_xfx_yw, next_hx_yl, next_xfx_yl = next_status
# ## generate the signals for the current time step
# hx_yw_signal = self.hx_yw.signal_cal(current_hx_yw, next_hx_yw)
# xfx_yw_signal = self.xfx_yw.signal_cal(current_xfx_yw, next_xfx_yw)
# hx_yl_signal = self.hx_yl.signal_cal(current_hx_yl, next_hx_yl)
# xfx_yl_signal = self.xfx_yl.signal_cal(current_xfx_yl, next_xfx_yl)
# signals = [hx_yw_signal, xfx_yw_signal, hx_yl_signal, xfx_yl_signal]
# ## return the orders
# orders = []
# if self.hx_yw.order_cal(bottom_pressure, real_hx_yw):
# hx_tank_ord = self.hx_yw.order_cal(bottom_pressure, real_hx_yw)
# orders.append(hx_tank_ord)
# if self.xfx_yw.order_cal(bottom_pressure, real_xfx_yw):
# xfx_tank_ord = self.xfx_yw.order_cal(bottom_pressure, real_xfx_yw)
# orders.append(xfx_tank_ord)
# if self.hx_yl.order_cal(bottom_pressure):
# hx_pump_ord = self.hx_yl.order_cal(bottom_pressure)
# orders.append(hx_pump_ord)
# if self.xfx_yl.order_cal(bottom_pressure):
# xfx_pump_ord = self.xfx_yl.order_cal(bottom_pressure)
# orders.append(xfx_pump_ord)
# return signals, orders
## Order generation for each model
class Order_Gen:
def __init__(self, target_name, init_time, init_status):
# remember which pump was used last time
self.pump_number = []
try:
pump_id = list(init_status).index(1)
except:
print('[Order Gen]:No pump open now at {}'.format(ctime()))
pass
else:
if target_name == 'hx_water_level' or target_name == 'xfx_water_level':
self.pump_number.append('#{}'.format(pump_id + 4))
if target_name == 'hx_pressure' or target_name == 'xfx_pressure':
self.pump_number.append('#{}'.format(pump_id + 1))
init_cc = sum(init_status)
if init_cc > 1:
init_cc = 1
# signal sequences
self.signals = [init_cc] # initialize the signal sequence
self.control_signals = [init_cc] # signal sequence with control applied
# prediction sequence
self.preds = [] # initialize the prediction sequence
# other configuration parameters
self.max_len = 100 # maximum sequence length
self.time = init_time # start time
self.flag = init_cc
self.silence_flag = False
self.silence_collect = []
self.silence_counter = 0
self.sensor_flag = 0
self.target_name = target_name
def _maxlen_cut(self):
self.signals = self.signals[-self.max_len:]
self.preds = self.preds[-self.max_len - 1:]
## Huaxiang reservoir water-level dispatch logic
def _hx_tank_logi(self, current_val, next_val, current_water_level):
# silence mode: skip issuing a new signal
if self.silence_flag:
self.silence_counter += 1
return 0
## threshold-crossing triggers
# start-inflow trigger
if (next_val - current_val > 0.025
and next_val < 2
and self.signals[-1] == 0):
self.flag = -1
# stop-inflow trigger
if (next_val - current_val < 0.015
and next_val > 4
and self.signals[-1] == -1):
self.flag = 0
# pump-on trigger
if (next_val - current_val < -0.02
and next_val > 3
and self.signals[-1] == 0
and (self.time + pd.Timedelta(minutes=5)).strftime('%H:%M') not in Interval('00:00', '06:00')):
self.flag = 1
# pump-off trigger
if (-0.025 < next_val - current_val < 0
and next_val < 2
and self.signals[-1] == 1):
self.flag = 0
## forced triggers
# forced water-level trigger
if current_water_level < 1.1 and self.flag == 1:
self.flag = 0
if (current_water_level < 1.1
and self.flag == 0
and (self.time + pd.Timedelta(minutes=5)).strftime('%H:%M') in Interval('06:00', '18:00')):
self.flag = -1
# forced time-window trigger
if ((self.time + pd.Timedelta(minutes=5)).strftime('%H:%M') in Interval('00:00', '06:00')
and self.flag == 1):
self.flag = 0
if ((self.time + pd.Timedelta(minutes=5)).strftime('%H:%M') in Interval('13:00', '16:00')
and self.flag == 1):
self.flag = 0
if ((self.time + pd.Timedelta(minutes=5)).strftime('%H:%M') in Interval('01:00', '02:00')
and self.flag == 0):
self.flag = -1
## Xinfengxi reservoir water-level dispatch logic
def _xfx_tank_logi(self, current_val, next_val, current_water_level):
## silence mode: skip issuing a new signal
if self.silence_flag:
self.silence_counter += 1
return 0
## threshold-crossing triggers
# start-inflow trigger
if ((next_val - current_val) > 0.025
and next_val < 2
and ((self.time + pd.Timedelta(minutes=5)).strftime('%H:%M') in Interval('00:00', '02:00')
or (self.time + pd.Timedelta(minutes=5)).strftime('%H:%M') in Interval('23:00', '23:55'))
and self.signals[-1] == 0):
self.flag = -1
# stop-inflow trigger
if (next_val - current_val < 0.015
and next_val > 3
and self.signals[-1] == -1):
self.flag = 0
# pump-on trigger
if (((next_val - current_val) < -0.015
and next_val > 2
or self.time.strftime('%H:%M') in Interval('18:00', '21:00'))
and self.signals[-1] == 0):
self.flag = 1
# pump-off trigger
if (((abs(next_val - current_val) < 0.003 and next_val < 3) or next_val < 1.8)
and self.signals[-1] == 1
and self.time.strftime('%H:%M') not in Interval('18:00', '21:00')):
self.flag = 0
## forced triggers
# forced water-level trigger
if (current_water_level < 1.1
and self.flag == 1):
self.flag = 0
if (current_water_level < 1.1
and self.flag == 0
and (self.time + pd.Timedelta(minutes=5)).strftime('%H:%M') in Interval('06:00', '18:00')):
self.flag = -1
# forced time-window trigger
if ((self.time +
|
pd.Timedelta(minutes=5)
|
pandas.Timedelta
|
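The dispatch logic in the record above repeatedly advances self.time by one 5-minute step with pd.Timedelta and then checks which time window the result falls into. A minimal sketch of that pattern, assuming a plain "HH:MM" string comparison in place of the Interval helper defined elsewhere in the original file:
import pandas as pd

now = pd.Timestamp("2021-06-01 05:58")
next_step = now + pd.Timedelta(minutes=5)   # advance one 5-minute step
hhmm = next_step.strftime("%H:%M")          # "06:03"

# crude stand-in for the Interval('00:00', '06:00') membership test used above
in_night_window = "00:00" <= hhmm <= "06:00"
print(next_step, hhmm, in_night_window)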
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.tsa.seasonal import seasonal_decompose
from pyramid.arima import auto_arima
from Sloth import predict
data = pd.read_csv("datasets/PRSA_data_2010.1.1-2014.12.31.csv",index_col=0)
data = data.groupby(['year', 'month']).mean()
data = data['TEMP']
print(data.head())
# clean data - set datetime, take temperature at hour 0, set index
#data = data.loc[data['hour'] == 0]
#data["date"] = pd.to_datetime(data['year'].map(str) + ' ' + data['month'].map(str) + ' ' + data['day'].map(str))
#data = data[['TEMP', 'date']]
#data = data.set_index('date')
# shift data to positive for multiplicative decomposition
#data['TEMP'] = data['TEMP'] - data['TEMP'].min() + 1
#data.index = pd.to_datetime(data.index)
#data.columns = ['Energy Production']
plt.figure()
plt.subplot(1, 1, 1)
plt.plot(data.values, "k-")
plt.xlabel("data point index")
plt.ylabel("temperature")
plt.title("Beijing Temperature 2010-2014")
plt.tight_layout()
plt.show()
#Sloth = Sloth()
result = predict.DecomposeSeriesSeasonal(data.index, data.values, 12)
fig = result.plot()
plt.show()
train = data.loc[2010:2013]
test = data.loc[2014:]
print("DEBUG:the size of test is:")
print(test.shape)
future_forecast = predict.PredictSeriesARIMA(train,test.shape[0],True, 12)
'''
#n_periods=test.shape[0]
#seasonal=True
#stepwise_model = auto_arima(data, start_p=1, start_q=1,
max_p=5, max_q=5, m=12,
start_P=0, seasonal=seasonal,
d=None, D=1, trace=True,
error_action='warn',
suppress_warnings=False,
stepwise=True)
#stepwise_model.fit(train)
#future_forecast = stepwise_model.predict(n_periods=n_periods)
'''
print("DEBUG::Future forecast:")
print(future_forecast)
future_forecast = pd.DataFrame(future_forecast,index = test.index, columns=["Prediction"])
plt.subplot(2, 1, 1)
plt.plot(pd.concat([test,future_forecast],axis=1).values)
plt.xlabel("data point index")
plt.ylabel("temperature")
plt.title("Future Forecast")
plt.subplot(2, 1, 2)
plt.plot(
|
pd.concat([data,future_forecast],axis=1)
|
pandas.concat
|
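The plotting code above relies on pd.concat(..., axis=1) to line up the observed series and the forecast frame by index before plotting. A small sketch with made-up values (the index and numbers are illustrative, not taken from the Beijing dataset):
import pandas as pd

observed = pd.Series([10.2, 11.5, 12.1], index=[1, 2, 3], name="TEMP")
forecast = pd.DataFrame({"Prediction": [11.9, 12.4]}, index=[3, 4])

# axis=1 aligns on the index; rows present on only one side become NaN
combined = pd.concat([observed, forecast], axis=1)
print(combined)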
"""
This is the model selection module:
KfoldCV
GridsearchCV
etc
"""
__all__ = [
'EstimatorSelectionHelper'
]
#===========================================================================================
#Imports
#===========================================================================================
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import GridSearchCV
#Class for hyperparameter searches across models
class EstimatorSelectionHelper:
"""
Authors
-------
<NAME>
"""
def __init__(self, models, params):
if not set(models.keys()).issubset(set(params.keys())):
missing_params = list(set(models.keys()) - set(params.keys()))
raise ValueError("Some estimators are missing parameters: %s" % missing_params)
self.models = models
self.params = params
self.keys = models.keys()
self.grid_searches = {}
def fit(self, X, y, cv=2, n_jobs=-1, verbose=5, scoring=None, refit=False):
for key in self.keys:
print("Running GridSearchCV for %s." % key)
model = self.models[key]
params = self.params[key]
gs = GridSearchCV(model, params, cv=cv, n_jobs=n_jobs,
verbose=verbose, scoring=scoring, refit=refit)
gs.fit(X,y)
self.grid_searches[key] = gs
def score_summary(self, sort_by='mean_score'):
def row(key, scores, params):
d = {
'estimator': key,
'min_score': min(scores),
'max_score': max(scores),
'mean_score': np.mean(scores),
'std_score': np.std(scores),
}
dict1 = d.copy()
dict1.update(params)
return
|
pd.Series(dict1)
|
pandas.Series
|
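score_summary above builds one pd.Series per evaluated parameter set; the truncated remainder of the method would typically collect those rows into a sortable frame. A hedged sketch of that pattern with invented estimator names and scores:
import numpy as np
import pandas as pd

def summary_row(estimator_name, scores, params):
    # One row of the score table: summary statistics plus the parameter values.
    d = {
        "estimator": estimator_name,
        "min_score": min(scores),
        "max_score": max(scores),
        "mean_score": np.mean(scores),
        "std_score": np.std(scores),
    }
    d.update(params)
    return pd.Series(d)

rows = [summary_row("rf", [0.81, 0.84, 0.82], {"n_estimators": 100}),
        summary_row("rf", [0.79, 0.86, 0.90], {"n_estimators": 300})]
summary = pd.concat(rows, axis=1).T.sort_values("mean_score", ascending=False)
print(summary)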
import bz2
import gzip
import logging
import tarfile
from dataclasses import dataclass
from datetime import datetime
from io import BytesIO
from typing import Optional, Generator, Tuple
import pandas as pd
from wetterdienst.dwd.metadata.constants import ArchiveFormat
from wetterdienst.dwd.network import download_file_from_dwd
from wetterdienst.dwd.radar.index import (
create_fileindex_radolan_cdc,
create_fileindex_radar,
)
from wetterdienst.dwd.radar.util import get_date_from_filename
from wetterdienst.dwd.radar.metadata import (
DWDRadarParameter,
DWDRadarDate,
DWDRadarDataFormat,
DWDRadarDataSubset,
DWDRadarPeriod,
DWDRadarResolution,
)
from wetterdienst.dwd.radar.sites import DWDRadarSite
from wetterdienst.dwd.metadata.column_names import DWDMetaColumns
from wetterdienst.dwd.metadata.datetime import DatetimeFormat
from wetterdienst.util.cache import (
payload_cache_twelve_hours,
payload_cache_five_minutes,
)
log = logging.getLogger(__name__)
@dataclass
class RadarResult:
"""
Result object encapsulating radar data and metadata.
Currently, this will relate to exactly one radar data file.
"""
data: BytesIO
timestamp: datetime = None
url: str = None
filename: str = None
def __getitem__(self, index):
"""
Backward compatibility to address this instance as a tuple.
Formerly, this returned a tuple of ``(datetime, BytesIO)``.
:param index:
:return:
"""
if index == 0: # pragma: no cover
return self.timestamp
elif index == 1:
return self.data
else: # pragma: no cover
raise KeyError(f"Index {index} undefined on RadarResult")
def collect_radar_data(
parameter: DWDRadarParameter,
resolution: Optional[DWDRadarResolution] = None,
period: Optional[DWDRadarPeriod] = None,
site: Optional[DWDRadarSite] = None,
fmt: Optional[DWDRadarDataFormat] = None,
subset: Optional[DWDRadarDataSubset] = None,
elevation: Optional[int] = None,
start_date: Optional[datetime] = None,
end_date: Optional[datetime] = None,
) -> RadarResult:
"""
Collect radar data for given parameters.
:param parameter: The radar moment to request
:param site: Site/station if parameter is one of
RADAR_PARAMETERS_SITES
:param fmt: Data format (BINARY, BUFR, HDF5)
:param subset: The subset (simple or polarimetric) for HDF5 data.
:param elevation:
:param start_date: Start date
:param end_date: End date
:param resolution: Time resolution for RadarParameter.RADOLAN_CDC,
either daily or hourly or 5 minutes.
:param period: Period type for RadarParameter.RADOLAN_CDC
:return: ``RadarResult`` item
"""
# Find latest file.
if start_date == DWDRadarDate.LATEST:
file_index = create_fileindex_radar(
parameter=parameter,
site=site,
fmt=fmt,
parse_datetime=False,
)
# Find "-latest-" file.
filenames = file_index["FILENAME"].tolist()
latest_file = list(filter(lambda x: "-latest-" in x, filenames))[0]
# Yield single "RadarResult" item.
result = next(_download_generic_data(url=latest_file))
yield result
else:
if parameter == DWDRadarParameter.RADOLAN_CDC:
if period:
period_types = [period]
else:
period_types = [
DWDRadarPeriod.RECENT,
DWDRadarPeriod.HISTORICAL,
]
results = []
for period in period_types:
file_index = create_fileindex_radolan_cdc(
resolution=resolution, period=period
)
# Filter for dates range if start_date and end_date are defined.
if period == DWDRadarPeriod.RECENT:
file_index = file_index[
(file_index[DWDMetaColumns.DATETIME.value] >= start_date)
& (file_index[DWDMetaColumns.DATETIME.value] < end_date)
]
# This is for matching historical data, e.g. "RW-200509.tar.gz".
else:
file_index = file_index[
(
file_index[DWDMetaColumns.DATETIME.value].dt.year
== start_date.year
)
& (
file_index[DWDMetaColumns.DATETIME.value].dt.month
== start_date.month
)
]
results.append(file_index)
file_index =
|
pd.concat(results)
|
pandas.concat
|
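After filtering the RECENT and HISTORICAL file indexes separately, the loop above stitches the per-period frames back together with pd.concat(results). A minimal sketch of that step (the column values are illustrative only):
import pandas as pd

recent = pd.DataFrame({"FILENAME": ["RW-2021-01-01.bin"], "PERIOD": ["recent"]})
historical = pd.DataFrame({"FILENAME": ["RW-200509.tar.gz"], "PERIOD": ["historical"]})

# Stack the per-period file indexes into one frame; ignore_index renumbers the rows.
file_index = pd.concat([recent, historical], ignore_index=True)
print(file_index)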
# -*- coding: utf-8 -*-
"""
Authors: <NAME>, <NAME>, <NAME>, and
<NAME>
IHE Delft 2017
Contact: <EMAIL>
Repository: https://github.com/gespinoza/hants
Module: hants
"""
from __future__ import division
import netCDF4
import pandas as pd
import numpy as np
import datetime
import math
import os
import osr
import glob
from copy import deepcopy
import matplotlib.pyplot as plt
import warnings
import gdal
from joblib import Parallel, delayed
def run_HANTS(rasters_path_inp, name_format,
start_date, end_date, latlim, lonlim, cellsize, nc_path,
nb, nf, HiLo, low, high, fet, dod, delta, Scaling_factor = 0.001,
epsg=4326, cores=1):
'''
This function runs the python implementation of the HANTS algorithm. It
takes a folder with geotiff raster data as input, creates a netcdf
file, and optionally exports the data back to geotiffs.
'''
nc_paths = create_netcdf(rasters_path_inp, name_format, start_date, end_date,
latlim, lonlim, cellsize, nc_path, Scaling_factor,
epsg)
args = [nb, nf, HiLo, low, high, fet, dod, delta, Scaling_factor]
print('\tApply HANTS on tiles...')
results = Parallel(n_jobs=cores)(delayed(HANTS_netcdf)(nc_path, args)
for nc_path in nc_paths)
if len(nc_paths) > 1:
Merge_NC_Tiles(nc_paths, nc_path, start_date, end_date, latlim, lonlim, cellsize, epsg, Scaling_factor)
return nc_path
def create_netcdf(rasters_path, name_format, start_date, end_date,
latlim, lonlim, cellsize, nc_path, Scaling_factor,
epsg=4326):
'''
This function creates a netcdf file from a folder with geotiff rasters, to
be used to run HANTS.
'''
# Latitude and longitude
lat_ls = pd.np.arange(latlim[0] + 0.5*cellsize, latlim[1],
cellsize)
lat_ls = lat_ls[::-1] # ArcGIS numpy
lon_ls = pd.np.arange(lonlim[0] + 0.5*cellsize, lonlim[1],
cellsize)
lat_n = len(lat_ls)
lon_n = len(lon_ls)
spa_ref = Spatial_Reference(epsg)
# ll_corner = [lonlim[0], latlim[0]]
# Rasters
dates_dt = pd.date_range(start_date, end_date, freq='D')
dates_ls = [d.toordinal() for d in dates_dt]
os.chdir(rasters_path)
ras_ls = glob.glob('*.tif')
# Create tile parts
if (lat_n > 200 or lon_n > 200):
lat_n_amount = np.maximum(1,int(np.floor(lat_n/100)))
lon_n_amount = np.maximum(1,int(np.floor(lon_n/100)))
nc_path_part_names = nc_path.split('.')
nc_path_tiles = []
for lat_n_one in range(0, lat_n_amount):
for lon_n_one in range(0, lon_n_amount):
nc_path_tile = ''.join(nc_path_part_names[0] + "_h%03d_v%03d.nc" %(lon_n_one, lat_n_one))
nc_path_tiles = np.append(nc_path_tiles, nc_path_tile)
else:
nc_path_tiles = nc_path
i = 0
# Loop over the nc_paths
for nc_path_tile in nc_path_tiles:
i += 1
if lat_n_amount > 1:
lat_part = int(nc_path_tile[-6:-3])
lat_start = lat_part * 100
if int(lat_part) is not int(lat_n_amount-1):
lat_end = int((lat_part + 1) * 100)
else:
lat_end = int(lat_n)
else:
lat_start = int(0)
lat_end = int(lat_n)
if lon_n_amount > 1:
lon_part = int(nc_path_tile[-11:-8])
lon_start = int(lon_part * 100)
if int(lon_part) is not int(lon_n_amount-1):
lon_end = int((lon_part + 1) * 100)
else:
lon_end = int(lon_n)
else:
lon_start = int(0)
lon_end = int(lon_n)
# Define space dimension
lat_range = lat_ls[lat_start:lat_end]
lon_range = lon_ls[lon_start:lon_end]
geo_ex = tuple([lon_range[0] - 0.5*cellsize, cellsize, 0, lat_range[0] + cellsize * 0.5, 0, -cellsize])
# Create netcdf file
print('Creating netCDF file tile %s out of %s...' %(i,len(nc_path_tiles)))
nc_file = netCDF4.Dataset(nc_path_tile, 'w', format="NETCDF4_CLASSIC")
# Create Dimensions
lat_dim = nc_file.createDimension('latitude', lat_end - lat_start)
lon_dim = nc_file.createDimension('longitude', lon_end - lon_start)
time_dim = nc_file.createDimension('time', len(dates_ls))
# Create Variables
crso = nc_file.createVariable('crs', 'i4')
crso.long_name = 'Lon/Lat Coords in WGS84'
crso.standard_name = 'crs'
crso.grid_mapping_name = 'latitude_longitude'
crso.projection = spa_ref
crso.longitude_of_prime_meridian = 0.0
crso.semi_major_axis = 6378137.0
crso.inverse_flattening = 298.257223563
crso.geo_reference = geo_ex
lat_var = nc_file.createVariable('latitude', 'f8', ('latitude',))
lat_var.units = 'degrees_north'
lat_var.standard_name = 'latitude'
lon_var = nc_file.createVariable('longitude', 'f8', ('longitude',))
lon_var.units = 'degrees_east'
lon_var.standard_name = 'longitude'
time_var = nc_file.createVariable('time', 'l', ('time',))
time_var.standard_name = 'time'
time_var.calendar = 'gregorian'
original_var = nc_file.createVariable('original_values', 'i',
('time', 'latitude', 'longitude'),
fill_value=-9999, zlib=True, least_significant_digit=0)
original_var.long_name = 'original_values'
original_var.grid_mapping = 'crs'
original_var.add_offset = 0.00
original_var.scale_factor = Scaling_factor
original_var.set_auto_maskandscale(False)
print('\tVariables created')
# Fill in time and space dimensions
lat_var[:] = lat_range
lon_var[:] = lon_range
time_var[:] = dates_ls
# Create memory example file
# empty array
empty_vec = pd.np.empty((lat_end - lat_start, lon_end - lon_start))
empty_vec[:] = -9999 * np.float(Scaling_factor)
dest_ex = Save_as_MEM(empty_vec, geo_ex, str(epsg))
# Raster loop
print('\tExtracting data from rasters...')
for tt in range(len(dates_ls)):
Date_now = datetime.datetime.fromordinal(dates_ls[tt])
yyyy = str(Date_now.year)
mm = '%02d' %int(Date_now.month)
dd = '%02d' %int(Date_now.day)
# Raster
ras = name_format.format(yyyy=yyyy,mm=mm,dd=dd)
if ras in ras_ls:
data_in = os.path.join(rasters_path, ras)
dest = reproject_dataset_example(data_in, dest_ex)
array_tt = dest.GetRasterBand(1).ReadAsArray()
array_tt[array_tt<-9999] = -9999 * np.float(Scaling_factor)
original_var[tt, :, :] = np.int_(array_tt * 1./np.float(Scaling_factor))
else:
# Store values
original_var[tt, :, :] = np.int_(empty_vec * 1./np.float(Scaling_factor))
# Close file
nc_file.close()
print('NetCDF %s file created' %i)
# Return
return nc_path_tiles
def HANTS_netcdf(nc_path, args):
'''
This function runs the python implementation of the HANTS algorithm. It
takes the input netcdf file and fills the 'hants_values',
'combined_values', and 'outliers' variables.
'''
nb, nf, HiLo, low, high, fet, dod, delta, Scaling_factor = args
# Read netcdfs
nc_file = netCDF4.Dataset(nc_path, 'r+', format="NETCDF4_CLASSIC")
nc_file.set_fill_on()
time_var = nc_file.variables['time'][:]
original_values = nc_file.variables['original_values'][:]
[ztime, rows, cols] = original_values.shape
size_st = cols*rows
values_hants = pd.np.empty((ztime, rows, cols))
outliers_hants = pd.np.empty((ztime, rows, cols))
values_hants[:] = pd.np.nan
outliers_hants[:] = pd.np.nan
# Additional parameters
ni = len(time_var)
ts = range(ni)
# Loop
counter = 1
#print('Running HANTS...')
for m in range(rows):
for n in range(cols):
#print('\t{0}/{1}'.format(counter, size_st))
y =
|
pd.np.array(original_values[:, m, n])
|
pandas.np.array
|
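The pixel loop above reaches numpy through the pd.np alias, which was deprecated and later removed from pandas; a plain np.array call on the sliced cube is the direct equivalent. A small sketch with a toy data cube standing in for the netCDF variable:
import numpy as np

# Toy stand-in for the (time, rows, cols) cube read from the netCDF file.
original_values = np.arange(2 * 3 * 4, dtype=float).reshape(2, 3, 4)

m, n = 1, 2
y = np.array(original_values[:, m, n])   # time series for a single pixel, shape (ztime,)
print(y)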
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import seaborn as sb
from matplotlib import pyplot as plt
from sklearn.preprocessing import LabelEncoder
import pandas as pd
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
warnings.simplefilter(action='ignore', category=FutureWarning)
# Reading dataset
train_datas =
|
pd.read_csv('Test_Set (1).csv')
|
pandas.read_csv
|
import pandas as pd
from dplypy.dplyframe import DplyFrame
def test_init():
pandas_df = pd.DataFrame(
data={
"col1": [0, 1, 2, 3],
"col2": [3, 4, 5, 6],
"col3": [6, 7, 8, 9],
"col4": [9, 10, 11, 12],
}
)
# Wrap the pandas DataFrame and check the contents are preserved
df1 = DplyFrame(pandas_df)
pd.testing.assert_frame_equal(df1.pandas_df, pandas_df)
def test_getitem():
pandas_df = pd.DataFrame(
data={
"col1": [0, 1, 2, 3],
"col2": [3, 4, 5, 6],
"col3": [6, 7, 8, 9],
"col4": [9, 10, 11, 12],
}
)
df1 = DplyFrame(pandas_df)
pd.testing.assert_series_equal(pandas_df["col2"], df1["col2"])
pd.testing.assert_frame_equal(
pandas_df[pandas_df["col2"] > 4], df1[df1["col2"] > 4]
)
def test_setitem_deepcopy():
pandas_df = pd.DataFrame([[0, 1]])
df1 = DplyFrame(pandas_df)
df2 = df1.deep_copy()
pd.testing.assert_frame_equal(df1.pandas_df, df2.pandas_df)
df1.__setitem__(1, 2)
try:
pd.testing.assert_frame_equal(df1.pandas_df, df2.pandas_df)
except AssertionError:
pass
else:
raise AssertionError("Dataframes should be unequal")
df2.pandas_df[1] = 2
pd.testing.assert_frame_equal(df1.pandas_df, df2.pandas_df)
def test_add():
pandas_df = pd.DataFrame(
data={
"col1": [0, 1, 2, 3],
"col2": [3, 4, 5, 6],
"col3": [6, 7, 8, 9],
"col4": [9, 10, 11, 12],
}
)
df = DplyFrame(pandas_df)
def add1():
return lambda d1: DplyFrame(d1.pandas_df + 1)
df_output = df + add1() + add1() + add1()
pd.testing.assert_frame_equal(df_output.pandas_df, pandas_df + 3)
def test_repr():
pandas_df = pd.DataFrame(
data={
"col1": [0, 1, 2, 3],
}
)
df = DplyFrame(pandas_df)
assert pandas_df.to_string() == df.__repr__()
def test_comparison_ops():
pdf = pd.DataFrame(
data={
"a": [0, 1, 2, 3],
}
)
df = DplyFrame(pdf)
pd.testing.assert_series_equal(df["a"] > 0, pdf["a"] > 0)
pd.testing.assert_series_equal(df["a"] >= 1, pdf["a"] >= 1)
|
pd.testing.assert_series_equal(df["a"] < 3, pdf["a"] < 3)
|
pandas.testing.assert_series_equal
|
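For reference, pd.testing.assert_series_equal passes silently on a match and raises AssertionError on any difference in values, dtype or index, which is what the comparison-operator tests above rely on. A tiny self-contained sketch:
import pandas as pd

s1 = pd.Series([1, 2, 3])
pd.testing.assert_series_equal(s1, pd.Series([1, 2, 3]))   # passes silently

try:
    # dtype differs (int64 vs float64), so this raises
    pd.testing.assert_series_equal(s1, pd.Series([1.0, 2.0, 3.0]))
except AssertionError:
    print("mismatch detected")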
import numpy as np
import scipy as sp
import pandas as pd
import numbers
from typing import Callable, List, Union
import logging
from .base import Epsilon
from ..distance import SCALE_LIN
from ..sampler import Sampler
from ..storage import save_dict_to_json
logger = logging.getLogger("Epsilon")
class TemperatureBase(Epsilon):
"""
A temperature scheme handles the decrease of the temperatures employed
by a :class:`pyabc.acceptor.StochasticAcceptor` over time.
This class is not functional on its own, its derivatives must be used.
"""
class ListTemperature(TemperatureBase):
"""
Pass a list of temperature values to use successively.
Parameters
----------
values:
The array of temperatures to use successively.
For exact inference, finish with 1.
"""
def __init__(self, values: List[float]):
self.values = values
def __call__(self,
t: int) -> float:
return self.values[t]
class Temperature(TemperatureBase):
"""
This class implements a highly adaptive and configurable temperature
scheme. Via the argument `schemes`, arbitrary temperature schemes can be
passed to calculate the next generation's temperature, via `aggregate_fun`
one can define how to combine multiple guesses, via `initial_temperature`
the initial temperature can be set.
Parameters
----------
schemes: Union[Callable, List[Callable]], optional
Temperature schemes returning proposed
temperatures for the next time point, e.g.
instances of :class:`pyabc.epsilon.TemperatureScheme`.
aggregate_fun: Callable[List[float], float], optional
The function to aggregate the schemes by, of the form
``Callable[List[float], float]``.
Defaults to taking the minimum.
initial_temperature: float, optional
The initial temperature. If None provided, an AcceptanceRateScheme
is used.
enforce_exact_final_temperature: bool, optional
Whether to force the final temperature (if max_nr_populations < inf)
to be 1.0, giving exact inference.
log_file: str, optional
A log file for storing data of the temperature that are currently not
saved in the database. The data are saved in json format.
Properties
----------
max_nr_populations: int
The maximum number of iterations as passed to ABCSMC.
May be inf, but not all schemes can handle that (and will complain).
temperatures: Dict[int, float]
Times as keys and temperatures as values.
"""
def __init__(
self,
schemes: Union[Callable, List[Callable]] = None,
aggregate_fun: Callable[[List[float]], float] = None,
initial_temperature: float = None,
enforce_exact_final_temperature: bool = True,
log_file: str = None):
self.schemes = schemes
if aggregate_fun is None:
# use minimum over all proposed temperature values
aggregate_fun = min
self.aggregate_fun = aggregate_fun
if initial_temperature is None:
initial_temperature = AcceptanceRateScheme()
self.initial_temperature = initial_temperature
self.enforce_exact_final_temperature = enforce_exact_final_temperature
self.log_file = log_file
# to be filled later
self.max_nr_populations = None
self.temperatures = {}
self.temperature_proposals = {}
def initialize(self,
t: int,
get_weighted_distances: Callable[[], pd.DataFrame],
get_all_records: Callable[[], List[dict]],
max_nr_populations: int,
acceptor_config: dict):
self.max_nr_populations = max_nr_populations
# set default schemes
if self.schemes is None:
# this combination proved rather stable
acc_rate_scheme = AcceptanceRateScheme()
decay_scheme = (
ExpDecayFixedIterScheme() if np.isfinite(max_nr_populations)
else ExpDecayFixedRatioScheme())
self.schemes = [acc_rate_scheme, decay_scheme]
# set initial temperature for time t
self._update(t, get_weighted_distances, get_all_records,
1.0, acceptor_config)
def configure_sampler(self, sampler: Sampler):
if callable(self.initial_temperature):
self.initial_temperature.configure_sampler(sampler)
for scheme in self.schemes:
scheme.configure_sampler(sampler)
def update(self,
t: int,
get_weighted_distances: Callable[[], pd.DataFrame],
get_all_records: Callable[[], List[dict]],
acceptance_rate: float,
acceptor_config: dict):
# set temperature for time t
self._update(t, get_weighted_distances,
get_all_records, acceptance_rate,
acceptor_config)
def _update(self,
t: int,
get_weighted_distances: Callable[[], pd.DataFrame],
get_all_records: Callable[[], List[dict]],
acceptance_rate: float,
acceptor_config):
"""
Compute the temperature for time `t`.
"""
# scheme arguments
kwargs = dict(
t=t,
get_weighted_distances=get_weighted_distances,
get_all_records=get_all_records,
max_nr_populations=self.max_nr_populations,
pdf_norm=acceptor_config['pdf_norm'],
kernel_scale=acceptor_config['kernel_scale'],
prev_temperature=self.temperatures.get(t-1, None),
acceptance_rate=acceptance_rate,
)
if t >= self.max_nr_populations - 1 \
and self.enforce_exact_final_temperature:
# t is last time
temps = [1.0]
elif not self.temperatures: # need an initial value
if callable(self.initial_temperature):
# execute scheme
temps = [self.initial_temperature(**kwargs)]
elif isinstance(self.initial_temperature, numbers.Number):
temps = [self.initial_temperature]
else:
raise ValueError(
"Initial temperature must be a float or a callable")
else:
# evaluate schemes
temps = []
for scheme in self.schemes:
temp = scheme(**kwargs)
temps.append(temp)
# compute next temperature based on proposals and fallback
# should not be higher than before
fallback = self.temperatures[t-1] \
if t-1 in self.temperatures else np.inf
temperature = self.aggregate_fun(temps)
# also a value lower than 1.0 does not make sense
temperature = max(min(temperature, fallback), 1.0)
if not np.isfinite(temperature):
raise ValueError("Temperature must be finite.")
# record found value
self.temperatures[t] = temperature
# logging
logger.debug(f"Proposed temperatures for {t}: {temps}.")
self.temperature_proposals[t] = temps
if self.log_file:
save_dict_to_json(self.temperature_proposals, self.log_file)
def __call__(self,
t: int) -> float:
return self.temperatures[t]
class TemperatureScheme:
"""
A TemperatureScheme suggests the next temperature value. It is used as
one of potentially multiple schemes employed in the Temperature class.
This class is abstract.
Parameters
----------
t:
The time to compute for.
get_weighted_distances:
Callable to obtain the weights and kernel values to be used for
the scheme.
get_all_records:
Callable returning a List[dict] of all recorded particles.
max_nr_populations:
The maximum number of populations that are supposed to be taken.
pdf_norm:
The normalization constant c that will be used in the acceptance step.
kernel_scale:
Scale on which the pdf values are (linear or logarithmic).
prev_temperature:
The temperature that was used last time (or None if not applicable).
acceptance_rate:
The recently obtained rate.
"""
def __init__(self):
pass
def configure_sampler(self, sampler: Sampler):
"""
Modify the sampler. As in, and redirected from,
:func:`pyabc.epsilon.Temperature.configure_sampler`.
"""
def __call__(self,
t: int,
get_weighted_distances: Callable[[], pd.DataFrame],
get_all_records: Callable[[], List[dict]],
max_nr_populations: int,
pdf_norm: float,
kernel_scale: str,
prev_temperature: float,
acceptance_rate: float):
pass
class AcceptanceRateScheme(TemperatureScheme):
"""
Try to keep the acceptance rate constant at a value of
`target_rate`. Note that this scheme will fail to
reduce the temperature sufficiently in later iterations, if the
problem's inherent acceptance rate is lower, but it has been
observed to give big feasible temperature leaps in early iterations.
In particular, this scheme can be used to propose an initial temperature.
Parameters
----------
target_rate: float, optional
The target acceptance rate to match.
min_rate: float, optional
The minimum rate below which not to apply the acceptance step scheme
any more. Setting this to a value of e.g. 0.05 can make sense
1) because it may be unlikely that the acceptance rate scheme will
propose a useful temperature at such low acceptance levels, and
2) to avoid unnecessary computations.
"""
def __init__(self, target_rate: float = 0.3, min_rate: float = None):
self.target_rate = target_rate
self.min_rate = min_rate
def configure_sampler(self, sampler: Sampler):
sampler.sample_factory.record_rejected = True
def __call__(self,
t: int,
get_weighted_distances: Callable[[], pd.DataFrame],
get_all_records: Callable[[], List[dict]],
max_nr_populations: int,
pdf_norm: float,
kernel_scale: str,
prev_temperature: float,
acceptance_rate: float):
# check minimum rate
if self.min_rate is not None and acceptance_rate < self.min_rate:
return np.inf
# execute function (expensive if in calibration)
records = get_all_records()
# convert to dataframe for easier extraction
records =
|
pd.DataFrame(records)
|
pandas.DataFrame
|
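The acceptance-rate scheme above converts the list of per-particle record dicts into a DataFrame "for easier extraction". A hedged sketch of that conversion; the record keys shown here are illustrative, not the exact keys pyabc produces:
import pandas as pd

records = [
    {"distance": 0.8, "accepted": True},
    {"distance": 2.3, "accepted": False},
    {"distance": 1.1, "accepted": True},
]

df = pd.DataFrame(records)              # one row per sampled particle
print(df)
print("acceptance rate:", df["accepted"].mean())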
# This script analyzes the csv files output by PixDistStats2.py
# Updated Feb 2021.
# PixDistStats2 separates the data into biological replicates instead of aggregating all data for each sample group.
# This script takes those data and does stats and makes plots.
# pixel_distance.py actually performs the measurement of minimum distance
# between tumor and lyve-1 pixels, and outputs the results for each image.
# PixDistStats.py performs stats and makes plots on ALL the data separated by sample group. However,
# this is insufficient because it isn't split up into biological replicates, or normalized.
# PixDistStats2.py separates the data into biological replicates instead of aggregating
# all data for each sample group, and experiments with plots.
# PixDistStats3.py takes data from PixDistStats2, normalizes it to total pixels for each animal,
# does statistical comparisons and makes plots.
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import os
import pixel_distance as pxd
import pandas as pd
from scipy.stats import stats
from statsmodels.stats.multicomp import pairwise_tukeyhsd, MultiComparison
import joypy as jpy
def load_datas(dir):
distbypercentiles = pd.read_csv(dir + 'dist_by_percentiles.csv', index_col='percentiles')
numpixbydistbins = pd.read_csv(dir + 'numpix_by_dist_bins.csv', index_col='distance bins')
normnumpixbydistbins = pd.read_csv(dir + 'norm_numpix_by_dist_bins.csv', index_col='distance bins')
print('dist by percentiles: ')
print(distbypercentiles.head(10))
print('numpix by dist bins: ')
print(numpixbydistbins.head(11))
print('normalized numpix by dist bins: ')
print(normnumpixbydistbins.head(11))
# return datas as a list
return [distbypercentiles, numpixbydistbins, normnumpixbydistbins]
def run_anova(data, savedir, labels):
# ANOVA
f_stat, p = stats.f_oneway(data[labels[0:4]],
data[labels[5:9]],
data[labels[10:14]])
# Multiple comparisons... Tukey Test:
# need a stacked dataframe with consistent labels...
# measurement is the data, group is naive, tdLN, disLN
# mc = MultiComparison(data_stacked['measurement'], data_stacked['group'])
# stack data
# data_stacked = data.stack().reset_index()
# data is in Series form... so it's already stacked. Just reset_index()
data_stacked = data.to_frame()
# data_stacked = data_stacked.rename(columns={'level_0': 'id', 'level_1': 'group', 0: 'distance'})
print(data_stacked.head(20))
# make new column with supergroups (naive, disLN, tdLN)
# data_stacked['supergroup'] = data_stacked['group'].map(lambda x: x.rstrip('12345'))
data_stacked['supergroup'] = data_stacked.index.map(lambda x: x.rstrip('12345'))
print(data_stacked.head(20))
# mc = MultiComparison(data_stacked['distance'], data_stacked['supergroup'])
mc = MultiComparison(data_stacked[data.name], data_stacked['supergroup'])
tukey = mc.tukeyhsd(alpha=0.05)
print(data_stacked[data.name])
# Save ANOVA & Tukey results in a text file
file0 = open(savedir + data.name + '_ANOVA.txt', 'a+')
file0.write('Stats: \n')
file0.write('Mean: ' + str(data_stacked.groupby(['supergroup']).mean()) + '\n')
file0.write('Standard Deviation: ' + str(data_stacked.groupby(['supergroup']).std()) + '\n')
file0.write('ANOVA Results: \n')
file0.write('F Statistic: ' + str(f_stat) + '\n')
file0.write('p-value: ' + str(p) + '\n')
file0.write('Tukey results: ' + '\n')
file0.write(str(tukey) + '\n')
file0.write('Unique groups: {}'.format(mc.groupsunique))
return
def transpose_data(data):
# remove name of indexes
data.index.names = [None]
transposed_data = data.transpose()
print('after transposing: ')
print(transposed_data)
# drop the number from the end of the indexes (make supergroups)
transposed_data = transposed_data.rename(index=lambda x: x.rstrip('12345'))
print('after renaming indexes: ')
print(transposed_data)
# stack based on supergroup
# transposed_stacked_data = transposed_data.stack()
# print('after stacking: ')
# print(transposed_stacked_data)
return transposed_data
def make_plots(dist_percentile, norm_numpix, savedir, labels):
sns.set_theme(style="whitegrid")
# keep x-axis as distance consistent across plots.
# For dist_by_percentiles_transposed, try plotting a bar graph which shows the distance tumor cells invaded to
# at each percentile
# 10th %ile |---| 10% of cells invaded less than this distance
# 20th %ile |-------| 20% of cells invaded less than this distance
# 30th %ile |------------| 30% of cells invaded less than this distance
# For norm_numpix_by_dist_bins_transposed, try plotting a histogram...
# Proportion (normalized #) of pixels at each distance.
# Can overlay all three histograms in different colors, slightly opaque
# ------------------------------------------------------------------------------------------
# bar plots for dist_percentile:
print('initial assessment: ')
dist_percentile.index.names = ['Group']
print(dist_percentile)
print(dist_percentile.index)
# convert indexes to a column so we can melt it
dist_percentile.reset_index(level=dist_percentile.index.names, inplace=True)
print('after reset index: ')
print(dist_percentile)
melt_dist_percentile = pd.melt(dist_percentile, id_vars='Group', var_name='Percentile',
value_name='Distance (microns)')
ax2 = sns.barplot(x='Distance (microns)', y='Percentile', hue='Group', data=melt_dist_percentile)
fig2 = ax2.get_figure()
fig2.set_size_inches(11, 8.5) # increase figure size
plt.gca().legend().set_title(None) # remove legend title
plt.gca().set_title('Distance from Lymphatics by Percentile') # set plot title
# Add annotations for statistical significance based on earlier anova & tukey comparisons (see txt files)
# which comparisons were significant? by tukey:
# 30th: disLN & tdLN. p-adj = 0.0401
# 40th: disLN & tdLN. p-adj = 0.0191
# 50th: disLN & tdLN. p-adj = 0.0126, naive & tdLN. p-adj = 0.0369
# 60th: disLN & tdLN. p-adj = 0.012, naive & tdLN. p-adj = 0.0177
# 70th: disLN & tdLN. p-adj = 0.0153, naive & tdLN. p-adj = 0.0122
# 80th: disLN & tdLN. p-adj = 0.0221, naive & tdLN. p-adj = 0.011
fig2.savefig(savedir + 'dist_by_percentiles.png')
fig2.clf()
# -----------------------------------------------------------------------------------------------------
# histograms for norm_numpix:
# this isn't actually a histogram... since I already have the x-labels as bins and
# the counts (proportions) for each sample. What I really want to do is create a bunch of bar plots.
# fig, ax = plt.subplots()
# for a in [x, y]:
# sns.distplot(a, bins=range(1, 110, 10), ax=ax, kde=False)
# ax.set_xlim([0, 100])
# Try melting...
print('before index rename attempt: ')
print(norm_numpix.index)
norm_numpix.index.names = ['Group']
print('after index rename attempt: ')
print(norm_numpix)
print(norm_numpix.index)
# convert indexes to a column so we can melt it
norm_numpix.reset_index(level=norm_numpix.index.names, inplace=True)
print('after reset index: ')
print(norm_numpix)
melt_norm_numpix = pd.melt(norm_numpix, id_vars='Group', var_name='Distance (microns)',
value_name='% of total pixels within group')
print('after melting: ')
print(melt_norm_numpix.head())
# # Stack Data
# norm_numpix = norm_numpix.stack()
# print('after stacking: ')
# print(norm_numpix)
# print('indexes: ')
# print(norm_numpix.index)
# # samples = ['tdLN', 'disLN', 'naive']
# # dist_bins = ['0-10um', '10-20um', '20-30um', '30-40um', '40-50um',
# # '50-60um', '60-70um', '70-80um', '80-90um', '90-100um', '100um+']
# # norm_numpix.index = pd.MultiIndex.from_product([samples, dist_bins], names=['sample', 'dist_bin'])
# # norm_numpix.rename_axis(index=['sample', 'dist_bin'])
# norm_numpix.index.names = ['sample', 'dist_bin']
# print('after rename attempt: ')
# print(norm_numpix)
# print(norm_numpix.index)
# # g = sns.FacetGrid(norm_numpix, hue='sample', palette='coolwarm')
ax = sns.barplot(x='Distance (microns)', y='% of total pixels within group', hue='Group', data=melt_norm_numpix)
fig = ax.get_figure()
fig.set_size_inches(11, 8.5) # increase figure size
plt.gca().legend().set_title(None) # remove legend title
plt.gca().set_title('% of Tumor+ Pixels vs. Distance from Lymphatics') # set plot title
# Add annotations for statistical significance based on earlier anova & tukey comparisons (see txt files)
# which comparisons were significant? by tukey:
# in general... 0-20um: tdLN sig lower. 30-50um: tdLN sig higher.
# 0-10um: disLN & tdLN. p-adj = 0.0472
# 10-20um: naive & tdLN. p-adj = 0.0306
# 30-40um: naive & tdLN. p-adj = 0.0014
# 40-50um: disLN & tdLN. p-adj = 0.0019. naive & tdLN. p-adj = 0.001
fig.savefig(savedir + 'numpix_by_dist_bins.png')
fig.clf()
return
# -------------------------------------------------------------------------------------
# MAIN --------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# choose directory
dirname = pxd.file_import(prompt='Choose the directory containing tiff folders: ')
print(dirname)
save_dir1 = dirname + '/anova_outputs_feb2020/'
print(save_dir1)
# load datas
[dist_by_percentiles, numpix_by_dist_bins, norm_numpix_by_dist_bins] = load_datas(dirname)
data_labels = ['naive1', 'naive2', 'naive3', 'naive4', 'naive5',
'disLN1', 'disLN2', 'disLN3', 'disLN4', 'disLN5',
'tdLN1', 'tdLN2', 'tdLN3', 'tdLN4', 'tdLN5']
# STATS
# We'll try ANOVA. However, looking at the data distributions, it looks like the tumor data has a higher variance.
# One of the assumptions of ANOVA is that groups have the same variance. If this is a problem, we could try
# using Welch's ANOVA through Pengouin. https://pingouin-stats.org/generated/pingouin.welch_anova.html
# Also use this for loop to create a new dataframe where each measure is stacked (essentially a transpose)
dist_by_percentiles_transposed =
|
pd.DataFrame()
|
pandas.DataFrame
|
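The loop announced above builds a transposed frame so that each animal becomes a row and the trailing replicate number can be stripped off to form supergroups (as transpose_data does). A toy sketch of that reshaping with invented values:
import pandas as pd

# Rows are percentiles, columns are individual animals (invented numbers).
dist_by_percentiles = pd.DataFrame(
    {"naive1": [5.0, 9.0], "naive2": [6.0, 10.0], "tdLN1": [7.0, 14.0]},
    index=["10th", "20th"])

transposed = dist_by_percentiles.T                                  # animals become rows
transposed = transposed.rename(index=lambda x: x.rstrip("12345"))   # collapse to supergroups
print(transposed.groupby(level=0).mean())                           # mean per supergroup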
import pandas as pd
import numpy as np
problem = 'boston'
MAX_DIST = {50, 100, 250, 500}
ALPHA = {0.1, 0.2, 0.5, 0.7, 1.0}
RANK = {'btw_id', 'voc_id'}
for alpha in ALPHA:
for max_dist in MAX_DIST:
for rank in RANK:
fid_tab = '../matlab/rank_table_%s.csv' % problem
# load table of ranks
print('Reading table of ranks: %s' % fid_tab)
table = pd.read_csv(fid_tab)
print('Converting table to arrays')
eids = table['eid'].as_matrix()
source = table['source'].as_matrix()
target = table['target'].as_matrix()
dist = table['length_km'].as_matrix()
idrank = table[rank].as_matrix()
nedges = len(source)
# (rank_id) -> row
map_rank_to_row = dict()
for k in range(nedges):
map_rank_to_row[idrank[k]] = k
print('Selecting edges')
selected_rows = []
total_dist = 0
for k in range(min(idrank), max(idrank) + 1):
row = map_rank_to_row[k]
selected_rows.append(row)
# consider only the length of non-artificial edges
eid = eids[row]
if eid >= 0:
total_dist += dist[row]
else:
print('The length of edge (%d,%d) was not added' % (source[row], target[row]))
# stop criteria
if total_dist >= max_dist:
break
print(' Number of selected edges: %d' % len(selected_rows))
print(' Total distance (km): %f' % total_dist)
# create selected column
selected = np.zeros(nedges, dtype=int)
for row in selected_rows:
selected[row] = 1
table['selected'] = pd.Series(selected, index=table.index)
# incrementing the edge capacities
cap = table['capacity'].as_matrix()
cap_old = cap.copy()
for row in selected_rows:
cap[row] *= (1 + alpha)
table['capacity'] =
|
pd.Series(cap, index=table.index)
|
pandas.Series
|
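The capacity update above reads a column with the long-removed .as_matrix() accessor, scales the selected rows, and writes the array back as a pd.Series built on the table's own index. A minimal sketch of the same round trip using the modern .to_numpy() (the column values are invented):
import numpy as np
import pandas as pd

table = pd.DataFrame({"eid": [0, 1, 2], "capacity": [10.0, 20.0, 30.0]})

cap = table["capacity"].to_numpy().copy()   # .as_matrix() was removed; .to_numpy() replaces it
cap[[0, 2]] *= 1.5                          # boost the selected edges by alpha = 0.5

# Rebuilding the column as a Series on table.index keeps the alignment explicit.
table["capacity"] = pd.Series(cap, index=table.index)
print(table)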
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import pandas as pd
import numpy as np
import re
from sklearn.utils import shuffle
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class EncoderLSTM(nn.Module):
def __init__(self, input_size, hidden_size, n_layers=3, drop_prob=0.1):
super(EncoderLSTM, self).__init__()
self.hidden_size = hidden_size
self.n_layers = n_layers
self.embedding = nn.Embedding(input_size, hidden_size)
self.lstm = nn.LSTM(hidden_size, hidden_size, n_layers, dropout=drop_prob, batch_first=True)
def forward(self, inputs, hidden):
# Embed input words
embedded = self.embedding(inputs)
# Pass the embedded word vectors into LSTM and return all outputs
output, hidden = self.lstm(embedded, hidden)
return output, hidden
def init_hidden(self, batch_size=1):
return (torch.zeros(self.n_layers, batch_size, self.hidden_size, device=device),
torch.zeros(self.n_layers, batch_size, self.hidden_size, device=device))
class LuongDecoder(nn.Module):
def __init__(self, hidden_size, output_size, attention, n_layers=3, drop_prob=0.1):
super(LuongDecoder, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers = n_layers
self.drop_prob = drop_prob
# The Attention Mechanism is defined in a separate class
self.attention = attention
# self.embedding = nn.Embedding(self.output_size, self.hidden_size)
self.lin_cast = nn.Linear(self.output_size, self.hidden_size)
self.dropout = nn.Dropout(self.drop_prob)
self.lstm = nn.LSTM(self.hidden_size, self.hidden_size, self.n_layers)
self.classifier = nn.Linear(self.hidden_size * 2, self.output_size)
def forward(self, inputs, hidden, encoder_outputs):
# Embed input words
# embedded = self.embedding(inputs).view(1, 1, -1)
embedded = self.lin_cast(inputs)
embedded = self.dropout(embedded)
# Passing previous output word (embedded) and hidden state into LSTM cell
lstm_out, hidden = self.lstm(embedded, hidden)
# Calculating Alignment Scores - see Attention class for the forward pass function
alignment_scores = self.attention(lstm_out, encoder_outputs)
# Softmaxing alignment scores to obtain Attention weights
attn_weights = F.softmax(alignment_scores.view(1, -1), dim=1)
# Multiplying Attention weights with encoder outputs to get context vector
context_vector = torch.bmm(attn_weights.unsqueeze(0), encoder_outputs)
# Concatenating output from LSTM with context vector
output = torch.cat((lstm_out, context_vector), -1)
# Pass concatenated vector through Linear layer acting as a Classifier
# output = F.log_softmax(self.classifier(output[0]), dim=1)
output = self.classifier(output[0])
return output, hidden, attn_weights
def init_hidden(self, batch_size=1):
return (torch.zeros(self.n_layers, batch_size, self.hidden_size * 2, device=device),
torch.zeros(self.n_layers, batch_size, self.hidden_size * 2, device=device))
class Attention(nn.Module):
def __init__(self, hidden_size, method="dot"):
super(Attention, self).__init__()
self.method = method
self.hidden_size = hidden_size
# Defining the layers/weights required depending on alignment scoring method
if method == "general":
self.fc = nn.Linear(hidden_size, hidden_size, bias=False)
elif method == "concat":
self.fc = nn.Linear(hidden_size, hidden_size, bias=False)
self.weight = nn.Parameter(torch.empty(1, hidden_size, device=device))
def forward(self, decoder_hidden, encoder_outputs):
if self.method == "dot":
# For the dot scoring method, no weights or linear layers are involved
return encoder_outputs.bmm(decoder_hidden.view(1, -1, 1)).squeeze(-1)
elif self.method == "general":
# For general scoring, decoder hidden state is passed through linear layers to introduce a weight matrix
out = self.fc(decoder_hidden)
return encoder_outputs.bmm(out.view(1, -1, 1)).squeeze(-1)
elif self.method == "concat":
# For concat scoring, decoder hidden state and encoder outputs are concatenated first
out = torch.tanh(self.fc(decoder_hidden + encoder_outputs))
return out.bmm(self.weight.unsqueeze(-1)).squeeze(-1)
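# Minimal sketch: a shape check of the dot-scoring branch above with toy tensors
# (batch of 1, 5 encoder steps, hidden size 8 -- sizes chosen only for illustration).
_toy_encoder_outputs = torch.randn(1, 5, 8)
_toy_decoder_hidden = torch.randn(1, 1, 8)
_toy_scores = _toy_encoder_outputs.bmm(_toy_decoder_hidden.view(1, -1, 1)).squeeze(-1)
assert _toy_scores.shape == (1, 5)  # one alignment score per encoder time step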
class EncoderLSTM_b(nn.Module):
def __init__(self, input_size, hidden_size, n_layers=2, drop_prob=0.2):
super(EncoderLSTM_b, self).__init__()
self.hidden_size = hidden_size
self.n_layers = n_layers
self.embedding = nn.Embedding(input_size, hidden_size)
self.lstm = nn.LSTM(hidden_size, hidden_size, n_layers, bidirectional=True, dropout=drop_prob, batch_first=True)
def forward(self, inputs, hidden):
# Embed input words
embedded = self.embedding(inputs)
# Pass the embedded word vectors into LSTM and return all outputs
output, hidden = self.lstm(embedded, hidden)
return output, hidden
def init_hidden(self, batch_size=1):
return (torch.zeros(self.n_layers * 2, batch_size, self.hidden_size, device=device),
torch.zeros(self.n_layers * 2, batch_size, self.hidden_size, device=device))
class LuongDecoder_b(nn.Module):
def __init__(self, hidden_size, output_size, attention, n_layers=4, drop_prob=0.2):
super(LuongDecoder_b, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers = n_layers
self.drop_prob = drop_prob
# The Attention Mechanism is defined in a separate class
self.attention = attention
# self.embedding = nn.Embedding(self.output_size, self.hidden_size)
self.lin_cast = nn.Linear(self.output_size, self.hidden_size)
self.dropout = nn.Dropout(self.drop_prob)
self.lstm = nn.LSTM(self.hidden_size, self.hidden_size * 2, self.n_layers)
self.classifier = nn.Linear(self.hidden_size * 4, self.output_size)
def forward(self, inputs, hidden, encoder_outputs):
# Embed input words
# embedded = self.embedding(inputs).view(1, 1, -1)
embedded = self.lin_cast(inputs)
embedded = self.dropout(embedded)
# Passing previous output word (embedded) and hidden state into LSTM cell
lstm_out, hidden = self.lstm(embedded, hidden)
# Calculating Alignment Scores - see Attention class for the forward pass function
alignment_scores = self.attention(lstm_out, encoder_outputs)
# Softmaxing alignment scores to obtain Attention weights
attn_weights = F.softmax(alignment_scores.view(1, -1), dim=1)
# Multiplying Attention weights with encoder outputs to get context vector
context_vector = torch.bmm(attn_weights.unsqueeze(0), encoder_outputs)
# Concatenating output from LSTM with context vector
output = torch.cat((lstm_out, context_vector), -1)
# Pass concatenated vector through Linear layer acting as a Classifier
# output = F.log_softmax(self.classifier(output[0]), dim=1)
output = self.classifier(output[0])
return output, hidden, attn_weights
def init_hidden(self, batch_size=1):
return (torch.zeros(self.n_layers, batch_size, self.hidden_size * 2, device=device),
torch.zeros(self.n_layers, batch_size, self.hidden_size * 2, device=device))
class Attention_b(nn.Module):
def __init__(self, hidden_size, method="dot"):
super(Attention_b, self).__init__()
self.method = method
self.hidden_size = hidden_size
# Defining the layers/weights required depending on alignment scoring method
if method == "general":
self.fc = nn.Linear(hidden_size, hidden_size, bias=False)
elif method == "concat":
self.fc = nn.Linear(hidden_size, hidden_size, bias=False)
self.weight = nn.Parameter(torch.empty(1, hidden_size, device=device))
def forward(self, decoder_hidden, encoder_outputs):
if self.method == "dot":
# For the dot scoring method, no weights or linear layers are involved
return encoder_outputs.bmm(decoder_hidden.view(1, -1, 1)).squeeze(-1)
elif self.method == "general":
# For general scoring, decoder hidden state is passed through linear layers to introduce a weight matrix
out = self.fc(decoder_hidden)
return encoder_outputs.bmm(out.view(1, -1, 1)).squeeze(-1)
elif self.method == "concat":
# For concat scoring, decoder hidden state and encoder outputs are concatenated first
out = torch.tanh(self.fc(decoder_hidden + encoder_outputs))
return out.bmm(self.weight.unsqueeze(-1)).squeeze(-1)
### more layers!!!
class EncoderLSTM_bb(nn.Module):
def __init__(self, input_size, hidden_size, n_layers=3, drop_prob=0.2):
super(EncoderLSTM_bb, self).__init__()
self.hidden_size = hidden_size
self.n_layers = n_layers
self.embedding = nn.Embedding(input_size, hidden_size)
self.lstm = nn.LSTM(hidden_size, hidden_size, n_layers, bidirectional=True, dropout=drop_prob, batch_first=True)
def forward(self, inputs, hidden):
# Embed input words
embedded = self.embedding(inputs)
# Pass the embedded word vectors into LSTM and return all outputs
output, hidden = self.lstm(embedded, hidden)
return output, hidden
def init_hidden(self, batch_size=1):
return (torch.zeros(self.n_layers * 2, batch_size, self.hidden_size, device=device),
torch.zeros(self.n_layers * 2, batch_size, self.hidden_size, device=device))
class LuongDecoder_bb(nn.Module):
def __init__(self, hidden_size, output_size, attention, n_layers=6, drop_prob=0.2):
super(LuongDecoder_bb, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers = n_layers
self.drop_prob = drop_prob
# The Attention Mechanism is defined in a separate class
self.attention = attention
# self.embedding = nn.Embedding(self.output_size, self.hidden_size)
self.lin_cast = nn.Linear(self.output_size, self.hidden_size)
self.dropout = nn.Dropout(self.drop_prob)
self.lstm = nn.LSTM(self.hidden_size, self.hidden_size * 2, self.n_layers)
self.classifier = nn.Linear(self.hidden_size * 4, self.output_size)
def forward(self, inputs, hidden, encoder_outputs):
# Embed input words
# embedded = self.embedding(inputs).view(1, 1, -1)
embedded = self.lin_cast(inputs)
embedded = self.dropout(embedded)
# Passing previous output word (embedded) and hidden state into LSTM cell
lstm_out, hidden = self.lstm(embedded, hidden)
# Calculating Alignment Scores - see Attention class for the forward pass function
alignment_scores = self.attention(lstm_out, encoder_outputs)
# Softmaxing alignment scores to obtain Attention weights
attn_weights = F.softmax(alignment_scores.view(1, -1), dim=1)
# Multiplying Attention weights with encoder outputs to get context vector
context_vector = torch.bmm(attn_weights.unsqueeze(0), encoder_outputs)
# Concatenating output from LSTM with context vector
output = torch.cat((lstm_out, context_vector), -1)
# Pass concatenated vector through Linear layer acting as a Classifier
# output = F.log_softmax(self.classifier(output[0]), dim=1)
output = self.classifier(output[0])
return output, hidden, attn_weights
def init_hidden(self, batch_size=1):
return (torch.zeros(self.n_layers, batch_size, self.hidden_size * 2, device=device),
torch.zeros(self.n_layers, batch_size, self.hidden_size * 2, device=device))
class Attention_bb(nn.Module):
def __init__(self, hidden_size, method="dot"):
super(Attention_bb, self).__init__()
self.method = method
self.hidden_size = hidden_size
# Defining the layers/weights required depending on alignment scoring method
if method == "general":
self.fc = nn.Linear(hidden_size, hidden_size, bias=False)
elif method == "concat":
self.fc = nn.Linear(hidden_size, hidden_size, bias=False)
self.weight = nn.Parameter(torch.empty(1, hidden_size, device=device))
def forward(self, decoder_hidden, encoder_outputs):
if self.method == "dot":
# For the dot scoring method, no weights or linear layers are involved
return encoder_outputs.bmm(decoder_hidden.view(1, -1, 1)).squeeze(-1)
elif self.method == "general":
# For general scoring, decoder hidden state is passed through linear layers to introduce a weight matrix
out = self.fc(decoder_hidden)
return encoder_outputs.bmm(out.view(1, -1, 1)).squeeze(-1)
elif self.method == "concat":
            # For the concat scoring variant, the decoder hidden state and encoder outputs are combined
            # (summed in this implementation) before the tanh layer
out = torch.tanh(self.fc(decoder_hidden + encoder_outputs))
return out.bmm(self.weight.unsqueeze(-1)).squeeze(-1)
##### Here, we have both encoder and decoder as BIDIRECTIONAL!!!
class EncoderLSTM_bibi(nn.Module):
def __init__(self, input_size, hidden_size, n_layers=4, drop_prob=0.2):
super(EncoderLSTM_bibi, self).__init__()
self.hidden_size = hidden_size
self.n_layers = n_layers
self.embedding = nn.Embedding(input_size, hidden_size)
        self.lstm = nn.LSTM(hidden_size, hidden_size, n_layers, bidirectional=True, dropout=drop_prob, batch_first=True)
def forward(self, inputs, hidden):
# Embed input words
embedded = self.embedding(inputs)
# Pass the embedded word vectors into LSTM and return all outputs
output, hidden = self.lstm(embedded, hidden)
return output, hidden
def init_hidden(self, batch_size=1):
return (torch.zeros(self.n_layers * 2, batch_size, self.hidden_size, device=device),
torch.zeros(self.n_layers * 2, batch_size, self.hidden_size, device=device))
class LuongDecoder_bibi(nn.Module):
def __init__(self, hidden_size, output_size, attention, n_layers=4, drop_prob=0.2):
super(LuongDecoder_bibi, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers = n_layers
self.drop_prob = drop_prob
# The Attention Mechanism is defined in a separate class
self.attention = attention
# self.embedding = nn.Embedding(self.output_size, self.hidden_size)
self.lin_cast = nn.Linear(self.output_size, self.hidden_size*2)
self.dropout = nn.Dropout(self.drop_prob)
self.lstm = nn.LSTM(self.hidden_size * 2, self.hidden_size, self.n_layers, bidirectional=True)
self.classifier = nn.Linear(self.hidden_size * 4, self.output_size)
def forward(self, inputs, hidden, encoder_outputs):
# Embed input words
# embedded = self.embedding(inputs).view(1, 1, -1)
embedded = self.lin_cast(inputs)
embedded = self.dropout(embedded)
# Passing previous output word (embedded) and hidden state into LSTM cell
lstm_out, hidden = self.lstm(embedded, hidden)
# Calculating Alignment Scores - see Attention class for the forward pass function
alignment_scores = self.attention(lstm_out, encoder_outputs)
# Softmaxing alignment scores to obtain Attention weights
attn_weights = F.softmax(alignment_scores.view(1, -1), dim=1)
# Multiplying Attention weights with encoder outputs to get context vector
context_vector = torch.bmm(attn_weights.unsqueeze(0), encoder_outputs)
# Concatenating output from LSTM with context vector
output = torch.cat((lstm_out, context_vector), -1)
# Pass concatenated vector through Linear layer acting as a Classifier
# output = F.log_softmax(self.classifier(output[0]), dim=1)
output = self.classifier(output[0])
return output, hidden, attn_weights
def init_hidden(self, batch_size=1):
return (torch.zeros(self.n_layers*2, batch_size, self.hidden_size, device=device),
torch.zeros(self.n_layers*2, batch_size, self.hidden_size, device=device))
class Attention_bibi(nn.Module):
def __init__(self, hidden_size, method="dot"):
super(Attention_bibi, self).__init__()
self.method = method
self.hidden_size = hidden_size
# Defining the layers/weights required depending on alignment scoring method
if method == "general":
self.fc = nn.Linear(hidden_size, hidden_size, bias=False)
elif method == "concat":
self.fc = nn.Linear(hidden_size, hidden_size, bias=False)
self.weight = nn.Parameter(torch.empty(1, hidden_size, device=device))
def forward(self, decoder_hidden, encoder_outputs):
if self.method == "dot":
# For the dot scoring method, no weights or linear layers are involved
return encoder_outputs.bmm(decoder_hidden.view(1, -1, 1)).squeeze(-1)
elif self.method == "general":
# For general scoring, decoder hidden state is passed through linear layers to introduce a weight matrix
out = self.fc(decoder_hidden)
return encoder_outputs.bmm(out.view(1, -1, 1)).squeeze(-1)
elif self.method == "concat":
            # For the concat scoring variant, the decoder hidden state and encoder outputs are combined
            # (summed in this implementation) before the tanh layer
out = torch.tanh(self.fc(decoder_hidden + encoder_outputs))
return out.bmm(self.weight.unsqueeze(-1)).squeeze(-1)
### 512, 3 layers, but 64 is batch size
class EncoderLSTM_b64(nn.Module):
def __init__(self, input_size, hidden_size, n_layers=2, drop_prob=0.2):
super(EncoderLSTM_b64, self).__init__()
self.hidden_size = hidden_size
self.n_layers = n_layers
self.embedding = nn.Embedding(input_size, hidden_size)
self.lstm = nn.LSTM(hidden_size, hidden_size, n_layers, bidirectional=True, dropout=drop_prob, batch_first=True)
def forward(self, inputs, hidden):
# Embed input words
embedded = self.embedding(inputs)
# Pass the embedded word vectors into LSTM and return all outputs
output, hidden = self.lstm(embedded, hidden)
return output, hidden
def init_hidden(self, batch_size=1):
return (torch.zeros(self.n_layers * 2, batch_size, self.hidden_size, device=device),
torch.zeros(self.n_layers * 2, batch_size, self.hidden_size, device=device))
class LuongDecoder_b64(nn.Module):
def __init__(self, hidden_size, output_size, attention, n_layers=4, drop_prob=0.2):
super(LuongDecoder_b64, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers = n_layers
self.drop_prob = drop_prob
# The Attention Mechanism is defined in a separate class
self.attention = attention
# self.embedding = nn.Embedding(self.output_size, self.hidden_size)
self.lin_cast = nn.Linear(self.output_size, self.hidden_size)
self.dropout = nn.Dropout(self.drop_prob)
self.lstm = nn.LSTM(self.hidden_size, self.hidden_size * 2, self.n_layers)
self.classifier = nn.Linear(self.hidden_size * 4, self.output_size)
def forward(self, inputs, hidden, encoder_outputs):
# Embed input words
# embedded = self.embedding(inputs).view(1, 1, -1)
embedded = self.lin_cast(inputs)
embedded = self.dropout(embedded)
# Passing previous output word (embedded) and hidden state into LSTM cell
lstm_out, hidden = self.lstm(embedded, hidden)
# Calculating Alignment Scores - see Attention class for the forward pass function
alignment_scores = self.attention(lstm_out, encoder_outputs)
# Softmaxing alignment scores to obtain Attention weights
attn_weights = F.softmax(alignment_scores.view(1, -1), dim=1)
# Multiplying Attention weights with encoder outputs to get context vector
context_vector = torch.bmm(attn_weights.unsqueeze(0), encoder_outputs)
# Concatenating output from LSTM with context vector
output = torch.cat((lstm_out, context_vector), -1)
# Pass concatenated vector through Linear layer acting as a Classifier
# output = F.log_softmax(self.classifier(output[0]), dim=1)
output = self.classifier(output[0])
return output, hidden, attn_weights
def init_hidden(self, batch_size=1):
return (torch.zeros(self.n_layers, batch_size, self.hidden_size * 2, device=device),
torch.zeros(self.n_layers, batch_size, self.hidden_size * 2, device=device))
class Attention_b64(nn.Module):
def __init__(self, hidden_size, method="dot"):
super(Attention_b64, self).__init__()
self.method = method
self.hidden_size = hidden_size
# Defining the layers/weights required depending on alignment scoring method
if method == "general":
self.fc = nn.Linear(hidden_size, hidden_size, bias=False)
elif method == "concat":
self.fc = nn.Linear(hidden_size, hidden_size, bias=False)
self.weight = nn.Parameter(torch.empty(1, hidden_size, device=device))
def forward(self, decoder_hidden, encoder_outputs):
if self.method == "dot":
# For the dot scoring method, no weights or linear layers are involved
return encoder_outputs.bmm(decoder_hidden.view(1, -1, 1)).squeeze(-1)
elif self.method == "general":
# For general scoring, decoder hidden state is passed through linear layers to introduce a weight matrix
out = self.fc(decoder_hidden)
return encoder_outputs.bmm(out.view(1, -1, 1)).squeeze(-1)
elif self.method == "concat":
            # For the concat scoring variant, the decoder hidden state and encoder outputs are combined
            # (summed in this implementation) before the tanh layer
out = torch.tanh(self.fc(decoder_hidden + encoder_outputs))
return out.bmm(self.weight.unsqueeze(-1)).squeeze(-1)
def evaluate(x, y, encoder, decoder, max_length, criterion):
# create lists to hold the predicted and actual values
pred_li = []
actual_li = []
attn_li = []
    batch_loss = 0
    tar_len = []
for j in range(len(x)):
# set the loss to 0
loss = 0
# get the input and target tensors
input_tensor = torch.tensor(x[j], dtype=torch.long, device=device).unsqueeze(0)
target_tensor = torch.tensor(y[j], dtype=torch.float, device=device).view(-1, 1)
input_length = input_tensor.size(1)
target_length = target_tensor.size(0)
# zero the encoder weights
encoder_hidden = encoder.init_hidden()
# forward pass of the encoder: run through the whole sequence
encoder_outputs, encoder_hidden = encoder(input_tensor, encoder_hidden)
# now that we have the encoded hidden state, we'll pass an input of 0 for first context
decoder_input = torch.tensor([[0.0]], dtype=torch.float, device=device)
# print(decoder_input)
decoder_input = decoder_input.unsqueeze(1)
decoder_hidden = encoder_hidden
pred_temp = []
actual_temp = []
attn_temp = []
tar_len.append(input_length)
# Use its own predictions as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_outputs)
decoder_input = decoder_output.detach() # detach from history as input
decoder_input = decoder_input.unsqueeze(0)
pred_temp.append(decoder_input.data.cpu().numpy()[0])
actual_temp.append(target_tensor[di].data.cpu().numpy()[0])
attn_li.append(decoder_attention.data.cpu().numpy()[0])
loss += criterion(decoder_output, target_tensor[di].unsqueeze(1))
# get the batch loss
batch_loss += loss / target_length
# append to separate dataframes
pred_li.append(pred_temp)
actual_li.append(actual_temp)
# attn_li.append(attn_li)
eval_loss = batch_loss / len(x)
pred_df = pd.DataFrame(pred_li)
act_df = pd.DataFrame(actual_li)
attn_df = pd.DataFrame(attn_li)
tar_df = pd.DataFrame(tar_len)
return eval_loss, pred_df, act_df, attn_df, tar_df
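# A hypothetical smoke test of evaluate() using the bidirectional pair defined above
# (EncoderLSTM_bibi / LuongDecoder_bibi, whose hidden-state shapes are compatible with the
# decoder_hidden = encoder_hidden hand-off). The toy sequences, sizes, and the MSE criterion
# are illustrative assumptions, not the original data or configuration; torch, nn, and device
# are assumed to be set up earlier in this file.
def _toy_evaluate_run():
    vocab_size, hidden_size = 20, 8
    attn = Attention_bibi(hidden_size * 2, method="dot")
    enc = EncoderLSTM_bibi(vocab_size, hidden_size).to(device)
    dec = LuongDecoder_bibi(hidden_size, 1, attn).to(device)
    x = [[1, 2, 3, 4], [5, 6, 7]]                 # token-index input sequences
    y = [[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7]]   # per-step regression targets
    eval_loss, pred_df, act_df, attn_df, tar_df = evaluate(
        x, y, enc, dec, max_length=10, criterion=nn.MSELoss())
    return eval_loss, pred_df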
def evaluate_b(x, y, encoder, decoder, max_length, criterion):
encoder.eval()
decoder.eval()
with torch.no_grad():
# create lists to hold the predicted and actual values
pred_li = []
actual_li = []
attn_li = []
        batch_loss = 0
        tar_len = []
for j in range(len(x)):
# set the loss to 0
loss = 0
# get the input and target tensors
input_tensor = torch.tensor(x[j], dtype=torch.long, device=device).unsqueeze(0)
target_tensor = torch.tensor(y[j], dtype=torch.float, device=device).view(-1, 1)
input_length = input_tensor.size(1)
target_length = target_tensor.size(0)
# zero the encoder weights
encoder_hidden = encoder.init_hidden()
# forward pass of the encoder: run through the whole sequence
encoder_outputs, encoder_hidden = encoder(input_tensor, encoder_hidden)
# now that we have the encoded hidden state, we'll pass an input of 0 for first context
decoder_input = torch.tensor([[0.0]], dtype=torch.float, device=device)
# print(decoder_input)
decoder_input = decoder_input.unsqueeze(1)
            # decoder_hidden = encoder_hidden  # unused: the decoder starts from its own zero hidden state instead
            decoder_hidden = decoder.init_hidden()
pred_temp = []
actual_temp = []
attn_temp = []
tar_len.append(input_length)
# Use its own predictions as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_outputs)
decoder_input = decoder_output.detach() # detach from history as input
decoder_input = decoder_input.unsqueeze(0)
pred_temp.append(decoder_input.data.cpu().numpy()[0])
actual_temp.append(target_tensor[di].data.cpu().numpy()[0])
attn_li.append(decoder_attention.data.cpu().numpy()[0])
loss += criterion(decoder_output, target_tensor[di].unsqueeze(1))
# get the batch loss
batch_loss += loss / target_length
# append to separate dataframes
pred_li.append(pred_temp)
actual_li.append(actual_temp)
# attn_li.append(attn_li)
eval_loss = batch_loss / len(x)
pred_df = pd.DataFrame(pred_li)
act_df =
|
pd.DataFrame(actual_li)
|
pandas.DataFrame
|
import logging
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Optional
import anndata
import anndata as ad
import bamnostic as bs
import numpy as np
import pandas as pd
import scipy.sparse
from bx.intervals import Interval, IntervalTree
from ..utils import removeprefix
MOUSE = ['1', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19',
'2', '3', '4', '5', '6', '7', '8', '9','X', 'Y']
HUMAN = ['1', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22',
'2', '3', '4', '5', '6', '7', '8', '9','X', 'Y']
logger = logging.getLogger('epi.ct.atac_mtx')
def get_barcode_from_read(bam_file: Path, read: bs.AlignedSegment, barcode_tag: str = 'CB'):
return read.get_tag(barcode_tag)
def get_barcode_from_bam_filename(bam_file: Path, read: bs.AlignedSegment, barcode_tag: str):
return bam_file.stem
def get_feature_df(chromosomes: List[str], loaded_feat: Dict[str, List[List[int]]]) -> pd.DataFrame:
feature_dfs = []
for chrom in chromosomes:
features = loaded_feat[chrom]
chroms = [chrom] * len(features)
index = []
starts = []
ends = []
for start, end in features:
index.append(f'{chrom}:{start}-{end}')
starts.append(start)
ends.append(end)
feature_dfs.append(pd.DataFrame({'chrom': chroms, 'start': starts, 'end': ends}, index=index))
return pd.concat(feature_dfs)
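# A tiny illustrative call of get_feature_df (the coordinates below are made up): with one
# chromosome and two [start, end] features, the resulting frame is indexed as 'chrom:start-end':
# get_feature_df(['1'], {'1': [[100, 200], [300, 400]]})
# ->            chrom  start  end
#    1:100-200      1    100  200
#    1:300-400      1    300  400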
class BarcodeIndexDict(dict):
barcodes: List[str]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.barcodes = []
def __missing__(self, key):
self.barcodes.append(key)
count = len(self)
self[key] = count
return count
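# A small, self-contained demonstration of the __missing__ behaviour above: unseen barcodes get
# consecutive integer indexes on first lookup and are remembered in insertion order (the barcode
# strings here are made up).
def _barcode_index_demo():
    idx = BarcodeIndexDict()
    assert idx['AAAC'] == 0              # first unseen barcode gets index 0
    assert idx['TTTG'] == 1              # next unseen barcode gets index 1
    assert idx['AAAC'] == 0              # repeated lookups reuse the stored index
    assert idx.barcodes == ['AAAC', 'TTTG']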
def bld_atac_mtx(
bam_files: List[Path],
loaded_feat: Dict[str, List[List[int]]],
output_file_path: Optional[Path] = None,
check_sq=True,
chromosomes=HUMAN,
cb_tag='CB',
) -> anndata.AnnData:
"""
    Build a count matrix one set of features at a time. It is specific to ATAC-seq data.
    Counts are assembled as a sparse matrix and written to disk as an AnnData .h5ad file.
    Parameters
    ----------
    bam_files: list of input BAM file paths, one per cell (or per barcoded library) to
        include in the count matrix
    loaded_feat: the features for which you want to build the count matrix, as a mapping
        from chromosome name to a list of [start, end] intervals
    output_file_path: path of the output .h5ad file. If this parameter is not specified,
        the matrix is written to 'std_output_ct_mtx.h5ad' in the current directory.
    check_sq: bamnostic argument. When reading, check that SQ entries are present in the header.
    chromosomes: chromosomes of the species you are considering. The default value
        is the human genome (not including the mitochondrial genome).
        HUMAN = ['1', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22',
                 '2', '3', '4', '5', '6', '7', '8', '9', 'X', 'Y']
        MOUSE = ['1', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19',
                 '2', '3', '4', '5', '6', '7', '8', '9', 'X', 'Y']
    cb_tag: BAM tag holding the cell barcode (default 'CB').
    Return
    ------
    The AnnData object holding the barcode-by-feature count matrix; it is also written
    to output_file_path as an .h5ad file.
"""
if output_file_path is None:
output_file_path = Path('std_output_ct_mtx.h5ad')
feature_df = get_feature_df(chromosomes, loaded_feat)
# Maps chromosome names to interval trees, with tree values as indexes
# into the feature list for each chromosome; these indexes are used to
# count reads falling into each feature
trees: Dict[str, IntervalTree] = defaultdict(IntervalTree)
logger.debug('Constructing interval trees for features')
for ind, (i, row) in enumerate(feature_df.iterrows()):
trees[row.chrom].insert_interval(Interval(row.start, row.end, value=ind))
barcode_indexes = BarcodeIndexDict()
rows = []
columns = []
for bam_file in bam_files:
logger.debug('Mapping reads from BAM file %s to features', bam_file)
samfile = bs.AlignmentFile(bam_file, mode="rb", check_sq=check_sq)
try:
for i, read in enumerate(samfile):
if not i % 100_000:
logger.debug('Processed %d reads', i)
chrom = removeprefix(read.reference_name, 'chr')
if chrom not in trees:
continue
start, end = read.pos, read.pos + read.query_length
barcode = get_barcode_from_read(bam_file, read, cb_tag)
barcode_index = barcode_indexes[barcode]
for interval in trees[chrom].find(start, end):
feature_index = interval.value
rows.append(barcode_index)
columns.append(feature_index)
except OSError as e:
logger.exception('Caught exception while mapping reads to features')
logger.debug('Done mapping reads, building AnnData object')
count_matrix = scipy.sparse.coo_matrix(
(np.ones(len(rows), dtype=int), (rows, columns)),
shape=(len(barcode_indexes), feature_df.shape[0]),
dtype=np.uint,
)
adata = anndata.AnnData(
X=scipy.sparse.csr_matrix(count_matrix),
obs=pd.DataFrame(index=barcode_indexes.barcodes),
var=feature_df,
dtype=np.uint,
)
adata = adata[sorted(barcode_indexes.barcodes), :].copy()
adata.write_h5ad(output_file_path)
return adata
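# A hypothetical invocation of bld_atac_mtx(); the BAM paths and peak intervals below are
# placeholders, and in practice loaded_feat would come from a peak/window loader.
# bams = [Path('cell_1.bam'), Path('cell_2.bam')]
# peaks = {'1': [[10000, 10500], [20000, 20700]], '2': [[5000, 5400]]}
# adata = bld_atac_mtx(bams, peaks, output_file_path=Path('atac_counts.h5ad'))
# adata.X is then a sparse barcode-by-feature count matrix; adata.var holds chrom/start/end per feature.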
def read_mtx_bed(file_name, path='', omic='ATAC'):
"""
    Read this specific matrix format. It is the standard output of bedtools when you merge BAM files.
"""
peak_name = []
cell_matrix = []
with open(path+file_name) as f:
head = f.readline().split('\t')
head[len(head)-1] = head[len(head)-1].split("\n")[0]
for line in f:
line = line.split('\t')
line[len(line)-1] = line[len(line)-1].split("\n")[0]
peak_name.append(line[3]) # for some reason it has rownames
cell_matrix.append([int(x) for x in line[4:]])
cell_names = head[4:]
cell_matrix=np.matrix(cell_matrix)
cell_matrix = cell_matrix.transpose()
adata = ad.AnnData(cell_matrix,
obs=pd.DataFrame(index=cell_names),
var=
|
pd.DataFrame(index=peak_name)
|
pandas.DataFrame
|
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
import codecs
import csv
from datetime import datetime
from io import StringIO
import os
import platform
from tempfile import TemporaryFile
from urllib.error import URLError
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas import DataFrame, Index, MultiIndex, Series, compat, concat
import pandas._testing as tm
from pandas.io.parsers import CParserWrapper, TextFileReader, TextParser
def test_override_set_noconvert_columns():
# see gh-17351
#
# Usecols needs to be sorted in _set_noconvert_columns based
# on the test_usecols_with_parse_dates test from test_usecols.py
class MyTextFileReader(TextFileReader):
def __init__(self):
self._currow = 0
self.squeeze = False
class MyCParserWrapper(CParserWrapper):
def _set_noconvert_columns(self):
if self.usecols_dtype == "integer":
# self.usecols is a set, which is documented as unordered
# but in practice, a CPython set of integers is sorted.
# In other implementations this assumption does not hold.
# The following code simulates a different order, which
# before GH 17351 would cause the wrong columns to be
# converted via the parse_dates parameter
self.usecols = list(self.usecols)
self.usecols.reverse()
return CParserWrapper._set_noconvert_columns(self)
data = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = DataFrame(cols, columns=["c_d", "a"])
parser = MyTextFileReader()
parser.options = {
"usecols": [0, 2, 3],
"parse_dates": parse_dates,
"delimiter": ",",
}
parser._engine = MyCParserWrapper(StringIO(data), **parser.options)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_empty_decimal_marker(all_parsers):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = "Only length-1 decimal markers supported"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), decimal="")
def test_bad_stream_exception(all_parsers, csv_dir_path):
# see gh-13652
#
# This test validates that both the Python engine and C engine will
# raise UnicodeDecodeError instead of C engine raising ParserError
# and swallowing the exception that caused read to fail.
path = os.path.join(csv_dir_path, "sauron.SHIFT_JIS.csv")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup("utf-8")
parser = all_parsers
msg = "'utf-8' codec can't decode byte"
# Stream must be binary UTF8.
with open(path, "rb") as handle, codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter
) as stream:
with pytest.raises(UnicodeDecodeError, match=msg):
parser.read_csv(stream)
def test_read_csv_local(all_parsers, csv1):
prefix = "file:///" if compat.is_platform_windows() else "file://"
parser = all_parsers
fname = prefix + str(os.path.abspath(csv1))
result = parser.read_csv(fname, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
def test_1000_sep(all_parsers):
parser = all_parsers
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]})
result = parser.read_csv(StringIO(data), sep="|", thousands=",")
tm.assert_frame_equal(result, expected)
def test_squeeze(all_parsers):
data = """\
a,1
b,2
c,3
"""
parser = all_parsers
index = Index(["a", "b", "c"], name=0)
expected = Series([1, 2, 3], name=1, index=index)
result = parser.read_csv(StringIO(data), index_col=0, header=None, squeeze=True)
tm.assert_series_equal(result, expected)
# see gh-8217
#
# Series should not be a view.
assert not result._is_view
def test_malformed(all_parsers):
# see gh-6607
parser = all_parsers
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = "Expected 3 fields in line 4, saw 5"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), header=1, comment="#")
@pytest.mark.parametrize("nrows", [5, 3, None])
def test_malformed_chunks(all_parsers, nrows):
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
parser = all_parsers
msg = "Expected 3 fields in line 6, saw 5"
reader = parser.read_csv(
StringIO(data), header=1, comment="#", iterator=True, chunksize=1, skiprows=[2]
)
with pytest.raises(ParserError, match=msg):
reader.read(nrows)
def test_unnamed_columns(all_parsers):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
parser = all_parsers
expected = DataFrame(
[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],
dtype=np.int64,
columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"],
)
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_csv_mixed_type(all_parsers):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
parser = all_parsers
expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]})
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_read_csv_low_memory_no_rows_with_index(all_parsers):
# see gh-21141
parser = all_parsers
if not parser.low_memory:
pytest.skip("This is a low-memory specific test")
data = """A,B,C
1,1,1,2
2,2,3,4
3,3,4,5
"""
result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)
expected = DataFrame(columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_read_csv_dataframe(all_parsers, csv1):
parser = all_parsers
result = parser.read_csv(csv1, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
def test_read_csv_no_index_name(all_parsers, csv_dir_path):
parser = all_parsers
csv2 = os.path.join(csv_dir_path, "test2.csv")
result = parser.read_csv(csv2, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738, "foo"],
[1.047916, -0.041232, -0.16181208307, 0.212549, "bar"],
[0.498581, 0.731168, -0.537677223318, 1.346270, "baz"],
[1.120202, 1.567621, 0.00364077397681, 0.675253, "qux"],
[-0.487094, 0.571455, -1.6116394093, 0.103469, "foo2"],
],
columns=["A", "B", "C", "D", "E"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
]
),
)
tm.assert_frame_equal(result, expected)
def test_read_csv_wrong_num_columns(all_parsers):
# Too few columns.
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
parser = all_parsers
msg = "Expected 6 fields in line 3, saw 7"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
def test_read_duplicate_index_explicit(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=0)
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"], name="index"),
)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(all_parsers):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"A,B\nTrue,1\nFalse,2\nTrue,3",
dict(),
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nYES,1\nno,2\nyes,3\nNo,3\nYes,3",
dict(true_values=["yes", "Yes", "YES"], false_values=["no", "NO", "No"]),
DataFrame(
[[True, 1], [False, 2], [True, 3], [False, 3], [True, 3]],
columns=["A", "B"],
),
),
(
"A,B\nTRUE,1\nFALSE,2\nTRUE,3",
dict(),
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nfoo,bar\nbar,foo",
dict(true_values=["foo"], false_values=["bar"]),
DataFrame([[True, False], [False, True]], columns=["A", "B"]),
),
],
)
def test_parse_bool(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
def test_int_conversion(all_parsers):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [3, 3.0])
def test_read_nrows(all_parsers, nrows):
# see gh-10476
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
expected = DataFrame(
[["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]],
columns=["index", "A", "B", "C", "D"],
)
parser = all_parsers
result = parser.read_csv(StringIO(data), nrows=nrows)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [1.2, "foo", -1])
def test_read_nrows_bad(all_parsers, nrows):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
msg = r"'nrows' must be an integer >=0"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), nrows=nrows)
@pytest.mark.parametrize("index_col", [0, "index"])
def test_read_chunksize_with_index(all_parsers, index_col):
parser = all_parsers
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = parser.read_csv(StringIO(data), index_col=0, chunksize=2)
expected = DataFrame(
[
["foo", 2, 3, 4, 5],
["bar", 7, 8, 9, 10],
["baz", 12, 13, 14, 15],
["qux", 12, 13, 14, 15],
["foo2", 12, 13, 14, 15],
["bar2", 12, 13, 14, 15],
],
columns=["index", "A", "B", "C", "D"],
)
expected = expected.set_index("index")
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
@pytest.mark.parametrize("chunksize", [1.3, "foo", 0])
def test_read_chunksize_bad(all_parsers, chunksize):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
msg = r"'chunksize' must be an integer >=1"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), chunksize=chunksize)
@pytest.mark.parametrize("chunksize", [2, 8])
def test_read_chunksize_and_nrows(all_parsers, chunksize):
# see gh-15755
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0, nrows=5)
reader = parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(concat(reader), expected)
def test_read_chunksize_and_nrows_changing_size(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0, nrows=5)
reader = parser.read_csv(StringIO(data), chunksize=8, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(reader.get_chunk(size=2), expected.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), expected.iloc[2:5])
with pytest.raises(StopIteration, match=""):
reader.get_chunk(size=3)
def test_get_chunk_passed_chunksize(all_parsers):
parser = all_parsers
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
reader = parser.read_csv(StringIO(data), chunksize=2)
result = reader.get_chunk()
expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [dict(), dict(index_col=0)])
def test_read_chunksize_compat(all_parsers, kwargs):
# see gh-12185
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
reader = parser.read_csv(StringIO(data), chunksize=2, **kwargs)
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(concat(reader), result)
def test_read_chunksize_jagged_names(all_parsers):
# see gh-23509
parser = all_parsers
data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)])
expected = DataFrame([[0] + [np.nan] * 9] * 7 + [[0] * 10])
reader = parser.read_csv(StringIO(data), names=range(10), chunksize=4)
result = concat(reader)
tm.assert_frame_equal(result, expected)
def test_read_data_list(all_parsers):
parser = all_parsers
kwargs = dict(index_col=0)
data = "A,B,C\nfoo,1,2,3\nbar,4,5,6"
data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]]
expected = parser.read_csv(StringIO(data), **kwargs)
parser = TextParser(data_list, chunksize=2, **kwargs)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_iterator(all_parsers):
# see gh-6607
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
expected = parser.read_csv(StringIO(data), **kwargs)
reader = parser.read_csv(StringIO(data), iterator=True, **kwargs)
first_chunk = reader.read(3)
tm.assert_frame_equal(first_chunk, expected[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, expected[3:])
def test_iterator2(all_parsers):
parser = all_parsers
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = parser.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(result[0], expected)
def test_reader_list(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
lines = list(csv.reader(StringIO(data)))
reader = TextParser(lines, chunksize=2, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
def test_reader_list_skiprows(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
lines = list(csv.reader(StringIO(data)))
reader = TextParser(lines, chunksize=2, skiprows=[1], **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[1:3])
def test_iterator_stop_on_chunksize(all_parsers):
# gh-3967: stopping iteration when chunksize is specified
parser = all_parsers
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = parser.read_csv(StringIO(data), chunksize=1)
result = list(reader)
assert len(result) == 3
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(concat(result), expected)
@pytest.mark.parametrize(
"kwargs", [dict(iterator=True, chunksize=1), dict(iterator=True), dict(chunksize=1)]
)
def test_iterator_skipfooter_errors(all_parsers, kwargs):
msg = "'skipfooter' not supported for 'iteration'"
parser = all_parsers
data = "a\n1\n2"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=1, **kwargs)
def test_nrows_skipfooter_errors(all_parsers):
msg = "'skipfooter' not supported with 'nrows'"
data = "a\n1\n2\n3\n4\n5\n6"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=1, nrows=5)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"""foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
""",
dict(index_col=0, names=["index", "A", "B", "C", "D"]),
DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
index=Index(["foo", "bar", "baz", "qux", "foo2", "bar2"], name="index"),
columns=["A", "B", "C", "D"],
),
),
(
"""foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
""",
dict(index_col=[0, 1], names=["index1", "index2", "A", "B", "C", "D"]),
DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
index=MultiIndex.from_tuples(
[
("foo", "one"),
("foo", "two"),
("foo", "three"),
("bar", "one"),
("bar", "two"),
],
names=["index1", "index2"],
),
columns=["A", "B", "C", "D"],
),
),
],
)
def test_pass_names_with_index(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", [[0, 1], [1, 0]])
def test_multi_index_no_level_names(all_parsers, index_col):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
headless_data = "\n".join(data.split("\n")[1:])
names = ["A", "B", "C", "D"]
parser = all_parsers
result = parser.read_csv(
StringIO(headless_data), index_col=index_col, header=None, names=names
)
expected = parser.read_csv(StringIO(data), index_col=index_col)
# No index names in headless data.
expected.index.names = [None] * 2
tm.assert_frame_equal(result, expected)
def test_multi_index_no_level_names_implicit(all_parsers):
parser = all_parsers
data = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=MultiIndex.from_tuples(
[
("foo", "one"),
("foo", "two"),
("foo", "three"),
("bar", "one"),
("bar", "two"),
]
),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,expected,header",
[
("a,b", DataFrame(columns=["a", "b"]), [0]),
(
"a,b\nc,d",
DataFrame(columns=
|
MultiIndex.from_tuples([("a", "c"), ("b", "d")])
|
pandas.MultiIndex.from_tuples
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Handle variables related to contract (symbol) names
"""
import re
# Shanghai Futures Exchange (SHFE)
PRODUCTS_SHFE = {'cu', 'al', 'zn', 'pb', 'ni', 'sn', 'au', 'ag', 'rb', 'wr', 'hc', 'fu', 'bu', 'ru'}
# China Financial Futures Exchange (CFFEX)
PRODUCTS_CFFEX = {'IF', 'IC', 'IH', 'T', 'TF'}
# Zhengzhou Commodity Exchange (CZCE)
PRODUCTS_CZCE = {'SR', 'CF', 'ZC', 'FG', 'TA', 'WH', 'PM', 'RI', 'LR', 'JR', 'RS', 'OI', 'RM', 'SF', 'SM', 'MA', 'WT',
'WS', 'RO', 'ER', 'ME', 'TC'}
# Dalian Commodity Exchange (DCE)
PRODUCTS_DCE = {'m', 'y', 'a', 'b', 'p', 'c', 'cs', 'jd', 'fb', 'bb', 'l', 'v', 'pp', 'j', 'jm', 'i'}
EXCHANGES_wind_code_xapi = {
'CFE': 'CFFEX',
'SHF': 'SHFE',
'CZC': 'CZCE',
'DCE': 'DCE',
'SH': 'SSE',
'SZ': 'SZSE',
}
EXCHANGES_xapi_wind_code = dict((v, k) for k, v in EXCHANGES_wind_code_xapi.items())
def product_to_exchange(product):
"""
    Convert a contract product code to the exchange it trades on
:param product:
:return:
"""
PRODUCT_ = product.upper()
if PRODUCT_ in PRODUCTS_CFFEX:
return 'CFFEX'
if PRODUCT_ in PRODUCTS_CZCE:
return 'CZCE'
product_ = product.lower()
if product_ in PRODUCTS_SHFE:
return 'SHFE'
if product_ in PRODUCTS_DCE:
return 'DCE'
return 'Unknown'
def is_shfe(product):
"""
    Whether the product trades on SHFE.
    This helper exists mainly because SHFE distinguishes closing today's positions from closing yesterday's.
:param product:
:return:
"""
product_ = product.lower()
return product_ in PRODUCTS_SHFE
def get_product(symbol):
"""
    Extract the product code from a contract symbol
:param symbol:
:return:
"""
pattern = re.compile(r'(\D{1,2})(\d{0,1})(\d{3})')
match = pattern.match(symbol)
if match:
return match.expand(r'\g<1>')
else:
return symbol
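# Illustrative behaviour of the regex above (symbols taken from the __main__ example below):
# get_product('IF1603') -> 'IF' (CFFEX index future)
# get_product('rb1708') -> 'rb' (SHFE rebar)
# get_product('600000') -> '600000' (no letter prefix, so the symbol is returned unchanged)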
def get_exchange(symbol):
"""
    Extract the exchange suffix from a dotted contract symbol
:param symbol:
:return:
"""
pattern = re.compile(r'(\.)(\D{1,4})')
match = pattern.match(symbol)
if match:
return match.expand(r'\g<2>')
else:
return symbol
if __name__ == '__main__':
import pandas as pd
df =
|
pd.DataFrame({'Symbol': ['IF1603', 'rb1708','600000']})
|
pandas.DataFrame
|
'''
Created Sep 19 10:04:10 2018
Significant updates March 17th 2020
@author: <NAME> <EMAIL>
'''
import os, glob, sys, pyproj, csv, statistics
from argparse import ArgumentParser
from osgeo import gdal
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pyhdf.SD import SD, SDC
from h5py import File
from datetime import datetime
import seaborn as sns
def hdf_to_np(hdf_fname, sds):
#TODO close the dataset, probably using 'with'
hdf_ds = SD(hdf_fname, SDC.READ)
dataset_3d = hdf_ds.select(sds)
data_np = dataset_3d[:,:]
return data_np
def h5_to_np(h5_fname, sds):
with File(h5_fname, 'r') as h5_ds:
data_np = h5_ds['HDFEOS']['GRIDS']['VIIRS_Grid_BRDF']['Data Fields'][sds][()]
return data_np
def convert_ll_vnp(lat, lon, tile, in_dir, prdct):
# prdct = prdct
# Convert the lat/long point of interest to a row/col location
template_h_list = glob.glob(os.path.join(in_dir, '*.A*{tile}*.h*'.format(tile=tile)))
try:
template_h_file = template_h_list[0]
except IndexError:
print('Sorry, due to gdal not liking VIIRS h5 files, you need to download an MCD image of the same tile'
' and put it in ../input_dir/copy_srs/')
sys.exit(1)
template_h_ds = gdal.Open(template_h_file, gdal.GA_ReadOnly)
template_h_band = gdal.Open(template_h_ds.GetSubDatasets()[0][0], gdal.GA_ReadOnly)
# Use pyproj to create a geotransform between
# WGS84 geographic (lat/long; epsg 4326) and
# the funky crs that modis/viirs use.
# Note that this modis crs seems to have units
# in meters from the geographic origin, i.e.
# lat/long (0, 0), and has 2400 rows/cols per tile.
# gdal does NOT read the corner coords correctly,
    # but they ARE stored correctly in the hdf metadata, although slightly
    # different than reported by gdal...
    # Using pyproj to transform coords of interest to meters
in_proj = pyproj.Proj(init='epsg:4326')
# out_proj = pyproj.Proj(template_h_band.GetProjection())
out_proj = pyproj.Proj('+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +a=6371007.181 +b=6371007.181 +units=m +no_defs')
# Current sample location convert from ll to m
smpl_x, smpl_y = pyproj.transform(in_proj, out_proj, lon, lat)
# FOR VIIRS, use manual
# h12v04 UL: -6671703.1179999997839332 5559752.5983330002054572 LR: -5559752.5983330002054572 4447802.0786669999361038
# out_proj = pyproj.Proj('+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +a=6371007.181 +b=6371007.181 +units=m +no_defs')
# Getting bounding coords from meta
    # Perhaps no longer needed, but they're slightly different than the gdal geotransform
# NOTE gdal works fine if you call the geotransform
# on the BAND!!! (sds), not the DS
meta = template_h_ds.GetMetadata_Dict()
    # FOR MODIS, use ALL CAPS
y_origin_meta = float(meta['NORTHBOUNDINGCOORDINATE'])
y_min_meta = float(meta['SOUTHBOUNDINGCOORDINATE'])
x_max_meta = float(meta['EASTBOUNDINGCOORDINATE'])
x_origin_meta = float(meta['WESTBOUNDINGCOORDINATE'])
# n_rows_meta = 1200 # int(meta['DATAROWS'])
# n_cols_meta = 1200 # int(meta['DATACOLUMNS'])
pixel_height_meta_m = 926.6254330558330139 #(y_origin_meta - y_min_meta) / n_rows_meta
pixel_width_meta_m = 926.6254330558330139 #pixel_height_meta_m
# # Make calculations to get row/col value
# # NOTE that for geotifs, it would also be possible
# # to simply open with rasterio, then use .index()
# # to return the row/col. This does not work for hdf
x_origin_meta_m, y_origin_meta_m = pyproj.transform(in_proj, out_proj, x_origin_meta, y_origin_meta)
# x_max_meta_m, y_min_meta_m = pyproj.transform(in_proj, out_proj, x_max_meta, y_min_meta)
col_m = int((smpl_x - x_origin_meta_m) / pixel_width_meta_m)
row_m = int(-1 * (smpl_y - y_origin_meta_m) / pixel_height_meta_m)
smp_rc = row_m, col_m
return smp_rc
def convert_ll(lat, lon, tile, in_dir, prdct):
prdct = prdct
# Convert the lat/long point of interest to a row/col location
if 'h' in tile:
template_h_list = glob.glob(os.path.join(in_dir,
'{prdct}.A*{tile}*.h*'.format(prdct=prdct,
tile=tile)))
else:
template_h_list = glob.glob(os.path.join(in_dir, '{prdct}*{tile}*.h*'.format(prdct=prdct,
tile=tile)))
template_h_file = template_h_list[0]
template_h_ds = gdal.Open(template_h_file, gdal.GA_ReadOnly)
template_h_band = gdal.Open(template_h_ds.GetSubDatasets()[0][0],
gdal.GA_ReadOnly)
    # Use pyproj to create a geotransform between
    # WGS84 geographic (lat/long; epsg 4326) and
# the funky crs that modis/viirs use.
# Note that this modis crs seems to have units
# in meters from the geographic origin, i.e.
# lat/long (0, 0), and has 2400 rows/cols per tile.
# gdal does NOT read the corner coords correctly,
    # but they ARE stored correctly in the hdf metadata, although slightly
    # different than reported by gdal, which is odd.
    # # Using pyproj to transform coords of interest to meters
in_proj = pyproj.Proj(init='epsg:4326')
out_proj = pyproj.Proj(template_h_band.GetProjection())
# # Current sample location convert from ll to m
smpl_x, smpl_y = pyproj.transform(in_proj, out_proj, lon, lat)
# Getting bounding coords from meta
    # Perhaps no longer needed, but they're slightly different than the gdal geotransform
# NOTE gdal works fine if you call the geotransform
# on the BAND!!! (sds), not the DS
# meta = template_h_ds.GetMetadata_Dict()
    # FOR MODIS, use ALL CAPS
# y_origin_meta = float(meta['NORTHBOUNDINGCOORDINATE'])
# y_min_meta = float(meta['SOUTHBOUNDINGCOORDINATE'])
# x_max_meta = float(meta['EASTBOUNDINGCOORDINATE'])
# x_origin_meta = float(meta['WESTBOUNDINGCOORDINATE'])
# n_rows_meta = int(meta['DATAROWS'])
# n_cols_meta = int(meta['DATACOLUMNS'])
# pixel_height_meta_m = float(meta['CHARACTERISTICBINSIZE'])
# pixel_width_meta_m = float(meta['CHARACTERISTICBINSIZE'])
#TESTING these are conversions of the metadata extents to meters
# x_origin_meta_m, y_origin_meta_m = pyproj.transform(in_proj, out_proj, x_origin_meta, y_origin_meta)
# x_max_meta_m, y_min_meta_m= pyproj.transform(in_proj, out_proj, x_max_meta, y_min_meta)
# pixel_width_meta_m = (x_max_meta_m - x_origin_meta_m) / n_cols_meta
# pixel_height_meta_m = (y_origin_meta_m - y_min_meta_m) / n_rows_meta
# col_meta_m = int((smpl_x - x_origin_meta_m) / pixel_width_meta_m)
# row_meta_m = int(-1 * (smpl_y - y_origin_meta_m) / pixel_height_meta_m)
# smp_rc_meta = row_meta_m, col_meta_m
# Getting bounding coords etc from gdal geotransform
n_cols = template_h_band.RasterXSize
n_rows = template_h_band.RasterYSize
x_origin, x_res, x_skew, y_origin, y_skew, y_res = template_h_band.GetGeoTransform()
# Using the skew is in case there is any affine transformation
# in place in the input raster. Not so for modis tiles, so not really necessary, but complete.
x_max = x_origin + n_cols * x_res + n_cols * x_skew
y_min = y_origin + n_rows * y_res + n_rows * y_skew
# # Make calculations to get row/col value
# # NOTE that for geotifs, it would also be possible
# # to simply open with rasterio, then use .index()
# # to return the row/col. This does not work for hdf
pixel_width_m = (x_max - x_origin) / n_cols
pixel_height_m = (y_origin - y_min) / n_rows
col_m = int((smpl_x - x_origin) / pixel_width_m)
row_m = int( -1 * (smpl_y - y_origin) / pixel_height_m)
smp_rc = row_m, col_m
print(smp_rc)
return smp_rc
def make_prod_list(in_dir, prdct, year, day, tile):
if 'MCD' in prdct or 'VNP' in prdct or 'VJ1' in prdct:
h_file_list = glob.glob(os.path.join(in_dir,
'{prdct}.A{year}{day:03d}*.h*'.format(prdct=prdct,
day=day, year=year)))
elif 'LC08' in prdct:
dt_string = str(year) + '-' + str(day)
date_complete = datetime.strptime(dt_string, '%Y-%j')
mm = date_complete.strftime('%m')
dd = date_complete.strftime('%d')
h_file_list = glob.glob(os.path.join(in_dir, '{prdct}*{tile}_{year}{month}{day}_*.h*'.format(prdct=prdct,
tile=tile,
month=mm,
day=dd,
year=year)))
else:
print('Product type unknown! Please check that input is MCD, VNP, VJ1 or LC08.')
sys.exit()
return h_file_list
def extract_pixel_value(in_dir, site, prdct, h_file_day, sds_names, base_dir):
# Open tifs as gdal ds
# print('Opening: ' + h_file_day + ' ' + sds_name_wsa_sw)
if 'VNP' in prdct or 'VJ1' in prdct:
# print('Found VIIRS product.')
copy_srs_dir = os.path.join(base_dir, 'copy_srs')
wsa_band = h5_to_np(h_file_day, sds_names[0])
bsa_band = h5_to_np(h_file_day, sds_names[1])
qa_band = h5_to_np(h_file_day, sds_names[2])
elif 'MCD' in prdct or 'LC08' in prdct:
# print('Found MODIS product.')
wsa_band = hdf_to_np(h_file_day, sds_names[0])
bsa_band = hdf_to_np(h_file_day, sds_names[1])
qa_band = hdf_to_np(h_file_day, sds_names[2])
else:
print('Unknown product! This only works for MCD, VNP, VJ1 or LC8/LC08 hdf or h5 files!')
sys.exit()
# Mask out nodata values
wsa_swir_masked = np.ma.masked_array(wsa_band, wsa_band == 32767)
wsa_swir_masked_qa = np.ma.masked_array(wsa_swir_masked, qa_band > 0)
bsa_swir_masked = np.ma.masked_array(bsa_band, bsa_band == 32767)
bsa_swir_masked_qa = np.ma.masked_array(bsa_swir_masked, qa_band > 0)
#TODO is the plotting in this script appropriately ignoring values masked here?
# Extract pixel value from product by converting lat/lon to row/col
if 'VNP' in prdct or 'VJ1' in prdct:
smp_rc = convert_ll_vnp(site[1][0], site[1][1], site[1][2], copy_srs_dir, prdct)
elif 'MCD' in prdct:
smp_rc = convert_ll(site[1][0], site[1][1], site[1][2], in_dir, prdct)
elif 'LC08' in prdct:
smp_rc = convert_ll(site[1][0], site[1][1], site[1][2], in_dir, prdct)
else:
print('Unknown product! This only works for MCD, VNP/VJ1, or LC8/LC08 hdf or h5 files!')
sys.exit()
# Take just the sampled location's value, and scale to float
wsa_swir_subset = wsa_swir_masked_qa[smp_rc]
wsa_swir_subset_flt = np.multiply(wsa_swir_subset, 0.001)
bsa_swir_subset = bsa_swir_masked_qa[smp_rc]
bsa_swir_subset_flt = np.multiply(bsa_swir_subset, 0.001)
# Return a tuple of numpy arrays for wsa and bsa (and probably also qa?)
print(wsa_swir_subset_flt, bsa_swir_subset_flt)
print(wsa_swir_subset, bsa_swir_subset)
return wsa_swir_subset_flt, bsa_swir_subset_flt
def draw_plot(year, year_smpl_cmb_df, fig_dir, prdct, sites_dict):
sns.set_style('darkgrid')
for site in sites_dict.keys():
for sds in ['wsa', 'bsa']:
col_name = str(site) + '_' + str(sds)
# Create a seaborn scatterplot (or replot for now, small differences)
sct = sns.regplot(x='doy', y=col_name, data=year_smpl_cmb_df, marker='o', label='sw ' + str(sds),
fit_reg=False, scatter_kws={'color':'darkblue', 'alpha':0.3,'s':20})
sct.set_ylim(0, 1.0)
sct.set_xlim(1, 366)
sct.legend(loc='best')
# Access the figure, add title
plt_name = str(year + ' ' + prdct + ' SW ' + str(sds))
plt.title(plt_name)
#plt.show()
plt_name = plt_name.replace(' ', '_') + '_' + str(site)
# Save each plot to figs dir
print('Saving plot to: ' + '{fig_dir}/{plt_name}.png'.format(fig_dir=fig_dir, plt_name=plt_name))
plt.savefig('{fig_dir}/{plt_name}.png'.format(fig_dir=fig_dir, plt_name=plt_name))
plt.clf()
def check_leap(year):
leap_status = False
year = int(year)
if (year % 4) == 0:
if (year % 100) == 0:
if (year % 400) == 0:
leap_status = True
else:
leap_status = False
else:
leap_status = True
else:
leap_status = False
return leap_status
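# For reference, a minimal equivalence sketch against the standard library (assuming
# calendar.isleap would be acceptable here; the hand-rolled version above is kept as-is):
# import calendar
# assert all(check_leap(y) == calendar.isleap(y) for y in (1900, 2000, 2016, 2019))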
def main():
# CLI args
parser = ArgumentParser()
parser.add_argument('-y', '--years', dest='years', help='Years to extract data for.', metavar='YEARS')
#TODO why include a required tile option if the tile has to be in the csv? change this.
parser.add_argument('-d', '--input-dir', dest='base_dir',
help='Base directory containing sample and dir of imagery data called the product name'
', e.g. ../MCD43A3/',
metavar='IN_DIR')
parser.add_argument('-s', '--sites', dest='sites_csv_fname', help='CSV with no headings containing smpls. '+\
'must look like: id,lat,long,tile_it_is_in',
metavar='SITES')
parser.add_argument('-p', '--product', dest='prdct', help='Imagery product to be input, e.g. LC08, MCD43A3.',
metavar='PRODUCT')
args = parser.parse_args()
# Note: I have chosen to call the landsat product LC08, rather than LC8, due to the file naming convention
# of the inputs specific to the albedo code. LC8 is also used in different Landsat data products, annoyingly.
prdct = args.prdct
base_dir = args.base_dir
years = [args.years]
sites_csv_input = os.path.join(base_dir, args.sites_csv_fname)
sites_dict = {}
with open(sites_csv_input, mode='r') as sites_csv:
reader = csv.reader(sites_csv)
for row in reader:
key = row[0]
sites_dict[key] = row[1:]
# TODO this 'copy_srs_dir' location is here because currently VNP43 has broken spatial reference
# TODO information. Check V002 and remove this if it has been fixed, as this is ludicrously clunky.
sds_name_wsa_sw = 'Albedo_WSA_shortwave'
sds_name_bsa_sw = 'Albedo_BSA_shortwave'
#TODO: the LC08 hdfs have a differently named qa sds. yaay.
sds_name_qa_sw = 'BRDF_Albedo_Band_Mandatory_Quality_shortwave'
#sds_name_qa_sw = 'Albedo_Band_Quality_shortwave'
sds_names = [sds_name_wsa_sw, sds_name_bsa_sw, sds_name_qa_sw]
# Loop through the years provided, and extract the pixel values at the provided coordinates. Outputs CSV and figs.
for year in years:
doy_list = []
if check_leap(year):
for i in range(1, 367):
doy_list.append(i)
else:
for i in range(1, 366):
doy_list.append(i)
# Make a blank pandas dataframe that results will be appended to,
# and start it off with all possible doys (366)
year_smpl_cmb_df = pd.DataFrame(doy_list, columns=['doy'])
# Loop through each site and extract the pixel values
for site in sites_dict.items():
tile = site[1][2]
in_dir = os.path.join(base_dir, prdct, year, tile)
print(in_dir)
fig_dir = os.path.join(base_dir, 'figs')
if not os.path.isdir(fig_dir):
os.makedirs(fig_dir)
print('Made new folder for figs: ' + str(fig_dir))
else:
pass
try:
os.chdir(in_dir)
except FileNotFoundError:
print('Sorry, data directory must be organized like: ../MCD43A3/2016/h12v04/ e.g.')
sys.exit(1)
print('Processing site: ' + str(site))
# Create empty arrays for mean, sd
wsa_swir_mean = []
wsa_swir_sd = []
bsa_swir_mean = []
bsa_swir_sd = []
for day in doy_list:
# Open the shortwave white sky albedo band.
# The list approach is because of the processing date part of the file
# name, which necessitates the wildcard -- this was just the easiest way.
h_file_list = make_prod_list(in_dir, prdct, year, day, tile)
file_name = '{in_dir}/{prdct}.A{year}{day:03d}*.h*'.format(in_dir=in_dir, prdct=prdct, day=day,
year=year)
# See if there is a raster for the date, if not use a fill value for the graph
if len(h_file_list) == 0: # or len(bsa_tif_list) == 0 or len(qa_tif_list) == 0:
print('File not found: ' + file_name)
# wsa_swir_subset_flt = float('nan')
# bsa_swir_subset_flt = float('nan')
#TODO change the below to be nulls, not zeros.
pixel_values = np.nan, np.nan
elif len(h_file_list) > 1:
print('Multiple matching files found for same date! Please remove one.')
sys.exit()
else:
# print('Found file: ' + file_name)
h_file_day = h_file_list[0]
# Extract pixel values and append to dataframe
# Note the base_dir argument should go away when the correctly georeferenced VNP43 are available,
# because I can likely eliminate the vnp-specific value extractor function
try:
pixel_values = extract_pixel_value(in_dir, site, prdct, h_file_day, sds_names, base_dir)
except:
print('Warning! Pixel out of tile boundaries!')
pixel_values = np.nan, np.nan
# Add each point to a temporary list
wsa_smpl_results = []
bsa_smpl_results = []
wsa_smpl_results.append(pixel_values[0])
bsa_smpl_results.append(pixel_values[1])
#TODO this is currently silly, but ultimately will be replaced by an averaging
#TODO function for points of the same sample area
try:
wsa_tmp_mean = statistics.mean(wsa_smpl_results)
wsa_swir_mean.append(wsa_tmp_mean)
bsa_tmp_mean = statistics.mean(bsa_smpl_results)
bsa_swir_mean.append(bsa_tmp_mean)
            except Exception:
wsa_swir_mean.append(np.nan)
bsa_swir_mean.append(np.nan)
wsa_smpl_results_df =
|
pd.DataFrame(wsa_swir_mean)
|
pandas.DataFrame
|
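# A minimal sketch (not part of the original script) of how the per-day mean lists
# built in the loop above could be merged back onto year_smpl_cmb_df by day of year.
# The merge approach and the column names 'wsa_swir_mean'/'bsa_swir_mean' are
# assumptions for illustration only.
import numpy as np
import pandas as pd

doy_list = list(range(1, 366))
wsa_swir_mean = [np.nan] * len(doy_list)   # stand-ins for the values appended per day
bsa_swir_mean = [np.nan] * len(doy_list)
site_df = pd.DataFrame({'doy': doy_list,
                        'wsa_swir_mean': wsa_swir_mean,
                        'bsa_swir_mean': bsa_swir_mean})
year_smpl_cmb_df = pd.DataFrame(doy_list, columns=['doy'])
year_smpl_cmb_df = year_smpl_cmb_df.merge(site_df, on='doy', how='left')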
import pandas as pd
import os
import zipfile
import requests
import io
import glob
import numpy as np
from EnergyIntensityIndicators.utilities.dataframe_utilities \
import DFUtilities as df_utils
class NonCombustion:
"""Class to handle and explore
zipped Emissions data from the EPA
TODO automate to use latest data available. Is file naming consistent?
"""
def __init__(self, base_dir):
self.base_dir = base_dir
self.annex = \
'https://www.epa.gov/sites/production/files/2020-07/annex_1.zip'
self.chapter_0 = \
'https://www.epa.gov/sites/production/files/2020-08/chapter_0.zip'
self.archive = \
"https://www.eia.gov/electricity/data/eia923/archive/xls/f906nonutil1989.zip"
self.years = list(range(1990, 2018 + 1))
self.categories_level1 = \
{'Liming':
{'activity':
{'source': 'EPA',
'table': 'Table 5-22'}, # Emissions from Liming (MMT C)
'emissions':
{'source': 'EPA',
'table': 'Table 5-21'}}, # Emissions from Liming (MMT CO2 Eq.)
'Adipic Acid Production':
{'activity':
{'source': 'EPA',
'table': 'Table 4-31'}, # Adipic Acid Production (kt)
'emissions':
{'source': 'EPA',
'table': 'Table 4-30'}}, # N2O Emissions from Adipic Acid Production (MMT CO2 Eq. and kt N2O)
'Aluminum Production':
{'activity':
{'source': 'EPA',
'table': 'Table 4-82'}, # Production of Primary Aluminum (kt)
'emissions':
{'source': 'EPA',
'table': 'Table 4-79'}}, # CO2 Emissions from Aluminum Production (MMT CO2 Eq. and kt)
# PFC Emissions from Aluminum Production (MMT CO2 Eq.) TAble 4-80 ?
'Ammonia Production':
{'activity':
{'source': 'EPA',
'table': 'Table 4-21'}, # Ammonia Production, Recovered CO2 Consumed for Urea Production, and Urea Production (kt)
'emissions':
{'source': 'EPA',
'table': 'Table 4-19'}}, # CO2 Emissions from Ammonia Production (MMT CO2 Eq.)
'Caprolactam, Glyoxal, and Glyoxylic Acid Production':
{'activity':
{'source': 'EPA',
'table': 'Table 4-34'}, # Caprolactam Production (kt)
'emissions':
{'source': 'EPA',
'table': 'Table 4-33'}}, # N2O Emissions from Caprolactam Production (MMT CO2 Eq. and kt N2O)
# Table 4-35 Approach 2 Quantitative Uncertainty Estimates for N2O Emissions from Caprolactam, Glyoxal and Glyoxylic Acid Production (MMT CO2 Eq. and Percent) ?
'Carbide Production and Consumption':
{'activity':
{'source': 'EPA',
'table': 'Table 4-38'}, # Production and Consumption of Silicon Carbide (Metric Tons)
'emissions':
{'source': 'EPA',
'table': 'Table 4-36'}}, # CO2 and CH4 Emissions from Silicon Carbide Production and Consumption (MMT CO2 Eq.)
'Carbon Dioxide Consumption':
{'activity':
{'source': 'EPA',
'table': 'Table 4-54'}, # CO2 Production (kt CO2) and the Percent Used for Non-EOR Applications
'emissions':
{'source': 'EPA',
'table': 'Table 4-53'}}, # CO2 Emissions from CO2 Consumption (MMT CO2 Eq. and kt)
'Cement Production':
{'activity':
{'source': 'EPA',
'table': 'Table 4-4'}, # Production Thousand Tons
'emissions':
{'source': 'EPA',
'table': 'Table 4-3'}}, # CO2 Emissions from Cement Production (MMT CO2 Eq. and kt)
'Coal Mining':
{'activity':
{'source': 'EPA',
'table': 'Table 3-29'}, # Coal Production (kt)
'emissions':
{'source': 'EPA',
'table': 'Table 3-30'}}, # CH4 Emissions from Coal Mining (MMT CO2 Eq.)
'Composting':
{'activity':
{'source': 'EPA',
'table': 'Table 7-20'}, # U.S. Waste Composted (kt)
'emissions':
{'source': 'EPA',
'table': 'Table 7-18'}}, # CH4 and N2O Emissions from Composting (MMT CO2 Eq.)
'Ferroalloy Production':
{'activity':
{'source': 'EPA',
'table': 'Table 4-77'}, # Production of Ferroalloys (Metric Tons)
'emissions':
{'source': 'EPA',
'table': 'Table 4-75'}}, # CO2 and CH4 Emissions from Ferroalloy Production (MMT CO2 Eq.)
'Glass Production':
{'activity':
{'source': 'EPA',
'table': 'Table 4-12'}, # Limestone, Dolomite, and Soda Ash Consumption Used in Glass Production (kt)
'emissions':
{'source': 'EPA',
'table': 'Table 4-11'}}, # CO2 Emissions from Glass Production (MMT CO2 Eq. and kt)
'Lead Production':
{'activity':
{'source': 'EPA',
'table': 'Table 4-89'}, # Lead Production (Metric Tons)
'emissions':
{'source': 'EPA',
'table': 'Table 4-88'}}, # CO2 Emissions from Lead Production (MMT CO2 Eq. and kt)
'Lime Production':
{'activity':
{'source': 'EPA',
'table': 'Table 4-9'}, # Adjusted Lime Production (kt)
'emissions':
{'source': 'EPA',
'table': 'Table 4-6'}}, # CO2 Emissions from Lime Production (MMT CO2 Eq. and kt)
'N2O from Product Uses':
{'activity':
{'source': 'EPA',
'table': 'Table 4-109'}, # N2O Production (kt)
'emissions':
{'source': 'EPA',
'table': 'Table 4-110'}}, # N2O Emissions from N2O Product Usage (MMT CO2 Eq. and kt)
'Nitric Acid Production':
{'activity':
{'source': 'EPA',
'table': 'Table 4-28'}, # Nitric Acid Production (kt)
'emissions':
{'source': 'EPA',
'table': 'Table 4-27'}}, # N2O Emissions from Nitric Acid Production (MMT CO2 Eq. and kt N2O)
'Other Process Uses of Carbonates':
{'activity':
{'source': 'EPA',
'table': 'Table 4-16'}, # Limestone and Dolomite Consumption (kt)
'emissions':
{'source': 'EPA',
'table': 'Table 4-14'}}, # CO2 Emissions from Other Process Uses of Carbonates (MMT CO2 Eq.)
'Petrochemical Production':
{'activity':
{'source': 'EPA',
'table': 'Table 4-48'}, # Production of Selected Petrochemicals (kt)
'emissions':
{'source': 'EPA',
'table': 'Table 4-46'}}, # CO2 and CH4 Emissions from Petrochemical Production (MMT CO2 Eq.)
'Phosphoric Acid Production':
{'activity':
{'source': 'EPA',
'table': 'Table 4-57'}, # Phosphate Rock Domestic Consumption, Exports, and Imports (kt) ** Use domestic consumption
# 'Table 4-58']}, # Chemical Composition of Phosphate Rock (Percent by Weight)
'emissions':
{'source': 'EPA',
'table': 'Table 4-56'}}, # CO2 Emissions from Phosphoric Acid Production (MMT CO2 Eq. and kt)
'Soda Ash Production':
{'activity':
{'source': 'EPA',
'table': 'Table 4-44'}, # Soda Ash Production (kt)
'emissions':
{'source': 'EPA',
'table': 'Table 4-43'}}, # CO2 Emissions from Soda Ash Production (MMT CO2 Eq. and kt CO2)
'Stationary Combustion':
{'activity':
{'source': 'EPA',
'table': 'Table A-90'}, # Fuel Consumption by Stationary Combustion for Calculating CH4 and N2O Emissions (TBtu)
# 'Table A-91']}, # CH4 and N2O Emission Factors by Fuel Type and Sector (g/GJ)a
'emissions':
{'source': 'EPA',
'table': ['Table 3-10', # CH4 Emissions from Stationary Combustion (MMT CO2 Eq.)
'Table 3-11']}}, # N2O Emissions from Stationary Combustion (MMT CO2 Eq.)
'Titanium Dioxide Production':
{'activity':
{'source': 'EPA',
'table': 'Table 4-41'}, # Titanium Dioxide Production (kt)
'emissions':
{'source': 'EPA',
'table': 'Table 4-40'}}, # CO2 Emissions from Titanium Dioxide (MMT CO2 Eq. and kt)
'Urea Consumption for NonAgricultural Purposes':
{'activity':
{'source': 'EPA',
'table': 'Table 4-25'}, # Urea Production, Urea Applied as Fertilizer, Urea Imports, and Urea Exports (kt) ** subtract urea applied as fertilizer
'emissions':
{'source': 'EPA',
'table': 'Table 4-23'}}, # CO2 Emissions from Urea Consumption for Non-Agricultural Purposes (MMT CO2 Eq.)
'Urea Fertilization':
{'activity':
{'source': 'EPA',
'table': 'Table 4-25'}, # Urea Production, Urea Applied as Fertilizer, Urea Imports, and Urea Exports (kt)
'emissions':
{'source': 'EPA',
'table': 'Table 5-25'}}, # CO2 Emissions from Urea Fertilization (MMT CO2 Eq.)
'Zinc Production':
{'activity':
{'source': 'EPA',
'table': 'Table 4-92'}, # Zinc Production (Metric Tons)
'emissions':
{'source': 'EPA',
'table': 'Table 4-91'}}} # CO2 Emissions from Zinc Production (MMT CO2 Eq. and kt)
self.categories_level2 = {
'Iron and Steel Production & Metallurgical Coke Production': {
'activity': {
'source': 'EPA',
'table': {
'Metallurgical coke': ['Table 4-67', # Production and Consumption Data for the Calculation of CO2 Emissions from Metallurgical Coke Production (Thousand Metric Tons)
'Table 4-68'], # Material Carbon Contents for Iron and Steel Production
'Iron and Steel': ['Table 4-72', # Production and Consumption Data for the Calculation of CO2 and CH4 Emissions from Iron and Steel Production (Thousand Metric Tons)
'Table 4-73']} # Production and Consumption Data for the Calculation of CO2 Emissions from Iron and Steel Production (Million ft3 unless otherwise specified)
},
'emissions': {
'source': 'EPA',
'table': {
'Metallurgical coke': 'Table 4-60', # CO2 Emissions from Metallurgical Coke Production (MMT CO2 Eq.)
'Iron and Steel': ['Table 4-62', # CO2 Emissions from Iron and Steel Production (MMT CO2 Eq.)
'Table 4-64']}}}, # CH4 Emissions from Iron and Steel Production (MMT CO2 Eq.)
'Non-Energy Use of Fuels': {
'activity': {
'source': 'EPA',
'table': 'Table 3-21'}, # Adjusted Consumption of Fossil Fuels for Non-Energy Uses (TBtu)
# 'Table 3-22']}, # 2018 Adjusted Non-Energy Use Fossil Fuel Consumption, Storage, and Emissions
'emissions': {
'source': 'EPA',
'table': 'Table 3-20'
}
}
} # CO2 Emissions from Non-Energy Use Fossil Fuel Consumption (MMT CO2 Eq. and Percent)
def unpack_noncombustion_data(self, zip_file):
"""Unpack zipped file into folder stored locally
Args:
zip_file (str): URL / path to zipfile
"""
r = requests.get(zip_file)
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall(self.base_dir)
print('zipfile collected')
@staticmethod
def noncombustion_emissions(dir):
"""Create a dataframe (saved to base_dir) matching the filename and title
of each csv in base_dir
Args:
            dir (str): Local folder containing unzipped
emissions data
"""
files_list = glob.glob(f"{dir}*.csv")
if files_list:
data = dict()
for f in files_list:
f = f.replace('\\', '/')
df =
|
pd.read_csv(f, engine='python')
|
pandas.read_csv
|
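# A hedged usage sketch of the NonCombustion class above (an assumed workflow, not code
# from the module): download and unpack one EPA archive, then index the extracted CSVs
# with the static helper, which per its docstring builds a filename/title table.
# The local directory is a placeholder.
nc = NonCombustion(base_dir='./epa_data/')
nc.unpack_noncombustion_data(nc.annex)                 # fetches annex_1.zip and extracts it
NonCombustion.noncombustion_emissions('./epa_data/')   # tabulates the extracted CSV files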
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
df1=pd.read_csv('../input/forest-cover-type-prediction/train.csv')
df_test1=pd.read_csv('../input/forest-cover-type-prediction/test.csv')
df_test2=pd.read_csv('../input/forest-cover-type-prediction/test3.csv')
df=df1.copy()
df_test=df_test1.copy()
df
pd.set_option('display.max_columns',None)
df.drop(columns=['Id','Cover_Type'],inplace=True)
df_test.drop(columns=['Id'],inplace=True)
df_test
X_train=df
Y_train=df1.iloc[:,-1]
X_train
df_test
from collections import Counter
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier, VotingClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
from sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold, learning_curve
from lightgbm import LGBMClassifier
sns.set(style='white', context='notebook', palette='deep')
kfold = StratifiedKFold(n_splits=10)
random_state = 2
classifiers = []
classifiers.append(SVC(random_state=random_state))
classifiers.append(DecisionTreeClassifier(random_state=random_state))
classifiers.append(AdaBoostClassifier(DecisionTreeClassifier(random_state=random_state),random_state=random_state,learning_rate=0.1))
classifiers.append(RandomForestClassifier(random_state=random_state))
classifiers.append(ExtraTreesClassifier(random_state=random_state))
classifiers.append(GradientBoostingClassifier(random_state=random_state))
classifiers.append(MLPClassifier(random_state=random_state))
classifiers.append(KNeighborsClassifier())
classifiers.append(LogisticRegression(random_state = random_state))
classifiers.append(LinearDiscriminantAnalysis())
classifiers.append(XGBClassifier(random_state = random_state))
classifiers.append(LGBMClassifier(random_state = random_state))
cv_results = []
for classifier in classifiers :
score=cross_val_score(classifier, X_train, y = Y_train, scoring = "accuracy", cv = kfold, n_jobs=-1)
cv_results.append(score)
print('{} crossvalidation score:{}\n'.format(classifier,score.mean()))
cv_means = []
cv_std = []
for cv_result in cv_results:
cv_means.append(cv_result.mean())
cv_std.append(cv_result.std())
cv_res = pd.DataFrame({"CrossValMeans":cv_means,"CrossValerrors": cv_std,"Algorithm":["SVC","DecisionTree","AdaBoost",
"RandomForest","ExtraTrees","GradientBoosting","MultipleLayerPerceptron","KNeighboors","LogisticRegression","LinearDiscriminantAnalysis",'XGboost','LGboost']})
g = sns.barplot("CrossValMeans","Algorithm",data = cv_res, palette="Set3",orient = "h",**{'xerr':cv_std})
g.set_xlabel("Mean Accuracy")
g = g.set_title("Cross validation scores")
from sklearn.model_selection import train_test_split
xtrain,xtest,ytrain,ytest=train_test_split(X_train.values,Y_train.values,test_size=0.2)
from sklearn.metrics import accuracy_score
RFC = RandomForestClassifier(random_state=random_state)
RFC.fit(xtrain,ytrain)
ypred=RFC.predict(xtest)
score=cross_val_score(RFC,X_train,Y_train,scoring='accuracy',cv=kfold,n_jobs=-1)
# Best score
print('Crossval score for random forest: {}'.format(score.mean()))
print('Accuracy score for random forest: {}'.format(accuracy_score(ytest,ypred)))
RFC.get_params()
from sklearn.metrics import accuracy_score
RFC2 = RandomForestClassifier(random_state=random_state,
n_estimators=500,
max_depth=32,
min_samples_leaf=1,
criterion='entropy')
RFC2.fit(xtrain,ytrain)
ypred=RFC2.predict(xtest)
score=cross_val_score(RFC2,X_train,Y_train,scoring='accuracy',cv=kfold,n_jobs=-1)
# Best score
print('Crossval score for random forest: {}'.format(score.mean()))
print('Accuracy score for random forest: {}'.format(accuracy_score(ytest,ypred)))
et=ExtraTreesClassifier(random_state=random_state)
et.fit(xtrain,ytrain)
ypred=et.predict(xtest)
score=cross_val_score(et,X_train,Y_train,scoring='accuracy',cv=kfold,n_jobs=-1)
# Best score
print('Crossval score for extra trees classifier: {}'.format(score.mean()))
print('Accuracy score for extra trees classifier: {}'.format(accuracy_score(ytest,ypred)))
et2=ExtraTreesClassifier()
et2.get_params()
et2=ExtraTreesClassifier(bootstrap=False, ccp_alpha=0.0, class_weight=None,
criterion='entropy', max_depth=38, max_features='auto',
max_leaf_nodes=None, max_samples=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=500,
n_jobs=None, oob_score=False, random_state=0, verbose=0,
warm_start=False)
et2.fit(xtrain,ytrain)
ypred=et2.predict(xtest)
score=cross_val_score(et2,X_train,Y_train,scoring='accuracy',cv=kfold,n_jobs=-1)
# Best score
print('Crossval score for extra trees classifier: {}'.format(score.mean()))
print('Accuracy score for extra trees classifier: {}'.format(accuracy_score(ytest,ypred)))
lgb2=LGBMClassifier(random_state=random_state)
lgb2.fit(xtrain,ytrain)
ypred=lgb2.predict(xtest)
score=cross_val_score(lgb2,X_train,Y_train,scoring='accuracy',cv=kfold,n_jobs=-1)
# Best score
print('Crossval score for Lightgb classifier: {}'.format(score.mean()))
print('Accuracy score for Lightgb classifier: {}'.format(accuracy_score(ytest,ypred)))
lgb=LGBMClassifier(boosting_type='gbdt', class_weight=None, colsample_bytree=1.0,
importance_type='split', learning_rate=0.2, max_depth=-1,
min_child_samples=20, min_child_weight=0.001, min_split_gain=0.0,
n_estimators=200, n_jobs=4, num_leaves=63, objective=None,
random_state=None, reg_alpha=0.0, reg_lambda=0.0, silent=True,
subsample=1.0, subsample_for_bin=200000, subsample_freq=0)
lgb.fit(xtrain,ytrain)
ypred=lgb.predict(xtest)
score=cross_val_score(lgb,X_train,Y_train,scoring='accuracy',cv=kfold,n_jobs=-1)
# Best score
print('Crossval score for Lightgb classifier: {}'.format(score.mean()))
print('Accuracy score for Lightgb classifier: {}'.format(accuracy_score(ytest,ypred)))
vc= VotingClassifier(estimators=[('rfc', RFC2), ('extc', et2),
('lgb',lgb)], voting='soft', n_jobs=-1)
vc.fit(xtrain,ytrain)
ypred=vc.predict(xtest)
score=cross_val_score(vc,X_train,Y_train,scoring='accuracy',cv=kfold,n_jobs=-1)
# Best score
print('Crossval score for voting classifier: {}'.format(score.mean()))
print('Accuracy score for voting classifier: {}'.format(accuracy_score(ytest,ypred)))
"""
from sklearn.ensemble import StackingClassifier
estimators = [ ('rf', RFC2),
('et', et2)]
sc= StackingClassifier(estimators=estimators, final_estimator=lgb)
sc.fit(xtrain,ytrain)
ypred=sc.predict(xtest)
score=cross_val_score(sc,X_train,Y_train,scoring='accuracy',cv=kfold,n_jobs=-1)
# Best score
print('Crossval score for stacking classifier: {}'.format(score.mean()))
print('Accuracy score for stacking classifier: {}'.format(accuracy_score(ytest,ypred)))"""
vc.fit(X_train,Y_train)
ypred=vc.predict(df_test.values)
id=df_test1['Id']
dict={'ID':id,'Cover_Type':ypred}
dfsub=
|
pd.DataFrame(dict)
|
pandas.DataFrame
|
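# Note on the ensemble above: voting='soft' averages the estimators' predicted class
# probabilities and selects the class with the highest average (mapped back through the
# classifier's classes_). A typical final step (an assumption, not shown in the notebook)
# is writing the submission frame to CSV for upload; the filename is a placeholder.
dfsub.to_csv('submission.csv', index=False)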
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# with invalid chunksize value:
msg = r"'chunksize' must be an integer >=1"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=1.3)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=0)
def test_read_chunksize_and_nrows(self):
# gh-15755
# With nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=2, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# chunksize > nrows
reader = self.read_csv(
|
StringIO(self.data1)
|
pandas.compat.StringIO
|
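# The ParserTests docstring above says the same tests should run against both the C and
# the Python parser. A minimal sketch of how that can be wired up (an illustration, not
# the actual pandas test harness): a concrete class pins `engine` and forwards to the
# top-level readers, so every test method runs once per engine-specific subclass.
import pandas as pd

class CParserTestsSketch(ParserTests):
    engine = 'c'

    def read_csv(self, *args, **kwargs):
        kwargs.setdefault('engine', self.engine)
        return pd.read_csv(*args, **kwargs)

    def read_table(self, *args, **kwargs):
        kwargs.setdefault('engine', self.engine)
        return pd.read_table(*args, **kwargs)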
import pandas as pd
import numpy as np
from scipy import signal
from eeg_globals import *
def cut_signal(eeg_df, dic_cut_opts):
"""
    Cut the signal into small overlapping windows.
Return:
A new dataframe
"""
sample_win = int(PSF * dic_cut_opts['window'])
sample_over = int(PSF * dic_cut_opts['overlap'])
sample_stride = sample_win - sample_over
    # Split each subject/test/phase block into overlapping windows and tag each window with an 'observation' index
print('split data into observations')
eeg_window = []
for subject in eeg_df.subject.unique():
print(subject)
for test in eeg_df.test.unique():
print(' ' + str(test))
for phase in eeg_df.phase.unique():
print(' ' + str(phase))
df = eeg_df.loc[(eeg_df.subject==subject) &
(eeg_df.test==test) &
(eeg_df.phase==phase)].copy()
df = df.reset_index(drop=True)
n_intervals = int(np.floor(( df.shape[0] - sample_win ) / sample_stride) + 1)
for k in range(n_intervals):
data = df.iloc[k * sample_stride : k * sample_stride + sample_win].copy()
data = data.reset_index(drop=True)
data['observation'] = k + 1
eeg_window.append(data)
del data
del df
eeg_window =
|
pd.concat(eeg_window, axis=0, ignore_index=True)
|
pandas.concat
|
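# A worked example of the windowing arithmetic above. PSF is assumed to be 250 Hz here
# purely for illustration (the real value comes from eeg_globals): a 2 s window with a
# 1 s overlap gives a 250-sample stride, so a 2500-sample phase yields
# floor((2500 - 500) / 250) + 1 = 9 overlapping observations.
import numpy as np

PSF_EXAMPLE = 250                             # assumed sampling rate, samples per second
opts = {'window': 2.0, 'overlap': 1.0}        # seconds
win = int(PSF_EXAMPLE * opts['window'])       # 500 samples
over = int(PSF_EXAMPLE * opts['overlap'])     # 250 samples
stride = win - over                           # 250 samples
n_samples = 2500
n_intervals = int(np.floor((n_samples - win) / stride) + 1)   # 9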
from datetime import datetime
from pathlib import Path
import h5py
import numpy as np
import pandas as pd
import pytest
from turn_by_turn import iota, ptc, trackone
from turn_by_turn.constants import PLANES, PRINT_PRECISION
from turn_by_turn.errors import DataTypeError, ExclusiveArgumentsError, HDF5VersionError, PTCFormatError
from turn_by_turn.io import read_tbt, write_lhc_ascii, write_tbt
from turn_by_turn.structures import TbtData, TransverseData
from turn_by_turn.utils import add_noise, generate_average_tbtdata
INPUTS_DIR = Path(__file__).parent / "inputs"
ASCII_PRECISION = 0.5 / np.power(10, PRINT_PRECISION)
@pytest.mark.parametrize("datatype", ["invalid", "not_supported"])
def test_tbt_read_raises_on_invalid_datatype(_sdds_file, caplog, datatype):
with pytest.raises(DataTypeError):
_ = read_tbt(_sdds_file, datatype=datatype)
for record in caplog.records:
assert record.levelname == "ERROR"
def test_tbt_write_read_sdds_binary(_sdds_file, _test_file):
origin = read_tbt(_sdds_file)
write_tbt(_test_file, origin)
new = read_tbt(f"{_test_file}.sdds")
_compare_tbt(origin, new, False)
def test_tbt_write_read_sdds_binary_with_noise(_sdds_file, _test_file):
origin = read_tbt(_sdds_file)
write_tbt(_test_file, origin, noise=2)
new = read_tbt(f"{_test_file}.sdds")
with pytest.raises(AssertionError): # should be different
_compare_tbt(origin, new, False)
def test_tbt_read_hdf5(_hdf5_file):
origin = TbtData(
matrices=[
TransverseData(
X=pd.DataFrame(
index=["IBPMA1C", "IBPME2R"],
data=_create_data(np.linspace(-np.pi, np.pi, 2000, endpoint=False), 2, np.sin),
dtype=float,
),
Y=pd.DataFrame(
index=["IBPMA1C", "IBPME2R"],
data=_create_data(np.linspace(-np.pi, np.pi, 2000, endpoint=False), 2, np.cos),
dtype=float,
),
)
],
date=datetime.now(),
bunch_ids=[1],
nturns=2000,
)
new = iota.read_tbt(_hdf5_file, hdf5_version=1)
_compare_tbt(origin, new, False)
def test_tbt_read_hdf5_v2(_hdf5_file_v2):
origin = TbtData(
matrices=[
TransverseData(
X=pd.DataFrame(
index=["IBPMA1C", "IBPME2R"],
data=_create_data(np.linspace(-np.pi, np.pi, 2000, endpoint=False), 2, np.sin),
dtype=float,
),
Y=pd.DataFrame(
index=["IBPMA1C", "IBPME2R"],
data=_create_data(np.linspace(-np.pi, np.pi, 2000, endpoint=False), 2, np.cos),
dtype=float,
),
)
],
date=datetime.now(),
bunch_ids=[1],
nturns=2000,
)
new = iota.read_tbt(_hdf5_file_v2)
_compare_tbt(origin, new, False)
def test_tbt_raises_on_wrong_hdf5_version(_hdf5_file):
with pytest.raises(HDF5VersionError):
new = iota.read_tbt(_hdf5_file, hdf5_version=2)
def test_compare_average_Tbtdata():
npart = 10
data = {
plane: np.concatenate(
[
[
_create_data(
np.linspace(1, 10, 10, endpoint=False, dtype=int),
2,
(lambda x: np.random.randn(len(x))),
)
]
for _ in range(npart)
],
axis=0,
)
for plane in PLANES
}
origin = TbtData(
matrices=[
TransverseData(
X=pd.DataFrame(index=["IBPMA1C", "IBPME2R"], data=data["X"][i], dtype=float),
Y=pd.DataFrame(index=["IBPMA1C", "IBPME2R"], data=data["Y"][i], dtype=float),
)
for i in range(npart)
],
date=datetime.now(),
bunch_ids=range(npart),
nturns=10,
)
new = TbtData(
matrices=[
TransverseData(
X=pd.DataFrame(
index=["IBPMA1C", "IBPME2R"],
data=np.mean(data["X"], axis=0),
dtype=float,
),
Y=pd.DataFrame(
index=["IBPMA1C", "IBPME2R"],
data=np.mean(data["Y"], axis=0),
dtype=float,
),
)
],
date=datetime.now(),
bunch_ids=[1],
nturns=10,
)
_compare_tbt(generate_average_tbtdata(origin), new, False)
def test_tbt_read_ptc(_ptc_file):
new = ptc.read_tbt(_ptc_file)
origin = _original_trackone()
_compare_tbt(origin, new, True)
def test_tbt_read_ptc_raises_on_invalid_file(_invalid_ptc_file):
with pytest.raises(PTCFormatError):
_ = ptc.read_tbt(_invalid_ptc_file)
def test_tbt_read_ptc_defaults_date(_ptc_file_no_date):
new = ptc.read_tbt(_ptc_file_no_date)
assert new.date.day == datetime.today().day
assert new.date.tzname() == "UTC"
def test_tbt_read_trackone(_ptc_file):
new = trackone.read_tbt(_ptc_file)
origin = _original_trackone(True)
_compare_tbt(origin, new, True)
def test_tbt_read_ptc_sci(_ptc_file_sci):
new = ptc.read_tbt(_ptc_file_sci)
origin = _original_trackone()
_compare_tbt(origin, new, True)
def test_tbt_read_trackone_sci(_ptc_file_sci):
new = trackone.read_tbt(_ptc_file_sci)
origin = _original_trackone(True)
_compare_tbt(origin, new, True)
def test_tbt_read_ptc_looseparticles(_ptc_file_losses):
new = ptc.read_tbt(_ptc_file_losses)
assert len(new.matrices) == 3
assert len(new.matrices[0].X.columns) == 9
assert all(new.matrices[0].X.index == np.array([f"BPM{i+1}" for i in range(3)]))
assert not new.matrices[0].X.isna().any().any()
def test_tbt_read_trackone_looseparticles(_ptc_file_losses):
new = trackone.read_tbt(_ptc_file_losses)
assert len(new.matrices) == 3
assert len(new.matrices[0].X.columns) == 9
assert all(new.matrices[0].X.index == np.array([f"BPM{i+1}" for i in range(3)]))
assert not new.matrices[0].X.isna().any().any()
def test_tbt_write_read_ascii(_sdds_file, _test_file):
origin = read_tbt(_sdds_file)
write_lhc_ascii(_test_file, origin)
new = read_tbt(_test_file)
_compare_tbt(origin, new, True)
def test_noise_addition():
array = _create_data(np.linspace(-np.pi, np.pi, 2000, endpoint=False), 1, np.sin).flatten()
noised = add_noise(array, noise=0)
np.testing.assert_array_equal(array, noised)
noised = add_noise(array, sigma=0)
np.testing.assert_array_equal(array, noised)
noised = add_noise(array, noise=5)
assert np.std(array) != np.std(noised)
with pytest.raises(AssertionError):
np.testing.assert_array_equal(array, noised)
noised = add_noise(array, sigma=1)
assert np.std(array) != np.std(noised)
with pytest.raises(AssertionError):
np.testing.assert_array_equal(array, noised)
@pytest.mark.parametrize("seed", [1236, 6749, 23495564])
def test_noise_addition_with_seed(seed):
array = _create_data(np.linspace(-np.pi, np.pi, 2000, endpoint=False), 1, np.sin).flatten()
noised_1 = add_noise(array, sigma=5, seed=seed)
noised_2 = add_noise(array, sigma=5, seed=seed)
np.testing.assert_array_equal(noised_1, noised_2) # should be equal with same noise seed
noised_3 = add_noise(array, noise=5, seed=seed * 5)
with pytest.raises(AssertionError): # Should be different with different seeds
np.testing.assert_array_equal(noised_1, noised_3)
def test_add_noise_raises_on_both_arguments():
array = _create_data(np.linspace(-np.pi, np.pi, 2000, endpoint=False), 1, np.sin).flatten()
with pytest.raises(ExclusiveArgumentsError):
_ = add_noise(array, noise=5, sigma=1)
# ----- Helpers ----- #
def _compare_tbt(origin: TbtData, new: TbtData, no_binary: bool, max_deviation=ASCII_PRECISION) -> None:
assert new.nturns == origin.nturns
assert new.nbunches == origin.nbunches
assert new.bunch_ids == origin.bunch_ids
for index in range(origin.nbunches):
for plane in PLANES:
assert np.all(new.matrices[index][plane].index == origin.matrices[index][plane].index)
origin_mat = origin.matrices[index][plane].to_numpy()
new_mat = new.matrices[index][plane].to_numpy()
if no_binary:
assert np.max(np.abs(origin_mat - new_mat)) < max_deviation
else:
assert np.all(origin_mat == new_mat)
def _original_trackone(track: bool = False) -> TbtData:
names = np.array(["C1.BPM1"])
matrix = [
TransverseData(
X=pd.DataFrame(index=names, data=[[0.001, -0.0003606, -0.00165823, -0.00266631]]),
Y=
|
pd.DataFrame(index=names, data=[[0.001, 0.00070558, -0.00020681, -0.00093807]])
|
pandas.DataFrame
|
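# The fixtures above call a _create_data helper that is not shown in this excerpt.
# A plausible minimal implementation (an assumption, not the project's actual helper):
# evaluate `function` over `phases` and replicate the result across `nbpm` rows, so
# every BPM carries the same turn-by-turn signal.
import numpy as np

def _create_data(phases, nbpm, function):
    return np.ones((nbpm, len(phases))) * function(phases)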
import string
import numpy as np
from numpy.testing import assert_array_equal
from pandas import DataFrame, MultiIndex, Series
from shapely.geometry import LinearRing, LineString, MultiPoint, Point, Polygon
from shapely.geometry.collection import GeometryCollection
from shapely.ops import unary_union
from geopandas import GeoDataFrame, GeoSeries
from geopandas.base import GeoPandasBase
from geopandas.testing import assert_geodataframe_equal
from geopandas.tests.util import assert_geoseries_equal, geom_almost_equals, geom_equals
from geopandas import _compat as compat
from pandas.testing import assert_frame_equal, assert_series_equal
import pytest
def assert_array_dtype_equal(a, b, *args, **kwargs):
a = np.asanyarray(a)
b = np.asanyarray(b)
assert a.dtype == b.dtype
assert_array_equal(a, b, *args, **kwargs)
class TestGeomMethods:
def setup_method(self):
self.t1 = Polygon([(0, 0), (1, 0), (1, 1)])
self.t2 = Polygon([(0, 0), (1, 1), (0, 1)])
self.t3 = Polygon([(2, 0), (3, 0), (3, 1)])
self.tz = Polygon([(1, 1, 1), (2, 2, 2), (3, 3, 3)])
self.tz1 = Polygon([(2, 2, 2), (1, 1, 1), (3, 3, 3)])
self.sq = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
self.sqz = Polygon([(1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4)])
self.t4 = Polygon([(0, 0), (3, 0), (3, 3), (0, 2)])
self.t5 = Polygon([(2, 0), (3, 0), (3, 3), (2, 3)])
self.inner_sq = Polygon(
[(0.25, 0.25), (0.75, 0.25), (0.75, 0.75), (0.25, 0.75)]
)
self.nested_squares = Polygon(self.sq.boundary, [self.inner_sq.boundary])
self.p0 = Point(5, 5)
self.p3d = Point(5, 5, 5)
self.g0 = GeoSeries(
[
self.t1,
self.t2,
self.sq,
self.inner_sq,
self.nested_squares,
self.p0,
None,
]
)
self.g1 = GeoSeries([self.t1, self.sq])
self.g2 = GeoSeries([self.sq, self.t1])
self.g3 = GeoSeries([self.t1, self.t2])
self.gz = GeoSeries([self.tz, self.sqz, self.tz1])
self.g3.crs = "epsg:4326"
self.g4 = GeoSeries([self.t2, self.t1])
self.g4.crs = "epsg:4326"
self.g_3d = GeoSeries([self.p0, self.p3d])
self.na = GeoSeries([self.t1, self.t2, Polygon()])
self.na_none = GeoSeries([self.t1, None])
self.a1 = self.g1.copy()
self.a1.index = ["A", "B"]
self.a2 = self.g2.copy()
self.a2.index = ["B", "C"]
self.esb = Point(-73.9847, 40.7484, 30.3244)
self.sol = Point(-74.0446, 40.6893, 31.2344)
self.landmarks = GeoSeries([self.esb, self.sol], crs="epsg:4326")
self.pt2d = Point(-73.9847, 40.7484)
self.landmarks_mixed = GeoSeries([self.esb, self.sol, self.pt2d], crs=4326)
self.l1 = LineString([(0, 0), (0, 1), (1, 1)])
self.l2 = LineString([(0, 0), (1, 0), (1, 1), (0, 1)])
self.g5 = GeoSeries([self.l1, self.l2])
self.g6 = GeoSeries([self.p0, self.t3])
self.g7 = GeoSeries([self.sq, self.t4])
self.g8 = GeoSeries([self.t1, self.t5])
self.empty = GeoSeries([])
self.all_none = GeoSeries([None, None])
self.empty_poly = Polygon()
self.g9 = GeoSeries(self.g0, index=range(1, 8))
# Crossed lines
self.l3 = LineString([(0, 0), (1, 1)])
self.l4 = LineString([(0, 1), (1, 0)])
self.crossed_lines = GeoSeries([self.l3, self.l4])
# Placeholder for testing, will just drop in different geometries
# when needed
self.gdf1 = GeoDataFrame(
{"geometry": self.g1, "col0": [1.0, 2.0], "col1": ["geo", "pandas"]}
)
self.gdf2 = GeoDataFrame(
{"geometry": self.g1, "col3": [4, 5], "col4": ["rand", "string"]}
)
self.gdf3 = GeoDataFrame(
{"geometry": self.g3, "col3": [4, 5], "col4": ["rand", "string"]}
)
self.gdfz = GeoDataFrame(
{"geometry": self.gz, "col3": [4, 5, 6], "col4": ["rand", "string", "geo"]}
)
def _test_unary_real(self, op, expected, a):
""" Tests for 'area', 'length', 'is_valid', etc. """
fcmp = assert_series_equal
self._test_unary(op, expected, a, fcmp)
def _test_unary_topological(self, op, expected, a):
if isinstance(expected, GeoPandasBase):
fcmp = assert_geoseries_equal
else:
def fcmp(a, b):
assert a.equals(b)
self._test_unary(op, expected, a, fcmp)
def _test_binary_topological(self, op, expected, a, b, *args, **kwargs):
""" Tests for 'intersection', 'union', 'symmetric_difference', etc. """
if isinstance(expected, GeoPandasBase):
fcmp = assert_geoseries_equal
else:
def fcmp(a, b):
assert geom_equals(a, b)
if isinstance(b, GeoPandasBase):
right_df = True
else:
right_df = False
self._binary_op_test(op, expected, a, b, fcmp, True, right_df, *args, **kwargs)
def _test_binary_real(self, op, expected, a, b, *args, **kwargs):
fcmp = assert_series_equal
self._binary_op_test(op, expected, a, b, fcmp, True, False, *args, **kwargs)
def _test_binary_operator(self, op, expected, a, b):
"""
The operators only have GeoSeries on the left, but can have
GeoSeries or GeoDataFrame on the right.
If GeoDataFrame is on the left, geometry column is used.
"""
if isinstance(expected, GeoPandasBase):
fcmp = assert_geoseries_equal
else:
def fcmp(a, b):
assert geom_equals(a, b)
if isinstance(b, GeoPandasBase):
right_df = True
else:
right_df = False
self._binary_op_test(op, expected, a, b, fcmp, False, right_df)
def _binary_op_test(
self, op, expected, left, right, fcmp, left_df, right_df, *args, **kwargs
):
"""
This is a helper to call a function on GeoSeries and GeoDataFrame
arguments. For example, 'intersection' is a member of both GeoSeries
and GeoDataFrame and can take either GeoSeries or GeoDataFrame inputs.
This function has the ability to test all four combinations of input
types.
Parameters
----------
        op : str
            The operation to be tested, e.g. 'intersection'
left: GeoSeries
right: GeoSeries
fcmp: function
Called with the result of the operation and expected. It should
assert if the result is incorrect
left_df: bool
If the left input should also be called with a GeoDataFrame
right_df: bool
Indicates whether the right input should be called with a
GeoDataFrame
"""
def _make_gdf(s):
n = len(s)
col1 = string.ascii_lowercase[:n]
col2 = range(n)
return GeoDataFrame(
{"geometry": s.values, "col1": col1, "col2": col2},
index=s.index,
crs=s.crs,
)
# Test GeoSeries.op(GeoSeries)
result = getattr(left, op)(right, *args, **kwargs)
fcmp(result, expected)
if left_df:
# Test GeoDataFrame.op(GeoSeries)
gdf_left = _make_gdf(left)
result = getattr(gdf_left, op)(right, *args, **kwargs)
fcmp(result, expected)
if right_df:
# Test GeoSeries.op(GeoDataFrame)
gdf_right = _make_gdf(right)
result = getattr(left, op)(gdf_right, *args, **kwargs)
fcmp(result, expected)
if left_df:
# Test GeoDataFrame.op(GeoDataFrame)
result = getattr(gdf_left, op)(gdf_right, *args, **kwargs)
fcmp(result, expected)
def _test_unary(self, op, expected, a, fcmp):
# GeoSeries, (GeoSeries or geometry)
result = getattr(a, op)
fcmp(result, expected)
# GeoDataFrame, (GeoSeries or geometry)
gdf = self.gdf1.set_geometry(a)
result = getattr(gdf, op)
fcmp(result, expected)
# TODO reenable for all operations once we use pyproj > 2
# def test_crs_warning(self):
# # operations on geometries should warn for different CRS
# no_crs_g3 = self.g3.copy()
# no_crs_g3.crs = None
# with pytest.warns(UserWarning):
# self._test_binary_topological('intersection', self.g3,
# self.g3, no_crs_g3)
def test_intersection(self):
self._test_binary_topological("intersection", self.t1, self.g1, self.g2)
with pytest.warns(UserWarning, match="The indices .+ different"):
self._test_binary_topological(
"intersection", self.all_none, self.g1, self.empty
)
        assert len(self.g0.intersection(self.g9, align=True)) == 8
        assert len(self.g0.intersection(self.g9, align=False)) == 7
def test_union_series(self):
self._test_binary_topological("union", self.sq, self.g1, self.g2)
        assert len(self.g0.union(self.g9, align=True)) == 8
        assert len(self.g0.union(self.g9, align=False)) == 7
def test_union_polygon(self):
self._test_binary_topological("union", self.sq, self.g1, self.t2)
def test_symmetric_difference_series(self):
self._test_binary_topological("symmetric_difference", self.sq, self.g3, self.g4)
        assert len(self.g0.symmetric_difference(self.g9, align=True)) == 8
        assert len(self.g0.symmetric_difference(self.g9, align=False)) == 7
def test_symmetric_difference_poly(self):
expected = GeoSeries([GeometryCollection(), self.sq], crs=self.g3.crs)
self._test_binary_topological(
"symmetric_difference", expected, self.g3, self.t1
)
def test_difference_series(self):
expected = GeoSeries([GeometryCollection(), self.t2])
self._test_binary_topological("difference", expected, self.g1, self.g2)
        assert len(self.g0.difference(self.g9, align=True)) == 8
        assert len(self.g0.difference(self.g9, align=False)) == 7
def test_difference_poly(self):
expected = GeoSeries([self.t1, self.t1])
self._test_binary_topological("difference", expected, self.g1, self.t2)
def test_geo_op_empty_result(self):
l1 = LineString([(0, 0), (1, 1)])
l2 = LineString([(2, 2), (3, 3)])
expected = GeoSeries([GeometryCollection()])
# binary geo resulting in empty geometry
result = GeoSeries([l1]).intersection(l2)
assert_geoseries_equal(result, expected)
# binary geo empty result with right GeoSeries
result = GeoSeries([l1]).intersection(GeoSeries([l2]))
assert_geoseries_equal(result, expected)
        # unary geo resulting in empty geometry
result = GeoSeries([GeometryCollection()]).convex_hull
assert_geoseries_equal(result, expected)
def test_boundary(self):
l1 = LineString([(0, 0), (1, 0), (1, 1), (0, 0)])
l2 = LineString([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)])
expected = GeoSeries([l1, l2], index=self.g1.index, crs=self.g1.crs)
self._test_unary_topological("boundary", expected, self.g1)
def test_area(self):
expected = Series(np.array([0.5, 1.0]), index=self.g1.index)
self._test_unary_real("area", expected, self.g1)
expected = Series(np.array([0.5, np.nan]), index=self.na_none.index)
self._test_unary_real("area", expected, self.na_none)
def test_area_crs_warn(self):
with pytest.warns(UserWarning, match="Geometry is in a geographic CRS"):
self.g4.area
def test_bounds(self):
# Set columns to get the order right
expected = DataFrame(
{
"minx": [0.0, 0.0],
"miny": [0.0, 0.0],
"maxx": [1.0, 1.0],
"maxy": [1.0, 1.0],
},
index=self.g1.index,
columns=["minx", "miny", "maxx", "maxy"],
)
result = self.g1.bounds
assert_frame_equal(expected, result)
gdf = self.gdf1.set_geometry(self.g1)
result = gdf.bounds
assert_frame_equal(expected, result)
def test_bounds_empty(self):
# test bounds of empty GeoSeries
# https://github.com/geopandas/geopandas/issues/1195
s = GeoSeries([])
result = s.bounds
expected = DataFrame(
columns=["minx", "miny", "maxx", "maxy"], index=s.index, dtype="float64"
)
assert_frame_equal(result, expected)
def test_unary_union(self):
p1 = self.t1
p2 = Polygon([(2, 0), (3, 0), (3, 1)])
expected = unary_union([p1, p2])
g = GeoSeries([p1, p2])
self._test_unary_topological("unary_union", expected, g)
def test_contains(self):
expected = [True, False, True, False, False, False, False]
assert_array_dtype_equal(expected, self.g0.contains(self.t1))
expected = [False, True, True, True, True, True, False, False]
assert_array_dtype_equal(expected, self.g0.contains(self.g9, align=True))
expected = [False, False, True, False, False, False, False]
assert_array_dtype_equal(expected, self.g0.contains(self.g9, align=False))
def test_length(self):
expected = Series(np.array([2 + np.sqrt(2), 4]), index=self.g1.index)
self._test_unary_real("length", expected, self.g1)
expected = Series(np.array([2 + np.sqrt(2), np.nan]), index=self.na_none.index)
self._test_unary_real("length", expected, self.na_none)
def test_length_crs_warn(self):
with pytest.warns(UserWarning, match="Geometry is in a geographic CRS"):
self.g4.length
def test_crosses(self):
expected = [False, False, False, False, False, False, False]
assert_array_dtype_equal(expected, self.g0.crosses(self.t1))
expected = [False, True]
assert_array_dtype_equal(expected, self.crossed_lines.crosses(self.l3))
expected = [False] * 8
assert_array_dtype_equal(expected, self.g0.crosses(self.g9, align=True))
expected = [False] * 7
assert_array_dtype_equal(expected, self.g0.crosses(self.g9, align=False))
def test_disjoint(self):
expected = [False, False, False, False, False, True, False]
assert_array_dtype_equal(expected, self.g0.disjoint(self.t1))
expected = [False] * 8
assert_array_dtype_equal(expected, self.g0.disjoint(self.g9, align=True))
expected = [False, False, False, False, True, False, False]
assert_array_dtype_equal(expected, self.g0.disjoint(self.g9, align=False))
def test_relate(self):
expected = Series(
[
"212101212",
"212101212",
"212FF1FF2",
"2FFF1FFF2",
"FF2F112F2",
"FF0FFF212",
None,
],
index=self.g0.index,
)
assert_array_dtype_equal(expected, self.g0.relate(self.inner_sq))
expected = Series(["FF0FFF212", None], index=self.g6.index)
assert_array_dtype_equal(expected, self.g6.relate(self.na_none))
expected = Series(
[
None,
"2FFF1FFF2",
"2FFF1FFF2",
"2FFF1FFF2",
"2FFF1FFF2",
"0FFFFFFF2",
None,
None,
],
index=range(8),
)
assert_array_dtype_equal(expected, self.g0.relate(self.g9, align=True))
expected = Series(
[
"FF2F11212",
"2FF11F212",
"212FF1FF2",
"FF2F1F212",
"FF2FF10F2",
None,
None,
],
index=self.g0.index,
)
assert_array_dtype_equal(expected, self.g0.relate(self.g9, align=False))
def test_distance(self):
expected = Series(
np.array([np.sqrt((5 - 1) ** 2 + (5 - 1) ** 2), np.nan]), self.na_none.index
)
assert_array_dtype_equal(expected, self.na_none.distance(self.p0))
expected = Series(np.array([np.sqrt(4 ** 2 + 4 ** 2), np.nan]), self.g6.index)
assert_array_dtype_equal(expected, self.g6.distance(self.na_none))
expected = Series(np.array([np.nan, 0, 0, 0, 0, 0, np.nan, np.nan]), range(8))
assert_array_dtype_equal(expected, self.g0.distance(self.g9, align=True))
val = self.g0.iloc[4].distance(self.g9.iloc[4])
expected = Series(np.array([0, 0, 0, 0, val, np.nan, np.nan]), self.g0.index)
assert_array_dtype_equal(expected, self.g0.distance(self.g9, align=False))
def test_distance_crs_warning(self):
with pytest.warns(UserWarning, match="Geometry is in a geographic CRS"):
self.g4.distance(self.p0)
def test_intersects(self):
expected = [True, True, True, True, True, False, False]
assert_array_dtype_equal(expected, self.g0.intersects(self.t1))
expected = [True, False]
assert_array_dtype_equal(expected, self.na_none.intersects(self.t2))
expected = np.array([], dtype=bool)
assert_array_dtype_equal(expected, self.empty.intersects(self.t1))
expected = np.array([], dtype=bool)
assert_array_dtype_equal(expected, self.empty.intersects(self.empty_poly))
expected = [False] * 7
assert_array_dtype_equal(expected, self.g0.intersects(self.empty_poly))
expected = [False, True, True, True, True, True, False, False]
assert_array_dtype_equal(expected, self.g0.intersects(self.g9, align=True))
expected = [True, True, True, True, False, False, False]
assert_array_dtype_equal(expected, self.g0.intersects(self.g9, align=False))
def test_overlaps(self):
expected = [True, True, False, False, False, False, False]
assert_array_dtype_equal(expected, self.g0.overlaps(self.inner_sq))
expected = [False, False]
assert_array_dtype_equal(expected, self.g4.overlaps(self.t1))
expected = [False] * 8
assert_array_dtype_equal(expected, self.g0.overlaps(self.g9, align=True))
expected = [False] * 7
assert_array_dtype_equal(expected, self.g0.overlaps(self.g9, align=False))
def test_touches(self):
expected = [False, True, False, False, False, False, False]
assert_array_dtype_equal(expected, self.g0.touches(self.t1))
expected = [False] * 8
assert_array_dtype_equal(expected, self.g0.touches(self.g9, align=True))
expected = [True, False, False, True, False, False, False]
assert_array_dtype_equal(expected, self.g0.touches(self.g9, align=False))
def test_within(self):
expected = [True, False, False, False, False, False, False]
assert_array_dtype_equal(expected, self.g0.within(self.t1))
expected = [True, True, True, True, True, False, False]
assert_array_dtype_equal(expected, self.g0.within(self.sq))
expected = [False, True, True, True, True, True, False, False]
assert_array_dtype_equal(expected, self.g0.within(self.g9, align=True))
expected = [False, True, False, False, False, False, False]
assert_array_dtype_equal(expected, self.g0.within(self.g9, align=False))
def test_covers_itself(self):
# Each polygon in a Series covers itself
res = self.g1.covers(self.g1)
exp = Series([True, True])
assert_series_equal(res, exp)
def test_covers(self):
res = self.g7.covers(self.g8)
exp = Series([True, False])
assert_series_equal(res, exp)
expected = [False, True, True, True, True, True, False, False]
assert_array_dtype_equal(expected, self.g0.covers(self.g9, align=True))
expected = [False, False, True, False, False, False, False]
assert_array_dtype_equal(expected, self.g0.covers(self.g9, align=False))
def test_covers_inverse(self):
res = self.g8.covers(self.g7)
exp = Series([False, False])
|
assert_series_equal(res, exp)
|
pandas.testing.assert_series_equal
|
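# A small illustration of the align semantics exercised repeatedly above (general
# geopandas behaviour, not part of the test module): align=True aligns on the union of
# the two indices, while align=False matches rows by position and keeps the left length.
from shapely.geometry import Point
from geopandas import GeoSeries

left = GeoSeries([Point(i, i) for i in range(7)])                      # index 0..6
right = GeoSeries([Point(i, i) for i in range(7)], index=range(1, 8))  # index 1..7
assert len(left.intersection(right, align=True)) == 8    # union of indices: 0..7
assert len(left.intersection(right, align=False)) == 7   # positional, left length kept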
# Authors: <NAME>
# <NAME>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['font.size']=6
from scipy import linalg
import pandas as pd
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
print(__doc__)
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
root_path = os.path.abspath(os.path.join(root_path,os.path.pardir)) # For run in CMD
graphs_path = root_path+'\\results_analysis\\graphs\\'
# #############################################################################
# Model selection for PCA dimensionality
def model_select_pca(station,decomposer,predict_pattern,ax=None,wavelet_level='db10-2'):
# Set project parameters
STATION = station
DECOMPOSER = decomposer
PREDICT_PATTERN = predict_pattern # hindcast or forecast
SIGNALS = STATION+'_'+DECOMPOSER
# Set parameters for PCA
# load one-step one-month forecast or hindcast samples and the normalization indicators
if decomposer=='dwt':
train = pd.read_csv(root_path+'/'+SIGNALS+'/data/'+wavelet_level+'/'+PREDICT_PATTERN+'/minmax_unsample_train.csv')
dev = pd.read_csv(root_path+'/'+SIGNALS+'/data/'+wavelet_level+'/'+PREDICT_PATTERN+'/minmax_unsample_dev.csv')
test =
|
pd.read_csv(root_path+'/'+SIGNALS+'/data/'+wavelet_level+'/'+PREDICT_PATTERN+'/minmax_unsample_test.csv')
|
pandas.read_csv
|
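# A minimal sketch of selecting the PCA dimensionality by cross-validated log-likelihood,
# the pattern that the PCA / FactorAnalysis / cross_val_score imports above point at
# (an assumption about intent, not code from this script). X stands for the predictor
# matrix loaded from the minmax_unsample_train.csv samples.
import numpy as np
from sklearn.decomposition import PCA
from sklearn.model_selection import cross_val_score

def pca_cv_scores(X, n_components_grid, cv=5):
    scores = []
    for n in n_components_grid:
        pca = PCA(n_components=n, svd_solver='full')
        # PCA.score returns the average log-likelihood under the probabilistic PCA model
        scores.append(np.mean(cross_val_score(pca, X, cv=cv)))
    return scores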
"""Compute statistics of sea ice concentration cubes
"""
import copy
from datetime import date as dt_date
from functools import lru_cache
import os
import re
import numpy as np
import pandas as pd
import logging
import seaice.data as sid
import seaice.datastore as sds
from .cube import ConcentrationCube as Cube
import seaice.nasateam as nt
log = logging.getLogger(__name__)
def _sea_ice_statistics(gridset, period, config, failed_qa=None):
"""Given a seaicedata gridset, a pandas.Period, and a config dict, return a
dictionary containing statistics and some metadata.
Positional Arguments:
---------------------
gridset: gridset with data and metadata, like one retrieved from seaice.data
period: pandas.Period, with frequency either day or month.
config: dict containing:
hemisphere: nt.NORTH or nt.SOUTH
extent_threshold: cutoff value for extent
grid_areas: grid whose shape matches a layer of the data in gridset,
whose values are the areas of the grid cells in square km
failed_qa: optional boolean value to use as default for 'failed qa' field,
if not present or None, no failed_qa will be returned from the
statistics row.
"""
hemisphere = config['hemisphere']
cube = _get_cube(gridset, period, config)
total_extent_km2 = cube.extent()
total_area_km2 = cube.area()
missing_km2 = cube.missing()
regional_masks = _fetch_regional_config(config)
regional_stats = _get_regional_stats(cube, regional_masks, hemisphere, period)
row = _create_row(
(period, hemisphere['short_name']), total_extent_km2, total_area_km2, missing_km2,
gridset['metadata'], regional_stats, failed_qa)
return row
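# Illustration of the config dict expected above (values are assumptions, except that the
# docstring itself names nt.NORTH / nt.SOUTH; real grid areas come from the NSIDC grids):
#
#     config = {'hemisphere': nt.NORTH,
#               'extent_threshold': 15,                     # concentration cutoff, percent
#               'grid_areas': np.full((448, 304), 625.0)}   # km^2 per 25 km grid cell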
def _get_cube(gridset, period, config):
hemisphere = config['hemisphere']
extent_threshold = config['extent_threshold']
grid_areas = config['grid_areas']
missing_value = gridset['metadata']['missing_value']
valid_data_range = gridset['metadata']['valid_data_range']
flags = gridset['metadata']['flags']
invalid_ice_mask = nt.invalid_ice_mask(hemisphere, period.month)
return Cube(gridset['data'],
missing_value=missing_value, invalid_data_mask=invalid_ice_mask,
grid_areas=grid_areas, extent_threshold=extent_threshold,
valid_data_range=valid_data_range, flags=flags)
def _set_failed_qa_flag(frame, eval_days, regression_delta_km2):
"""Given a frame of total extent and failed_qa; regression_delta_km2, and a number of
eval days evaluate the frame based on a simple linear regression and return
a frame with 'failed_qa' marked appropriately.
Evaluation will interpolate on the fly - meaning that if a day in a series
is marked bad the next day will be evaluated with the previously marked
day filled in with an interpolated value based on the evaluation frame.
Positional Arguments:
---------------------
frame: Pandas dataframe with total_extent_km2 and failed_qa columns
eval_days: Number of days to evaluate the linear regression
regression_delta_km2: Maximum difference in value between the value expected by
the regression and the actual value.
"""
update_frame = frame.copy()
for period in update_frame.index[eval_days:]:
filename = update_frame['filename'].loc[period]
if filename == []:
update_frame.at[period, 'failed_qa'] = False
continue
period_total_extent_km2 = update_frame['total_extent_km2'].loc[period]
poly_fit_series = update_frame['total_extent_km2'][period-eval_days:period]
delta = _poly_fit_delta(poly_fit_series)
if np.isnan(period_total_extent_km2) or abs(delta) > regression_delta_km2:
update_frame.at[period, 'total_extent_km2'] = np.nan
update_frame.at[period, 'failed_qa'] = True
elif not np.isnan(delta):
update_frame.at[period, 'failed_qa'] = False
return update_frame
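# Hypothetical usage of _set_failed_qa_flag on a toy frame (not from the sedna
# codebase); column names follow the docstring above, and 'filename' uses the
# empty-list sentinel checked inside the loop.
def _set_failed_qa_flag_example():
    idx = pd.period_range('2020-01-01', periods=6, freq='D')
    toy = pd.DataFrame({'total_extent_km2': [10.0, 10.1, 10.2, 10.3, 15.0, 10.5],
                        'failed_qa': False,
                        'filename': [['placeholder.bin']] * 6}, index=idx)
    # The 15.0 outlier lies far from the line fitted to the preceding days, so it
    # is replaced with NaN and marked failed_qa=True; the other evaluated days
    # are marked failed_qa=False.
    return _set_failed_qa_flag(toy, eval_days=3, regression_delta_km2=1.0)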
def _poly_fit_delta(data_series_in):
"""Given an input Pandas Series (data_series_in) with a PeriodIndex and at
least 3 non-nan values, models the expected last value of the series and
returns the difference between the modeled and actual value.
A linear regression is computed for the input data_series_in excluding the last value
and is used to compute the expected value at data_series_in[-1].
Returns the difference of the actual - expected value, or np.nan if
regression cannot be performed.
"""
data_series = data_series_in.copy()
target = data_series[-1:]
data_series = data_series[:-1]
data_series = data_series.dropna()
if len(data_series) < 2:
        log.warning('Cannot calculate regression fit difference for '
'{} without at least 2 previous days data. Skipping.'.format(target))
return np.nan
    x_values = [float(v.to_timestamp().to_julian_date()) for v in data_series.index.values]
poly = np.polyfit(x_values, data_series, 1)
log.debug('poly %s', poly)
expected_value = target.index.values[0].to_timestamp().to_julian_date() * poly[0] + poly[1]
return target.values[0] - expected_value
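# Hypothetical illustration (not part of the sedna module) of the delta that
# _poly_fit_delta computes: the last value of the series is compared against a
# straight line fitted to the preceding values.
def _poly_fit_delta_example():
    idx = pd.period_range('2020-01-01', periods=5, freq='D')
    series = pd.Series([10.0, 10.1, 10.2, 10.3, 12.0], index=idx)  # last value jumps
    # The fitted slope is ~0.1/day, so the expected last value is ~10.4 and the
    # returned delta is roughly +1.6 (actual minus expected).
    return _poly_fit_delta(series)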
def _get_extent(date, config):
extent_grid = sid.extent_daily(hemisphere=config['hemisphere'],
year=date.year, month=date.month, day=date.day,
search_paths=config['search_paths'],
interpolation_radius=0)['data']
return np.sum(((extent_grid == 1) * config['grid_areas']))
def _get_concentration(date, config):
return sid.concentration_daily(hemisphere=config['hemisphere'],
year=date.year, month=date.month, day=date.day,
search_paths=config['search_paths'],
interpolation_radius=0)
def merge_daily_datastore_with_validation_dataframe(validation_frame, data_store):
"""Given a validation dataframe and a data store location update the
datastore with the new validation information"""
frame = _dataframe_from_data_store_daily(data_store)
frame.update(validation_frame)
columns = frame.columns.tolist()
sds.write_daily_datastore(frame, columns, data_store)
def get_validation_frame(dates, data_store, hemisphere, regression_delta_km2, eval_days):
"""Returns a validation frame (multiindexed dataframe with 'seaice_extent_km2' and
failed_qa columns).
Parameters:
dates: Pandas period range to evaluate
data_store: Location to load the sedna datastore from
hemisphere: Hemisphere to work with
regression_delta_km2: Maximum delta to allow when evaluating the data against
the regression fit
eval_days: How many prior days to use when calculating the linear regression
"""
frame = _dataframe_from_data_store_daily(data_store)
validation_frame = _create_validation_frame(dates, frame.copy(), hemisphere,
eval_days, regression_delta_km2)
return validation_frame
def update_sea_ice_statistics_daily(dates, config, validate_data=True):
"""Update total sea ice extent and area in the datastore for a set of dates,
run validation and update QA flag as appropriate for NRT data.
Parameters:
dates: List of dates to return statistics for
config: Sedna configuration dict
validate_data : bool to set if the data should run validation. Defaults
to True
Returns a bool indicating if the updates occurred without validation failures
"""
columns = _column_names(config)
data_store = config.get('data_store', 'daily.p')
df = _dataframe_from_data_store_daily(data_store)
df = _add_columns_to_dataframe(df, columns)
default_failed_qa_value = False if validate_data else None
new_rows = dict()
for date in dates:
gridset = sid.concentration_daily(hemisphere=config['hemisphere'],
year=date.year, month=date.month, day=date.day,
search_paths=config['search_paths'],
interpolation_radius=config['interpolation_radius'])
new_rows.update(_sea_ice_statistics(gridset, date, config,
failed_qa=default_failed_qa_value))
log.info('stats for {hemi} {date}'.format(
hemi=config['hemisphere']['short_name'],
date=date.to_timestamp().date().isoformat()))
    new_values = pd.DataFrame()
import pandas as pd
import xlrd
import ast
import sys
#--------
# Takes phenotypes with multiple jd_codes and condenses them back down to the number of drug-GWAS phenotype pairs
# Sums up number of matches for each drug-phenotype pair (can be > 1 since 1 phenotype can have multiple jd_codes)
# Exports condensed dataset to a .csv file
# PARAMETERS:
# in_medi_dataset = contains information about documentation of drug-GWAS phenotype pairs in MEDI
def condensing_medi(in_medi_dataset):
df = pd.read_csv(in_medi_dataset)
short = df[['phenotype','drug','IN_MEDI']].groupby(['phenotype','drug']).sum()
short.to_csv('all-drugs-gwas-medi.csv')
#--------
# Counts number of jd_codes for each phenotype, sets count in 'count' column
# Returns resulting dataframe
# PARAMETERS:
# in_medi_dataset = contains information about documentation of drug-GWAS phenotype pairs in MEDI
def count_jdcode(in_medi_dataset):
df = pd.read_csv(in_medi_dataset)
data1 = df.filter(['phenotype', 'jd_code'], axis=1).drop_duplicates()
    # separates out phenotypes with no jd_codes (these get a count of 0 below)
no_codes = data1[data1['jd_code'].isnull()].drop_duplicates()
no_codes['count'] = 0
data1 = data1.dropna(how='any', axis=0)
data1['jd_code'] = data1['jd_code'].astype(str)
#data1.to_csv('phenotypes_nona.txt', index=False, sep=' ')
# for each phenotype, group all phecodes and put into a list
temp = data1.groupby('phenotype')['jd_code'].apply(list)
jd_list = pd.DataFrame({'phenotype':temp.index, 'jd_code':temp.values})
jd_list['count'] = 0
# count number of phecodes for each phenotype
for index, row in jd_list.iterrows():
        jd_list.at[index, 'count'] = len(row['jd_code'])  # set_value() was removed in pandas 1.0
    # re-adds phenotypes with no jd_codes
    result = pd.concat([jd_list, no_codes]).sort_values('phenotype').reset_index(drop=True)
return result
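#--------
# Toy illustration (hypothetical data, not from MEDI) of the groupby/apply(list)
# pattern used in count_jdcode to collect and count jd_codes per phenotype
def count_jdcode_toy_example():
    toy = pd.DataFrame({'phenotype': ['asthma', 'asthma', 'gout'],
                        'jd_code': ['495.0', '495.1', '274.1']})
    temp = toy.groupby('phenotype')['jd_code'].apply(list)
    counts = pd.DataFrame({'phenotype': temp.index, 'jd_code': temp.values})
    counts['count'] = counts['jd_code'].apply(len)
    return counts  # asthma -> 2 codes, gout -> 1 code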
#--------
# Creates dataframe with 2 additional columns: 'drug' = drug name, 'drug_in_medi' = 1 if drug is in MEDI
# Returns resulting dataframe
# PARAMETERS:
# in_medi_dataset = contains information about documentation of drug-GWAS phenotype pairs in MEDI
def drug_medi(in_medi_dataset):
df = pd.read_csv(in_medi_dataset)
drugs = df['drug'].drop_duplicates().sort_values().tolist()
    data = pd.DataFrame({'drug': drugs})
#!/usr/bin/env python3
"""
Python 3
Try to get all questions from an instrument
"""
import colectica
from colectica import ColecticaObject
import api
import pandas as pd
import os
import numpy as np
import json
def from_instrument_get_question_response(C, Agency, ID):
"""
    From an instrument, get all questions and all responses
"""
df_instrument_set, instrument_info = C.item_info_set(Agency, ID)
df_question = df_instrument_set.loc[(df_instrument_set.ItemType == 'Question') , :]
question_df_list = []
codelist_df_list = []
response_df_list = []
for question_id in df_question['Identifier']:
# print(question_id)
        df_q, df_response = C.get_question_all(Agency, question_id)  # distinct name avoids shadowing the outer df_question
        # store DataFrame in list
        question_df_list.append(df_q)
        if df_q['response_type'][0] == 'CodeList':
codelist_df_list.append(df_response)
else:
response_df_list.append(df_response)
df_question_all = pd.concat(question_df_list)
if codelist_df_list == []:
        df_codelist_all = pd.DataFrame()