| prompt (stringlengths 19 to 1.03M) | completion (stringlengths 4 to 2.12k) | api (stringlengths 8 to 90) |
---|---|---|
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from ...config import options
from ...serialize import BoolField, AnyField, DataTypeField, Int32Field
from ..utils import parse_index, build_empty_df
from ..operands import DataFrameOperandMixin, DataFrameOperand, ObjectType
from ..merge import DataFrameConcat
class DataFrameReductionOperand(DataFrameOperand):
_axis = AnyField('axis')
_skipna = BoolField('skipna')
_level = AnyField('level')
_min_count = Int32Field('min_count')
_need_count = BoolField('need_count')
_dtype = DataTypeField('dtype')
_combine_size = Int32Field('combine_size')
def __init__(self, axis=None, skipna=None, level=None, min_count=None, need_count=None, dtype=None,
combine_size=None, gpu=None, sparse=None, **kw):
super(DataFrameReductionOperand, self).__init__(_axis=axis, _skipna=skipna, _level=level, _min_count=min_count,
_need_count=need_count, _dtype=dtype,
_combine_size=combine_size, _gpu=gpu, _sparse=sparse, **kw)
@property
def axis(self):
return self._axis
@property
def skipna(self):
return self._skipna
@property
def level(self):
return self._level
@property
def min_count(self):
return self._min_count
@property
def need_count(self):
return self._need_count
@property
def dtype(self):
return self._dtype
@property
def combine_size(self):
return self._combine_size
class DataFrameReductionMixin(DataFrameOperandMixin):
@classmethod
def _tile_one_chunk(cls, op):
df = op.outputs[0]
chk = op.inputs[0].chunks[0]
new_chunk_op = op.copy().reset_key()
chunk = new_chunk_op.new_chunk(op.inputs[0].chunks, shape=df.shape, index=chk.index,
index_value=df.index_value, dtype=df.dtype)
new_op = op.copy()
nsplits = tuple((s,) for s in chunk.shape)
return new_op.new_seriess(op.inputs, df.shape, nsplits=nsplits, chunks=[chunk],
index_value=df.index_value, dtype=df.dtype)
@classmethod
def tile(cls, op):
df = op.outputs[0]
in_df = op.inputs[0]
combine_size = op.combine_size or options.combine_size
if len(in_df.chunks) == 1:
return cls._tile_one_chunk(op)
n_rows, n_cols = in_df.chunk_shape
chunk_dtypes = []
if op.numeric_only and op.axis == 0:
cum_nsplits = np.cumsum((0,) + in_df.nsplits[0])
for i in range(len(cum_nsplits) - 1):
chunk_dtypes.append(build_empty_df(
in_df.dtypes[cum_nsplits[i]: cum_nsplits[i + 1]]).select_dtypes(np.number).dtypes)
# build reduction chunks
reduction_chunks = np.empty(op.inputs[0].chunk_shape, dtype=np.object)
for c in op.inputs[0].chunks:
new_chunk_op = op.copy().reset_key()
if op.min_count > 0:
new_chunk_op._need_count = True
new_chunk_op._object_type = ObjectType.dataframe
if op.axis == 0:
if op.numeric_only:
dtypes = chunk_dtypes[c.index[1]]
else:
dtypes = c.dtypes
reduced_shape = (1, len(dtypes))
index_value = parse_index(pd.RangeIndex(1))
else:
reduced_shape = (c.shape[0], 1)
index_value = c.index_value
dtypes = pd.Series(op.outputs[0].dtype)
reduction_chunks[c.index] = new_chunk_op.new_chunk([c], shape=reduced_shape,
dtypes=dtypes, index_value=index_value)
out_chunks = []
if op.axis is None or op.axis == 0:
for col in range(n_cols):
chunks = [reduction_chunks[i, col] for i in range(n_rows)]
out_chunks.append(cls.tree_reduction(chunks, op, combine_size, col))
elif op.axis == 1:
for row in range(n_rows):
chunks = [reduction_chunks[row, i] for i in range(n_cols)]
out_chunks.append(cls.tree_reduction(chunks, op, combine_size, row))
new_op = op.copy()
nsplits = (tuple(c.shape[0] for c in out_chunks),)
return new_op.new_seriess(op.inputs, df.shape, nsplits=nsplits, chunks=out_chunks,
dtype=df.dtype, index_value=df.index_value)
@classmethod
def tree_reduction(cls, chunks, op, combine_size, idx):
while len(chunks) > combine_size:
new_chunks = []
for i in range(0, len(chunks), combine_size):
chks = chunks[i: i + combine_size]
for j, c in enumerate(chks):
c._index = (j,)
concat_op = DataFrameConcat(axis=op.axis, object_type=ObjectType.dataframe)
if op.axis == 0:
concat_index = parse_index(pd.RangeIndex(len(chks)))
concat_dtypes = chks[0].dtypes
concat_shape = (sum([c.shape[0] for c in chks]), chks[0].shape[1])
else:
concat_index = chks[0].index
concat_dtypes = pd.Series([c.dtypes[0] for c in chks])
concat_shape = (chks[0].shape[0], (sum([c.shape[1] for c in chks])))
chk = concat_op.new_chunk(chks, shape=concat_shape, index=(i,),
dtypes=concat_dtypes, index_value=concat_index)
if op.axis == 0:
reduced_shape = (1, chk.shape[1])
index_value = parse_index(pd.RangeIndex(1))
dtypes = chk.dtypes
else:
reduced_shape = (chk.shape[0], 1)
index_value = chk.index_value
dtypes = pd.Series(op.outputs[0].dtype)
new_op = op.copy().reset_key()
new_op._object_type = ObjectType.dataframe
new_chunks.append(new_op.new_chunk([chk], shape=reduced_shape, index=(i,), dtypes=dtypes,
index_value=index_value))
chunks = new_chunks
concat_op = DataFrameConcat(axis=op.axis, object_type=ObjectType.dataframe)
chk = concat_op.new_chunk(chunks, index=(idx,))
empty_df = build_empty_df(chunks[0].dtypes)
reduced_df = getattr(empty_df, getattr(cls, '_func_name'))(axis=op.axis, level=op.level,
numeric_only=op.numeric_only)
reduced_shape = (np.nan,) if op.axis == 1 else reduced_df.shape
new_op = op.copy().reset_key()
return new_op.new_chunk([chk], shape=reduced_shape, index=(idx,), dtype=reduced_df.dtype,
index_value=parse_index(reduced_df.index))
@classmethod
def execute(cls, ctx, op):
inputs = ctx[op.inputs[0].key]
if isinstance(inputs, tuple):
in_df, concat_count = inputs
count = concat_count.sum(axis=op.axis)
else:
in_df = inputs
count = 0
res = getattr(in_df, getattr(cls, '_func_name'))(axis=op.axis, level=op.level,
skipna=op.skipna, numeric_only=op.numeric_only)
if op.object_type == ObjectType.series:
if op.min_count > 0:
res[count < op.min_count] = np.nan
ctx[op.outputs[0].key] = res
else:
ctx[op.outputs[0].key] = res
else:
if op.need_count:
count = in_df.notnull().sum(axis=op.axis)
if op.axis == 0:
if op.min_count > 0:
ctx[op.outputs[0].key] = (pd.DataFrame(res).transpose(), pd.DataFrame(count).transpose())
else:
ctx[op.outputs[0].key] = pd.DataFrame(res).transpose()
else:
if op.min_count > 0:
ctx[op.outputs[0].key] = (pd.DataFrame(res), pd.DataFrame(count))
else:
ctx[op.outputs[0].key] = pd.DataFrame(res)
def __call__(self, df):
axis = getattr(self, 'axis', None)
level = getattr(self, 'level', None)
numeric_only = getattr(self, 'numeric_only', None)
if axis == 'index':
axis = 0
if axis == 'columns':
axis = 1
self._axis = axis
# TODO: enable specifying level once groupby is supported
if level is not None:
raise NotImplementedError('Specifying level is not supported yet')
if axis is None:
self._axis = 0
empty_df = build_empty_df(df.dtypes)
reduced_df = getattr(empty_df, getattr(self, '_func_name'))(axis=axis, level=level,
numeric_only=numeric_only)
reduced_shape = (df.shape[0],) if axis == 1 else reduced_df.shape
return self.new_series([df], shape=reduced_shape, dtype=reduced_df.dtype,
index_value=parse_index(reduced_df.index))
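def _tree_reduce_sketch(partials, combine_size=4):
    # Illustrative sketch only: this helper is not part of the Mars operand above,
    # and the names ``_tree_reduce_sketch`` and ``partials`` are made up for the
    # example.  It shows the same combine pattern ``tree_reduction`` uses for
    # axis=0, assuming ``partials`` is a list of single-row DataFrames holding
    # per-chunk partial sums: at most ``combine_size`` of them are concatenated
    # and re-reduced per round until a single result remains.
    while len(partials) > combine_size:
        partials = [
            pd.concat(partials[i:i + combine_size], ignore_index=True).sum(axis=0).to_frame().T
            for i in range(0, len(partials), combine_size)
        ]
    return pd.concat(partials, ignore_index=True).sum(axis=0)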
class SeriesReductionMixin(DataFrameOperandMixin):
@classmethod
def _tile_one_chunk(cls, op):
df = op.outputs[0]
chk = op.inputs[0].chunks[0]
new_chunk_op = op.copy().reset_key()
chunk = new_chunk_op.new_chunk(op.inputs[0].chunks, shape=df.shape, index=chk.index,
index_value=df.index_value, dtype=df.dtype)
new_op = op.copy()
nsplits = tuple((s,) for s in chunk.shape)
return new_op.new_seriess(op.inputs, df.shape, nsplits=nsplits, chunks=[chunk],
index_value=df.index_value, dtype=df.dtype)
@classmethod
def tile(cls, op):
df = op.outputs[0]
in_chunks = op.inputs[0].chunks
combine_size = op.combine_size or options.combine_size
if len(in_chunks) == 1:
return cls._tile_one_chunk(op)
chunks = np.empty(op.inputs[0].chunk_shape, dtype=np.object)
for c in op.inputs[0].chunks:
new_chunk_op = op.copy().reset_key()
if op.min_count > 0:
new_chunk_op._need_count = True
chunks[c.index] = new_chunk_op.new_chunk([c], shape=(), dtype=df.dtype, index_value=df.index_value)
while len(chunks) > 1:
new_chunks = []
for i in range(0, len(chunks), combine_size):
chks = chunks[i: i + combine_size]
concat_op = DataFrameConcat(object_type=ObjectType.series)
length = sum([c.shape[0] for c in chks if len(c.shape) > 0])
chk = concat_op.new_chunk(chks, shape=(length,), index=(i,), dtype=chks[0].dtype,
index_value=parse_index(pd.RangeIndex(length)))
new_op = op.copy().reset_key()
new_chunks.append(new_op.new_chunk([chk], shape=(), index=(i,), dtype=chk.dtype,
index_value=parse_index( | pd.RangeIndex(0) | pandas.RangeIndex |
import typing
from typing import List
import numpy as np
import pandas as pd
from numpy import ndarray
from models.analysis import Analysis
import logging
from utils.a_weighting import A_weighting
from utils.audio_calcs import calc_db_from_frequency_dbs, magnitude_to_db
logger = logging.getLogger(__name__)
class Device:
def __init__(self, title):
self.title = title
self.analysis_list: List[Analysis] = []
def add_analysis(self, analysis: Analysis):
self.analysis_list.append(analysis)
def get_multiindex_data(self, analysis: Analysis) -> (typing.Tuple, typing.List):
test_id = analysis.config_test.test_id
device = analysis.config_device.device_name
buffer_size = analysis.analysis_data.buffer_size
sample_rate = analysis.analysis_data.sample_rate
windowing_function = analysis.analysis_data.windowing_function
local_index = (test_id, device, sample_rate, buffer_size, windowing_function)
local_index_names = ['TestID', 'Device', 'SampleRate', 'BufferSize', 'WindowingFunction']
# noise preset
if analysis.config_noise_preset:
local_index = local_index + (
analysis.config_noise_preset.noise_type, analysis.config_noise_preset.noise_preset,)
local_index_names.extend(['NoiseType', 'ConfigNoisePreset'])
# distance info
if analysis.config_distance:
local_index = local_index + (analysis.config_distance.distance_key,)
local_index_names.extend(['DistanceKey'])
# config_frog
if analysis.config_frog:
local_index = local_index + (
analysis.config_frog.frog_size, analysis.config_frog.frog_id, analysis.config_frog.frog_position,)
local_index_names.extend(['FrogSize', 'FrogId', 'FrogPosition'])
# config_pen
if analysis.config_pen:
local_index = local_index + (analysis.config_pen.pen_brand, analysis.config_pen.pen_id,)
local_index_names.extend(['PenBrand', 'PenId'])
# config_click
if analysis.config_click:
local_index = local_index + (analysis.config_click.click_count,)
local_index_names.extend(['Clicks'])
test_iteration = analysis.config_test.test_iteration
local_index_names.extend(['TestIteration'])
local_index = local_index + (test_iteration,)
return local_index, local_index_names
def get_dba_data_frame(self) -> pd.DataFrame:
dataframe = pd.DataFrame()
index = []
index_names = []
for analysis in self.analysis_list:
local_index, local_index_names = self.get_multiindex_data(analysis)
bins = range(0, len(analysis.analysis_data.dbas))
a_series = pd.Series(analysis.analysis_data.dbas, index=bins)
index.append(local_index)
index_names.append(local_index_names)
dataframe = dataframe.append(a_series, ignore_index=True)
multi_index = pd.MultiIndex.from_tuples(index)
multi_index.names = index_names[0]
dataframe.set_index(multi_index, inplace=True)
dataframe = dataframe.sort_index()
logger.debug('get_dba_data_frame')
logger.debug(dataframe)
return dataframe
def _get_rms(self, numbers: ndarray) -> ndarray:
return np.sqrt(np.mean(numbers ** 2))
def get_dba_data_frame_by_raw_data(self) -> pd.DataFrame:
dataframe = | pd.DataFrame() | pandas.DataFrame |
import requests
from bs4 import BeautifulSoup
import json
import pandas as pd
from selenium import webdriver
import time
import re
all_games_list = | pd.read_fwf('all_games_nbasite.txt', header=None) | pandas.read_fwf |
"""
Python source code to extract listing from mudah.my
"""
from functools import total_ordering
from mudah.config import General, Region, PropertyCategory, SupportedPropertyRegionArea, PropertyArea
import pandas as pd
import requests
import webbrowser as web
import urllib.parse as urlparse
from urllib.parse import urlencode
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
import dateutil.relativedelta as rd
import math
import logging as logger
import mechanicalsoup
# For logging purpose
logger.basicConfig(level=logger.DEBUG,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
)
urllib3_logger = logger.getLogger('urllib3')
urllib3_logger.setLevel(logger.CRITICAL)
class PropertyExtractor:
"""
Extractor for getting property dataset from mudah.my
"""
__base_url__ = General.MUDAH_URL.value
__chrome_path__ = General.CHROME_PATH.value
def __authenticate__(self):
browser = mechanicalsoup.StatefulBrowser()
return browser
# scraping from mudah.my. Will collect every properties
def __scraping__(self, region, property_category, search_area, wanted_region=[]):
"""
Class method to scrap data from mudah.my
:param region:
:type Region
:param property_category:
:type PropertyCategory
:param wanted_region:
:type List of String
:return:
:type pandas.core.frame.DataFrame
"""
browser = self.__authenticate__()
# Add search criteria
logger.info(region.value)
logger.info(property_category.value)
search_criteria = [region.value, property_category.value]
# Add advance criteria, dependent on property_category
filter_criteria = {}
hide_images = {'th': '1'}
max_monthly_rent = {'mre': '2'}
max_sqft = {'se': '1'}
if search_area is not None:
pass
filter_criteria.update(hide_images)
filter_criteria.update(max_monthly_rent)
filter_criteria.update(max_sqft)
# Combine URL, search and filter criteria here.
page_url = self.__base_url__ + ''.join(search_criteria)
url_parts = list(urlparse.urlparse(page_url))
# Init request to get total list
url_parts[4] = urlencode(filter_criteria)
page_url = urlparse.urlunparse(url_parts)
response = browser.get(page_url)
if response.status_code != 200:
raise ConnectionError("Cannot connect to " + page_url)
# get total lists
total_list = BeautifulSoup(response.content, "html.parser").find("div", class_="list-total").string
logger.info("Attempt to parse " + total_list + " properties at most")
pages = math.ceil(int(total_list) / 40) # 40 is item per page
# only choose up to last 1 months
minimum_date_posted = datetime.now() + rd.relativedelta(months=-1)
exceed_minimum = False
titles = []
prices = []
links = []
bedrooms = []
bathrooms = []
sizes = []
areas = []
dates_posted = []
final_df = pd.DataFrame()
# To prevent over-scraping
if General.PAGE_THRESHOLD.value != -1 and General.PAGE_THRESHOLD.value < pages :
pages = General.PAGE_THRESHOLD.value
for page in range(1, pages + 1):
if exceed_minimum:
break
# request page
page_criteria = {'o': str(page)}
filter_criteria.update(page_criteria)
url_parts[4] = urlencode(filter_criteria)
page_url = urlparse.urlunparse(url_parts)
logger.info("Parsing page " + str(page) + " ... " + page_url)
response = browser.get(page_url)
if response.status_code != 200:
raise ConnectionError("Cannot connect to " + page_url)
raw_listing = BeautifulSoup(response.content, "html.parser").find_all("div", {'class': 'list_ads'})
for element in raw_listing:
temp = element.find("h2", class_="list_title")
if temp is None:
continue
if not temp.contents[0] is None:
temp = temp.contents[0]
title = temp.string
if title is None:
continue
else:
title = title.strip()
link = temp.get("href")
price = self.__get_el_string__(element.find("div", class_="ads_price"))
if not price == "":
price = int(price.replace("RM", "").replace("per month", "").strip().replace(" ", ""))
else:
price = 0
bedroom = self.__get_el_string__(element.find("div", class_="bedroom"))
bathroom = self.__get_el_string__(element.find("div", class_="bathroom"))
# House square feet
size = self.__get_el_string__(element.find("div", class_="size"))
if not size == "":
size = float(size.replace(" sq.ft", ""))
else:
size = 0
temp = element.find("div", class_="location")
if temp is None:
area = ""
date_posted = ""
else:
temp = temp.contents
area = temp[3].string.strip()
date_posted = temp[1].string.strip()
if date_posted.split(",")[0] == 'Today':
date_posted = datetime.now().strftime("%d %b %Y") + ", " + date_posted.split(",")[1]
elif date_posted.split(",")[0] == 'Yesterday':
date_posted = (datetime.now() - timedelta(days=1)).strftime("%d %b %Y") + ", " + \
date_posted.split(",")[1]
else:
# sometimes their system is weird
temp = datetime.strptime(date_posted.split(",")[0], "%d %b").replace(year=datetime.now().year)
if temp > datetime.now():
temp = temp.replace(year=datetime.now().year - 1)
if temp < minimum_date_posted:
exceed_minimum = True
break
date_posted = temp.strftime("%d %b %Y") + ", " + date_posted.split(",")[1]
titles.append(title)
links.append(link)
prices.append(price)
bedrooms.append(bedroom)
bathrooms.append(bathroom)
sizes.append(size)
areas.append(area)
dates_posted.append(date_posted)
df = pd.concat([pd.Series(titles),
pd.Series(areas),
pd.Series(sizes),
pd.Series(dates_posted),
pd.Series(bedrooms),
pd.Series(bathrooms),
pd.Series(prices),
| pd.Series(links) | pandas.Series |
"""
Purpose: Data type transforms
Contributors:
<Include Your Name/Names>
Sponsor: DataDisca Pty Ltd. Australia
https://github.com/DataDisca
"""
import pandas as pd
import numpy as np
from abc import ABC, abstractmethod
from meta_data import DataTypes, DateTimeTransforms
from fractions import Fraction
import datetime
from dateutil.parser import parse
class TypeTransformedData(ABC):
def __init__(self, srs: pd.Series, run: bool = True, **kwargs):
self.data_type: int = None
self.srs_out: pd.Series = None
self.success_count: float = None
self.percentage: float = None
self.threshold: float = 80
self.sample_size: float = 5
self.iterations: int = 3
self.srs: pd.Series = srs
self.run: bool = True if run is None else run
self._import_kwargs(**kwargs)
if self.run:
self.is_my_type()
def _import_kwargs(self, **kwargs):
accepted_keys: set = {'threshold', 'sample_size', 'iterations'}
self.__dict__.update((key, value) for key, value in kwargs.items() if key in accepted_keys)
@abstractmethod
def is_my_type(self) -> bool:
"""
:return: True if self.percentage >= self.threshold else False
TODO:
Identify if the series is of my type
Set the following parameters of the object
srs_out : transformed data to my type
success_count: number of successfully transformed values to my type
percentage: percentage of successfully transformed values to my type
set type as given in the constants in the DataTypes class if self.percentage >= self.threshold
"""
class BooleanTransformedData(TypeTransformedData):
null_count = 0
def is_my_type(self) -> bool:
"""
:return: True if self.percentage >= self.threshold else False
TODO:
Check if the given column is boolean.
The series can be of any type originally.
You can identify boolean fields by looking at data, when the pandas data type is misleading.
Use the following criteria for the first version:
string or boolean data {true, false}
string or any numeric data {0,1}
string {'yes','no'}
In string types the values are not case sensitive
Read the abstract method notes
"""
self.success_count = 0
self.null_count = 0
def identify_type(value):
    if pd.isnull(value):
        self.null_count += 1
        return value
    if type(value) == str:
        value = str.lower(value)
    if value in ['yes', '1', 'true', 1, True]:
        value = True
        self.success_count += 1
    elif value in ['no', '0', 'false', 0, False]:
        value = False
        self.success_count += 1
    return value
self.srs_out = self.srs.apply(identify_type)
self.percentage = (self.success_count/(self.srs_out.size - self.null_count))*100
if self.percentage >= self.threshold:
self.data_type = 1
return True
else:
return False
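def _boolean_example():
    # Hypothetical usage sketch.  The function name and sample values are
    # illustrative only, and it assumes the TODOs in
    # BooleanTransformedData.is_my_type above are fully implemented:
    # mixed-case yes/no/true/false strings with a null should be recognised
    # as boolean, with the null excluded from the success percentage.
    srs = pd.Series(['Yes', 'no', 'TRUE', None, 'false'])
    checker = BooleanTransformedData(srs, threshold=80)
    return checker.data_type, checker.percentage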
class NumericTransformedData(TypeTransformedData):
null_count = 0
def is_my_type(self) -> bool:
"""
:return: True if self.percentage >= self.threshold else False
TODO:
Check if the given column is numeric.
The column may already be numeric or the numeric values may be:
1. Included in quotations eg: '534', "534"
2. There may be leading or trailing spaces eg: " 534"
3. Original data may have meaningful decorators eg: $, %, l, kg, 3/5.
identify the best type: integer or float
set the corresponding type {'INTEGER','FLOAT'} as in DataTypes
set the number of decimal points you observe as self.precision
Read the abstract method notes
"""
self.success_count = 0
float_count = 0
convert_to_type = int
self.null_count = 0
float_count = pd.Series(filter(lambda x:type(x) == float, self.srs)).size
if (float_count/self.srs.size) >= 0.10:
convert_to_type = float
def identify_type(value):
    if pd.isnull(value):
        self.null_count += 1
        return value
    if type(value) == int:
        if convert_to_type == float:
            value = float(value)
        self.success_count += 1
        return value
    if type(value) == float:
        if convert_to_type == int:
            value = int(value)
        self.success_count += 1
        return value
    if type(value) == str:
        value = value.strip()
        if value and not value.isnumeric():
            # strip a single leading/trailing decorator such as $ or %
            if value[0] == '.':
                value = '0' + value
            elif not value[0].isnumeric():
                value = value[1:]
            elif not value[-1].isnumeric():
                value = value[:-1]
            else:
                # fraction-like values such as 3/5
                try:
                    value = float(Fraction(value))
                    if convert_to_type == int:
                        value = int(value)
                    self.success_count += 1
                    return value
                except (ValueError, ZeroDivisionError):
                    pass
        try:
            value = float(value)
        except ValueError:
            return value
        self.success_count += 1
        return value
    return value
self.srs_out = self.srs.apply(identify_type)
self.percentage = (self.success_count/(self.srs_out.size - self.null_count))*100
if self.percentage >= self.threshold:
if convert_to_type == float:
self.data_type = 3
else:
self.data_type = 2
return True
else:
return False
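def _numeric_example():
    # Hypothetical usage sketch with illustrative values only: quoted numbers,
    # padded strings and fraction-like values are the kinds of inputs the
    # numeric transform above is meant to coerce, choosing int or float as the
    # target type based on the observed data.
    srs = pd.Series([' 534', '1.5', 2, '3/4', None])
    checker = NumericTransformedData(srs, threshold=80)
    return checker.data_type, checker.percentage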
class CategoryTransformedData(TypeTransformedData):
def __init__(self, srs: pd.Series, run: bool = True, **kwargs):
self.category_threshold: float = kwargs['category_threshold'] if 'category_threshold' in kwargs.keys() else 10
super(CategoryTransformedData, self).__init__(srs=srs, run=run, **kwargs)
def is_my_type(self) -> bool:
"""
:return: True if self.percentage >= self.threshold else False
TODO:
Check if the given column is categorical.
The values may:
1. Be a small number of numeric or string options relative to the length of the series,
i.e. number of unique values <= self.category_threshold, which is a parameter you can pass
2. Have leading or trailing spaces eg: " 534"
Read the abstract method notes
"""
self.success_count = 0
self.srs_out = self.srs
null_count = 0
for index, value in self.srs.items():
if pd.isnull(value):
null_count +=1
continue
if type(value) == int:
self.srs_out.loc[index] = str(float(value))
self.success_count +=1
elif type(value) == float:
self.srs_out.loc[index] = str(value)
self.success_count +=1
else:
try:
self.srs_out.loc[index] = str(float(value))
self.success_count +=1
except:
self.srs_out.loc[index] = value
self.success_count +=1
if self.srs_out.unique().size <= self.category_threshold:
self.percentage = (self.success_count/(self.srs_out.size - null_count))*100
if self.percentage >= self.threshold:
self.data_type = 5
return True
else:
return False
else:
self.percentage = 0
self.success_count = 0
return False
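def _category_example():
    # Hypothetical usage sketch with illustrative values only: a long column
    # with just a handful of distinct values, well under category_threshold,
    # is the case the categorical check above is designed to flag.
    srs = pd.Series(['red', 'green', 'blue'] * 50)
    checker = CategoryTransformedData(srs, category_threshold=10)
    return checker.data_type, checker.percentage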
class DateTimeTransformedData(TypeTransformedData):
null_count = 0
time_format_count = 0
datetime_format_count = 0
date_format_count = 0
short_time_format_count = 0
short_date_time_format_count = 0
def __init__(self, srs: pd.Series, run: bool = True, **kwargs):
self.original_format: float = None
super(DateTimeTransformedData, self).__init__(srs=srs, run=run, **kwargs)
def is_my_type(self) -> bool:
"""
:return: True if self.percentage >= self.threshold else False
TODO:
Check if the given column is date time. The original series data may be datetime, string or any other type
eg: "2020-05-17 15:12:23", "2020-May-17 05:12:23PM", "17/05/2020" , "13:12", datetime(2016, 3
, 13, 5, tzinfo=timezone.utc)
Refer to the following links for non-exhaustive lists of different formats
https://www.ibm.com/support/knowledgecenter/bg/SSLVMB_23.0.0/spss/base/syn_date_and_time_date_time_formats.html
There can be any combination of the date time formats:
https://docs.python.org/3/library/datetime.html
Programmatically identify the best format for the series you have, then transform
set the identified original format in self.original_format
# reading https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
convert data to date time format
Read the abstract method notes
"""
self.success_count = 0
self.srs_out = self.srs
self.time_format_count = 0
self.datetime_format_count = 0
self.date_format_count = 0
self.short_time_format_count = 0
self.short_date_time_format_count = 0
self.null_count = 0
def identify_type(value):
    if pd.isnull(value):
        self.null_count += 1
        return value
    if type(value) == datetime.datetime:
        self.datetime_format_count += 1
        self.success_count += 1
        return value
    if type(value) == datetime.time:
        self.time_format_count += 1
    elif type(value) == datetime.date:
        self.date_format_count += 1
    value = str(value)
    value = value.strip()
    try:
        value = parse(value)
        value = value.strftime('%d/%m/%Y %H:%M:%S')
        value = datetime.datetime.strptime(value, '%d/%m/%Y %H:%M:%S')
        self.success_count += 1
    except (ValueError, OverflowError):
        pass
    return value
self.srs_out = self.srs.apply(identify_type)
self.percentage = (self.success_count/(self.srs_out.size - self.null_count))*100
if self.percentage >= self.threshold:
self.data_type = 8
return True
else:
return False
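def _datetime_example():
    # Hypothetical usage sketch with illustrative values only: mixed date and
    # time string formats like those listed in the docstring above are what
    # the datetime transform is expected to normalise into datetime objects.
    srs = pd.Series(['2020-05-17 15:12:23', '17/05/2020', '13:12', None])
    checker = DateTimeTransformedData(srs, threshold=80)
    return checker.data_type, checker.percentage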
class StringTransformedData(TypeTransformedData):
null_count = 0
def is_my_type(self) -> bool:
"""
:return: True if self.percentage >= self.threshold else False
TODO:
Check if the given column is string or string convertible.
You will see almost everything is string convertible.
We use this method as the last resort.
Read the abstract method notes
"""
self.success_count = 0
self.null_count = 0
def identify_type(value):
if | pd.isnull(value) | pandas.isnull |
import vectorbt as vbt
import numpy as np
import pandas as pd
from numba import njit
from datetime import datetime
import pytest
from vectorbt.generic import nb as generic_nb
from vectorbt.generic.enums import range_dt
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
mask = pd.DataFrame([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]), columns=['a', 'b', 'c'])
ts = pd.Series([1., 2., 3., 2., 1.], index=mask.index)
price = pd.DataFrame({
'open': [10, 11, 12, 11, 10],
'high': [11, 12, 13, 12, 11],
'low': [9, 10, 11, 10, 9],
'close': [11, 12, 11, 10, 9]
})
group_by = pd.Index(['g1', 'g1', 'g2'])
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# accessors.py ############# #
class TestAccessors:
def test_indexing(self):
assert mask.vbt.signals['a'].total() == mask['a'].vbt.signals.total()
def test_freq(self):
assert mask.vbt.signals.wrapper.freq == day_dt
assert mask['a'].vbt.signals.wrapper.freq == day_dt
assert mask.vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert mask['a'].vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert pd.Series([False, True]).vbt.signals.wrapper.freq is None
assert pd.Series([False, True]).vbt.signals(freq='3D').wrapper.freq == day_dt * 3
assert pd.Series([False, True]).vbt.signals(freq=np.timedelta64(4, 'D')).wrapper.freq == day_dt * 4
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_fshift(self, test_n):
pd.testing.assert_series_equal(mask['a'].vbt.signals.fshift(test_n), mask['a'].shift(test_n, fill_value=False))
np.testing.assert_array_equal(
mask['a'].vbt.signals.fshift(test_n).values,
generic_nb.fshift_1d_nb(mask['a'].values, test_n, fill_value=False)
)
pd.testing.assert_frame_equal(mask.vbt.signals.fshift(test_n), mask.shift(test_n, fill_value=False))
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_bshift(self, test_n):
pd.testing.assert_series_equal(
mask['a'].vbt.signals.bshift(test_n),
mask['a'].shift(-test_n, fill_value=False))
np.testing.assert_array_equal(
mask['a'].vbt.signals.bshift(test_n).values,
generic_nb.bshift_1d_nb(mask['a'].values, test_n, fill_value=False)
)
pd.testing.assert_frame_equal(mask.vbt.signals.bshift(test_n), mask.shift(-test_n, fill_value=False))
def test_empty(self):
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty(5, index=np.arange(10, 15), name='a'),
pd.Series(np.full(5, False), index=np.arange(10, 15), name='a')
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty((5, 3), index=np.arange(10, 15), columns=['a', 'b', 'c']),
pd.DataFrame(np.full((5, 3), False), index=np.arange(10, 15), columns=['a', 'b', 'c'])
)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty_like(mask['a']),
pd.Series(np.full(mask['a'].shape, False), index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty_like(mask),
pd.DataFrame(np.full(mask.shape, False), index=mask.index, columns=mask.columns)
)
def test_generate(self):
@njit
def choice_func_nb(from_i, to_i, col, n):
if col == 0:
return np.arange(from_i, to_i)
elif col == 1:
return np.full(1, from_i)
else:
return np.full(1, to_i - n)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate(5, choice_func_nb, 1, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate((5, 2), choice_func_nb, 1)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate(
(5, 3), choice_func_nb, 1, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, False],
[True, False, False],
[True, False, False],
[True, False, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate(
(5, 3), choice_func_nb, 1, pick_first=True, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_both(self):
@njit
def entry_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
@njit
def exit_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
temp_int = np.empty((mask.shape[0],), dtype=np.int_)
en, ex = pd.Series.vbt.signals.generate_both(
5, entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, True, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_both(
(5, 3), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[True, True, True],
[False, False, False],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[True, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.Series.vbt.signals.generate_both(
(5,), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name, entry_wait=1, exit_wait=0)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.Series.vbt.signals.generate_both(
(5,), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name, entry_wait=0, exit_wait=1)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
@njit
def entry_func2_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
if from_i + 1 < to_i:
temp_int[1] = from_i + 1
return temp_int[:2]
return temp_int[:1]
@njit
def exit_func2_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
if from_i + 1 < to_i:
temp_int[1] = from_i + 1
return temp_int[:2]
return temp_int[:1]
en, ex = pd.DataFrame.vbt.signals.generate_both(
(5, 3), entry_func2_nb, (temp_int,), exit_func2_nb, (temp_int,),
entry_pick_first=False, exit_pick_first=False,
index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[False, False, False],
[False, False, False],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, True],
[True, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_exits(self):
@njit
def choice_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
temp_int = np.empty((mask.shape[0],), dtype=np.int_)
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_exits(choice_func_nb, temp_int),
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func_nb, temp_int),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func_nb, temp_int, wait=0),
pd.DataFrame(
np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
@njit
def choice_func2_nb(from_i, to_i, col, temp_int):
for i in range(from_i, to_i):
temp_int[i - from_i] = i
return temp_int[:to_i - from_i]
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func2_nb, temp_int, until_next=False, pick_first=False),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[True, True, False],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
mask2 = pd.Series([True, True, True, True, True], index=mask.index)
pd.testing.assert_series_equal(
mask2.vbt.signals.generate_exits(choice_func_nb, temp_int, until_next=False, skip_until_exit=True),
pd.Series(
np.array([False, True, False, True, False]),
index=mask.index
)
)
def test_clean(self):
entries = pd.DataFrame([
[True, False, True],
[True, False, False],
[True, True, True],
[False, True, False],
[False, True, True]
], index=mask.index, columns=mask.columns)
exits = pd.Series([True, False, True, False, True], index=mask.index)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(),
pd.DataFrame(
np.array([
[True, False, True],
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries),
pd.DataFrame(
np.array([
[True, False, True],
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits)[1],
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits, entry_first=False)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits, entry_first=False)[1],
pd.DataFrame(
np.array([
[False, True, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries, exits)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries, exits)[1],
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.clean(entries, entries, entries)
def test_generate_random(self):
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate_random(
5, n=3, seed=seed, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([False, True, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate_random((5, 2), n=3)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), n=3, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, False, True],
[True, True, True],
[True, True, False],
[False, True, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), n=[0, 1, 2], seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, False, True],
[False, False, True],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate_random(
5, prob=0.5, seed=seed, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([True, False, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate_random((5, 2), prob=3)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=0.5, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, True],
[False, True, False],
[False, False, False],
[False, False, True],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=[0., 0.5, 1.], seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, True, True],
[False, True, True],
[False, False, True],
[False, False, True],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
with pytest.raises(Exception):
pd.DataFrame.vbt.signals.generate_random((5, 3))
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=[0., 0.5, 1.], pick_first=True, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_random_both(self):
# n
en, ex = pd.Series.vbt.signals.generate_random_both(
5, n=2, seed=seed, index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, True, False, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), n=2, seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[True, True, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[False, True, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), n=[0, 1, 2], seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[False, False, True],
[False, True, False],
[False, False, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, True],
[False, False, False],
[False, True, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((2, 3), n=2, seed=seed, entry_wait=1, exit_wait=0)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True]
])
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((3, 3), n=2, seed=seed, entry_wait=0, exit_wait=1)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[False, False, False]
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[True, True, True],
])
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((7, 3), n=2, seed=seed, entry_wait=2, exit_wait=2)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False]
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[True, True, True]
])
)
)
n = 10
a = np.full(n * 2, 0.)
for i in range(10000):
en, ex = pd.Series.vbt.signals.generate_random_both(1000, n, entry_wait=2, exit_wait=2)
_a = np.empty((n * 2,), dtype=np.int_)
_a[0::2] = np.flatnonzero(en)
_a[1::2] = np.flatnonzero(ex)
a += _a
greater = a > 10000000 / (2 * n + 1) * np.arange(0, 2 * n)
less = a < 10000000 / (2 * n + 1) * np.arange(2, 2 * n + 2)
assert np.all(greater & less)
# probs
en, ex = pd.Series.vbt.signals.generate_random_both(
5, entry_prob=0.5, exit_prob=1., seed=seed, index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, False, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=0.5, exit_prob=1., seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=[0., 0.5, 1.], exit_prob=[0., 0.5, 1.],
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[False, True, True],
[False, False, False],
[False, False, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., exit_wait=0,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., entry_pick_first=False, exit_pick_first=True,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., entry_pick_first=True, exit_pick_first=False,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
# none
with pytest.raises(Exception):
pd.DataFrame.vbt.signals.generate_random((5, 3))
def test_generate_random_exits(self):
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_random_exits(seed=seed),
pd.Series(
np.array([False, False, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, False],
[False, False, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(seed=seed, wait=0),
pd.DataFrame(
np.array([
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, True],
[True, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_random_exits(prob=1., seed=seed),
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=[0., 0.5, 1.], seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., wait=0, seed=seed),
pd.DataFrame(
np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., until_next=False, seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_stop_exits(self):
e = pd.Series([True, False, False, False, False, False])
t = pd.Series([2, 3, 4, 3, 2, 1]).astype(np.float64)
# stop loss
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1),
pd.Series(np.array([False, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True),
pd.Series(np.array([False, False, False, True, False, False]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, pick_first=False),
pd.Series(np.array([False, False, False, True, True, True]))
)
pd.testing.assert_frame_equal(
e.vbt.signals.generate_stop_exits(t.vbt.tile(3), [np.nan, -0.5, -1.], trailing=True, pick_first=False),
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, False],
[False, True, False]
]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, exit_wait=3),
pd.Series(np.array([False, False, False, False, True, False]))
)
# take profit
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1),
pd.Series(np.array([False, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True),
pd.Series(np.array([False, False, False, True, False, False]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True, pick_first=False),
pd.Series(np.array([False, False, False, True, True, True]))
)
pd.testing.assert_frame_equal(
e.vbt.signals.generate_stop_exits((4 - t).vbt.tile(3), [np.nan, 0.5, 1.], trailing=True, pick_first=False),
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[False, True, True],
[False, True, True]
]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True, exit_wait=3),
pd.Series(np.array([False, False, False, False, True, False]))
)
# chain
e = pd.Series([True, True, True, True, True, True])
en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, chain=True)
pd.testing.assert_series_equal(
en,
pd.Series(np.array([True, False, False, False, True, False]))
)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, False, False, True, False, True]))
)
en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, entry_wait=2, chain=True)
pd.testing.assert_series_equal(
en,
pd.Series(np.array([True, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, False, False, True, False, False]))
)
en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, exit_wait=2, chain=True)
pd.testing.assert_series_equal(
en,
pd.Series(np.array([True, False, False, False, True, False]))
)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, False, False, True, False, False]))
)
# until_next and pick_first
e2 = pd.Series([True, True, True, True, True, True])
t2 = pd.Series([6, 5, 4, 3, 2, 1]).astype(np.float64)
ex = e2.vbt.signals.generate_stop_exits(t2, -0.1, until_next=False, pick_first=False)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, True, True, True, True, True]))
)
def test_generate_ohlc_stop_exits(self):
with pytest.raises(Exception):
_ = mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=-0.1)
with pytest.raises(Exception):
_ = mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=-0.1)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, -0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, -0.1, trailing=True),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, sl_trail=True)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, 0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=0.1)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, 0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, reverse=True)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, 0.1, trailing=True),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, sl_trail=True, reverse=True)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, -0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=0.1, reverse=True)
)
def _test_ohlc_stop_exits(**kwargs):
out_dict = {'stop_price': np.nan, 'stop_type': -1}
result = mask.vbt.signals.generate_ohlc_stop_exits(
price['open'], price['high'], price['low'], price['close'],
out_dict=out_dict, **kwargs
)
if isinstance(result, tuple):
_, ex = result
else:
ex = result
return result, out_dict['stop_price'], out_dict['stop_type']
ex, stop_price, stop_type = _test_ohlc_stop_exits()
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, 0],
[0, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 11.7, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, 1, 1],
[1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(tp_stop=0.1)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, False],
[False, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[11.0, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[2, -1, -1],
[-1, 2, -1],
[-1, -1, -1],
[-1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True, tp_stop=0.1)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[11.0, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[2, -1, -1],
[-1, 2, -1],
[-1, -1, 1],
[1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(
sl_stop=[np.nan, 0.1, 0.2], sl_trail=True, tp_stop=[np.nan, 0.1, 0.2])
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, True, False],
[False, False, False],
[False, False, True]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 9.6]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, 2, -1],
[-1, -1, -1],
[-1, -1, 1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True, tp_stop=0.1, exit_wait=0)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, True],
[True, True, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[9.0, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, 11.7],
[10.8, 9.0, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[1, -1, -1],
[-1, -1, -1],
[-1, 2, -1],
[-1, -1, 1],
[1, 1, -1]
]), index=mask.index, columns=mask.columns)
)
(en, ex), stop_price, stop_type = _test_ohlc_stop_exits(
sl_stop=0.1, sl_trail=True, tp_stop=0.1, chain=True)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[11.0, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[2, -1, -1],
[-1, 2, -1],
[-1, -1, 1],
[1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
def test_between_ranges(self):
ranges = mask.vbt.signals.between_ranges()
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 3, 1), (1, 1, 1, 4, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask.vbt.wrapper
mask2 = pd.DataFrame([
[True, True, True],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False]
], index=mask.index, columns=mask.columns)
other_mask = pd.DataFrame([
[False, False, False],
[True, False, False],
[True, True, False],
[False, True, True],
[False, False, True]
], index=mask.index, columns=mask.columns)
ranges = mask2.vbt.signals.between_ranges(other=other_mask)
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 1, 1), (1, 0, 1, 1, 1), (2, 1, 0, 2, 1),
(3, 1, 1, 2, 1), (4, 2, 0, 3, 1), (5, 2, 1, 3, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
ranges = mask2.vbt.signals.between_ranges(other=other_mask, from_other=True)
record_arrays_close(
ranges.values,
np.array([
(0, 0, 1, 1, 1), (1, 0, 1, 2, 1), (2, 1, 1, 2, 1),
(3, 1, 1, 3, 1), (4, 2, 1, 3, 1), (5, 2, 1, 4, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
def test_partition_ranges(self):
mask2 = pd.DataFrame([
[False, False, False],
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True]
], index=mask.index, columns=mask.columns)
ranges = mask2.vbt.signals.partition_ranges()
record_arrays_close(
ranges.values,
np.array([
(0, 0, 1, 3, 1), (1, 0, 4, 4, 0), (2, 1, 2, 4, 1), (3, 2, 3, 4, 0)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
def test_between_partition_ranges(self):
mask2 = pd.DataFrame([
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True],
[False, True, False]
], index=mask.index, columns=mask.columns)
ranges = mask2.vbt.signals.between_partition_ranges()
record_arrays_close(
ranges.values,
np.array([
(0, 0, 1, 3, 1), (1, 1, 2, 4, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
def test_pos_rank(self):
pd.testing.assert_series_equal(
(~mask['a']).vbt.signals.pos_rank(),
pd.Series([-1, 0, 1, -1, 0], index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 0, -1],
[-1, 1, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(after_false=True),
pd.DataFrame(
np.array([
[-1, -1, -1],
[0, -1, -1],
[1, 0, -1],
[-1, 1, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(allow_gaps=True),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 1, -1],
[-1, 2, 2],
[2, -1, 3]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(reset_by=mask['a'], allow_gaps=True),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 1, -1],
[-1, 0, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(reset_by=mask, allow_gaps=True),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 0, -1],
[-1, 1, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
def test_partition_pos_rank(self):
pd.testing.assert_series_equal(
(~mask['a']).vbt.signals.partition_pos_rank(),
pd.Series([-1, 0, 0, -1, 1], index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 0],
[0, 1, -1],
[-1, 1, 1],
[1, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(after_false=True),
pd.DataFrame(
np.array([
[-1, -1, -1],
[0, -1, -1],
[0, 0, -1],
[-1, 0, 0],
[1, -1, 0]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(reset_by=mask['a']),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 0],
[0, 1, -1],
[-1, 0, 0],
[0, -1, 0]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(reset_by=mask),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 0],
[0, 0, -1],
[-1, 0, 0],
[0, -1, 0]
]),
index=mask.index,
columns=mask.columns
)
)
def test_pos_rank_fns(self):
pd.testing.assert_frame_equal(
(~mask).vbt.signals.first(),
pd.DataFrame(
np.array([
[False, True, True],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.nth(1),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, True],
[True, False, False],
[False, True, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.nth(2),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.from_nth(0),
pd.DataFrame(
np.array([
[False, True, True],
[True, False, True],
[True, True, False],
[False, True, True],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
def test_pos_rank_mapped(self):
mask2 = pd.DataFrame([
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True],
[False, True, False]
], index=mask.index, columns=mask.columns)
mapped = mask2.vbt.signals.pos_rank_mapped()
np.testing.assert_array_equal(
mapped.values,
np.array([0, 1, 0, 0, 1, 0, 0, 1])
)
np.testing.assert_array_equal(
mapped.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2])
)
np.testing.assert_array_equal(
mapped.idx_arr,
np.array([0, 1, 3, 1, 2, 4, 2, 3])
)
assert mapped.wrapper == mask2.vbt.wrapper
def test_partition_pos_rank_mapped(self):
mask2 = pd.DataFrame([
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True],
[False, True, False]
], index=mask.index, columns=mask.columns)
mapped = mask2.vbt.signals.partition_pos_rank_mapped()
np.testing.assert_array_equal(
mapped.values,
np.array([0, 0, 1, 0, 0, 1, 0, 0])
)
np.testing.assert_array_equal(
mapped.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2])
)
np.testing.assert_array_equal(
mapped.idx_arr,
np.array([0, 1, 3, 1, 2, 4, 2, 3])
)
assert mapped.wrapper == mask2.vbt.wrapper
def test_nth_index(self):
assert mask['a'].vbt.signals.nth_index(0) == pd.Timestamp('2020-01-01 00:00:00')
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(0),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-02 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=mask.columns, name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(-1),
pd.Series([
pd.Timestamp('2020-01-04 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=mask.columns, name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(-2),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-02 00:00:00'),
np.nan
], index=mask.columns, name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(0, group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=['g1', 'g2'], name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(-1, group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=['g1', 'g2'], name='nth_index', dtype='datetime64[ns]')
)
def test_norm_avg_index(self):
assert mask['a'].vbt.signals.norm_avg_index() == -0.25
pd.testing.assert_series_equal(
mask.vbt.signals.norm_avg_index(),
pd.Series([-0.25, 0.25, 0.0], index=mask.columns, name='norm_avg_index')
)
pd.testing.assert_series_equal(
mask.vbt.signals.norm_avg_index(group_by=group_by),
pd.Series([0.0, 0.0], index=['g1', 'g2'], name='norm_avg_index')
)
def test_index_mapped(self):
mapped = mask.vbt.signals.index_mapped()
np.testing.assert_array_equal(
mapped.values,
np.array([0, 3, 1, 4, 2])
)
np.testing.assert_array_equal(
mapped.col_arr,
np.array([0, 0, 1, 1, 2])
)
np.testing.assert_array_equal(
mapped.idx_arr,
np.array([0, 3, 1, 4, 2])
)
assert mapped.wrapper == mask.vbt.wrapper
def test_total(self):
assert mask['a'].vbt.signals.total() == 2
pd.testing.assert_series_equal(
mask.vbt.signals.total(),
pd.Series([2, 2, 1], index=mask.columns, name='total')
)
pd.testing.assert_series_equal(
mask.vbt.signals.total(group_by=group_by),
pd.Series([4, 1], index=['g1', 'g2'], name='total')
)
def test_rate(self):
assert mask['a'].vbt.signals.rate() == 0.4
pd.testing.assert_series_equal(
mask.vbt.signals.rate(),
pd.Series([0.4, 0.4, 0.2], index=mask.columns, name='rate')
)
pd.testing.assert_series_equal(
mask.vbt.signals.rate(group_by=group_by),
pd.Series([0.4, 0.2], index=['g1', 'g2'], name='rate')
)
#!/usr/bin/env python
"""
Represent connectivity pattern using pandas DataFrame.
"""
from collections import OrderedDict
import itertools
import re
from future.utils import iteritems
from past.builtins import basestring
import networkx as nx
import numpy as np
import pandas as pd
from .plsel import Selector, SelectorMethods
from .pm import BasePortMapper
class Interface(object):
"""
Container for a set of interfaces comprising ports.
This class contains information about a set of interfaces comprising
path-like identifiers and the attributes associated with them.
By default, each port must have at least the following attributes;
other attributes may be added:
- interface - indicates which interface a port is associated with.
- io - indicates whether the port receives input ('in') or
emits output ('out').
- type - indicates whether the port emits/receives spikes or
graded potentials.
All port identifiers in an interface must be unique. For two interfaces
to be deemed compatible, they must contain the same port identifiers and
their identifiers' 'io' attributes must be the inverse of each other
(i.e., every 'in' port in one interface must be mirrored by an 'out' port
in the other interface).
Examples
--------
>>> i = Interface('/foo[0:4],/bar[0:3]')
>>> i['/foo[0:2]', 'interface', 'io', 'type'] = [0, 'in', 'spike']
>>> i['/foo[2:4]', 'interface', 'io', 'type'] = [1, 'out', 'spike']
Attributes
----------
data : pandas.DataFrame
Port attribute data.
index : pandas.MultiIndex
Index of port identifiers.
Parameters
----------
selector : str, unicode, or sequence
Selector string (e.g., 'foo[0:2]') or sequence of token
sequences (e.g., [['foo', (0, 2)]]) describing the port
identifiers comprised by the interface.
columns : list, default = ['interface', 'io', 'type']
Data column names.
See Also
--------
plsel.SelectorMethods
"""
def __init__(self, selector='', columns=['interface', 'io', 'type']):
# All ports in an interface must contain at least the following
# attributes:
assert set(columns).issuperset(['interface', 'io', 'type'])
self.sel = SelectorMethods()
assert not(self.sel.is_ambiguous(selector))
self.num_levels = self.sel.max_levels(selector)
names = [i for i in range(self.num_levels)]
idx = self.sel.make_index(selector, names)
self.__validate_index__(idx)
self.data = pd.DataFrame(index=idx, columns=columns, dtype=object)
# Dictionary containing mappers for different port types:
self.pm = {}
def __validate_index__(self, idx):
"""
Raise an exception if the specified index will result in an invalid interface.
"""
if idx.duplicated().any():
raise ValueError('Duplicate interface index entries detected.')
def __getitem__(self, key):
if type(key) == tuple and len(key) > 1:
return self.sel.select(self.data[list(key[1:])], key[0])
else:
return self.sel.select(self.data, key)
def __setitem__ambiguous__(self, key, value):
if type(key) == tuple:
selector = key[0]
else:
selector = key
# Ensure that the specified selector can actually be used against the
# Interface's internal DataFrame:
try:
idx = self.sel.get_index(self.data, selector,
names=self.data.index.names)
except ValueError:
raise ValueError('cannot create index with '
'selector %s and column names %s' \
% (selector, str(self.data.index.names)))
# If the data specified is not a dict, convert it to a dict:
if type(key) == tuple and len(key) > 1:
if np.isscalar(value):
data = {k:value for k in key[1:]}
elif type(value) == dict:
data = value
elif np.iterable(value) and len(value) <= len(key[1:]):
data={k:v for k, v in zip(key[1:], value)}
else:
raise ValueError('cannot assign specified value')
else:
if np.isscalar(value):
data = {self.data.columns[0]: value}
elif type(value) == dict:
data = value
elif np.iterable(value) and len(value) <= len(self.data.columns):
data={k:v for k, v in zip(self.data.columns, value)}
else:
raise ValueError('cannot assign specified value')
for k, v in iteritems(data):
self.data[k].loc[idx] = v
def __setitem__(self, key, value):
if type(key) == tuple:
selector = key[0]
else:
selector = key
# Fall back to slower method if the selector is ambiguous:
if self.sel.is_ambiguous(selector):
self.__setitem__ambiguous__(key, value)
return
else:
selector = Selector(selector)
# Don't waste time trying to do anything if the selector is empty:
if not selector.nonempty:
return
# If the number of specified identifiers doesn't exceed the size of the
# data array, enlargement by specifying identifiers that are not in
# the index will not occur:
assert len(selector) <= len(self.data)
# If the data specified is not a dict, convert it to a dict:
if type(key) == tuple and len(key) > 1:
if np.isscalar(value):
data = {k:value for k in key[1:]}
elif type(value) == dict:
data = value
elif np.iterable(value) and len(value) <= len(key[1:]):
data={k:v for k, v in zip(key[1:], value)}
else:
raise ValueError('cannot assign specified value')
else:
if np.isscalar(value):
data = {self.data.columns[0]: value}
elif type(value) == dict:
data = value
elif np.iterable(value) and len(value) <= len(self.data.columns):
data={k:v for k, v in zip(self.data.columns, value)}
else:
raise ValueError('cannot assign specified value')
if selector.max_levels == 1:
s = [i for i in itertools.chain(*selector.expanded)]
else:
s = self.sel.pad_selector(selector.expanded,
len(self.index.levshape))
for k, v in iteritems(data):
self.data[k].loc[s] = v
@property
def index(self):
"""
Interface index.
"""
return self.data.index
@index.setter
def index(self, i):
self.data.index = i
@property
def interface_ids(self):
"""
Interface identifiers.
"""
return set(self.data['interface'])
@property
def io_inv(self):
"""
Returns new Interface instance with inverse input-output attributes.
Returns
-------
i : Interface
Interface instance whose 'io' attributes are the inverse of those of
the current instance.
"""
data_inv = self.data.copy()
f = lambda x: 'out' if x == 'in' else \
('in' if x == 'out' else x)
data_inv['io'] = data_inv['io'].apply(f)
return self.from_df(data_inv)
@property
def idx_levels(self):
"""
Number of levels in Interface index.
"""
if isinstance(self.data.index, pd.MultiIndex):
return len(self.index.levels)
else:
return 1
def clear(self):
"""
Clear all ports in class instance.
"""
self.data.drop(self.data.index, inplace=True)
def data_select(self, f, inplace=False):
"""
Restrict Interface data with a selection function.
Returns an Interface instance containing only those rows
whose data is passed by the specified selection function.
Parameters
----------
f : function
Selection function with a single dict argument whose keys
are the Interface's data column names.
inplace : bool, default=False
If True, update and return the given Interface instance.
Otherwise, return a new instance.
Returns
-------
i : Interface
Interface instance containing data selected by `f`.
"""
assert callable(f)
result = self.data[f(self.data)]
if inplace:
self.data = result
return self
else:
return Interface.from_df(result)
@classmethod
def from_df(cls, df):
"""
Create an Interface from a properly formatted DataFrame.
Examples
--------
>>> import plsel, pattern
>>> import pandas
>>> idx = plsel.SelectorMethods.make_index('/foo[0:2]')
>>> data = [[0, 'in', 'spike'], [1, 'out', 'gpot']]
>>> columns = ['interface', 'io', 'type']
>>> df = pandas.DataFrame(data, index=idx, columns=columns)
>>> i = pattern.Interface.from_df(df)
Parameters
----------
df : pandas.DataFrame
DataFrame with a MultiIndex and data columns 'interface',
'io', and 'type' (additional columns may also be present).
Returns
-------
i : Interface
Generated Interface instance.
Notes
-----
The contents of the specified DataFrame instance are copied into the
new Interface instance.
"""
assert set(df.columns).issuperset(['interface', 'io', 'type'])
if isinstance(df.index, pd.MultiIndex):
if len(df.index):
i = cls(df.index.tolist(), df.columns)
else:
i = cls([()], df.columns)
elif isinstance(df.index, pd.Index):
if len(df.index):
i = cls([(s,) for s in df.index.tolist()], df.columns)
else:
i = cls([()], df.columns)
else:
raise ValueError('invalid index type')
i.data = df.copy()
i.__validate_index__(i.index)
return i
@classmethod
def from_csv(cls, file_name, **kwargs):
"""
Create an Interface from a properly formatted CSV file.
Parameters
----------
file_name : str
File name of CSV file containing interface data.
kwargs : dict
Options to pass to `DataFrame.from_csv()`
Returns
-------
i : Interface
Generated Interface instance.
"""
df = pd.DataFrame.from_csv(file_name, **kwargs)
return cls.from_df(df)
@classmethod
def from_dict(cls, d):
"""
Create an Interface from a dictionary of selectors and data values.
Examples
--------
>>> d = {'/foo[0]': [0, 'in', 'gpot'], '/foo[1]': [1, 'in', 'gpot']}
>>> i = Interface.from_dict(d)
Parameters
----------
d : dict
Dictionary that maps selectors to the data that should be associated
with the corresponding ports. If a scalar, the data is assigned to
the first attribute; if an iterable, the data is assigned to the
attributes in order.
Returns
-------
i : Interface
Generated interface instance.
"""
i = cls(','.join(d.keys()))
for k, v in iteritems(d):
i[k] = v
i.data.sort_index(inplace=True)
return i
@classmethod
def from_graph(cls, g):
"""
Create an Interface from a NetworkX graph.
Examples
--------
>>> import networkx as nx
>>> g = nx.Graph()
>>> g.add_node('/foo[0]', interface=0, io='in', type='gpot')
>>> g.add_node('/foo[1]', interface=0, io='in', type='gpot')
>>> i = Interface.from_graph(g)
Parameters
----------
g : networkx.Graph
Graph whose node IDs are path-like port identifiers. The node attributes
are assigned to the ports.
Returns
-------
i : Interface
Generated interface instance.
"""
assert isinstance(g, nx.Graph)
return cls.from_dict(g.node)
@classmethod
def from_selectors(cls, sel, sel_in='', sel_out='',
sel_spike='', sel_gpot='', *sel_int_list):
"""
Create an Interface instance from selectors.
Parameters
----------
sel : str, unicode, or sequence
Selector describing all ports comprised by interface.
sel_in : str, unicode, or sequence
Selector describing the interface's input ports.
sel_out : str, unicode, or sequence
Selector describing the interface's output ports.
sel_spike : str, unicode, or sequence
Selector describing the interface's spiking ports.
sel_gpot : str, unicode, or sequence
Selector describing the interface's graded potential ports.
sel_int_list : list of str, unicode, or sequence
Selectors consecutively describing the ports associated with interface 0,
interface 1, etc.
Returns
-------
i : Interface
Generated interface instance.
"""
i = cls(sel)
i[sel_in, 'io'] = 'in'
i[sel_out, 'io'] = 'out'
i[sel_spike, 'type'] = 'spike'
i[sel_gpot, 'type'] = 'gpot'
for n, sel_int in enumerate(sel_int_list):
i[sel_int, 'interface'] = n
return i
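# Illustrative usage of from_selectors (a sketch, not from the original source;
# the selectors below are made-up examples):
#
#     i = Interface.from_selectors('/foo[0:4]',   # all ports
#                                  '/foo[0:2]',   # input ports
#                                  '/foo[2:4]',   # output ports
#                                  '/foo[0:4]',   # spiking ports
#                                  '',            # no graded potential ports
#                                  '/foo[0:2]',   # ports in interface 0
#                                  '/foo[2:4]')   # ports in interface 1
#
# The positional selectors after sel_gpot are assigned interface ids 0, 1, ...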
def gpot_ports(self, i=None, tuples=False):
"""
Restrict Interface ports to graded potential ports.
Parameters
----------
i : int
Interface identifier. If None, return all graded potential ports.
tuples : bool
If True, return a list of tuples; if False, return an
Interface instance.
Returns
-------
interface : Interface or list of tuples
Either an Interface instance containing all graded potential ports and
their attributes in the specified interface, or a list of tuples
corresponding to the expanded ports.
"""
if i is None:
try:
df = self.data[self.data['type'] == 'gpot']
except:
df = None
else:
try:
df = self.data[(self.data['type'] == 'gpot') & \
(self.data['interface'] == i)]
except:
df = None
if tuples:
if df is None:
return []
else:
return df.index.tolist()
else:
if df is None:
return Interface()
else:
return self.from_df(df)
def in_ports(self, i=None, tuples=False):
"""
Restrict Interface ports to input ports.
Parameters
----------
i : int
Interface identifier. If None, return all input ports.
tuples : bool
If True, return a list of tuples; if False, return an
Interface instance.
Returns
-------
interface : Interface or list of tuples
Either an Interface instance containing all input ports and
their attributes in the specified interface, or a list of tuples
corresponding to the expanded ports.
"""
if i is None:
try:
df = self.data[self.data['io'] == 'in']
except:
df = None
else:
try:
df = self.data[(self.data['io'] == 'in') & \
(self.data['interface'] == i)]
except:
df = None
if tuples:
if df is None:
return []
else:
return df.index.tolist()
else:
if df is None:
return Interface()
else:
return self.from_df(df)
def interface_ports(self, i=None, tuples=False):
"""
Restrict Interface ports to specific interface.
Parameters
----------
i : int
Interface identifier. If None, return all ports.
tuples : bool
If True, return a list of tuples; if False, return an
Interface instance.
Returns
-------
interface : Interface or list of tuples
Either an Interface instance containing all ports and
their attributes in the specified interface, or a list of tuples
corresponding to the expanded ports.
"""
if i is None:
if tuples:
return self.index.tolist()
else:
return self.copy()
else:
try:
df = self.data[self.data['interface'] == i]
except:
df = None
if tuples:
if df is None:
return []
else:
return df.index.tolist()
else:
if df is None:
return Interface()
else:
return self.from_df(df)
def _merge_on_interfaces(self, a, i, b):
"""
Merge contents of this and another Interface instance.
Notes
-----
If the number of levels in one Interface instance's DataFrame index is
greater than that of the other, the number of levels in the index of the
merged DataFrame is set to the former and the index with the smaller
number of levels is padded with blank entries so that pandas' merge
mechanism can function properly.
"""
assert isinstance(i, Interface)
df_left = self.data[self.data['interface'] == a]
df_right = i.data[i.data['interface'] == b]
n_left_names = len(self.data.index.names)
n_right_names = len(i.data.index.names)
# Pandas' merge mechanism fails if the number of levels in each of the
# merged MultiIndex indices differs and there is overlap of more than
# one level; we therefore pad the index with the smaller number of
# levels before attempting the merge:
if n_left_names > n_right_names:
for n in range(i.num_levels, i.num_levels+(n_left_names-n_right_names)):
new_col = str(n)
df_right[new_col] = ''
df_right.set_index(new_col, append=True, inplace=True)
elif n_left_names < n_right_names:
for n in range(self.num_levels, self.num_levels+(n_right_names-n_left_names)):
new_col = str(n)
df_left[new_col] = ''
df_left.set_index(new_col, append=True, inplace=True)
return pd.merge(df_left, df_right,
left_index=True,
right_index=True)
def get_common_ports(self, a, i, b, t=None):
"""
Get port identifiers common to this and another Interface instance.
Parameters
----------
a : int
Identifier of interface in the current instance.
i : Interface
Interface instance containing the other interface.
b : int
Identifier of interface in instance `i`.
t : str or unicode
If not None, restrict output to those identifiers with the specified
port type.
Returns
-------
result : list of tuple
Expanded port identifiers shared by the two specified Interface
instances.
Notes
-----
The number of levels of the returned port identifiers is equal to the
maximum number of levels of this Interface instance.
The order of the returned port identifiers is not guaranteed.
"""
if t is None:
x = self.data[self.data['interface'] == a]
y = i.data[i.data['interface'] == b]
else:
x = self.data[(self.data['interface'] == a) & (self.data['type'] == t)]
y = i.data[(i.data['interface'] == b) & (i.data['type'] == t)]
if isinstance(x.index, pd.MultiIndex):
x_list = [tuple(a for a in b if a != '') \
for b in x.index]
else:
x_list = [(a,) for a in x.index]
if isinstance(y.index, pd.MultiIndex):
y_list = [tuple(a for a in b if a != '') \
for b in y.index]
else:
y_list = [(a,) for a in y.index]
return list(set(x_list).intersection(y_list))
def is_compatible(self, a, i, b, allow_subsets=False):
"""
Check whether two interfaces can be connected.
Compares an interface in the current Interface instance with one in
another instance to determine whether their ports can be connected.
Parameters
----------
a : int
Identifier of interface in the current instance.
i : Interface
Interface instance containing the other interface.
b : int
Identifier of interface in instance `i`.
allow_subsets : bool
If True, interfaces that contain a compatible subset of ports are
deemed to be compatible; otherwise, all ports in the two interfaces
must be compatible.
Returns
-------
result : bool
True if both interfaces comprise the same identifiers, the set 'type'
attributes for each matching pair of identifiers in the two
interfaces match, and each identifier with an 'io' attribute set
to 'out' in one interface has its 'io' attribute set to 'in' in the
other interface.
Notes
-----
Assumes that the port identifiers in both interfaces are sorted in the
same order.
"""
# Merge the interface data on their indices (i.e., their port identifiers):
data_merged = self._merge_on_interfaces(a, i, b)
# Check whether there are compatible subsets, i.e., at least one pair of
# ports from the two interfaces that are compatible with each other:
if allow_subsets:
# If the interfaces share no identical port identifiers, they are
# incompatible:
if not len(data_merged):
return False
# Compatible identifiers must have the same non-null 'type'
# attribute and their non-null 'io' attributes must be the inverse
# of each other:
if not data_merged.apply(lambda row: \
((row['type_x'] == row['type_y']) or \
(pd.isnull(row['type_x']) and pd.isnull(row['type_y']))) and \
((row['io_x'] == 'out' and row['io_y'] == 'in') or \
(row['io_x'] == 'in' and row['io_y'] == 'out') or \
(pd.isnull(row['io_x']) and pd.isnull(row['io_y']))),
axis=1).any():
return False
# Require that all ports in the two interfaces be compatible:
else:
# If one interface contains identifiers not in the other, they are
# incompatible:
if len(data_merged) < max(len(self.data[self.data['interface'] == a]),
len(i.data[i.data['interface'] == b])):
return False
# Compatible identifiers must have the same non-null 'type'
# attribute and their non-null 'io' attributes must be the inverse
# of each other:
if not data_merged.apply(lambda row: \
((row['type_x'] == row['type_y']) or \
(pd.isnull(row['type_x']) and pd.isnull(row['type_y']))) and \
((row['io_x'] == 'out' and row['io_y'] == 'in') or \
(row['io_x'] == 'in' and row['io_y'] == 'out') or \
(pd.isnull(row['io_x']) and pd.isnull(row['io_y']))),
axis=1).all():
return False
# All tests passed:
return True
def is_in_interfaces(self, s):
"""
Check whether ports comprised by a selector are in the stored interfaces.
Parameters
----------
s : str or unicode
Port selector.
Returns
-------
result : bool
True if the comprised ports are in any of the stored interfaces.
"""
try:
# Pad the expanded selector with blanks to prevent pandas from
# spurious matches such as mistakenly validating '/foo' as being in
# an Interface that only contains the ports '/foo[0:2]':
idx = self.sel.expand(s, self.idx_levels)
if not isinstance(self.data.index, pd.MultiIndex):
idx = [x[0] for x in idx]
d = self.data['interface'].loc[idx]
if isinstance(d, int):
return True
if np.any(d.isnull().tolist()):
return False
else:
return True
except:
return self.sel.is_in(s, self.index.tolist())
def out_ports(self, i=None, tuples=False):
"""
Restrict Interface ports to output ports.
Parameters
----------
i : int
Interface identifier. If None, return all output ports.
tuples : bool
If True, return a list of tuples; if False, return an
Interface instance.
Returns
-------
interface : Interface or list of tuples
Either an Interface instance containing all output ports and
their attributes in the specified interface, or a list of tuples
corresponding to the expanded ports.
"""
if i is None:
try:
df = self.data[self.data['io'] == 'out']
except:
df = None
else:
try:
df = self.data[(self.data['io'] == 'out') & \
(self.data['interface'] == i)]
except:
df = None
if tuples:
if df is None:
return []
else:
return df.index.tolist()
else:
if df is None:
return Interface()
else:
return self.from_df(df)
def port_select(self, f, inplace=False):
"""
Restrict Interface ports with a selection function.
Returns an Interface instance containing only those rows
whose ports are passed by the specified selection function.
Parameters
----------
f : function
Selection function with a single tuple argument containing
the various columns of the Interface instance's MultiIndex.
inplace : bool, default=False
If True, update and return the given Interface instance.
Otherwise, return a new instance.
Returns
-------
i : Interface
Interface instance containing ports selected by `f`.
"""
assert callable(f)
if inplace:
self.data = self.data.select(f)
return self
else:
return Interface.from_df(self.data.select(f))
def spike_ports(self, i=None, tuples=False):
"""
Restrict Interface ports to spiking ports.
Parameters
----------
i : int
Interface identifier. If None, return all spiking ports.
tuples : bool
If True, return a list of tuples; if False, return an
Interface instance.
Returns
-------
interface : Interface or list of tuples
Either an Interface instance containing all spiking ports and
their attributes in the specified interface, or a list of tuples
corresponding to the expanded ports.
"""
if i is None:
try:
df = self.data[self.data['type'] == 'spike']
except:
df = None
else:
try:
df = self.data[(self.data['type'] == 'spike') & \
(self.data['interface'] == i)]
except:
df = None
if tuples:
if df is None:
return []
else:
return df.index.tolist()
else:
if df is None:
return Interface()
else:
return self.from_df(df)
def to_selectors(self, i=None):
"""
Retrieve Interface's port identifiers as list of path-like selectors.
Parameters
----------
i : int
Interface identifier. If set to None, return all port identifiers.
Returns
-------
selectors : list of str
List of selector strings corresponding to each port identifier.
"""
ids = self.to_tuples(i)
result = []
for t in ids:
selector = ''
for s in t:
if isinstance(s, basestring):
selector += '/'+s
else:
selector += '[%s]' % s
result.append(selector)
return result
def to_tuples(self, i=None):
"""
Retrieve Interface's port identifiers as list of tuples.
Parameters
----------
i : int
Interface identifier. If set to None, return all port identifiers.
Returns
-------
result : list of tuple
List of token tuples corresponding to each port identifier.
"""
if i is None:
if isinstance(self.index, pd.MultiIndex):
return self.index.tolist()
else:
return [(t,) for t in self.index]
try:
if isinstance(self.index, pd.MultiIndex):
return self.data[self.data['interface'] == i].index.tolist()
else:
return [(t,) for t in self.data[self.data['interface'] == i].index]
except:
return []
def which_int(self, s):
"""
Return the interface containing the identifiers comprised by a selector.
Parameters
----------
s : str or unicode
Port selector.
Returns
-------
i : set
Set of identifiers for interfaces that contain ports comprised by
the selector.
"""
try:
idx = self.sel.expand(s, self.idx_levels)
if not isinstance(self.data.index, pd.MultiIndex):
idx = [x[0] for x in idx]
d = self.data['interface'].loc[idx]
s = set(d)
s.discard(np.nan)
return s
except:
try:
s = set(self[s, 'interface'].values.flatten())
# Ignore unset entries:
s.discard(np.nan)
return s
except KeyError:
return set()
def __copy__(self):
"""
Make a copy of this object.
"""
return self.from_df(self.data)
copy = __copy__
copy.__doc__ = __copy__.__doc__
def set_pm(self, t, pm):
"""
Set port mapper associated with a specific port type.
Parameters
----------
t : str or unicode
Port type.
pm : neurokernel.plsel.BasePortMapper
Port mapper to save.
"""
# Ensure that the ports in the specified port mapper are a subset of
# those in the interface associated with the specified type:
assert isinstance(pm, BasePortMapper)
if not self.sel.is_in(pm.index.tolist(),
self.pm[t].index.tolist()):
raise ValueError('cannot set mapper using undefined selectors')
self.pm[t] = pm.copy()
def equals(self, other):
"""
Check whether this interface is equivalent to another interface.
Parameters
----------
other : neurokernel.pattern.Interface
Interface instance to compare to this Interface.
Returns
-------
result : bool
True if the interfaces are identical.
Notes
-----
Interfaces containing the same rows in different orders are not
regarded as equivalent.
"""
assert isinstance(other, Interface)
return self.data.equals(other.data)
def __len__(self):
return self.data.__len__()
def __repr__(self):
return 'Interface\n---------\n'+self.data.__repr__()
class Pattern(object):
"""
Connectivity pattern linking sets of interface ports.
This class represents connection mappings between interfaces comprising
sets of ports. Ports are represented using path-like identifiers;
the presence of a row linking the two identifiers in the class' internal
index indicates the presence of a connection. A single data attribute
('conn') associated with defined connections is created by default.
Specific attributes may be accessed by specifying their names after the
port identifiers; if a nonexistent attribute is specified when a sequential
value is assigned, a new column for that attribute is automatically
created: ::
p['/x[0]', '/y[0]', 'conn', 'x'] = [1, 'foo']
The direction of connections between ports in a class instance determines
whether they are input or output ports. Ports may not both receive input or
emit output. Patterns may contain fan-out connections, i.e., one source port
connected to multiple destination ports, but not fan-in connections, i.e.,
multiple source ports connected to a single destination port.
Examples
--------
>>> p = Pattern('/x[0:3]','/y[0:4]')
>>> p['/x[0]', '/y[0:2]'] = 1
>>> p['/y[2]', '/x[1]'] = 1
>>> p['/y[3]', '/x[2]'] = 1
Attributes
----------
data : pandas.DataFrame
Connection attribute data.
index : pandas.MultiIndex
Index of connections.
interface : Interface
Interfaces containing port identifiers and attributes.
Parameters
----------
sel0, sel1, ...: str, unicode, or sequence
Selectors defining the sets of ports potentially connected by the
pattern. These selectors must be disjoint, i.e., no identifier
comprised by one selector may be in any other selector.
columns : sequence of str
Data column names.
See Also
--------
plsel.SelectorMethods
"""
def __init__(self, *selectors, **kwargs):
columns = kwargs.get('columns', ['conn'])
self.sel = SelectorMethods()
# Force sets of identifiers to be disjoint so that no identifier can
# denote a port in more than one set:
assert self.sel.are_disjoint(*selectors)
# Collect all of the selectors:
selector = []
for s in selectors:
if isinstance(s, Selector) and len(s) != 0:
selector.extend(s.expanded)
elif isinstance(s, basestring):
selector.extend(self.sel.parse(s))
elif np.iterable(s):
selector.extend(s)
else:
raise ValueError('invalid selector type')
# Create Interface instance containing the ports comprised by all of the
# specified selectors:
self.interface = Interface(selector)
# Set the interface identifiers associated with each of the selectors
# consecutively:
for i, s in enumerate(selectors):
self.interface[s, 'interface'] = i
# Create a MultiIndex that can store mappings between identifiers in the
# two interfaces:
self.num_levels = {'from': self.interface.num_levels,
'to': self.interface.num_levels}
names = ['from_%s' % i for i in range(self.num_levels['from'])]+ \
['to_%s' %i for i in range(self.num_levels['to'])]
levels = [[] for i in range(len(names))]
labels = [[] for i in range(len(names))]
idx = pd.MultiIndex(levels=levels, codes=labels, names=names)
self.data = pd.DataFrame(index=idx, columns=columns, dtype=object)
import os
import pandas as pd
import matplotlib.pyplot as plt
import shap
import lightgbm as lgb
from sklearn.metrics import average_precision_score
from takaggle.training.model import Model
from takaggle.training.util import Util
# Custom metric that can be used with LightGBM
# Usage example (to optimize on this function, the parameter metric: 'None' must be specified)
# self.model = lgb.train(
# params,
# dtrain,
# num_boost_round=num_round,
# valid_sets=(dtrain, dvalid),
# early_stopping_rounds=early_stopping_rounds,
# verbose_eval=verbose_eval,
# feval=pr_auc
# )
def pr_auc(preds, data):
"""PR-AUCスコア"""
y_true = data.get_label()
score = average_precision_score(y_true, preds)
return "pr_auc", score, True
class ModelLGB(Model):
def train(self, tr_x, tr_y, va_x=None, va_y=None):
# Set up the datasets
validation = va_x is not None
dtrain = lgb.Dataset(tr_x, tr_y, categorical_feature=self.categoricals)
if validation:
dvalid = lgb.Dataset(va_x, va_y, categorical_feature=self.categoricals)
# Set the hyperparameters
params = dict(self.params)
num_round = params.pop('num_round')
verbose_eval = params.pop('verbose_eval')
# Train
if validation:
early_stopping_rounds = params.pop('early_stopping_rounds')
watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
self.model = lgb.train(
params,
dtrain,
num_boost_round=num_round,
valid_sets=(dtrain, dvalid),
early_stopping_rounds=early_stopping_rounds,
verbose_eval=verbose_eval,
)
else:
watchlist = [dtrain]
self.model = lgb.train(params, dtrain, num_boost_round=num_round, valid_sets=watchlist)
# Version that does not compute SHAP values
def predict(self, te_x):
return self.model.predict(te_x, num_iteration=self.model.best_iteration)
# Version that computes SHAP values
def predict_and_shap(self, te_x, shap_sampling):
fold_importance = shap.TreeExplainer(self.model).shap_values(te_x[:shap_sampling])
valid_prediticion = self.model.predict(te_x, num_iteration=self.model.best_iteration)
return valid_prediticion, fold_importance
def save_model(self, path):
model_path = os.path.join(path, f'{self.run_fold_name}.model')
os.makedirs(os.path.dirname(model_path), exist_ok=True)
Util.dump(self.model, model_path)
def load_model(self, path):
model_path = os.path.join(path, f'{self.run_fold_name}.model')
self.model = Util.load(model_path)
@classmethod
def calc_feature_importance(self, dir_name, run_name, features, n_splits, type='gain'):
"""feature importanceの計算
"""
model_array = []
for i in range(n_splits):
model_path = os.path.join(dir_name, f'{run_name}-fold{i}.model')
model = Util.load(model_path)
model_array.append(model)
if type == 'gain':
# Compute gain-based importance
val_gain = model_array[0].feature_importance(importance_type='gain')
val_gain = pd.Series(val_gain)
for m in model_array[1:]:
s = pd.Series(m.feature_importance(importance_type='gain'))
val_gain = pd.concat([val_gain, s], axis=1)
if n_splits == 1:
val_gain = val_gain.values
df = pd.DataFrame(val_gain, index=features, columns=['importance']).sort_values('importance', ascending=False)
df.to_csv(dir_name + run_name + '_importance_gain.csv')
df = df.sort_values('importance', ascending=True).tail(100)
# Plot
fig, ax1 = plt.subplots(figsize=(10, 30))
plt.tick_params(labelsize=10)  # font size of the figure labels
# Draw the bar chart
ax1.set_title('feature importance gain')
ax1.set_xlabel('feature importance')
ax1.barh(df.index, df['importance'], label='importance', align="center", alpha=0.6)
# Show the legend
ax1.legend(bbox_to_anchor=(1, 1), loc='upper right', borderaxespad=0.5, fontsize=12)
# Show the grid (ax1 only)
ax1.grid(True)
plt.tight_layout()
plt.savefig(dir_name + run_name + '_fi_gain.png', dpi=200, bbox_inches="tight")
plt.close()
else:
# Compute the mean across folds
val_mean = val_gain.mean(axis=1)
val_mean = val_mean.values
importance_df_mean = pd.DataFrame(val_mean, index=features, columns=['importance']).sort_values('importance')
# Compute the standard deviation across folds
val_std = val_gain.std(axis=1)
val_std = val_std.values
importance_df_std = pd.DataFrame(val_std, index=features, columns=['importance']).sort_values('importance')
# Merge
df = pd.merge(importance_df_mean, importance_df_std, left_index=True, right_index=True, suffixes=['_mean', '_std'])
# Compute the coefficient of variation
df['coef_of_var'] = df['importance_std'] / df['importance_mean']
df['coef_of_var'] = df['coef_of_var'].fillna(0)
df = df.sort_values('importance_mean', ascending=False)
df.to_csv(dir_name + run_name + '_importance_gain.csv')
df = df.sort_values('importance_mean', ascending=True).tail(100)
# Plot
fig, ax1 = plt.subplots(figsize=(10, 30))
plt.tick_params(labelsize=10)  # font size of the figure labels
# Draw the bar chart
ax1.set_title('feature importance gain')
ax1.set_xlabel('feature importance mean & std')
ax1.barh(df.index, df['importance_mean'], label='importance_mean', align="center", alpha=0.6)
ax1.barh(df.index, df['importance_std'], label='importance_std', align="center", alpha=0.6)
# Draw the line chart
ax2 = ax1.twiny()
ax2.plot(df['coef_of_var'], df.index, linewidth=1, color="crimson", marker="o", markersize=8, label='coef_of_var')
ax2.set_xlabel('Coefficient of variation')
# Show the legends (ax2 placed slightly below ax1)
ax1.legend(bbox_to_anchor=(1, 1), loc='upper right', borderaxespad=0.5, fontsize=12)
ax2.legend(bbox_to_anchor=(1, 0.94), loc='upper right', borderaxespad=0.5, fontsize=12)
# Show the grid (ax1 only)
ax1.grid(True)
ax2.grid(False)
plt.tight_layout()
plt.savefig(dir_name + run_name + '_fi_gain.png', dpi=200, bbox_inches="tight")
plt.close()
else:
# Compute split-based importance
val_split = model_array[0].feature_importance(importance_type='split')
val_split = pd.Series(val_split)
for m in model_array[1:]:
s = pd.Series(m.feature_importance(importance_type='split'))
val_split = pd.concat([val_split, s], axis=1)
if n_splits == 1:
val_split = val_split.values
df = pd.DataFrame(val_split, index=features, columns=['importance']).sort_values('importance', ascending=False)
df.to_csv(dir_name + run_name + '_importance_split.csv')
df = df.sort_values('importance', ascending=True).tail(100)
# Plot
fig, ax1 = plt.subplots(figsize=(10, 30))
plt.tick_params(labelsize=10)  # font size of the figure labels
# Draw the bar chart
ax1.set_title('feature importance split')
ax1.set_xlabel('feature importance')
ax1.barh(df.index, df['importance'], label='importance', align="center", alpha=0.6)
# Show the legend
ax1.legend(bbox_to_anchor=(1, 1), loc='upper right', borderaxespad=0.5, fontsize=12)
# Show the grid (ax1 only)
ax1.grid(True)
plt.tight_layout()
plt.savefig(dir_name + run_name + '_fi_gain.png', dpi=200, bbox_inches="tight")
plt.close()
else:
# Compute the mean across folds
val_mean = val_split.mean(axis=1)
val_mean = val_mean.values
importance_df_mean = pd.DataFrame(val_mean, index=features, columns=['importance']).sort_values('importance')
# Compute the standard deviation across folds
val_std = val_split.std(axis=1)
val_std = val_std.values
importance_df_std = pd.DataFrame(val_std, index=features, columns=['importance']).sort_values('importance')
# Merge
df = pd.merge(importance_df_mean, importance_df_std, left_index=True, right_index=True, suffixes=['_mean', '_std'])
import numpy as np
import pandas as pd
from collections import OrderedDict
from pandas.api.types import is_numeric_dtype, is_object_dtype, is_categorical_dtype
from typing import List, Optional, Tuple, Callable
def inspect_df(df: pd.DataFrame) -> pd.DataFrame:
""" Show column types and null values in DataFrame df
"""
resdict = OrderedDict()
# Inspect nulls
null_series = df.isnull().sum()
resdict["column"] = null_series.index
resdict["null_fraction"] = np.round(null_series.values / len(df), 3)
resdict["nulls"] = null_series.values
# Inspect types
types = df.dtypes.values
type_names = [t.name for t in types]
resdict["type"] = type_names
# Is numeric?
is_numeric = []
for col in df.columns:
is_numeric.append(is_numeric_dtype(df[col]))
resdict["is_numeric"] = is_numeric
# Dataframe
resdf = pd.DataFrame(resdict)
resdf.sort_values("null_fraction", inplace=True)
resdf.reset_index(inplace=True, drop=True)
return resdf
def summarize_df(df: pd.DataFrame) -> pd.DataFrame:
""" Show stats;
- rows:
- column types
- columns
- number of columns
- number of cols containing NaN's
"""
# Original DataFrame
(nrows, _) = df.shape
# Stats of DataFrame
stats = inspect_df(df)
data_types = np.unique(stats["type"].values)
resdict = OrderedDict()
# Column: data types
resdict["type"] = data_types
ncols_type = []
ncols_nan = []
n_nans = []
n_total = []
for dt in data_types:
# Column: number of columns with type
nc = len(stats[stats["type"] == dt])
ncols_type.append(nc)
# Column: number of columns with NaNs
nan_cols = stats[(stats["type"] == dt) & (stats["nulls"] > 0)]
ncols_nan.append(len(nan_cols))
# Column: number of NaNs
n_nans.append(nan_cols["nulls"].sum())
# Column: total number of values
n_total.append(nc * nrows)
# Prepare dict for the df
resdict["ncols"] = ncols_type
resdict["ncols_w_nans"] = ncols_nan
resdict["n_nans"] = n_nans
resdict["n_total"] = n_total
# Proportions of NaNs in each column group.
# Division by zero shouldn't occur
nan_frac = np.array(n_nans) / np.array(n_total)
resdict["nan_frac"] = np.round(nan_frac, 2)
resdf = pd.DataFrame(resdict)
resdf.sort_values("type", inplace=True)
resdf.reset_index(inplace=True, drop=True)
return resdf
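# Illustrative usage (a sketch, not from the original source):
#
#     stats = inspect_df(df)       # one row per column: nulls, dtype, is_numeric
#     overview = summarize_df(df)  # one row per dtype: column counts, NaN fraction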
def add_datefields(
df: pd.DataFrame,
column: str,
drop_original: bool = False,
inplace: bool = False,
attrs: Optional[List[str]] = None,
) -> pd.DataFrame:
""" Add attributes of the date to dataFrame df
"""
raw_date = df[column]
# Pandas datetime attributes
if attrs is None:
attributes = [
"dayofweek",
"dayofyear",
"is_month_end",
"is_month_start",
"is_quarter_end",
"is_quarter_start",
"quarter",
"week",
]
else:
attributes = attrs
# Return new?
if inplace:
resdf = df
else:
resdf = df.copy(deep=True)
# Could probably be optimized with pd.apply()
for attr in attributes:
new_column = f"{column}_{attr}"
# https://stackoverflow.com/questions/2612610/
new_vals = [getattr(d, attr) for d in raw_date]
resdf[new_column] = new_vals
if drop_original:
resdf.drop(columns=column, inplace=True)
return resdf
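# Illustrative usage (assumption: df has a datetime-like column 'saledate'):
#
#     df2 = add_datefields(df, 'saledate', drop_original=True)
#
# This adds columns such as 'saledate_dayofweek' and 'saledate_quarter' and
# drops the original 'saledate' column.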
def add_nan_columns(
df: pd.DataFrame, inplace: bool = False, column_list: Optional[List[str]] = None
) -> pd.DataFrame:
""" For each column containing NaNs, add a boolean
column specifying if the column is NaN. Can be used
if the data is later imputated.
"""
if column_list is not None:
nan_columns = column_list
else:
# Get names of columns containing at least one NaN
temp = df.isnull().sum() != 0
nan_columns = temp.index[temp.values]
# Return new?
if inplace:
resdf = df
else:
resdf = df.copy(deep=True)
for column in nan_columns:
new_column = f"{column}_isnull"
nans = df[column].isnull()
resdf[new_column] = nans
return resdf
def numeric_nans(df: pd.DataFrame) -> pd.DataFrame:
""" Inspect numerical NaN values of a DataFrame df
"""
stats = inspect_df(df)
nan_stats = stats.loc[stats["is_numeric"] & (stats["nulls"] > 0)].copy(deep=True)
len_uniques = []
uniques = []
for row in nan_stats["column"].values:
uniq = np.unique(df[row][df[row].notnull()].values)
len_uniques.append(len(uniq))
uniques.append(uniq)
nan_stats["num_uniques"] = len_uniques
nan_stats["uniques"] = uniques
nan_stats.reset_index(inplace=True, drop=True)
return nan_stats
def categorize_df(
df: pd.DataFrame,
columns: Optional[List[str]] = None,
inplace: bool = False,
drop_original: bool = True,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
""" Categorize values in columns, and replace value with category.
If no columns are given, default to all 'object' columns
"""
if columns is not None:
cat_cols = columns
else:
cat_cols = df.columns[[dt.name == "object" for dt in df.dtypes.values]]
if inplace:
resdf = df
else:
resdf = df.copy(deep=True)
df_codes = []
df_cats = []
n_cats = []
for column in cat_cols:
new_column = f"{column}_cat"
cat_column = df[column].astype("category")
# By default, NaN is -1. We convert to zero by incrementing all.
col_codes = cat_column.cat.codes + 1
resdf[new_column] = col_codes
# DataFrame with the codes
df_codes.append(col_codes)
df_cats.append(cat_column.cat.categories)
n_cats.append(len(np.unique(col_codes)))
cat_dict = OrderedDict()
cat_dict["column"] = cat_cols
# MyPy picks up an error in the next line. Bug is where?
# Additionally, Flake8 will report the MyPy ignore as an error
cat_dict["n_categories"] = n_cats # type: ignore[assignment] # noqa: F821,F821
cat_dict["categories"] = df_cats
cat_dict["codes"] = df_codes
cat_df = pd.DataFrame(cat_dict)
if drop_original:
resdf.drop(columns=cat_cols, inplace=True)
return (resdf, cat_df)
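# Illustrative usage (a sketch, not from the original source):
#
#     df_cat, cat_df = categorize_df(df)
#
# Each object column 'col' is replaced by 'col_cat' holding integer codes
# (0 is reserved for NaN); cat_df maps every column to its categories and
# codes, so a code c > 0 corresponds to cat_df.loc[i, 'categories'][c - 1].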
def replace_numeric_nulls(
df: pd.DataFrame,
columns: Optional[List[str]] = None,
function: Callable = np.median,
inplace: bool = False,
) -> pd.DataFrame:
""" Replace nulls in all numerical column with the median (default) or
another callable function that works on NumPy arrays
"""
if columns is None:
columns = [
colname for colname, column in df.items() if is_numeric_dtype(column)
]
if inplace:
resdf = df
else:
resdf = df.copy(deep=True)
fillers = OrderedDict()
for column in columns:
values = resdf[resdf[column].notnull()][column].values
fillers[column] = function(values)
resdf.fillna(value=fillers, inplace=True)
return resdf
def object_nan_to_empty(df: pd.DataFrame, inplace: bool = False) -> pd.DataFrame:
""" Replace NaN in Pandas object columns with an empty string
indicating a missing value.
"""
columns = [colname for colname, column in df.items() if is_object_dtype(column)]
fillers = {c: "" for c in columns}
if inplace:
resdf = df
else:
resdf = df.copy(deep=True)
resdf.fillna(value=fillers, inplace=True)
return resdf
def categorical_columns(
df: pd.DataFrame, columns: Optional[List[str]] = None, inplace: bool = False
) -> pd.DataFrame:
""" For any object columns, create categorical columns instead.
"""
if columns is None:
columns = [colname for colname, column in df.items() if is_object_dtype(column)]
if inplace:
resdf = df
else:
resdf = df.copy(deep=True)
for column in columns:
resdf[column] = df[column].astype("category")
return resdf
def apply_categories(
df: pd.DataFrame,
columns: Optional[List[str]] = None,
inplace: bool = False,
drop: bool = False,
) -> pd.DataFrame:
""" For any categorical columns, add a new column with the codes, postfixed with '_cat'.
If 'drop' is tru, drop the original columns
"""
if columns is None:
columns = [
colname for colname, column in df.items() if is_categorical_dtype(column)
]
#!/usr/bin/env python
# encoding: utf-8
'''
\ \ / /__| | ___ _ _ __ / ___| | | | / \ |_ _|
\ V / _ \ |/ / | | | '_ \ | | | |_| | / _ \ | |
| | __/ <| |_| | | | | | |___| _ |/ ___ \ | |
|_|\___|_|\_\\__,_|_| |_| \____|_| |_/_/ \_\___
==========================================================================
@author: CYK
@license: School of Informatics, Edinburgh
@contact: <EMAIL>
@file: plot_graph.py
@time: 08/03/2018 19:41
@desc:
'''
import matplotlib.pyplot as plt
import os
import pandas as pd
# pp = []
# bleu = []
# val_loss=[]
# mean_loss=[]
data = {'pp':[],
'bleu':[],
'mean_loss':[] ,
'val_loss':[]
}
save_dir='plot_data'
def save_data(data, csv_name, subdir=False, save_dir=save_dir):
assert csv_name[-4:] == '.csv', "Error: didnot give a valid csv_name!"
if not os.path.exists(save_dir):
os.mkdir(save_dir)
if subdir is not False:
subdir = os.path.join(save_dir, subdir)
if not os.path.isdir(subdir):
os.mkdir(subdir)
csv_file = os.path.join(subdir, csv_name)
# =========================
# 1. save to txt
# with open(filename, 'w') as f:
# f.write(str(history))
# ==========================
hist = pd.DataFrame.from_dict(data, orient='columns')
import os
import pandas as pd
from pandas.util.testing import assert_equal
from nlp_profiler.constants \
import HIGH_LEVEL_OPTION, GRANULAR_OPTION, GRAMMAR_CHECK_OPTION, \
SPELLING_CHECK_OPTION, EASE_OF_READING_CHECK_OPTION
from nlp_profiler.core import apply_text_profiling
from tests.common_functions import generate_data, remove_joblib_cache
CURRENT_SOURCE_FILEPATH = os.path.abspath(__file__)
EXPECTED_DATA_PATH = f'{os.path.dirname(CURRENT_SOURCE_FILEPATH)}/data'
def setup_model(module):
remove_joblib_cache()
def test_given_a_text_column_when_profiler_is_applied_grammar_check_analysis_then_profiled_dataset_is_returned():
# given
source_dataframe = create_source_dataframe()
csv_filename = f'{EXPECTED_DATA_PATH}/expected_profiled_dataframe_grammar_check.csv'
expected_dataframe = pd.read_csv(csv_filename)
"""
ncaa_scraper
A module to scrape and parse college baseball statistics from stats.ncaa.org
Created by <NAME> in Spring 2022
"""
import pandas as pd
import time
import random
from bs4 import BeautifulSoup
import requests
import numpy as np
#lookup paths
_SCHOOL_ID_LU_PATH = 'collegebaseball/data/schools.parquet'
_SEASON_ID_LU_PATH = 'collegebaseball/data/seasons.parquet'
_PLAYERS_HISTORY_LU_PATH = 'collegebaseball/data/players_history.parquet'
_PLAYER_ID_LU_PATH = 'collegebaseball/data/player_id_lookup.parquet'
#pre-load lookup tables for performance
_SCHOOL_ID_LU_DF = pd.read_parquet(_SCHOOL_ID_LU_PATH)
_SEASON_LU_DF = pd.read_parquet(_SEASON_ID_LU_PATH)
"""
SPDX-FileCopyrightText: 2019 oemof developer group <<EMAIL>>
SPDX-License-Identifier: MIT
"""
import pandas as pd
import numpy as np
from pandas.util.testing import assert_series_equal
from numpy.testing import assert_allclose
from windpowerlib.density import barometric, ideal_gas
class TestDensity:
def test_barometric(self):
parameters = {'pressure': pd.Series(data=[101125, 101000]),
'pressure_height': 0,
'hub_height': 100,
'temperature_hub_height': pd.Series(data=[267, 268])}
# Test pressure as pd.Series and temperature_hub_height as pd.Series
# and np.array
rho_exp = | pd.Series(data=[1.30305336, 1.29656645]) | pandas.Series |
from functools import reduce
import os
import pandas as pd
import numpy as np
import multiprocessing as mp
class MapReducer:
def __init__(self, df):
self.df = df
self.counter = 0
def mapper(self, group):
gp_name, lst = group
gp_df = pd.DataFrame([self.df.loc[x] for x in lst], columns=self.df.columns).sort_values('created_utc').iloc[
-100:]
res = " ||| ".join(gp_df.body.to_list())
self.counter += 1
print("author {} done".format(self.counter))
return gp_name, res
# tmp.groupby('author')["proper_tokenized"].agg(sum)
def get_100_recent_posts(data_file):
recent_df_path = os.path.dirname(data_file) + "/recent100.pkl"
if os.path.isfile(recent_df_path):
return pd.read_pickle(recent_df_path)
else:
print("extracting 100 most recent posts per author")
multi_core = False
reddits = pd.read_csv(data_file)
map_reducer = MapReducer(reddits)
groups = reddits.groupby("author").groups.items()
results = []
if multi_core:
p = mp.Pool(mp.cpu_count()) # Data parallelism Object
results = p.map(map_reducer.mapper, groups)
else:
for group in groups:
results.append(map_reducer.mapper(group))
x = | pd.DataFrame(results, columns=['author', 'text']) | pandas.DataFrame |
"""
Low pass filter implementation in python.
The low pass filter is defined by the recurrence relation:
y_(n+1) = y_n + alpha (x_n - y_n)
where x is the measured data and y is the filtered data. Alpha is a constant
dependent on the cutoff frequency, f, and is defined as:
alpha = (2 pi dt f) / (2 pi dt f + 1)
where dt is the time step used.
"""
import numpy as np
import scipy
import pandas as pd
from . import filter_base
from ..misc import s2o, o2s
class LowPassData(filter_base.FilterData):
"""
Low pass filter implementation.
"""
name = "LowPassData"
# Special methods--------------------------------------------------------------
def __init__(self, *args, cutoff=None):
filter_base.FilterData.__init__(self, *args)
# Set up private variables.
if self._obmt is not None:
self._dt = o2s(self._obmt[1] - self._obmt[0])
else:
self._dt = 1
if isinstance(cutoff, (float, int)):
self._cutoff = cutoff
else:
print(self._data)
self._cutoff = self._get_frequency_from_psd(self._data)
self._alpha = (2 * np.pi * self._dt * self._cutoff)/(2 * np.pi *
self._dt *
self._cutoff + 1)
# -----------------------------------------------------------------------------
# Public methods --------------------------------------------------------------
def tweak_cutoff(self, cutoff):
"""Change the cutoff frequency for the lowpass filter."""
self._cutoff = cutoff
self._alpha = 1 - np.exp(-1 * self._dt * self._cutoff)
self.reset()
# -----------------------------------------------------------------------------
# Private methods--------------------------------------------------------------
def _low_pass(self, data_array, alpha=None):
if alpha is None:
alpha = self._alpha
x = data_array[0]
i = 0
while True:
try:
x += alpha * (data_array[i] - x)
except(IndexError):
break
yield x
i += 1
    def _get_frequency_from_psd(self, data):
        # Calculate the sampling rate of the data. Since there are
        # occasional jumps, it is worth checking that two random intervals
        # are the same to avoid accidentally calculating an incorrect rate.
f = self._dt ** (-1)
d = | pd.DataFrame() | pandas.DataFrame |
# ##### run this script for each project to produce the true link for that project #####
import pandas as pd
import numpy as np
dummy_commit = pd.read_parquet('path to read commit')
dummy_commit
# deleting all the null issue_ids
dummy_commit.reset_index(drop=True, inplace=True)
print(np.where(pd.isnull(dummy_commit['issue_id']))[0])
print(len(np.where(pd.isnull(dummy_commit['issue_id']))[0]))
dummy_commit = dummy_commit.drop(np.where(pd.isnull(dummy_commit['issue_id']))[0])
dummy_commit.shape
dummy_commit.issue_id = dummy_commit.issue_id.astype(int)
dummy_commit.issue_id = dummy_commit.issue_id.astype(str)
# working on issue
dummy_issue = pd.read_parquet('path to read issue')
dummy_issue
print(np.where(pd.isnull(dummy_issue['issue_id'])))
print(len(np.where(pd.isnull(dummy_issue['issue_id']))[0]))
#
dummy_issue.issue_id = dummy_issue.issue_id.astype(str)
# Building True Links
x = list(dummy_commit.columns)
x.remove('source')
x.remove('issue_id')
cols = list(dummy_issue.columns) + x
True_link = pd.DataFrame(columns=cols)
unique_issue_id_in_commits = dummy_commit.issue_id.unique()
print(len(unique_issue_id_in_commits))
for i in range(len(unique_issue_id_in_commits)):
selected_commit = dummy_commit.loc[dummy_commit['issue_id'] == unique_issue_id_in_commits[i]]
selected_issue = dummy_issue.loc[dummy_issue['issue_id'] == unique_issue_id_in_commits[i]]
resulted_true_link = | pd.merge(left=selected_issue, right=selected_commit, how='left', left_on=['source', 'issue_id'], right_on=['source', 'issue_id']) | pandas.merge |
import numpy as np
import pandas as pd
from random import randint
from statistics import mode
from datetime import datetime
import backend.utils.finder as finder
from dateutil.relativedelta import relativedelta
def arrange_df(df, df_type, relevant_col_idx=None, items_to_delete=None, assembly_df=None, bom_trim=False):
"""
:param bom_trim:
:param df:
pandas.DataFrame object that contains the raw format that is read from the file.
:param df_type:
        File type of the dataframe; one of "bom", "times" or "merged".
:param relevant_col_idx:
:param items_to_delete:
:param assembly_df:
    :return: the processed dataframe (for df_type "times", a tuple of four dataframes).
"""
df = df.copy()
if df_type.lower() == "bom":
# Reformatting the columns
df = reformat_columns(df, relevant_col_idx, "bom")
df.part_no = df.part_no.astype(str)
# If specified, bom will be trimmed
if bom_trim:
df = trim_bom(df)
# This part will be discarded for the time being, 15.04.2020
# Deleting the trial products
# df.drop(df[df.product_no.str.split(".", 0).apply(lambda x: int(x[2]) > 900)].index, inplace = True)
# Deleting the entries where two successive entries are level 1s.
df.drop(df[df.level.eq(df.level.shift(-1, fill_value=1)) & df.level.eq(1)].index, inplace=True)
        # These to-be-deleted parts can be redundant, so it will be decided later whether these codes are going to stay or not
tbd_list = items_to_delete["Silinecekler"].unique().tolist()
df.drop(df[df["part_no"].str.split(".").apply(lambda x: x[0] in tbd_list)].index, inplace=True)
# Deleting the entries where two successive entries are level 1s.
df.drop(df[df.level.eq(df.level.shift(-1, fill_value=1)) & df.level.eq(1)].index, inplace=True)
# Check if the product structure is okay or not, if not okay, delete the corresponding products from the BOM
df.drop(df[df.groupby("product_no").apply(corrupt_product_bom).values].index, inplace=True)
# Transforming the amounts to a desired format for the simulation model.
df.amount = determine_amounts(df)
# Making sure that the dataframe returns in order
df.reset_index(drop=True, inplace=True)
return df
if df_type.lower() == "times":
# Reformatting the columns
df = reformat_columns(df, relevant_col_idx, "times")
# Transforming the machine names to ASCII characters.
df["station"] = format_machine_names(df, "station")
# Transforming non-numeric values to numeric values
df.cycle_times = pd.to_numeric(df.cycle_times, errors="coerce").fillna(0)
df.setup_times = pd.to_numeric(df.setup_times, errors="coerce").fillna(0)
# Grouping by the times of the parts that has multiple times in the same work station
df = df.groupby(["part_no", "station"], as_index=False).agg({"cycle_times": sum, "setup_times": max})
df.drop(df[df["part_no"].duplicated(keep="last")].index, inplace=True)
# Creating the setup matrix
set_list_df = df[["station", "setup_times"]].copy()
set_list_df.columns = ["stations_list", "setup_time"]
set_list_df = set_list_df.groupby(by="stations_list", as_index=False).agg({"setup_time": mode})
set_list_df["setup_prob"] = 1
set_list_df.loc[(set_list_df.stations_list == "ANKASTRE_BOYAHANE") |
(set_list_df.stations_list == "ENDUSTRI_BOYAHANE"), "setup_prob"] = 3 / 100
set_list_df.loc[set_list_df.stations_list == "ANKASTRE_BOYAHANE", "setup_time"] = 900
set_list_df.loc[set_list_df.stations_list == "ENDUSTRI_BOYAHANE", "setup_time"] = 1200
# Creating a dataframe with the assembly times
montaj_df = df[(df["station"] == "BANT") | (df["station"] == "LOOP")]
# Creating a dataframe with the glass bonding
cmy_df = df[df["station"] == "CAM_YAPISTIRMA"]
# Dropping the assembly times from the original times dataframe and resetting the index
df.drop(df[(df["station"] == "BANT") |
(df["station"] == "LOOP") |
(df["station"] == "CAM_YAPISTIRMA") |
(df["part_no"].apply(lambda x: len(x)) == 13)].index, inplace=True)
# Resetting the index
df.reset_index(drop="index", inplace=True)
# Getting rid of the setup column of time matrix
# df.drop("setup_times", axis = 1, inplace = True)
return df, montaj_df, cmy_df, set_list_df
if df_type.lower() == "merged":
df["station"] = level_lookup(df, "level", "station")
df["cycle_times"] = level_lookup(df, "level", "cycle_times")
df.loc[df.level == 1, ["station", "cycle_times"]] = \
pd.merge(df["product_no"], assembly_df[["part_no", "station", "cycle_times"]], "left",
left_on="product_no",
right_on="part_no")[["station", "cycle_times"]]
missing_dict = missing_values_df(df)
missing_df = pd.DataFrame(missing_dict).transpose().reset_index()
missing_df.columns = ["code", "station", "cycle_times"]
# Ask for what are the values for the NAs in the missing dictionary
"""
THIS WILL CHANGE
"""
missing_df.station.fillna("CAM_YAPISTIRMA", inplace=True)
missing_df.cycle_times.fillna(np.random.randint(25, 60), inplace=True)
"""
END OF THIS WILL CHANGE
"""
# Rounding all the numerical values to integers.
missing_df.loc[~missing_df.station.isna(), "cycle_times"] = \
missing_df.loc[~missing_df.station.isna(), "cycle_times"].apply(np.ceil)
# Creating the missing slice to fill it to the merged bom dataframe later
missing_slice = pd.merge(left=df[df.station.isna()].part_no.str.split(".").apply(lambda x: x[0]),
right=missing_df, left_on="part_no", right_on="code", how="left")
missing_slice.index = df.loc[df.station.isna()].index
# Equating the filled missing data slice into the bom
df.loc[df.station.isna(), ["station", "cycle_times"]] = \
missing_slice[["station", "cycle_times"]]
return df
def reformat_columns(df, relevant_col_idx, df_type):
if df_type == "bom":
df = df.copy()
# Rearranging the level amount
df["Seviye"] = [int(str(x)[-1]) for x in df[df.columns[relevant_col_idx][2]]]
relevant_col_idx[2] = len(df.columns) - 1
# Determining the columns names to use for reindex later
relevant_col_names = df.columns[relevant_col_idx]
# Columns to be dropped from the dataframe
cols_to_drop = list(set(df.columns) - set(df.columns[relevant_col_idx]))
# Dropping, sorting and indexing the corresponding columns
df.drop(cols_to_drop, axis=1, inplace=True)
df = df.reindex(columns=relevant_col_names)
df.columns = ["product_no", "part_no", "level", "amount", "explanation"]
if df_type == "times":
# Determining the columns names to use for reindex later
relevant_col_names = df.columns[relevant_col_idx]
# Columns to be dropped from the dataframe
cols_to_drop = list(set(df.columns) - set(df.columns[relevant_col_idx]))
# Dropping, sorting and indexing the corresponding columns
df.drop(cols_to_drop, axis=1, inplace=True)
df = df.reindex(columns=relevant_col_names)
df.columns = ["part_no", "station", "cycle_times", "setup_times"]
return df
def trim_bom(df):
# This little piece of code trims the bom so that there is only one instance of product for each product family.
df["product_family"] = [x.split(".")[0] for x in df.product_no]
first_products = df[df.product_family.ne(df.product_family.shift(1, fill_value=0))]["product_no"].to_list()
a = pd.Series([x in first_products for x in df.product_no])
df.drop(df[~a].index, inplace=True)
df.reset_index(drop=True, inplace=True)
# This part is a bit more tricky, this code takes the 90th percentile in the past orders and then takes them into
# consideration, enable if proper use is needed. For a one time usage, just replace the FILE_PATH with pivoted total
# demand info as a csv file. Else, bind the variable to the frontend.
# Order data processing and finding 90 Percentile
# order_data = pd.read_csv(FILE_PATH)
# order_data.drop(order_data.columns[1:4], axis = 1, inplace = True)
# order_data["sum"] = order_data[order_data.columns[1:]].sum(axis = 1)
# order_data.sort_values(by = "sum", inplace = True, ascending = False)
# This part drops the items that are not in the demand page, will change later.
# bom_list = set(df.product_no.to_list())
# order_list = order_data[order_data.columns[0]].to_list()
# order_data.drop(order_data[[x not in bom_list for x in order_list]].index, inplace = True)
# End of that part.
# order_data["perc"] = order_data["sum"].cumsum()/order_data["sum"].sum().cumsum()
# order_data.reset_index(drop = True, inplace = True)
# perc_count = order_data[order_data.perc > 0.9].head(1).index.astype(int)[0]
# prod_list = order_data[order_data.columns[0]].tolist()
# perc_list = [x for x in prod_list if x in frozenset(df.product_no.to_list())][:perc_count]
# a = pd.Series([x in perc_list for x in df.product_no])
# df.drop(df[~a].index, inplace = True)
# df.reset_index(drop = True, inplace = True)
return df
def trim_order(order_df, bom_df):
order_df = order_df.pivot()
return order_df[order_df.index.to_series().str.split(".").apply(lambda x: x[0]).isin(
bom_df.product_no.str.split(".").apply(lambda x: x[0]))]
def trim_df(df, plan_df):
temp_df = df.copy()
products_to_be_taken = pd.DataFrame(plan_df.product_no.unique().tolist(), columns=["product_no"])
products_to_be_taken["is_in_merged"] = products_to_be_taken.product_no.isin(temp_df.product_no.unique().tolist())
missing_dict = find_most_close_products(products_to_be_taken[products_to_be_taken.is_in_merged.eq(0)], temp_df)
products_to_be_taken.product_no.replace(missing_dict, inplace=True)
temp_df = temp_df[temp_df.product_no.isin(products_to_be_taken.product_no.to_list()).eq(1)]
temp_df.reset_index(drop=True, inplace=True)
return temp_df, missing_dict
def schedule_changer_dict(df, days):
    current_month = datetime(df.start_date.dt.year.mode()[0], df.start_date.dt.month.mode()[0], 1).date()
availability = [
1 if (list(days.values)[0][x] == 1) & ((current_month + pd.to_timedelta(x, unit="d")).weekday() < 5) else 0 for
x in range(0, days.columns.max())]
replace_dict = {}
if current_month.weekday() >= 5:
replace_dict[current_month] = pd.to_datetime(
(current_month + pd.to_timedelta(7 - current_month.weekday(), unit="d")))
else:
replace_dict[current_month] = pd.to_datetime(current_month)
for x in range(1, days.columns.max()):
if availability[x] == 1:
replace_dict[(current_month + pd.to_timedelta(x, unit="d"))] = \
pd.to_datetime(current_month + pd.to_timedelta(x, unit="d"))
else:
replace_dict[(current_month + pd.to_timedelta(x, unit="d"))] = \
pd.to_datetime(max(replace_dict.values()))
renewed_dict = {x: replace_dict[x].date() for x in list(replace_dict.keys())}
# days["day"] = days["date"].dt.day
# days.day.replace(renewed_dict, inplace = True)
return renewed_dict
def level_lookup(df, level_col, lookup_col):
dummies = pd.get_dummies(df[level_col])
idx = dummies.index.to_series()
last_index = dummies.apply(lambda col: idx.where(col != 0, np.nan).fillna(method="ffill"))
last_index[0] = 1
idx = last_index.lookup(last_index.index, df[level_col] - 1)
return pd.DataFrame({lookup_col: df.reindex(idx)[lookup_col].values}, index=df.index)
def missing_values_df(df):
missing_parts = df[df.station.isna()].part_no.str.split(".").apply(lambda x: x[0]).unique()
missing_dict = {}
for items in missing_parts:
temp_station = df[df.part_no.apply(lambda x: x.split(".")[0]) == items].station.mode()
if temp_station.shape == (0,):
temp_station = np.nan
else:
temp_station = temp_station[0]
temp_cycle = df[df.part_no.apply(lambda x: x.split(".")[0]) == items].cycle_times.mean()
missing_dict[items] = [temp_station, temp_cycle]
return missing_dict
def merge_bom_and_times(df_bom, df_times):
df = pd.merge(left=df_bom, right=df_times, how="left", on="part_no")
df = df[list(df.columns[0:4]) + list(df.columns[5:]) + list(df.columns[4:5])].copy()
return df
def format_word(word, letter_dict):
new_word = ""
for i in range(len(word)):
if word[i] in letter_dict.keys():
new_word += letter_dict[word[i]].upper()
continue
new_word += word[i].upper()
return new_word
def format_machine_names(df, column):
turkish_char_dict = {"ı": "i", "ğ": "g", "Ğ": "G", "ü": "u", "Ü": "U", "Ş": "s", "-": "_",
"ş": "s", "İ": "I", "Ö": "O", "ö": "o", "Ç": "C", "ç": "c", " ": "_", "/": "_"}
machine_dict = {}
for item in list(set(df[column])):
machine_dict[item] = (format_word(item, turkish_char_dict))
return df[column].replace(machine_dict)
def determine_amounts(df):
copy_series = pd.Series(np.nan, index=df.index)
idx = df.level.lt(df.level.shift(1, fill_value=2)) | df.level.eq(1)
copy_series[idx] = df.loc[idx, "amount"]
copy_series.ffill(inplace=True)
return copy_series
def corrupt_product_bom(group):
if (sum(group.level == 1) == 0) | (
sum((group.level - group.level.shift(1, fill_value=group.at[group.index[0], "level"])) >= 2) != 0):
return pd.DataFrame({"is_valid": [True] * len(group)}, index=group.index)
else:
return pd.DataFrame({"is_valid": [False] * len(group)}, index=group.index)
def find_most_close_products(missing_products, complete_df) -> dict:
product_parameter = [10, 5, 3, 1]
products = pd.DataFrame(complete_df.product_no.unique(), columns=["product_no"])
products[["product_family", "length", "customer", "option"]] = products.product_no.str.split(".", expand=True)
missing_prods = pd.DataFrame(missing_products.product_no.unique(), columns=["product_no"])
missing_prods[["product_family", "length", "customer", "option"]] = missing_prods.product_no.str.split(".",
expand=True)
missing_dict = {missing_prods.product_no[x]:
products[::-1].product_no[
((products[products.columns[-4:]] == missing_prods.iloc[x, -4:]) * product_parameter).sum(
axis=1).idxmax()] for x in
missing_prods.index}
return missing_dict
# noinspection PyTypeChecker
def create_operational_table(df, table_type, aux=None, *args):
if table_type == "legend":
df = df.iloc[finder.input_indices(df)][["product_no", "part_no"]]
df.index = list(range(1, len(df) + 1))
return df
elif table_type == "input":
df = df.iloc[finder.input_indices(df)][["product_no", "amount"]]
df["product_no"] = finder.product_numerator(df)
df.index = list(range(1, len(df) + 1))
df.columns = ["product", "amount"]
return df
elif table_type == "dup":
# This function gets the input table as a base dataframe to work on and make calculations with.
df = create_operational_table(df=df, table_type="input")
        # The following three lines create the products' index in the process input list, i.e. from the input table
s = df["product"].ne(df["product"].shift(fill_value=df.iloc[0]["product"]))
product_idx = pd.Series([1] + list(np.where(s)[0] + 1))
product_idx.index += 1
# Following line calculates the entity amounts to be duplicated in the simulation software
dup_count = product_idx.shift(-1, fill_value=len(df) + 1) - product_idx
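        # A small worked example (hypothetical input): for a product column of
        # [1, 1, 1, 2, 2], the change points give product_idx = [1, 4] and
        # dup_count = [4 - 1, 6 - 4] = [3, 2], i.e. product 1 is duplicated three
        # times starting at row 1 and product 2 twice starting at row 4.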
        # The next two lines concatenate (basically zip) the created product index and the duplication amounts and
        # convert them to a pandas dataframe together with the product #.
duplication_table = pd.concat(
[pd.Series(list(range(1, len(product_idx) + 1)), index=list(range(1, len(product_idx) + 1))), product_idx,
dup_count], axis=1)
duplication_table.columns = ["product", "start", "number to duplicate"]
return duplication_table
elif table_type == "sequence":
df_copy = df.copy().reset_index()
dummies = pd.get_dummies(df_copy["level"])
lookup_series = df_copy["station"]
gross_matrix = dummies.apply(lambda col: lookup_series.where(col != 0, np.nan).fillna(method="ffill"))
gross_matrix.index = df.index
gross_matrix = gross_seq_matrix_trimmer(gsm=gross_matrix, df=df, matrix_type="station")
gross_matrix.index = list(range(1, gross_matrix.shape[0] + 1))
return gross_matrix
elif table_type == "time":
df_copy = df.copy().reset_index()
dummies = pd.get_dummies(df_copy["level"])
lookup_series = df_copy["cycle_times"].copy()
gross_matrix = dummies.apply(lambda col: lookup_series.where(col != 0, np.nan).fillna(method="ffill"))
gross_matrix.index = df.index
gross_matrix = gross_seq_matrix_trimmer(gsm=gross_matrix, df=df, matrix_type="time")
gross_matrix.index = list(range(1, gross_matrix.shape[0] + 1))
return gross_matrix
elif table_type == "joins":
# Tutorial df for joining matrix
df = df[["product_no", "level"]].copy()
df["product_no"] = finder.product_numerator(df)
# df = df[df["product_no"].le(100)].copy()
input_idx = finder.input_indices(df)
join_df = df.loc[finder.joining_indices(df)].copy()
join_matrix = pd.DataFrame(index=input_idx, columns=list(range(1, df.level.max() + 1)))
join_idx = 2
product_assembly_amount = df.loc[finder.input_indices(df)].copy().reset_index().groupby(by="product_no").agg(
{"index": list, "level": list})
product_assembly_amount["count"] = df.copy().reset_index().groupby(by="product_no").apply(num_of_input)
join_amount_count = [1]
# start loop here
while len(join_df) > 0:
curr_row = int(join_df.tail(1).index[0])
curr_level = df.loc[curr_row, "level"]
start_row = curr_row
end_row = int(df[df["level"].eq(df.loc[curr_row, "level"] - 1) & (df.index < curr_row)].tail(1).index[0])
middle_parts = df[
df["level"].eq(df.loc[curr_row, "level"]) & (df.index <= start_row) & (df.index >= end_row)]
inputs_n_levels = [[input_idx[input_idx >= x][0], df.loc[input_idx[input_idx >= x][0], "level"]] for x in
middle_parts.index]
if pd.isna(join_matrix.loc[inputs_n_levels[0][0], inputs_n_levels[0][1] - curr_level + 1]):
product_assembly_amount.loc[df.loc[inputs_n_levels[0][0], "product_no"], "count"] -= (
len(inputs_n_levels) - 1)
for inputs in inputs_n_levels:
join_matrix.loc[inputs[0], inputs[1] - curr_level + 1] = join_idx
join_df.drop(join_df.tail(1).index[0], inplace=True)
join_amount_count.append(len(inputs_n_levels))
join_idx += 1
else:
join_df.drop(join_df.tail(1).index[0], inplace=True)
for product_idx in product_assembly_amount.index:
temp_idx = product_assembly_amount.loc[product_idx, "index"]
for idx in temp_idx:
join_matrix.loc[idx, df.loc[idx, "level"]] = join_idx
join_matrix.loc[idx, list(range(1, df.loc[idx, "level"]))] = \
join_matrix.loc[idx, list(range(1, df.loc[idx, "level"]))].fillna(1)
join_amount_count.append(product_assembly_amount.loc[product_idx, "count"])
join_idx += 1
join_amount_df = pd.DataFrame(
{"join_code": list(range(1, len(join_amount_count) + 1)), "amount": join_amount_count},
index=list(range(1, len(join_amount_count) + 1)))
join_matrix.reset_index(drop=True, inplace=True)
join_matrix.index = list(range(1, join_matrix.shape[0] + 1))
join_amount_df.amount[join_amount_df.amount < 1] = 1
return join_matrix, join_amount_df
elif table_type == "set_list":
x_y_coord = pd.merge(left=df, right=aux, left_on="stations_list", right_on="machine", how="left").loc[:,
["x_coordinate", "y_coordinate"]]
df["queues_list"] = [str(x) + "_Q" for x in df.stations_list]
df["resources_list"] = [str(x) + "_RES" for x in df.stations_list]
df[["x_coordinates", "y_coordinates"]] = x_y_coord
df = df[[df.columns[0]] + list(df.columns[3:]) + list(df.columns[1:3])]
df.index = list(range(1, df.shape[0] + 1))
return df
elif table_type == "order":
products = aux.copy()
prod_idx = products["product_no"].ne(
products["product_no"].shift(1, fill_value=products.iloc[0]["product_no"])).cumsum() + 1
products["prod_code"] = prod_idx
idx_dict = \
products.drop_duplicates("product_no", keep="first").drop("part_no", axis=1).set_index("product_no",
drop=True).to_dict()[
"prod_code"]
whole_dataframe = df.copy()
whole_dataframe.product_no.replace(idx_dict, inplace=True)
whole_dataframe["day_of_month"] = whole_dataframe.start_date.dt.day
if args[0] > 1:
if args[1]:
whole_dataframe.drop(whole_dataframe[~whole_dataframe["start_date"].dt.month ==
whole_dataframe["start_date"].dt.month.mode().values[0]].index,
inplace=True)
if whole_dataframe.start_date.dt.month.mode().values[0] != 12:
curr_month_day = (datetime(year=whole_dataframe.start_date.dt.year.mode().values[0],
month=whole_dataframe.start_date.dt.month.mode().values[0] + 1,
day=1) - | pd.to_timedelta(1, unit="d") | pandas.to_timedelta |
# Neural network for pop assignment
# Load packages
import tensorflow.keras as tf
from kerastuner.tuners import RandomSearch
from kerastuner import HyperModel
import numpy as np
import pandas as pd
import allel
import zarr
import h5py
from sklearn.model_selection import RepeatedStratifiedKFold, train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import log_loss
import itertools
import shutil
import sys
import os
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
import seaborn as sn
def hyper_tune(
infile,
sample_data,
max_trials=10,
runs_per_trial=10,
max_epochs=100,
train_prop=0.8,
seed=None,
save_dir="out",
mod_name="hyper_tune",
):
"""
Tunes hyperparameters of keras model for population assignment.
    Parameters
    ----------
infile : string
Path to VCF file containing genetic data.
sample_data : string
Path to tab-delimited file containing columns x, y,
pop, and sampleID.
max_trials : int
Number of trials to run for RandomSearch (Default=10).
runs_per_trial : int
Number of runs per trial for RandomSearch (Default=10).
max_epochs : int
Number of epochs to train model (Default=100).
train_prop : float
Proportion of data to train on. Remaining data will be kept
as a test set and not used until final model is trained
(Default=0.8).
seed : int
Random seed (Default=None).
save_dir : string
Directory to save output to (Default='out').
mod_name : string
Name of model in save directory (Default='hyper_tune').
Returns
-------
best_mod : keras sequential model
Best model from hyperparameter tuning
y_train : pd.DataFrame
training labels
y_val : pd.DataFrame
Validation labels
"""
# Check input types
if os.path.exists(infile) is False:
raise ValueError("infile does not exist")
if os.path.exists(sample_data) is False:
raise ValueError("sample_data does not exist")
if isinstance(max_trials, np.int) is False:
raise ValueError("max_trials should be integer")
if isinstance(runs_per_trial, np.int) is False:
raise ValueError("runs_per_trial should be integer")
if isinstance(max_epochs, np.int) is False:
raise ValueError("max_epochs should be integer")
if isinstance(train_prop, np.float) is False:
raise ValueError("train_prop should be float")
if isinstance(seed, np.int) is False and seed is not None:
raise ValueError("seed should be integer or None")
if isinstance(save_dir, str) is False:
raise ValueError("save_dir should be string")
if isinstance(mod_name, str) is False:
raise ValueError("mod_name should be string")
# Create save_dir if doesn't already exist
print(f"Output will be saved to: {save_dir}")
if os.path.exists(save_dir):
shutil.rmtree(save_dir)
os.makedirs(save_dir)
# Read data
samp_list, dc = read_data(
infile=infile,
sample_data=sample_data,
save_allele_counts=False,
kfcv=True,
)
# Train prop can't be greater than num samples
if len(dc) * (1 - train_prop) < len(np.unique(samp_list["pops"])):
raise ValueError("train_prop is too high; not enough samples for test")
# Create test set that will be used to assess model performance later
X_train_0, X_test, y_train_0, y_test = train_test_split(
dc, samp_list, stratify=samp_list["pops"], train_size=train_prop
)
# Save train and test set to save_dir
np.save(save_dir + "/X_train.npy", X_train_0)
y_train_0.to_csv(save_dir + "/y_train.csv", index=False)
np.save(save_dir + "/X_test.npy", X_test)
y_test.to_csv(save_dir + "/y_test.csv", index=False)
# Split data into training and hold-out test set
X_train, X_val, y_train, y_val = train_test_split(
dc,
samp_list,
stratify=samp_list["pops"],
train_size=train_prop,
random_state=seed,
)
# Make sure all classes represented in y_val
if len(np.unique(y_train["pops"])) != len(np.unique(y_val["pops"])):
raise ValueError(
"Not all pops represented in validation set \
choose smaller value for train_prop."
)
# One hot encoding
enc = OneHotEncoder(handle_unknown="ignore")
y_train_enc = enc.fit_transform(
y_train["pops"].values.reshape(-1, 1)).toarray()
y_val_enc = enc.fit_transform(
y_val["pops"].values.reshape(-1, 1)).toarray()
popnames = enc.categories_[0]
hypermodel = classifierHyperModel(
input_shape=X_train.shape[1], num_classes=len(popnames)
)
tuner = RandomSearch(
hypermodel,
objective="val_loss",
seed=seed,
max_trials=max_trials,
executions_per_trial=runs_per_trial,
directory=save_dir,
project_name=mod_name,
)
tuner.search(
X_train - 1,
y_train_enc,
epochs=max_epochs,
validation_data=(X_val - 1, y_val_enc),
)
best_mod = tuner.get_best_models(num_models=1)[0]
tuner.get_best_models(num_models=1)[0].save(save_dir + "/best_mod")
return best_mod, y_train, y_val
def kfcv(
infile,
sample_data,
mod_path=None,
n_splits=5,
n_reps=5,
ensemble=False,
save_dir="kfcv_output",
return_plot=True,
save_allele_counts=False,
**kwargs,
):
"""
Runs K-fold cross-validation to get an accuracy estimate of the model.
Parameters
----------
infile : string
Path to VCF or hdf5 file with genetic information
for all samples (including samples of unknown origin).
sample_data : string
Path to input file with all samples present (including
samples of unknown origin), which is a tab-delimited
text file with columns x, y, pop, and sampleID.
n_splits : int
Number of folds in k-fold cross-validation
(Default=5).
n_reps : int
Number of times to repeat k-fold cross-validation,
creating the number of models in the ensemble
(Default=5).
ensemble : bool
        Whether to use an ensemble of models or a single model (Default=False).
save_dir : string
Directory where results will be stored (Default='kfcv_output').
return_plot : boolean
Returns a confusion matrix of correct assignments (Default=True).
    save_allele_counts : boolean
Whether or not to store derived allele counts in hdf5
file (Default=False).
**kwargs
Keyword arguments for pop_finder function.
Returns
-------
report : pd.DataFrame
Classification report for all models.
ensemble_report : pd.DataFrame
Classification report for ensemble of models.
"""
# Check inputs
# Check is sample_data path exists
if os.path.exists(sample_data) is False:
raise ValueError("path to sample_data incorrect")
# Make sure hdf5 file is not used as gen_dat
if os.path.exists(infile) is False:
raise ValueError("path to infile does not exist")
# Check data types
if isinstance(n_splits, np.int) is False:
raise ValueError("n_splits should be an integer")
if isinstance(n_reps, np.int) is False:
raise ValueError("n_reps should be an integer")
if isinstance(ensemble, bool) is False:
raise ValueError("ensemble should be a boolean")
if isinstance(save_dir, str) is False:
raise ValueError("save_dir should be a string")
# Check nsplits is > 1
if n_splits <= 1:
raise ValueError("n_splits must be greater than 1")
samp_list, dc = read_data(
infile=infile,
sample_data=sample_data,
save_allele_counts=save_allele_counts,
kfcv=True,
)
popnames = np.unique(samp_list["pops"])
# Check there are more samples in the smallest pop than n_splits
if n_splits > samp_list.groupby(["pops"]).agg(["count"]).min().values[0]:
raise ValueError(
"n_splits cannot be greater than number of samples in smallest pop"
)
# Create stratified k-fold
rskf = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_reps)
pred_labels = []
true_labels = []
pred_labels_ensemble = []
true_labels_ensemble = []
ensemble_preds = pd.DataFrame()
preds = pd.DataFrame()
fold_var = 1
for t, v in rskf.split(dc, samp_list["pops"]):
# Subset train and validation data
X_train = dc[t, :] - 1
X_val = dc[v, :] - 1
y_train = samp_list.iloc[t]
y_val = samp_list.iloc[v]
if ensemble:
test_dict, tot_bag_df = pop_finder(
X_train,
y_train,
X_val,
y_val,
save_dir=save_dir,
ensemble=True,
**kwargs,
)
# Unit tests for results from pop_finder
if bool(test_dict) is False:
raise ValueError("Empty dictionary from pop_finder")
if tot_bag_df.empty:
raise ValueError("Empty dataframe from pop_finder")
if len(test_dict) == 1:
raise ValueError(
"pop_finder results consists of single dataframe\
however ensemble set to True"
)
ensemble_preds = ensemble_preds.append(tot_bag_df)
else:
test_dict = pop_finder(
X_train,
y_train,
X_val,
y_val,
save_dir=save_dir,
**kwargs,
)
# Unit tests for results from pop_finder
if bool(test_dict) is False:
raise ValueError("Empty dictionary from pop_finder")
if len(test_dict["df"]) != 1:
raise ValueError(
"pop_finder results contains ensemble of models\
should be a single dataframe"
)
preds = preds.append(test_dict["df"][0])
tmp_pred_label = []
tmp_true_label = []
for i in range(0, len(test_dict["df"])):
tmp_pred_label.append(
test_dict["df"][i].iloc[
:, 0:len(popnames)
].idxmax(axis=1).values
)
tmp_true_label.append(test_dict["df"][i]["true_pops"].values)
if ensemble:
pred_labels_ensemble.append(
tot_bag_df.iloc[:, 0:len(popnames)].idxmax(axis=1).values
)
true_labels_ensemble.append(tmp_true_label[0])
pred_labels.append(np.concatenate(tmp_pred_label, axis=0))
true_labels.append(np.concatenate(tmp_true_label, axis=0))
fold_var += 1
# return pred_labels, true_labels
pred_labels = np.concatenate(pred_labels)
true_labels = np.concatenate(true_labels)
report = classification_report(
true_labels, pred_labels, zero_division=1, output_dict=True
)
report = pd.DataFrame(report).transpose()
report.to_csv(save_dir + "/classification_report.csv")
if ensemble:
ensemble_preds.to_csv(save_dir + "/ensemble_preds.csv")
true_labels_ensemble = np.concatenate(true_labels_ensemble)
pred_labels_ensemble = np.concatenate(pred_labels_ensemble)
ensemble_report = classification_report(
true_labels_ensemble,
pred_labels_ensemble,
zero_division=1,
output_dict=True,
)
ensemble_report = | pd.DataFrame(ensemble_report) | pandas.DataFrame |
import os
import sqlite3
import pandas as pd
from pygbif import occurrences
from pygbif import species
from datetime import datetime
import geopandas as gpd
import shapely
import numpy as np
import fiona
from shapely.geometry import shape, Polygon, LinearRing, Point
from dwca.read import DwCAReader
import random
from shapely import wkt
# occurrence records table datatypes
output_schema = {"GBIF_download_doi": "str",
"accessRights": "str",
"basisOfRecord": "str",
"bibliographicCitation": "str",
"collectionCode": "str",
"coordinatePrecision": "float",
"coordinateUncertaintyInMeters": "float",
"dataGeneralizations": "str",
"datasetName": "str",
"decimalLatitude": "str",
"decimalLongitude": "str",
"detection_distance_m": "int",
"ebird_id": "str",
"effort_distance_m": "int",
"establishmentMeans": "str",
"eventDate": "str",
"eventRemarks": "str",
"filter_set_name": "str",
"footprintSRS": "str",
"footprintWKT": "str",
"gbif_id": "str",
"general_remarks": "str",
"geodeticDatum": "str",
"georeferenceProtocol": "str",
"georeferenceRemarks": "str",
"georeferenceVerificationStatus": "str",
"georeferencedBy": "str",
"gps_accuracy_m": "int",
"habitat": "str",
"identificationQualifier": "str",
"identifiedBy": "str",
"identifiedRemarks": "str",
"individualCount": "int",
"informationWitheld": "str",
"institutionID": "str",
"issues": "str",
"license": "str",
"locality": "str",
"locationAccordingTo": "str",
"locationRemarks": "str",
"modified": "str",
"nominal_xy_precision": "float",
"occurrenceRemarks": "str",
"occurrenceStatus": "str",
"organismQuantity": "str",
"organismQuantityType": "str",
"radius_m": "float",
"record_id": "str",
"recordedBy": "str",
"retrieval_date": "str",
"samplingProtocol": "str",
"samplingEffort": "str",
"scientificName": "str",
"source": "str",
"taxonConceptID": "str",
"taxon_info_name": "str",
"verbatimLocality": "str",
"weight": "int",
"weight_notes": "str"}
# Core functions >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
def build_output_database(output_database):
"""
Create a database for storing occurrence and taxon concept data.
The column names that are "camel case" are Darwin Core attributes, whereas
lower case names containing "_" between words are not. Only those Darwin
core attributes that could be useful for filtering and assigning weights
are included.
Parameters
----------
output_database : Path for sqlite database to create; string.
Returns
-------
Nothing.
"""
# Delete the database if it already exists
if os.path.exists(output_database):
os.remove(output_database)
# Create or connect to the database
conn = sqlite3.connect(output_database)
# Create a table for occurrence records.
df = (pd.DataFrame(columns=output_schema.keys())
.astype(output_schema)
.to_sql(name='occurrence_records', con=conn, if_exists='replace'))
conn.close()
return
def get_EBD_records(taxon_info, filter_set, working_directory, EBD_file,
query_name, R_home):
'''
    Gets eBird records from a copy of the eBird Basic Dataset that you
    acquired. Primarily runs R code that uses the auk package to query the
data set in an efficient manner. Some filters can be applied during the
query, but others have to be applied to the query results. Date and
bounding box filters require quite a bit of preparation and conditions.
Parameters
----------
taxon_info : your taxon concept; dictionary
filter_set : name of the filter set to apply; dictionary
working_directory : path to use for table of filtered query results; string
EBD_file : path to your downloaded copy of the Ebird Basic Dataset; string
query_name : the name you chose for your query; string
R_home : path to R install to use, get from wranglerconfig; string
Returns
-------
Data frame of eBird records
'''
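    # A minimal usage sketch (the paths and the R_home value are hypothetical):
    # ebd_df = get_EBD_records(taxon_info, filter_set,
    #                          working_directory="./run1/",
    #                          EBD_file="ebd_relMay-2021.txt",
    #                          query_name="query1",
    #                          R_home="C:/Program Files/R/R-4.0.5")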
# Point to R home
os.environ["R_HOME"] = R_home
import rpy2.robjects as robjects
import rpy2.robjects.packages as rpackages
from rpy2.robjects.vectors import StrVector
# import R's utility package, select a mirror for R packages
utils = rpackages.importr('utils')
# select the first mirror in the list
utils.chooseCRANmirror(ind=1)
# R packages to load
packnames = ('sf', 'auk', 'lubridate', 'tidyverse')
names_to_install = [x for x in packnames if not rpackages.isinstalled(x)]
if len(names_to_install) > 0:
utils.install_packages(StrVector(names_to_install))
# Some file names
queried_ebd = working_directory + "tmp_ebd.txt"
processed_ebd = working_directory + query_name + ".csv"
output_database = working_directory + query_name + '.sqlite'
    # Replace None values in filter_set with "" to fit R code.
for x in filter_set.keys():
if filter_set[x] == None:
filter_set[x] = ""
for x in taxon_info.keys():
if taxon_info[x] == None:
taxon_info[x] = ""
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< R CODE
code = '''
EBD_file <- "{0}"
queried_ebd <- "{1}"
processed_ebd <- "{2}"
species <- "{3}"
country <- "{4}"
months_range <- "{5}"
years_range <- "{6}"
lon_range <- "{7}"
lat_range <- "{8}"
max_coordinate_uncertainty <- {9}
taxon_polygon <- "{10}"
query_polygon <- "{11}"
# Auk uses filters that are compiled and incorporated into a query.
# This poses a challenge for dynamic filtering where filters may or may
# not be used. We have to set defaults.
library(auk)
library(tidyverse)
library(sf)
library(lubridate)
starttime = Sys.time() # Runtime has been 30 min
# prep dates -------------------------------------------------------------
# auk doesn't allow filtering on months AND year with read_ebd; they have
# to be done separately, one with auk filters and the other after with
# dplyr. I chose to do the year filtering with auk to minimize size of
# returned tibble. This all requires formatting dates as text correctly.
# format start month
if (months_range != "") {{
if (as.numeric(strsplit(months_range, ",")[[1]][1]) < 10) {{
start_month <- paste(c("0", strsplit(months_range, ",")[[1]][1]), collapse="")
}} else {{
start_month <- strsplit(months_range, ",")[[1]][1]}}
start_month <- str_trim(start_month)
# format end month
if (as.numeric(strsplit(months_range, ",")[[1]][2]) < 10) {{
end_month <- paste(c("0", strsplit(months_range, ",")[[1]][2]), collapse="")
}} else {{
end_month <- strsplit(months_range, ",")[[1]][2]}}
end_month <- str_trim(end_month)
# create vector of ok months for filtering with dplyr
ok_months <- seq(as.numeric(start_month), as.numeric(end_month))
}}
print(ok_months)
# pull out start and end years
if (years_range != "") {{
start_yr <- str_trim(strsplit(years_range, ",")[[1]][1])
end_yr <- str_trim(strsplit(years_range, ",")[[1]][2])
}}
# define data filter according to different scenarios
if (months_range == "" && years_range == "") {{
# get all dates
date_filter <- c("*-01-31", "*-12-31")
}} else if (months_range != "" && years_range != "") {{
# get certain months and years. we have to find the last possible day.
end_day <- lubridate::days_in_month(paste(c(end_yr, "-", end_month, "-01"),
collapse=""))
date_filter <- c(paste(c(start_yr, "-", start_month, "-01"), collapse=""),
paste(c(end_yr, "-", end_month, "-", end_day), collapse=""))
}} else if (months_range == "" && years_range != "") {{
# get all months from certain years
date_filter <- c(paste(c(start_yr, "-01-01"), collapse=""),
paste(c(end_yr, "-12-31"), collapse=""))
}} else if (months_range != "" && years_range == "") {{
# get certain months from all years. we have to find the last possible day.
yr <- year(today())
end_day <- lubridate::days_in_month(paste(c(yr, "-", end_month, "-01"),
collapse=""))
date_filter <- c(paste(c("*-", start_month, "-01"), collapse=""),
paste(c("*-", end_month, "-", end_day), collapse=""))
}}
# prep bounding box -------------------------------------------------------
# make a full earth extent for use below
earth <- c(-180, -90, 180, 90)
bbox <- NULL
if (query_polygon == "" && taxon_polygon == "") {{
bbox <- earth
}} else if (query_polygon != "" && taxon_polygon == "") {{
bbox <- st_bbox(st_as_sfc(query_polygon))
}} else if (query_polygon == "" && taxon_polygon != "") {{
bbox <- st_bbox(st_as_sfc(taxon_polygon))
}} else if (query_polygon != "" && taxon_polygon != "") {{
# Get/use the intersection of the two polygons
filter_polygon <- st_as_sfc(query_polygon)
sp_polygon <- st_as_sfc(taxon_polygon)
bbox <- st_bbox(st_intersection(filter_polygon, sp_polygon))
}}
# prep bounding box vector for filter if lat and lon ranges were provided,
# and if other polygons were not
if (lat_range == "" || lon_range == "") {{
null_box <- TRUE
}} else {{
null_box <- FALSE
}}
if (bbox == earth && null_box == FALSE) {{
lat_min <- as.numeric(strsplit(lat_range, ",")[[1]][1])
lat_max <- as.numeric(strsplit(lat_range, ",")[[1]][2])
lng_min <- as.numeric(strsplit(lon_range, ",")[[1]][1])
lng_max <- as.numeric(strsplit(lon_range, ",")[[1]][2])
bbox <- c(lng_min, lat_min, lng_max, lat_max)
names(bbox) <- c("xmin", "ymin", "xmax", "ymax")
attr(bbox, "class") = "bbox"
}}
# prep country ------------------------------------------------------------
if (country == "") {{country <- "US"}}
# prep distance -----------------------------------------------------------
# a gps precision for eBird checklists must be assumed, since not given,
# for estimation of coordinateUncertaintyInMeters
EBD_gps_precision <- 10
# account for gps precision in distance filter. error could exist on
# either end of a straight line path, so double precision when subtracting.
max_distance <- as.integer(ceiling((max_coordinate_uncertainty-(2*EBD_gps_precision))/1000))
print(max_distance)
# query -------------------------------------------------------------------
records0 <- EBD_file %>%
# 1. reference file
auk_ebd() %>%
# 2. define filters
auk_species(species=c(species)) %>%
auk_date(date=date_filter) %>%
auk_country(country=country) %>%
auk_bbox(bbox=bbox) %>%
auk_distance(distance=c(0, max_distance)) %>%
# 3. run filtering
auk_filter(file = queried_ebd, overwrite = TRUE) %>%
# 4. read text file into r data frame
read_ebd(unique=TRUE)
# prep data frame for python ----------------------------------------------
# add column for eBird species code
ebird_code <- select(filter(ebird_taxonomy, common_name==species),
species_code)[[1]]
ebd_data <- records0 %>%
mutate(eBird_sp_code = ebird_code,
retrieval_date = auk_ebd_version(EBD_file)[1][1]) %>%
select(eBird_sp_code, global_unique_identifier, checklist_id,
project_code, last_edited_date, common_name,
observation_count, locality, latitude, longitude,
observation_date, observer_id, effort_distance_km,
protocol_type, effort_area_ha, trip_comments,
species_comments) %>%
mutate(effort_distance_m = as.numeric(effort_distance_km)*1000) %>%
filter(month(observation_date) %in% ok_months) %>%
write_csv(processed_ebd)
endtime = Sys.time()
print(endtime - starttime)
'''.format(EBD_file, queried_ebd, processed_ebd, taxon_info['EBIRD_ID'],
filter_set["country"], filter_set["months_range"],
filter_set["years_range"], filter_set["lon_range"],
filter_set["lat_range"],
filter_set["max_coordinate_uncertainty"],
taxon_info["TAXON_EOO"], filter_set["query_polygon"])
# Run code
timestamp = datetime.now()
robjects.r(code)
print("Ran EBD query with Auk: " + str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< READ OUTPUT
records0 = pd.read_csv(processed_ebd)
'''
This should eventually be usable (or something similar) to avoid having to
write the R data frame and then read it in with pandas. It is supposed to
be possible to convert robject data frames to pandas data frames but the
rpy2 available from conda 2.x doesn't actually work.
# *************************************************************************
# Retrieve the filtered ebird data frame --- this should work, but doesn't
rdf = robjects.globalenv['ebd_data']
# Using a conversion context in which the pandas conversion is
# added to the default conversion rules, the rpy2 object
# (an R data frame) is converted to a pandas data frame.
from rpy2.robjects import pandas2ri
robjects.pandas2ri.activate() # should automatically convert r data frame to pandas
from rpy2.robjects import default_converter
from rpy2.robjects.conversion import localconverter
with localconverter(robjects.default_converter + pandas2ri.converter):
records0 = robjects.conversion.ri2py(rdf)
'''
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< APPLY SPATIAL FILTER
timestamp = datetime.now()
# Make data frame spatial
gdf = gpd.GeoDataFrame(records0,
geometry=gpd.points_from_xy(records0['longitude'],
records0['latitude']))
# It could be that user opted not to use species geometry.
if filter_set['use_taxon_geometry'] == False:
EOO = None
# A geometry could be stated for the species, assess what to do
# Replace "" values in fitler_set with None to fit Python code.
for x in filter_set.keys():
if filter_set[x] == "":
filter_set[x] = None
for x in taxon_info.keys():
if taxon_info[x] == "":
taxon_info[x] = None
EOO = taxon_info["TAXON_EOO"]
AOI = filter_set["query_polygon"]
if AOI is None and EOO is None:
filter_polygon = None
elif AOI is not None and EOO is None:
filter_polygon = shapely.wkt.loads(AOI)
elif AOI is None and EOO is not None:
filter_polygon = shapely.wkt.loads(EOO)
elif AOI is not None and EOO is not None:
# Get/use the intersection of the two polygons in this case
AOI_polygon = shapely.wkt.loads(AOI)
EOO_polygon = shapely.wkt.loads(EOO)
filter_polygon = AOI_polygon.intersection(EOO_polygon)
print("Calculated the spatial filter polygon: "
+ str(datetime.now() - timestamp))
# Find which records have coordinates that fall within the polygon
timestamp = datetime.now()
if filter_polygon is not None:
gdf = gdf[gdf["geometry"].within(filter_polygon)]
print("Applied spatial filter: " + str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< SUMMARIZE
timestamp = datetime.now()
df_populated1 = pd.DataFrame(records0.count(axis=0).T.iloc[1:])
df_populated1['included(n)'] = len(records0)
df_populated1['populated(n)'] = df_populated1[0]
df_populated2 = df_populated1.filter(items=['included(n)', 'populated(n)'],
axis='columns')
df_populated2.index.name = 'attribute'
conn = sqlite3.connect(output_database, isolation_level='DEFERRED')
df_populated2.to_sql(name='eBird_fields_returned', con=conn, if_exists='replace')
print("Summarized fields returned: " + str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< PREP FOR OUTPUT
timestamp = datetime.now()
# Rename columns
gdf = gdf.rename({'eBird_sp_code': 'ebird_id',
'global_unique_identifier': 'record_id',
'latitude': 'decimalLatitude',
'longitude': 'decimalLongitude',
'observation_count': 'individualCount',
'observation_date': 'eventDate',
'project_code': 'collectionCode',
'protocol_type': 'samplingProtocol',
'species_comments': 'identifiedRemarks',
'trip_comments': 'eventRemarks'}, axis=1)
# Drop columns
records1 = gdf.filter(list(output_schema.keys()), axis=1)
# Populate columns
records1["institutionID"] = "clo"
records1["collectionCode"] = "EBIRD"
records1["datasetName"] = "EBD"
records1["source"] = "eBird"
records1["basisOfRecord"] = "HUMAN_OBSERVATION"
records1["GBIF_download_doi"] = "bypassed"
records1["occurrenceStatus"] = "PRESENT"
records1 = (records1
.fillna({"effort_distance_m": 0, "gps_accuracy_m": 30})
.replace({"individualCount": {"X": 1}}))
# Add EBD records to a template data frame
schema_df = pd.DataFrame(columns=list(output_schema.keys()))
records2 = schema_df.combine_first(records1)
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< Results
print("Prepared the eBird records for processing: "
+ str(datetime.now() - timestamp))
return records2
def get_GBIF_records(taxon_info, filter_set, query_name, working_directory,
username, password, email):
'''
    Retrieves species occurrence records from GBIF. Filters occurrence
    records, buffers the xy points, and saves them in a database. Finally,
    exports some Shapefiles. Can accommodate use of the GBIF API or a
    Darwin Core Archive download via email. Some filters can be applied
    during the query, but others have to be applied to the query results.
Parameters
----------
taxon_info : your taxon concept; dictionary
filter_set : name of the filter set to apply; dictionary
query_name : the name you chose for your query; string
working_directory : path to use for table of filtered query results; string
username : your GBIF username; string
password : your GBIF password; string
email : the email account associated with your GBIF account; string
Returns
-------
Data frame of GBIF occurrence records
'''
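    # A minimal usage sketch (the credentials and paths are hypothetical):
    # gbif_df = get_GBIF_records(taxon_info, filter_set, query_name="query1",
    #                            working_directory="./run1/",
    #                            username="gbif_user", password="********",
    #                            email="user@example.com")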
pd.set_option('display.width', 1000)
os.chdir('/')
timestamp = datetime.now()
# Some prep
output_database = working_directory + query_name + '.sqlite'
conn = sqlite3.connect(output_database, isolation_level='DEFERRED')
cursor = conn.cursor()
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< TAXON INFO
gbif_id = taxon_info["GBIF_ID"]
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< PREP FILTERS
years = filter_set["years_range"]
months = filter_set["months_range"]
latRange = filter_set["lat_range"]
lonRange = filter_set["lon_range"]
geoIssue = filter_set["geoissue"]
country = filter_set["country"]
dwca_download = filter_set["get_dwca"]
EOO = taxon_info["TAXON_EOO"]
AOI = filter_set["query_polygon"]
# It could be that user opted not to use species geometry.
if filter_set['use_taxon_geometry'] == False:
EOO = None
# A geometry could be stated for the species, assess what to do
if AOI is None and EOO is None:
filter_polygon = None
elif AOI is not None and EOO is None:
filter_polygon = AOI
elif AOI is None and EOO is not None:
filter_polygon = EOO
elif AOI is not None and EOO is not None:
# Get/use the intersection of the two polygons in this case
AOI_polygon = shapely.wkt.loads(AOI)
EOO_polygon = shapely.wkt.loads(EOO)
intersection = AOI_polygon.intersection(EOO_polygon)
# Make the polygon's outer ring counter clockwise
if intersection.exterior.is_ccw == False:
print("Reordered filter polygon coordinates")
intersection = shapely.geometry.polygon.orient(intersection,
sign=1.0)
# Get the well-known text version of the polygon
filter_polygon = shapely.wkt.dumps(intersection)
else:
filter_polygon = shapely.wkt.dumps(intersection)
print("Prepared filter set and sorted out geometry constraints: "
+ str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< GET RECORD COUNT
timestamp = datetime.now()
# First, find out how many records there are that meet criteria
occ_search = occurrences.search(taxonKey=gbif_id,
year=years,
month=months,
decimalLatitude=latRange,
decimalLongitude=lonRange,
hasGeospatialIssue=geoIssue,
hasCoordinate=True,
country=country,
geometry=filter_polygon)
record_count = occ_search["count"]
    # Return a message if number of records exceeds the known dwca-reader limit
print(str(record_count) + " records available")
if record_count > 4500000:
print("!!!!!!! Too many records to proceed. Break up the query",
" with year or other parameters.")
if record_count <= 0 or record_count > 4500000:
# no records available so delete database and return empty data frame
conn.close()
os.remove(output_database)
return pd.DataFrame()
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< API QUERY
if dwca_download == False:
# Get records in batches, saving into master list.
all_jsons = []
batches = range(0, record_count, 300)
for i in batches:
batch = occurrences.search(gbif_id,
limit=300,
offset=i,
year=years,
month=months,
decimalLatitude=latRange,
decimalLongitude=lonRange,
hasGeospatialIssue=geoIssue,
hasCoordinate=True,
country=country,
geometry=filter_polygon)
occs = batch['results']
all_jsons = all_jsons + occs
# Get a list of keys that were returned
api_keys = set([])
for j in all_jsons:
api_keys = api_keys | set(j.keys())
# Load json records into a data frame, via a dictionary
insertDict = {}
for k in list(api_keys):
insertDict[k] = []
for j in all_jsons:
present_keys = list(set(j.keys()) & api_keys)
for prk in present_keys:
insertDict[prk] = insertDict[prk] + [str(j[prk])]
missing_keys = list(api_keys - set(j.keys()))
for mik in missing_keys:
insertDict[mik] = insertDict[mik] + ["UNKNOWN"]
dfRaw = pd.DataFrame(insertDict).rename({"occurrenceID": "record_id"},
axis=1)
print("Downloaded records: " + str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< EMAIL QUERY
if dwca_download == True:
timestamp = datetime.now()
'''
Request data using the download function. Results are emailed as a zip
file containing the Darwin Core files. The download can take a while
to generate and is not immediately available once the download_get
command has been issued. Use a while and try loop to handle the wait.
The zipdownload variable will be a dictionary of the path,
the file size, and the download key unique code. It can be used
to change the file name, unzip the file, etc.
'''
# First, build a query list. NoneType values cause problems, so only
# add arguments if their value isn't NoneType.
download_filters = ['taxonKey = {0}'.format(gbif_id)]
download_filters.append('hasCoordinate = True')
if country is not None:
download_filters.append('country = {0}'.format(country))
if years is not None:
download_filters.append('year >= {0}'.format(years.split(",")[0]))
download_filters.append('year <= {0}'.format(years.split(",")[1]))
if months is not None:
download_filters.append('month >= {0}'.format(months.split(",")[0]))
download_filters.append('month <= {0}'.format(months.split(",")[1]))
if filter_polygon is not None:
download_filters.append("geometry = {0}".format(filter_polygon))
if geoIssue is not None:
download_filters.append('hasGeospatialIssue = {0}'.format(geoIssue))
if latRange is not None:
download_filters.append('decimalLatitude >= {0}'.format(latRange.split(",")[0]))
download_filters.append('decimalLatitude <= {0}'.format(latRange.split(",")[1]))
if lonRange is not None:
download_filters.append('decimalLongitude >= {0}'.format(lonRange.split(",")[0]))
download_filters.append('decimalLongitude <= {0}'.format(lonRange.split(",")[1]))
# Get the value of the download key
try:
d = occurrences.download(download_filters,
pred_type='and',
user=username,
pwd=password,
email=email)
dkey = d[0]
except Exception as e:
print(e)
print(download_filters)
# Get the download, if not ready, keep trying
print("Waiting for the Darwin Core Archive.....")
timestamp2 = datetime.now()
gotit = False
while gotit == False:
try:
# Download the file
timestamp = datetime.now()
zipdownload = occurrences.download_get(key=dkey, path=working_directory)
print("Wait time for DWcA creation: "
+ str(datetime.now() - timestamp2))
print("Wait time for DWcA download: "
+ str(datetime.now() - timestamp))
gotit = True
except:
wait = datetime.now() - timestamp2
if wait.seconds > 60*1440:
gotit = True
print("FAILED!!! -- timed out after 24 hrs. ",
"Try again later or split up query with ",
"year paramters")
# Read the relevant files from within the Darwin Core archive
timestamp = datetime.now()
with DwCAReader(zipdownload["path"]) as dwca:
try:
dfRaw = dwca.pd_read('occurrence.txt', low_memory=False)
except Exception as e:
print("Read error:")
print(e)
try:
doi = dwca.metadata.attrib["packageId"]
except Exception as e:
print("DOI error:")
print(e)
try:
citations = dwca.open_included_file('citations.txt').read()
except Exception as e:
citations = "Failed"
print("Citation error:")
print(e)
try:
rights = dwca.open_included_file('rights.txt').read()
except Exception as e:
rights = "Failed"
print("Rights error:")
print(e)
print("Wait time for reading the DwCA: "
+ str(datetime.now() - timestamp))
# Record DWCA metadata
# Store the value summary for the selected fields in a table.
timestamp = datetime.now()
cursor.executescript("""CREATE TABLE GBIF_download_info
(download_key TEXT, doi TEXT, citations TEXT,
rights TEXT);""")
cursor.execute('''INSERT INTO GBIF_download_info (doi, download_key)
VALUES ("{0}", "{1}")'''.format(doi, dkey))
try:
cursor.execute('''UPDATE GBIF_download_info
SET citations = "{0}"
WHERE doi = "{1}"'''.format(citations, doi))
except Exception as e:
print(e)
cursor.execute('''UPDATE GBIF_download_info
SET citations = "Failed"
WHERE doi = "{0}"'''.format(doi))
try:
cursor.execute('''UPDATE GBIF_download_info
SET rights = "{0}"
WHERE doi = "{1}"'''.format(rights, doi))
except Exception as e:
print(e)
cursor.execute('''UPDATE GBIF_download_info
SET rights = "Failed"
WHERE doi = "{0}"'''.format(doi))
print("Stored GBIF Download DOI etc.: "
+ str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< SUMMARIZE
timestamp = datetime.now()
# We don't want to count the "UNKNOWNS" we added
if dwca_download == False:
df_raw2 = dfRaw.replace({"UNKNOWN": np.nan})
df_populated1 = pd.DataFrame(df_raw2.count(axis=0).T.iloc[1:])
df_populated1['included(n)'] = df_populated1[0]
df_populated1['populated(n)'] = df_populated1[0]
if dwca_download == True:
df_raw2 = dfRaw.copy()
df_populated1 = pd.DataFrame(df_raw2.count(axis=0).T.iloc[1:])
df_populated1['included(n)'] = len(dfRaw)
df_populated1['populated(n)'] = df_populated1[0]
df_populated2 = df_populated1.filter(items=['included(n)', 'populated(n)'],
axis='columns')
df_populated2.index.name = 'attribute'
df_populated2.to_sql(name='gbif_fields_returned', con=conn,
if_exists='replace')
print("Summarized fields returned: " + str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< PREPARE
timestamp = datetime.now()
# Rename columns
records1 = dfRaw.rename({"issue": "issues", "id": "record_id"}, axis=1)
# Drop columns
records1 = records1.filter(items=output_schema.keys(), axis=1)
# Populate columns
records1["retrieval_date"] = str(datetime.now())
if filter_set["get_dwca"] == True:
records1["GBIF_download_doi"] = doi
else:
records1["GBIF_download_doi"] = "bypassed"
records1["source"] = "GBIF"
# Add GBIF records to template; replace and fillna to support astype()
records2 = (pd.DataFrame(columns=output_schema.keys())
.combine_first(records1)
# this replace is needed for API method
.replace({"coordinateUncertaintyInMeters": {"UNKNOWN": np.nan},
"radius_m": {"UNKNOWN": np.nan},
"coordinatePrecision": {"UNKNOWN": np.nan},
"nominal_xy_precision": {"UNKNOWN": np.nan},
"individualCount": {"UNKNOWN": 1},
"weight": {"UNKNOWN": 10},
"detection_distance_m": {"UNKNOWN": 0}})
.fillna({"coordinateUncertaintyInMeters": 0,
"radius_m": 0,
"individualCount": 1,
"weight": 10,
"detection_distance_m": 0,
"effort_distance_m": 0,
"coordinate_precision": 1,
"gps_accuracy_m": 30})
.astype(output_schema))
print("Prepared GBIF records for processing: "
+ str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< Results
return records2
def process_records(ebird_data, gbif_data, filter_set, taxon_info,
working_directory, query_name):
'''
Summarizes the values in the data frames, populates some fields,
applies filters, summarizes which values persisted after filtering, and
inserts the results into the output database.
Parameters
----------
ebird_data : a data frame of records from eBird
gbif_data : a data frame of records from GBIF
filter_set : the filter set dictionary
taxon_info : the taxon information dictionary
working_directory : path to use for the output database; string
query_name : name of the query; used to name the output database; string
Returns
-------
None. The filtered records are written to the 'occurrence_records' table
in the output database.
'''
timestamp = datetime.now()
# Create or connect to the database
output_database = working_directory + query_name + ".sqlite"
conn = sqlite3.connect(output_database, isolation_level='DEFERRED')
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< MANAGE DATA TYPES
schema = output_schema
string_atts = {key:value for (key, value) in schema.items() if schema[key] == 'str'}
if ebird_data is not None:
ebird_data = ebird_data.astype(string_atts)
if gbif_data is not None:
gbif_data = gbif_data.astype(string_atts)
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< REMOVE EBIRD FROM GBIF
if gbif_data is not None:
if ebird_data is not None:
gbif_data = gbif_data[gbif_data["collectionCode"].str.contains("EBIRD*") == False]
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< COMBINE DATA FRAMES
if ebird_data is None:
df_unfiltered = gbif_data
if gbif_data is None:
df_unfiltered = ebird_data
if gbif_data is not None and ebird_data is not None:
# Concatenate the gbif and ebird tables
df_unfiltered = pd.concat([ebird_data, gbif_data])
print("Prepared data frames for processing: "
+ str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< SUMMARIZE VALUES
timestamp = datetime.now()
# Make a list of columns to summarize values from
do_not_summarize = ['decimalLatitude', 'decimalLongitude',
'GBIF_download_doi', 'coordinateUncertaintyInMeters',
'detection_distance_m', 'eventDate', 'eventRemarks',
'filter_set_name', 'footprintSRS', 'footprintWKT',
'gbif_id', 'ebird_id', "effort_distance_m",
'general_remarks', 'georeferencedBy', 'habitat',
'georeferenceRemarks', 'identificationQualifier',
'identifiedBy', 'identifiedRemarks', 'individualCount',
'informationWitheld', 'locality',
'locationAccordingTo', 'locationRemarks', "modified",
'occurrenceRemarks', 'radius_m', 'record_id',
'recordedBy', 'retrieval_date', 'taxonConceptID',
'verbatimLocality', 'weight', 'weight_notes']
# Make a function to do the summarizing
def summarize_values(dataframe, step):
"""
Loops through columns and gets a count of unique values. Packages in
a df.
"""
attributes = []
summarize = [x for x in dataframe.columns if x not in do_not_summarize]
for column in summarize:
value_count = dataframe['record_id'].groupby(dataframe[column]).count()
value_df = (pd.DataFrame(value_count)
.reset_index()
.rename({'record_id': step, column: 'value'}, axis=1))
value_df["attribute"] = column
value_df = value_df[["attribute", "value", step]]
if value_df.empty == False:
attributes.append(value_df)
result = pd.concat(attributes)
return result
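# For illustration (hypothetical values), the summary is long-format like:
#   attribute      value               acquired
#   basisOfRecord  HUMAN_OBSERVATION   1200
#   basisOfRecord  PRESERVED_SPECIMEN    35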
# Store value summary in a data frame
acquired = summarize_values(dataframe=df_unfiltered, step='acquired')
# Summarize sources
source_df1 = df_unfiltered[['institutionID', 'collectionCode',
'datasetName', 'record_id']]
source_summary1 = (source_df1
.groupby(by=['institutionID', 'collectionCode',
'datasetName'])
.size()
.reset_index(name='acquired'))
print("Summarized values acquired: " + str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< POPULATE SOME COLUMNS
timestamp = datetime.now()
df_unfiltered.fillna(value={'individualCount': int(1)}, inplace=True)
df_unfiltered["weight"] = 10
df_unfiltered["weight_notes"] = ""
df_unfiltered["taxon_id"] = taxon_info["ID"]
df_unfiltered["gbif_id"] = taxon_info["GBIF_ID"]
df_unfiltered["ebird_id"] = taxon_info["EBIRD_ID"]
df_unfiltered["detection_distance_m"] = taxon_info["detection_distance_m"]
df_unfiltered["filter_set_name"] = filter_set["name"]
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< COORDINATE PRECISION
'''In WGS84, the ground distance represented by a coordinate's decimal places
varies with latitude (for longitude) and with the number of digits provided.
Thus, coordinates have a nominal precision that may limit their accuracy.
Populate a column for this...'''
# Trim decimal length to 5 digits (lat and long).
# Anything more is false precision.
df_unfiltered["decimalLatitude"] = df_unfiltered["decimalLatitude"].apply(lambda x: coord_rounded(x, 5))
df_unfiltered["decimalLongitude"] = df_unfiltered["decimalLongitude"].apply(lambda x: coord_rounded(x, 5))
# Drop rows without a valid latitude or longitude
df_unfiltered.dropna(subset=["decimalLatitude", "decimalLongitude"],
inplace=True)
# Calculate the number of digits for latitude and longitude
df_unfiltered['digits_latitude'] = [len(x.split(".")[1]) for x in df_unfiltered['decimalLatitude']]
df_unfiltered['digits_longitude'] = [len(x.split(".")[1]) for x in df_unfiltered['decimalLongitude']]
# Estimate longitude precisions
df_unfiltered = nominal_x_precision(dataframe=df_unfiltered,
lat_column="decimalLatitude",
digits_column="digits_longitude",
output_column="nominal_x_precision")
# Latitude precision; lookup for latitude precision
digitsY = {1: 11112.0, 2: 1111.2, 3: 111.1, 4: 11.1, 5: 1.1}
df_unfiltered["nominal_y_precision"] = df_unfiltered["digits_latitude"].apply(lambda x: digitsY[x])
# Put the larger of the two nominal precisions in a column
df_unfiltered["nominal_xy_precision"] = np.where(df_unfiltered["nominal_y_precision"] > df_unfiltered["nominal_x_precision"], df_unfiltered["nominal_y_precision"], df_unfiltered["nominal_x_precision"])
# Clean up
df_unfiltered.drop(["temp", "temp2", "digits_latitude", "digits_longitude",
"nominal_x_precision", "nominal_y_precision"], axis=1,
inplace=True)
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< BUFFER RADIUS
'''
Calculate a buffer distance from various parameters for the
point-radius method. The calculation differs by data source and by
whether the user chose to use a default coordinate uncertainty. Components
of the radius may include coordinateUncertaintyInMeters, coordinatePrecision,
GPS_accuracy_m, effort_distance_m, and detection_distance_m.
Records are broken apart by source (GBIF, GBIF/EOD, EBD), processed,
and then concatenated in order to account for all conditions.
If footprintWKT is provided, it will be used by spatial_output instead
of point buffering.
'''
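# Sketch of the radius composition implemented below, by source:
#   GBIF georeferenced: radius_m = coordinateUncertaintyInMeters
#   GBIF not georeferenced: radius_m = default_coordUncertainty if provided,
#       else gps_accuracy_m + detection_distance_m + effort_distance_m
#   EOD (eBird via GBIF): radius_m = 8047 + gps_accuracy_m + detection_distance_m
#   EBD (eBird): radius_m = effort_distance_m + gps_accuracy_m + detection_distance_m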
# Records from GBIF with coordinate uncertainty (georeferenced)
georef = df_unfiltered[df_unfiltered["coordinateUncertaintyInMeters"] > 0.0].copy()
if georef.empty == False:
#georef.fillna({"coordinatePrecision": 0.00001}, inplace=True)
georef["gps_accuracy_m"] = np.where(georef["eventDate"].apply(lambda x: datetime.strptime(x, "%Y-%m-%dT%H:%M:%S").year) < 2000, 100, 30)
georef["radius_m"] = georef["coordinateUncertaintyInMeters"]
print("Number of georeferenced GBIF records: " + str(len(georef)))
# Records from GBIF without coordinate uncertainty
gbif_nogeo = df_unfiltered[(df_unfiltered["coordinateUncertaintyInMeters"] == 0.0) & (df_unfiltered["collectionCode"].str.contains("EBIRD*") == False)].copy()
if gbif_nogeo.empty == False:
gbif_nogeo["gps_accuracy_m"] = np.where(gbif_nogeo["eventDate"].apply(lambda x: datetime.strptime(x, "%Y-%m-%dT%H:%M:%S").year) < 2000, 100, 30)
if filter_set["default_coordUncertainty"] is not None:
print("Applying default coordinate uncertainties for GBIF records")
#gbif_nogeo.fillna({"coordinatePrecision": 0.00001}, inplace=True)
gbif_nogeo["radius_m"] = filter_set["default_coordUncertainty"]
if filter_set["default_coordUncertainty"] is None:
print("Approximating coordinate uncertanties for GBIF records")
#gbif_nogeo.fillna({"coordinatePrecision": 0.00001}, inplace=True)
gbif_nogeo["radius_m"] = gbif_nogeo["gps_accuracy_m"] + gbif_nogeo["detection_distance_m"] + gbif_nogeo["effort_distance_m"]
# Records from EBD
ebd_geo = df_unfiltered[df_unfiltered["source"] == "eBird"].copy()
if ebd_geo.empty == False:
#ebd_geo.fillna({"coordinatePrecision": 0.00001}, inplace=True)
ebd_geo["gps_accuracy_m"] = np.where(ebd_geo["eventDate"].apply(lambda x: datetime.strptime(x, "%Y-%m-%d").year) < 2000, 100, 30)
ebd_geo["radius_m"] = ebd_geo["effort_distance_m"] + ebd_geo["gps_accuracy_m"] + ebd_geo["detection_distance_m"]
# Records from EOD (via GBIF)
eod_nogeo = df_unfiltered[(df_unfiltered["source"] == "GBIF") & (df_unfiltered["collectionCode"].str.contains("EBIRD*") == True)].copy()
if eod_nogeo.empty == False:
#eod_nogeo.fillna({"coordinatePrecision": 0.00001}, inplace=True)
eod_nogeo["gps_accuracy_m"] = np.where(eod_nogeo["eventDate"].apply(lambda x: datetime.strptime(x, "%Y-%m-%dT%H:%M:%S").year) < 2000, 100, 30)
eod_nogeo["effort_distance_m"] = 8047 # eBird best practices allows distance up to 5 mi length.
eod_nogeo["radius_m"] = eod_nogeo["effort_distance_m"] + eod_nogeo["gps_accuracy_m"] + eod_nogeo["detection_distance_m"]
# Concat df's if necessary
if filter_set['has_coordinate_uncertainty'] == True:
df_unfiltered2 = georef
to_concat = []
for x in [gbif_nogeo, georef, eod_nogeo, ebd_geo]:
if x.empty == False:
to_concat.append(x)
if len(to_concat) > 1:
df_unfiltered2 = pd.concat(to_concat)
if len(to_concat) == 1:
df_unfiltered2 = to_concat[0]
# Where coordinate precision is poor, overwrite the radius to be the precision.
df_unfiltered2["radius_m"] = np.where(df_unfiltered2["nominal_xy_precision"] > df_unfiltered2["radius_m"], df_unfiltered2["nominal_xy_precision"], df_unfiltered2["radius_m"])
#df_unfiltered2["radius_m"] = np.where(df_unfiltered2["coordinatePrecision"] > df_unfiltered2["radius_m"], df_unfiltered2["coordinatePrecision"], df_unfiltered2["radius_m"])
# Test to make sure that no records were lost in the previous steps
if len(df_unfiltered2) != len(df_unfiltered):
print("AN ERROR OCCURRED !!!!!!!!!!!!!")
else:
print("Prepared records and calculated radii:" + str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< FILTER
timestamp = datetime.now()
# Some filters to be prepped for use
for x in ['bases_omit', 'collection_codes_omit', 'datasets_omit',
'institutions_omit', 'issues_omit', 'sampling_protocols_omit']:
if filter_set[x] == None:
filter_set[x] = []
df_filter2 = (df_unfiltered2[df_unfiltered2['radius_m'] <= filter_set['max_coordinate_uncertainty']]
[lambda x: x['collectionCode'].isin(filter_set['collection_codes_omit']) == False]
[lambda x: x['institutionID'].isin(filter_set['institutions_omit']) == False]
[lambda x: x['basisOfRecord'].isin(filter_set['bases_omit']) == False]
[lambda x: x['samplingProtocol'].isin(filter_set['sampling_protocols_omit']) == False]
[lambda x: x['datasetName'].isin(filter_set['datasets_omit']) == False]
[lambda x: x['occurrenceStatus'] != "ABSENT"]
)
# Case where user demands records had coordinate uncertainty
if filter_set['has_coordinate_uncertainty'] == True:
df_filter2 = df_filter2[df_filter2["coordinateUncertaintyInMeters"] > 0]
''' Issues are more complex to filter because multiple issues can be listed
per record. The method used here is involved, but hopefully faster than
simple iteration over all records.
'''
df_filter2.fillna(value={'issues': ""}, inplace=True)
# Format of issues entries differs by method; change the JSON format to the
# email format
if filter_set['get_dwca'] == True:
df_filter2['issues'] = [x.replace(', ', ';').replace('[', '').replace(']', '').replace("'", "")
for x in df_filter2['issues']]
unique_issue = list(df_filter2['issues'].unique())
violations = [x for x in unique_issue if len(set(str(x).split(";")) & set(filter_set['issues_omit'])) != 0] # entries that contain violations
df_filter3 = df_filter2[df_filter2['issues'].isin(violations) == False] # Records without entries that are violations.
print("Performed filtering: " + str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< REMOVE SPACE-TIME DUPLICATES
# Prep some columns by changing data type
df_filter3 = (df_filter3
.astype({'decimalLatitude': 'str',
'decimalLongitude': 'str'})
.reset_index(drop=True))
if filter_set["duplicate_coord_date_OK"] == False:
df_filterZ = drop_duplicates_latlongdate(df_filter3)
if filter_set["duplicate_coord_date_OK"] == True:
df_filterZ = df_filter3.copy()
print("DUPLICATES ON LATITUDE, LONGITUDE, DATE-TIME INCLUDED")
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< SPATIAL FILTERING
# Spatial filtering happens in the get functions (ebird and gbif), not here
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< SUMMARIZE VALUES AGAIN
timestamp = datetime.now()
# Store value summary in a data frame
if df_filterZ.empty == False:
retained = summarize_values(dataframe=df_filterZ, step='retained')
if df_filterZ.empty == True:
retained = acquired.copy().drop(["acquired"], axis=1)
retained["retained"] = 0
# Concat acquired and retained data frames
summary_df = pd.merge(retained, acquired, on=['attribute', 'value'],
how='inner')
# Calculate a difference column
summary_df['removed'] = summary_df['acquired'] - summary_df['retained']
summary_df = summary_df[['attribute', 'value', 'acquired', 'removed',
'retained']]
# Summarize sources
if df_filterZ.empty == False:
source_df2 = df_filterZ[['institutionID', 'collectionCode',
'datasetName', 'record_id']]
source_summary2 = (source_df2
.groupby(by=['institutionID', 'collectionCode',
'datasetName'])
.size()
.reset_index(name='retained'))
if df_filterZ.empty == True:
print(source_summary1)
source_summary2 = source_summary1.copy().drop(["acquired"], axis=1)
source_summary2["retained"] = 0
# Concat acquired and retained source summary data frames
source_summaries = pd.merge(source_summary1, source_summary2,
on=['institutionID', 'collectionCode',
'datasetName'],
how='inner')
# Calculate a difference column
source_summaries['removed'] = source_summaries['acquired'] - source_summaries['retained']
source_summaries = source_summaries[['institutionID', 'collectionCode',
'datasetName', 'acquired', 'removed',
'retained']]
# Save the summaries in the output database
summary_df.to_sql(name='attribute_value_counts', con=conn,
if_exists='replace')
source_summaries.to_sql(name='sources', con=conn,
if_exists='replace')
print("Saved summary of filtering results: "
+ str(datetime.now() - timestamp))
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< SAVE
# Reformat data to strings and insert into db.
df_filterZ.replace("nan",
pd.NA).applymap(str).to_sql(name='occurrence_records',
con=conn,
if_exists='replace')
conn.close()
return None
def nominal_precisions(longitude, latitude, produce):
'''
Calculates the nominal precisions based on WGS84 coordinates.
The method is based on information from the Wikipedia page on latitude and posts at
https://gis.stackexchange.com/questions/8650/measuring-accuracy-of-latitude-and-longitude
https://wiki.openstreetmap.org/wiki/Precision_of_coordinates
Parameters
----------
latitude : decimal degrees (EPSG:4326) latitude as string.
longitude : decimal degrees (EPSG:4326) longitude as string.
produce : 'longitude', 'latitude', or 'both'
Returns
-------
x : uncertainty in longitude (meters) as float.
y : uncertainty in latitude (meters) as float.
EXAMPLE
-------
x, y = nominal_precisions("-93.455", "26.3455", produce="both")
'''
lat = latitude.split(".")
long = longitude.split(".")
# Longitude - decimal gets moved based on digits.
digitsX = {1: 10, 2: 100, 3: 1000, 4: 10000, 5: 100000}
x = (111321 * np.cos(float(latitude) * np.pi/180))/digitsX[len(long[1])]
# Latitude lookup
digitsY = {1: 11112.0, 2: 1111.2, 3: 111.1, 4: 11.1, 5: 1.1}
y = digitsY[len(lat[1])]
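# Worked example with the docstring inputs ("-93.455", "26.3455"): the
# longitude has 3 decimal places, so x = 111321 * cos(26.3455 deg) / 1000,
# roughly 99.8 m; the latitude has 4 decimal places, so y = 11.1 m.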
if produce == "both":
return x, y
if produce == "longitude":
return x
if produce == "latitude":
return y
def drop_duplicates_latlongdate(df):
'''
Function to find and remove duplicate occurrence records within the
wildlife wrangler workflow. When duplicates exist, the record with the
higher decimal precision is kept, and if precision values are equal, then
the record with the smallest radius_m is retained. Accounts for the
existence of records with a mix of decimal precision in latitude and
longitude values. The process is a little complex. First, for records
with unequal decimal precision in the latitude and longitude fields,
those fields are rounded to the coarser precision present. The data frame
is then cleaned up by dropping exact latitude-longitude-date duplicates,
keeping whichever record has the smaller buffer radius. An input data
frame likely contains records with equal decimal precision in the latitude
and longitude fields that is lower than the rest (i.e. latitude and
longitude have 3 places right of the decimal whereas most records have 4).
Duplication may occur between lower and higher precision records at the
lower precision. Therefore, duplication must be assessed at each of the
lower precision levels present. The strategy is to split the main data
frame in two at each precision level: one with records at the precision
level under investigation and another with records of greater precision.
The "greater than" data frame's latitude and longitude values are then
rounded to the precision level. Records in the "equal precision" data
frame whose latitude, longitude, and date values are represented in the
"greater than" data frame are identified, and their IDs are collected in
a list of records to be removed from the input/main data frame. This
process is iterated over all precision levels present in the data.
Parameters
----------
df : input pandas data frame.
Returns
-------
df2 : a data frame equal to df but without duplicates. Use to drop records
from the occurrences table.
'''
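# For illustration: a record at ("41.385", "2.154") with 3 decimal places
# duplicates a record at ("41.3853", "2.1538") once the latter is rounded to
# 3 places, provided their eventDate values match.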
startduptime = datetime.now()
# Record df length before removing duplicates
initial_length = len(df)
"""
############ RECTIFY UNEQUAL LAT-LONG PRECISION
First, trim the decimal length in cases where it differs between the
latitude and longitude values, so that both end up with equal length.
Record the trimmed decimal precision in a temp column for later use as a
record of the "verbatim" precision.
"""
df['dup_latPlaces'] = [len(x.split(".")[1]) for x in df['decimalLatitude']]
df['dup_lonPlaces'] = [len(x.split(".")[1]) for x in df['decimalLongitude']]
df['dup_OGprec'] = df['dup_latPlaces']
prec_unequal = df[df['dup_latPlaces'] != df['dup_lonPlaces']]
for i in prec_unequal.index:
x = prec_unequal.loc[i]
if x['dup_latPlaces'] < x['dup_lonPlaces']:
trim_len = int(x['dup_latPlaces'])
else:
trim_len = int(x['dup_lonPlaces'])
df.loc[i, 'decimalLatitude'] = x['decimalLatitude'][:trim_len + 3]
df.loc[i, 'decimalLongitude'] = x['decimalLongitude'][:trim_len + 4]
# Record the resulting precision for reference later
df.loc[i, 'dup_OGprec'] = trim_len
df.drop(['dup_latPlaces', 'dup_lonPlaces'], axis=1, inplace=True)
"""
######## INITIAL DROP OF DUPLICATES
Initial drop of duplicates on 'decimalLatitude', 'decimalLongitude', and
'eventDate', keeping the first record (lowest radius_m). Sort first so
that the lowest radius_m comes first within each group.
"""
df = (df
.sort_values(by=['decimalLatitude', 'decimalLongitude', 'eventDate',
'radius_m'],
ascending=True, kind='mergesort', na_position='last')
.drop_duplicates(subset=['decimalLatitude', 'decimalLongitude',
'eventDate'],
keep='first'))
"""
######### FIND IMPRECISE DUPLICATES
Get a list of "verbatim" precisions that are present in the data to loop
through. Next, iterate through this list collecting id's of records that
need to be removed from the main df.
"""
# Get list of unique precisions. Order is important: descending.
precisions = list(set(df['dup_OGprec']))
precisions.sort(reverse=True)
# The highest precision listed has already been handled above: drop it.
precisions = precisions[1:]
# List for collecting records that are duplicates
duplis = []
# The precision-specific duplicate testing happens repeatedly, so make it a
# function.
def drop_duplicate_coord_date(precision, df):
"""
Function to find undesirable duplicates at a particular decimal
precision
Parameters
----------
precision : The level of precision (places right of decimal) in
decimalLatitude and longitude values for the assessment of duplicates.
df : data frame to assess duplicates within.
Returns
-------
None; the record_ids of duplicates found are appended to the enclosing
duplis list.
"""
# Create a df with records from the input df having decimal
# precision > the precision level being assessed.
dfLonger = df[df['dup_OGprec'] > precision].copy()
# Round lat and long values
dfLonger['decimalLatitude'] = [str(round(float(x), precision)) for x in dfLonger['decimalLatitude']]
dfLonger['decimalLongitude'] = [str(round(float(x), precision)) for x in dfLonger['decimalLongitude']]
# Create a df with records having the precision being
# investigated
dfShorter1 = df[df['dup_OGprec'] == precision]
# Find records in dfShorter1 with latitude, longitude, date combo
# existing in dfLonger and append to list of duplis
dfduplis = pd.merge(dfShorter1, dfLonger, how='inner',
on=['decimalLatitude', 'decimalLongitude',
'eventDate'])
dups_ids = dfduplis['record_id_x']
for d in dups_ids:
duplis.append(d)
# Drop latitude longitude duplicates at lower decimal precisions
for p in precisions:
drop_duplicate_coord_date(p, df)
# Drop rows from the current main df that were identified as duplicates
df2 = df[df['record_id'].isin(duplis) == False].copy()
# Drop excess columns
df2.drop(columns=['dup_OGprec'], axis=1, inplace=True)
# Print status
duptime = datetime.now() - startduptime
print(str(initial_length - len(df2))
+ " duplicate records dropped: {0}".format(duptime))
return df2
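# Minimal usage sketch (as called in process_records above); the input needs
# string 'decimalLatitude'/'decimalLongitude' columns plus 'eventDate',
# 'radius_m', and 'record_id':
# df_filterZ = drop_duplicates_latlongdate(df_filter3)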
def verify_results(database):
'''
Compares the occurrence record attributes to the filters that were
supposed to be applied.
Parameters
----------
database : path to a wrangler output database; string.
Like "Z:/Occurrence_Records/test1.sqlite"
RESULTS
-------
Prints messages if tests fail. No output indicates that all tests
passed.
'''
# Connect to a database
conn = sqlite3.connect(database)
# Get the taxon concept ---------------------------------------------------
taxon_concept = (pd.read_sql(sql="SELECT * FROM taxon_concept;", con=conn)
.rename({"index": "key", "0": "value"}, axis=1)
.set_index("key"))
# Get the filter set that was applied -------------------------------------
filter_set = ( | pd.read_sql(sql="SELECT * FROM filter_set", con=conn) | pandas.read_sql |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-04-15 22:20
# @Author : erwin
import pandas as pd
from common.util_function import *
import numpy as np
df = | pd.DataFrame({'col1': ['a'] * 2 + ['b'] * 3, 'col2': [1, 1, 2, 3, 3]}) | pandas.DataFrame |
import pandas as pd
import numpy as np
from .utils import store_data, stoi
# ------------------------------------------------------------------------
# Globals
cols = ['time', 'cases', 'deaths', 'hospitalized', 'icu', 'recovered']
# ------------------------------------------------------------------------
# Main point of entry
def parse():
#Data extraction from source
dataframe=pd.read_csv("https://raw.githubusercontent.com/kaz-ogiwara/covid19/master/data/prefectures.csv")
#Time format conversion
time_format = '%Y-%m-%d'
dataframe['date'] = pd.to_datetime(dataframe.year.astype(str)+'-'+dataframe.month.astype(str)+'-'+dataframe.date.astype(str), format=time_format)
#Separate by region
dummy=pd.get_dummies(dataframe['prefectureNameE'])
dataframe=pd.concat([dataframe,dummy],axis=1)
regions_name=dataframe.iloc[:,9:].columns
dataframe['icu']=None
dataframe['hospital']=None
dataframe_region=dataframe[['date', 'testedPositive', 'deaths', 'hospital', 'icu', 'discharged']].copy()
dataframe_region['date']=dataframe_region['date'].astype(str)
cols_int=['testedPositive', 'deaths', 'discharged']
for col in cols_int:
dataframe_region[col] = dataframe_region[col].apply(lambda x: int(x) if x == x else "")
dataframe_region = dataframe_region.where(pd.notnull(dataframe_region), None)
region_tables = {}
for region in regions_name:
region_tables['-'.join(['JPN',region])] = dataframe_region[dataframe[region]==1].values.tolist()
#All Japan cases
#Data extraction from source
dataframe=pd.read_csv("https://raw.githubusercontent.com/kaz-ogiwara/covid19/master/data/summary.csv")
#Time format conversion
time_format = '%Y-%m-%d'
dataframe['date'] = pd.to_datetime(dataframe.year.astype(str)+'-'+dataframe.month.astype(str)+'-'+dataframe.date.astype(str), format=time_format)
dataframe['date']=dataframe['date'].astype(str)
dataframe_japan=dataframe[['date', 'tested_positive', 'death', 'hospitalized', 'serious', 'discharged']].copy()
cols_int=['tested_positive', 'death', 'hospitalized', 'serious', 'discharged']
for col in cols_int:
dataframe_japan[col] = dataframe_japan[col].apply(lambda x: int(x) if x == x else "")
dataframe_japan = dataframe_japan.where( | pd.notnull(dataframe_japan) | pandas.notnull |
# Question: Please concatenate this file with this one into a single text file.
# The content of the output file should look like below.
# http://www.pythonhow.com/data/sampledata.txt
# http://pythonhow.com/data/sampledata_x_2.txt
# Expected output:
# x,y
# 3,5
# 4,9
# 6,10
# 7,11
# 8,12
# 6,10
# 8,18
# 12,20
# 14,22
# 16,24
# Answer:
import pandas as pd
df1 = pd.read_csv('http://www.pythonhow.com/data/sampledata.txt')
df2 = pd.read_csv('http://pythonhow.com/data/sampledata_x_2.txt')
frames = [df1,df2]
df_result = pd.concat(frames)
df_result.to_csv('./output/output_74.csv', index=None)
# Explanation 1:
# Again we are using pandas to load the data into Python. Then in line 5, we use the concat method. The method expects as input a list of dataframe objects to be concatenated. Lastly, in line 6, we export the data to a new text file.
# Answer 2:
import io
import pandas
import requests
r = requests.get("http://www.pythonhow.com/data/sampledata.txt")
c = r.content
data1 = pandas.read_csv(io.StringIO(c.decode('utf-8')))
data2 = | pandas.read_csv("sampledata_x_2.txt") | pandas.read_csv |
import datetime
import glob
import pathlib
import tempfile
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from mockito import ANY, unstub, when
from src.constants import ROOT_DIR
from src.data.forecast import CAMSProcessor
from src.data.observations import OpenAQDownloader
from src.data.transformer import LocationTransformer
from src.data.utils import (Location, get_elevation_for_location,
remove_intermediary_paths, write_netcdf)
class TestUtils:
@pytest.fixture()
def mocked_location(self):
args = dict(
location_id="ES",
city="Sebastopol",
country="Rusia",
latitude=37.9,
longitude=39.8,
timezone="Asia/China",
elevation=10.5,
)
location = Location(**args)
return location
@pytest.fixture()
def mocked_dataset_with_closest_stations(self):
ds_dict = {
"coords": {
"time": {
"dims": ("time",),
"attrs": {},
"data": [
datetime.datetime(2019, 6, 1, 8, 0),
datetime.datetime(2019, 6, 1, 10, 0),
datetime.datetime(2019, 6, 1, 11, 0),
datetime.datetime(2019, 6, 4, 16, 0),
datetime.datetime(2019, 6, 8, 2, 0),
datetime.datetime(2019, 6, 8, 3, 0),
datetime.datetime(2019, 6, 8, 10, 0),
datetime.datetime(2019, 6, 8, 11, 0),
datetime.datetime(2019, 6, 15, 10, 0),
datetime.datetime(2019, 6, 15, 11, 0),
datetime.datetime(2019, 6, 21, 1, 0),
datetime.datetime(2019, 6, 22, 8, 0),
datetime.datetime(2019, 6, 29, 8, 0),
datetime.datetime(2019, 7, 4, 13, 0),
datetime.datetime(2019, 7, 4, 14, 0),
datetime.datetime(2019, 7, 4, 15, 0),
datetime.datetime(2019, 7, 4, 16, 0),
datetime.datetime(2019, 7, 4, 17, 0),
datetime.datetime(2019, 7, 4, 18, 0),
datetime.datetime(2019, 7, 4, 19, 0),
],
},
"station_id": {
"dims": ("station_id",),
"attrs": {"long_name": "station name", "cf_role": "timeseries_id"},
"data": [3298],
},
"x": {
"dims": (),
"attrs": {
"units": "degrees_east",
"long_name": "Longitude",
"standard_name": "longitude",
},
"data": 2.15382196,
},
"y": {
"dims": (),
"attrs": {
"units": "degrees_north",
"long_name": "Latitude",
"standard_name": "latitude",
},
"data": 41.3853432834672,
},
"_x": {
"dims": (),
"attrs": {
"units": "degrees_east",
"long_name": "Longitude of the location of interest",
"standard_name": "longitude_interest",
},
"data": 2.16,
},
"_y": {
"dims": (),
"attrs": {
"units": "degrees_north",
"long_name": "Latitude of the location of interest",
"standard_name": "latitude_interest",
},
"data": 41.39,
},
"distance": {
"dims": (),
"attrs": {
"units": "km",
"long_name": "Distance",
"standard_name": "distance",
},
"data": 0.7308156936731197,
},
},
"attrs": {"featureType": "timeSeries", "Conventions": "CF-1.4"},
"dims": {"station_id": 1, "time": 20},
"data_vars": {
"no2": {
"dims": ("station_id", "time"),
"attrs": {
"units": "microgram / m^3",
"standard_name": "no2",
"long_name": "Nitrogen dioxide",
},
"data": [
[
48,
43,
52,
60,
28,
26,
32,
27,
30,
30,
21,
26,
137,
0,
0,
0,
0,
0,
0,
0,
]
],
}
},
}
ds = xr.Dataset.from_dict(ds_dict)
return ds
def test_location_string_method(self, mocked_location):
string_to_check = (
f"Location(location_id={mocked_location.location_id}, "
f"city={mocked_location.city}, "
f"country={mocked_location.country}, "
f"latitude={mocked_location.latitude}, "
f"longitude={mocked_location.longitude}, "
f"elevation={mocked_location.elevation}, "
f"timezone={mocked_location.timezone}"
)
assert str(mocked_location) == string_to_check
def test_location_get_observations_path(self, mocked_location):
observation_path = mocked_location.get_observations_path(
"/tmp", "no2", "20190101-20210331"
)
args_from_path = str(observation_path).split("/")
assert args_from_path[2] == mocked_location.country.lower().replace(" ", "-")
assert args_from_path[3] == mocked_location.city.lower().replace(" ", "-")
assert args_from_path[4] == mocked_location.location_id.lower()
assert args_from_path[5] == "no2"
assert type(observation_path) == pathlib.PosixPath
def test_location_get_forecasts_path(self, mocked_location):
forecast_path = mocked_location.get_forecast_path("/tmp", "20190101-20210331")
args_from_path = str(forecast_path).split("/")
assert args_from_path[2] == mocked_location.country.lower().replace(" ", "-")
assert args_from_path[3] == mocked_location.city.lower().replace(" ", "-")
assert args_from_path[4] == mocked_location.location_id.lower()
assert type(forecast_path) == pathlib.PosixPath
def test_location_get_location_by_id(self, location_id="ES001"):
data_stations = pd.read_csv(
ROOT_DIR / "tests" / "data_test" / "stations.csv",
index_col=0,
usecols=list(range(1, 8)),
)
when(pd).read_csv(
ANY(),
index_col=ANY(int),
usecols=ANY(list),
).thenReturn(data_stations)
location = Location.get_location_by_id(location_id)
assert type(location) == Location
assert location.country == "Spain"
assert location.city == "Madrid"
unstub()
def test_location_get_elevation(self, mocked_location):
elevation = get_elevation_for_location(
mocked_location.latitude, mocked_location.longitude
)
assert type(elevation) == int
assert elevation >= 0
def test_write_netcdf(self, tmp_path, mocked_dataset_with_closest_stations):
tempdir = tmp_path / "sub"
tempdir.mkdir()
temppath = tempdir / "output_test.nc"
write_netcdf(temppath, mocked_dataset_with_closest_stations)
assert temppath.exists()
def test_remove_intermediary_paths(self, tmp_path):
tempdir = tmp_path / "sub"
tempdir.mkdir()
temppaths = []
for i in range(10):
temppath = tempdir / f"output_test_{i}.nc"
with open(temppath, "w") as file:
file.write("Hi!")
temppaths.append(temppath)
remove_intermediary_paths(temppaths)
for temppath_removed in temppaths:
assert not temppath_removed.exists()
class TestOpenAQDownload:
@pytest.fixture()
def mocked_download_obj(self):
args = dict(
location_id="ES002",
city="Barcelona",
country="Spain",
latitude=41.39,
longitude=2.16,
timezone="Europe/Madrid",
elevation=47,
)
location = Location(**args)
openaq_obj = OpenAQDownloader(
location=location,
output_dir=pathlib.Path("/tmp"),
variable="no2",
time_range=dict(start="2019-06-01", end="2021-03-31"),
)
return openaq_obj
@pytest.fixture()
def mocked_output_path(self, tmp_path):
tempdir = tmp_path / "sub"
tempdir.mkdir()
output_path = tempdir / "output_file.nc"
return output_path
@pytest.fixture()
def _mocked_dataframe_with_closest_stations(self):
df_dict = {
"id": {22: 3298},
"city": {22: "Barcelona"},
"name": {22: "ES1438A"},
"entity": {22: "government"},
"country": {22: "ES"},
"sources": {
22: [
{
"id": "eea",
"url": "http://www.eea.europa.eu/themes/air/air-quality",
"name": "EEA",
}
]
},
"isMobile": {22: False},
"isAnalysis": {22: False},
"parameters": {
22: [
{
"id": 7189,
"unit": "µg/m³",
"count": 43299,
"average": 33.3172359638791,
"lastValue": 40,
"parameter": "no2",
"displayName": "NO₂ mass",
"lastUpdated": "2021-08-24T06:00:00+00:00",
"parameterId": 5,
"firstUpdated": "2016-11-17T23:00:00+00:00",
}
]
},
"sensorType": {22: "reference grade"},
"lastUpdated": {22: pd.Timestamp("2021-08-24 06:00:00+0000", tz="UTC")},
"firstUpdated": {22: pd.Timestamp("2016-11-17 23:00:00+0000", tz="UTC")},
"measurements": {22: 161377},
"coordinates.latitude": {22: 41.3853432834672},
"coordinates.longitude": {22: 2.15382196},
}
df = pd.DataFrame(df_dict)
return df
@pytest.fixture()
def mocked_dataframe_with_closest_stations(self):
df_dict = {
"id": {22: 3298},
"city": {22: "Barcelona"},
"name": {22: "ES1438A"},
"entity": {22: "government"},
"country": {22: "ES"},
"sources": {
22: [
{
"id": "eea",
"url": "http://www.eea.europa.eu/themes/air/air-quality",
"name": "EEA",
}
]
},
"isMobile": {22: False},
"isAnalysis": {22: False},
"parameters": {
22: [
{
"id": 7189,
"unit": "µg/m³",
"count": 43299,
"average": 33.3172359638791,
"lastValue": 40,
"parameter": "no2",
"displayName": "NO₂ mass",
"lastUpdated": "2021-08-24T06:00:00+00:00",
"parameterId": 5,
"firstUpdated": "2016-11-17T23:00:00+00:00",
}
]
},
"sensorType": {22: "reference grade"},
"lastUpdated": {22: | pd.Timestamp("2021-08-24 06:00:00+0000", tz="UTC") | pandas.Timestamp |
import copy
import re
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from data.dataloader import JHULoader
from pytz import timezone
from utils.fitting.loss import Loss_Calculator
from utils.generic.config import read_config
"""
Helper functions for processing different reichlab submissions, processing reichlab ground truth,
comparing reichlab models with the ground truth, and processing, formatting, and comparing
our (Wadhwani AI) submission with the ground truth as well.
"""
def get_mapping(which='location_name_to_code', reichlab_path='../../../covid19-forecast-hub', read_from_github=False):
if read_from_github:
reichlab_path = 'https://raw.githubusercontent.com/reichlab/covid19-forecast-hub/master'
df = pd.read_csv(f'{reichlab_path}/data-locations/locations.csv')
df.dropna(how='any', axis=0, inplace=True)
if which == 'location_name_to_code':
mapping_dict = dict(zip(df['location_name'], df['location']))
elif which == 'location_name_to_abbv':
mapping_dict = dict(zip(df['location_name'], df['abbreviation']))
else:
mapping_dict = {}
return mapping_dict
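# For illustration (assuming the standard reichlab locations.csv layout):
# get_mapping('location_name_to_code') -> {'Alabama': '01', 'Alaska': '02', ...}
# get_mapping('location_name_to_abbv') -> {'Alabama': 'AL', 'Alaska': 'AK', ...}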
def get_list_of_models(date_of_submission, comp, reichlab_path='../../../covid19-forecast-hub', read_from_github=False,
location_id_filter=78, num_submissions_filter=50):
"""Given an input of submission date, comp, gets list of all models that submitted.
Args:
date_of_submission (str): The ensemble creation date (always a Mon), for selecting a particular week
comp (str): Which compartment (Can be 'inc_case', 'cum_case', 'inc_death', or 'cum_death')
reichlab_path (str, optional): Path to reichlab repo (if cloned on machine).
Defaults to '../../../covid19-forecast-hub'.
read_from_github (bool, optional): If true, reads files directly from github
instead of cloned repo. Defaults to False.
location_id_filter (int, optional): Only considers locations with location code <= this input.
Defaults to 78. All states and territories have codes <= 78; locations with codes > 78 are counties.
num_submissions_filter (int, optional): Only selects models with more submissions than this.
Defaults to 50.
Returns:
list: list of eligible models
"""
if comp == 'cum_case':
comp = 'inc_case'
if read_from_github:
reichlab_path = 'https://raw.githubusercontent.com/reichlab/covid19-forecast-hub/master'
try:
df = pd.read_csv(f'{reichlab_path}/ensemble-metadata/' + \
f'{date_of_submission}-{comp}-model-eligibility.csv')
except:
date_convert = datetime.strptime(date_of_submission, '%Y-%m-%d')
date_of_filename = (date_convert - timedelta(days=1)).date()
df = pd.read_csv(f'{reichlab_path}/ensemble-metadata/' +
f'{date_of_filename}-{comp}-model-eligibility.csv')
df['location'] = df['location'].apply(lambda x : int(x) if x != 'US' else 0)
all_models = list(df['model'])
df_all_states = df[df['location'] <= location_id_filter]
df_eligible = df_all_states[df_all_states['overall_eligibility'] == 'eligible']
df_counts = df_eligible.groupby('model').count()
# Filter all models with > num_submissions_filter submissions
df_counts = df_counts[df_counts['overall_eligibility'] > num_submissions_filter]
eligible_models = list(df_counts.index)
# Add Wadhwani_AI-BayesOpt incase it isn't a part of the list
if ('Wadhwani_AI-BayesOpt' in all_models) & ('Wadhwani_AI-BayesOpt' not in eligible_models):
eligible_models.append('Wadhwani_AI-BayesOpt')
print(eligible_models)
return eligible_models
def process_single_submission(model, date_of_submission, comp, df_true, reichlab_path='../../../covid19-forecast-hub',
read_from_github=False, location_id_filter=78, num_weeks_filter=4):
"""Processes the CSV file of a single submission (one model, one instance of time)
Args:
model (str): The model name to process CSV of
date_of_submission (str): The ensemble creation date (always a Mon), for selecting a particular week
comp (str): Which compartment (Can be 'inc_case', 'cum_case', 'inc_death', or 'cum_death')
df_true (pd.DataFrame): The ground truth dataframe (Used for processing cum_cases submissions)
reichlab_path (str, optional): Path to reichlab repo (if cloned on machine).
Defaults to '../../../covid19-forecast-hub'.
read_from_github (bool, optional): If true, reads files directly from github
instead of cloned repo. Defaults to False.
location_id_filter (int, optional): All location ids <= this will be kept. Defaults to 78.
num_weeks_filter (int, optional): Only forecasts num_weeks_filter weeks ahead
will be kept. Defaults to 4.
Returns:
pd.DataFrame: model submssion processed dataframe
"""
if read_from_github:
reichlab_path = 'https://raw.githubusercontent.com/reichlab/covid19-forecast-hub/master'
try:
df = pd.read_csv(f'{reichlab_path}/data-processed/' + \
f'{model}/{date_of_submission}-{model}.csv')
except:
date_convert = datetime.strptime(date_of_submission, '%Y-%m-%d')
date_of_filename = date_convert - timedelta(days=1)
try:
df = pd.read_csv(f'{reichlab_path}/data-processed/' + \
f'{model}/{date_of_filename.strftime("%Y-%m-%d")}-{model}.csv')
except:
date_of_filename = date_of_filename - timedelta(days=1)
try:
df = pd.read_csv(f'{reichlab_path}/data-processed/' + \
f'{model}/{date_of_filename.strftime("%Y-%m-%d")}-{model}.csv')
except:
return None
# Converting all locations to integers
df['location'] = df['location'].apply(lambda x : int(x) if x != 'US' else 0)
# Keeping only states and territories forecasts
df = df[df['location'] <= location_id_filter]
df['model'] = model
# Only keeping the wk forecasts
df = df[df['target'].apply(lambda x : 'wk' in x)]
# Only the forecasts corresponding the comp user are interested in
if comp == 'cum_case':
df = df[df['target'].apply(lambda x : 'inc_case'.replace('_', ' ') in x)]
else:
df = df[df['target'].apply(lambda x : comp.replace('_', ' ') in x)]
# Pruning the forecasts which are beyond 4 weeks ahead
df = df[df['target'].apply(lambda x : int(re.findall(r'\d+', x)[0])) <= num_weeks_filter]
df['target_end_date'] = pd.to_datetime(df['target_end_date'])
df['forecast_date'] = pd.to_datetime(df['forecast_date'])
if comp == 'cum_case':
grouped = df.groupby(['location', 'type', 'quantile'], dropna=False)
df_cumsum = pd.DataFrame(columns=df.columns)
for _, group in grouped:
group['value'] = group['value'].cumsum()
df_cumsum = pd.concat([df_cumsum, group], ignore_index=True)
gt_cases = df_true.loc[df_true['date'] == df_cumsum['target_end_date'].min() -
timedelta(days=7), ['Province_State','Confirmed']]
loc_code_df = pd.read_csv(
f'{reichlab_path}/data-locations/locations.csv')
gt_cases = gt_cases.merge(loc_code_df, left_on='Province_State',
right_on='location_name')
gt_cases.drop(['Province_State', 'abbreviation',
'location_name', 'population'], axis=1, inplace=True)
gt_cases['location'] = gt_cases['location'].astype(int)
gt_cases = gt_cases[gt_cases['location'] < 100]
gt_cases.reset_index(drop=True, inplace=True)
gt_cases.loc[len(gt_cases), :] = [int(gt_cases.sum(axis=0)['Confirmed']), 0]
df_cumsum = df_cumsum.merge(gt_cases)
df_cumsum['value'] = df_cumsum['value'] + df_cumsum['Confirmed']
df_cumsum.drop(['Confirmed'], axis=1, inplace=True)
df_cumsum['target'] = df_cumsum['target'].apply(
lambda x: x.replace('inc case', 'cum case'))
df = df_cumsum
return df
def process_all_submissions(list_of_models, date_of_submission, comp, reichlab_path='../../../covid19-forecast-hub',
read_from_github=False, location_id_filter=78, num_weeks_filter=4):
"""Process submissions for all models given as input and concatenate them
Args:
list_of_models (list): List of all models to process submission for. Typically output of get_list_of_models
date_of_submission (str): The ensemble creation date (always a Mon), for selecting a particular week
comp (str): Which compartment (Can be 'inc_case', 'cum_case', 'inc_death', or 'cum_death')
reichlab_path (str, optional): Path to reichlab repo (if cloned on machine).
Defaults to '../../../covid19-forecast-hub'.
read_from_github (bool, optional): If true, reads files directly from github
instead of cloned repo. Defaults to False.
location_id_filter (int, optional): All location ids <= this will be kept. Defaults to 78.
num_weeks_filter (int, optional): Only forecasts num_weeks_filter weeks ahead
will be kept. Defaults to 4.
Returns:
pd.DataFrame: Dataframe with all submissions processed
"""
dlobj = JHULoader()
dataframes = dlobj.pull_dataframes_cached()
df_true = dataframes['df_us_states']
df_all_submissions = process_single_submission(
list_of_models[0], date_of_submission, comp, df_true, reichlab_path, read_from_github,
location_id_filter, num_weeks_filter)
if df_all_submissions is None:
raise AssertionError('list_of_models[0] has no submission on Monday, Sunday or Saturday' + \
'. Please skip it')
for model in list_of_models:
df_model_subm = process_single_submission(
model, date_of_submission, comp, df_true, reichlab_path, read_from_github,
location_id_filter, num_weeks_filter)
if df_model_subm is not None:
df_all_submissions = pd.concat([df_all_submissions, df_model_subm], ignore_index=True)
return df_all_submissions
def process_gt(comp, start_date, end_date, reichlab_path='../../../covid19-forecast-hub',
read_from_github=False, location_id_filter=78):
"""Process gt file in reichlab repo. Aggregate by week, and truncate to dates models forecasted for.
Args:
comp (str): Which compartment (Can be 'inc_case', 'cum_case', 'inc_death', or 'cum_death')
start_date (datetime): Start date (exclusive) used to truncate the ground truth.
end_date (datetime): End date (inclusive) used to truncate the ground truth.
reichlab_path (str, optional): Path to reichlab repo (if cloned on machine).
Defaults to '../../../covid19-forecast-hub'.
read_from_github (bool, optional): If true, reads files directly from github
instead of cloned repo. Defaults to False.
location_id_filter (int, optional): All location ids <= this will be kept. Defaults to 78.
Returns:
[pd.DataFrame]*3 : gt df, gt df truncated to model prediction dates (daily),
gt df truncated to model prediction dates (aggregated weekly)
"""
replace_dict = {'cum': 'Cumulative', 'inc': 'Incident',
'case': 'Cases', 'death': 'Deaths', '_': ' '}
truth_fname = comp
for key, value in replace_dict.items():
truth_fname = truth_fname.replace(key, value)
if read_from_github:
reichlab_path = 'https://raw.githubusercontent.com/reichlab/covid19-forecast-hub/master'
df_gt = pd.read_csv(f'{reichlab_path}/data-truth/truth-{truth_fname}.csv')
df_gt['location'] = df_gt['location'].apply(
lambda x: int(x) if x != 'US' else 0)
df_gt = df_gt[df_gt['location'] <= location_id_filter]
df_gt['date'] = pd.to_datetime(df_gt['date'])
df_gt_loss = df_gt[(df_gt['date'] > start_date) & (df_gt['date'] <= end_date)]
if 'inc' in comp:
df_gt_loss_wk = df_gt_loss.groupby(['location', 'location_name']).resample(
'7D', label='right', origin='start', on='date').sum()
else:
df_gt_loss_wk = df_gt_loss.groupby(['location', 'location_name']).resample(
'7D', label='right', origin='start', on='date').max()
df_gt_loss_wk.drop(['location', 'location_name', 'date'],
axis=1, inplace=True, errors='ignore')
df_gt_loss_wk = df_gt_loss_wk.reset_index()
df_gt_loss_wk['date'] = pd.to_datetime(df_gt_loss_wk['date'])
df_gt_loss_wk['date'] = df_gt_loss_wk['date'].apply(
lambda x: x - np.timedelta64(1, 'D'))
return df_gt, df_gt_loss, df_gt_loss_wk
def compare_gt_pred(df_all_submissions, df_gt_loss_wk, loss_fn='mape'):
"""Function for comparing all predictions to ground truth
Args:
df_all_submissions (pd.DataFrame): dataframe of all predictions processed
df_gt_loss_wk (pd.DataFrame): dataframe of ground truth numbers aggregated at a week level
Returns:
[pd.DataFrame, pd.DataFrame, pd.DataFrame]: combined dataframe, dataframe of mape values, dataframe of ranks
"""
df_comb = df_all_submissions.merge(df_gt_loss_wk,
left_on=['target_end_date', 'location'],
right_on=['date', 'location'])
df_comb = df_comb.rename({'value_x': 'forecast_value',
'value_y': 'true_value'}, axis=1)
lc = Loss_Calculator()
df_comb['mape'] = df_comb.apply(lambda row: lc.mape(
np.array([row['forecast_value']]), np.array([row['true_value']])), axis=1)
df_comb['rmse'] = df_comb.apply(lambda row: lc.rmse(
np.array([row['forecast_value']]), np.array([row['true_value']])), axis=1)
df_comb['mape_perc'] = df_comb.apply(lambda row: lc.qtile_mape(
np.array([row['forecast_value']]), np.array([row['true_value']]),
row['quantile']) if row['type'] == 'quantile' else np.nan, axis=1)
num_cols = ['mape', 'rmse', 'mape_perc', 'forecast_value']
df_comb.loc[:, num_cols] = df_comb.loc[:, num_cols].apply(pd.to_numeric)
df_temp = df_comb[df_comb['type'] == 'point']
df_mape = df_temp.groupby(['model', 'location',
'location_name']).mean().reset_index()
df_mape = df_mape.pivot(index='model', columns='location_name',
values=loss_fn)
df_rank = df_mape.rank()
return df_comb, df_mape, df_rank
def _inc_sum_matches_cum_check(df_loc_submission, which_comp):
"""Function for checking if the sum of incident cases matches cumulative
Args:
df_loc_submission (pd.DataFrame): The submission df for a particular location
which_comp (str): The name of the compartment
Returns:
bool: Whether or not sum(inc) == cum for all points in the given df
"""
loc = df_loc_submission.iloc[0, :]['location']
buggy_forecasts = []
if which_comp is None:
comps_to_check_for = ['death', 'case']
else:
comps_to_check_for = [which_comp]
for comp in comps_to_check_for:
df = df_loc_submission.loc[[
comp in x for x in df_loc_submission['target']], :]
grouped = df.groupby(['type', 'quantile'])
for (type, quantile), group in grouped:
cum_diff = group.loc[['cum' in x for x in group['target']], 'value'].diff()
inc = group.loc[['inc' in x for x in group['target']], 'value']
cum_diff = cum_diff.to_numpy()[1:]
inc = inc.to_numpy()[1:]
if int(np.sum(np.logical_not((cum_diff - inc) < 1e-8))) != 0:
print('Sum of inc != cum for {}, {}, {}, {}'.format(
loc, comp, type, quantile))
print(cum_diff, inc)
buggy_forecasts.append((loc, comp, type, quantile))
return len(buggy_forecasts) == 0
def _qtiles_nondec_check(df_loc_submission):
"""Check if qtiles are non decreasing
Args:
df_loc_submission (pd.DataFrame): The submission dataframe for a particular location
Returns:
bool: Whether or not qtiles are non decreasing in given df
"""
grouped = df_loc_submission[df_loc_submission['type']
== 'quantile'].groupby('target')
nondec_check = [sum(np.diff(group['value']) < 0) > 0 for _, group in grouped]
nondec_check = np.array(nondec_check)
return sum(nondec_check) > 0
def _qtiles_nondec_correct(df_loc_submission):
"""If qtiles are not non decreasing, correct them
Args:
df_loc_submission (pd.DataFrame): The submission dataframe for a particular location
Returns:
pd.DataFrame: Corrected df
"""
grouped = df_loc_submission[df_loc_submission['type']
== 'quantile'].groupby('target')
for _, group in grouped:
diff_less_than_0 = np.diff(group['value']) < 0
if sum(diff_less_than_0) > 0:
indices = np.where(diff_less_than_0 == True)[0]
for idx in indices:
df_idx1, df_idx2 = (group.iloc[idx, :].name,
group.iloc[idx+1, :].name)
df_loc_submission.loc[df_idx2, 'value'] = df_loc_submission.loc[df_idx1, 'value']
return df_loc_submission
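# For illustration: if a target's quantile values run [10, 9, 12], the 9 is
# overwritten with the preceding value, giving [10, 10, 12].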
def format_wiai_submission(predictions_dict, loc_name_to_key_dict, formatting_mode='analysis',
use_as_point_forecast='ensemble_mean', which_comp=None, skip_percentiles=False):
"""Function for formatting our submission in the reichlab format
Args:
predictions_dict (dict): Predictions dict of all locations
loc_name_to_key_dict (dict): Dict mapping location names to location key
use_as_point_forecast (str, optional): Which forecast to use as point forecast ('best'/'ensemble_mean').
Defaults to 'ensemble_mean'.
skip_percentiles (bool, optional): If true, processing of all percentiles is skipped. Defaults to False.
Returns:
pd.DataFrame: Processed Wadhwani AI submission
"""
end_date = list(predictions_dict.values())[0]['run_params']['split']['end_date']
columns = ['forecast_date', 'target', 'target_end_date', 'location', 'type',
'quantile', 'value', 'model']
df_wiai_submission = pd.DataFrame(columns=columns)
# Loop across all locations
for loc in predictions_dict.keys():
df_loc_submission = | pd.DataFrame(columns=columns) | pandas.DataFrame |
# Generate the February dataset for the boosting approach. This approach has a few extra variables, including sums and averages of past values
import gc
gc.collect()
import pandas as pd
import seaborn as sns
import numpy as np
#%% Load the data. With the boosting dataset I did not test removing one day from March and adding it to February, for lack of time
# February and March are taken as they come
train = pd.read_parquet(r'C:\Users\argomezja\Desktop\Data Science\MELI challenge\train_data.parquet', engine='pyarrow')
#Cambio las variables object a categoricas
for col in ['currency', 'listing_type', 'shipping_logistic_type', 'shipping_payment']:
train[col] = train[col].astype('category')
train['date'] = pd.to_datetime(train['date'])
train['day'] =train.date.dt.day
train['month'] = train.date.dt.month
train['listing_type'] = train['listing_type'].factorize()[0]
train['shipping_logistic_type'] = train['shipping_logistic_type'].factorize()[0]
train['shipping_payment'] = train['shipping_payment'].factorize()[0]
febrero = train.loc[train['month']==2]
marzo = train.loc[train['month']==3]
febrero.to_csv('febrero_limpio.csv.gz',index=False, compression="gzip")
marzo.to_csv('marzo_limpio.csv.gz',index=False, compression="gzip")
#%% February
febrero = pd.read_csv(r'C:\Users\argomezja\Desktop\Data Science\MELI challenge\Project MELI\Dataset_limpios\febrero_limpio.csv.gz')
# Handle the price better: min-max normalize current_price within each currency
febrero = febrero.assign(current_price=febrero.groupby('currency')['current_price'].transform(lambda x: (x - x.min()) / (x.max() - x.min())))
subtest1 = febrero[['sku', 'day', 'sold_quantity']]
subtest1= subtest1.pivot_table(index = 'sku', columns= 'day', values = 'sold_quantity').add_prefix('sales')
subtest2 = febrero[['sku', 'day', 'current_price']]
subtest2= subtest2.pivot_table(index = 'sku', columns= 'day', values = 'current_price').add_prefix('price')
subtest3 = febrero[['sku', 'day', 'minutes_active']]
subtest3= subtest3.pivot_table(index = 'sku', columns= 'day', values = 'minutes_active').add_prefix('active_time')
subtest4 = febrero[['sku', 'day', 'listing_type']]
subtest4= subtest4.pivot_table(index = 'sku', columns= 'day', values = 'listing_type').add_prefix('listing_type')
subtest6 = febrero[['sku', 'day', 'shipping_logistic_type']]
subtest6= subtest6.pivot_table(index = 'sku', columns= 'day', values = 'shipping_logistic_type').add_prefix('shipping_logistic_type')
subtest7 = febrero[['sku', 'day', 'shipping_payment']]
subtest7= subtest7.pivot_table(index = 'sku', columns= 'day', values = 'shipping_payment').add_prefix('shipping_payment')
final = pd.merge(subtest1, subtest2, left_index=True, right_index=True )
final = pd.merge(final, subtest3, left_index=True, right_index=True)
final = pd.merge(final, subtest4, left_index=True, right_index=True)
final = pd.merge(final, subtest6, left_index=True, right_index=True)
final = pd.merge(final, subtest7, left_index=True, right_index=True)
del subtest1,subtest2,subtest3,subtest4,subtest6, subtest7
#%% Rolling averages over 3/7/15/20 days
febrero_test = febrero.sort_values(['sku','day']).reset_index(drop=True).copy()
febrero_test['promedio_3'] = febrero.groupby(['sku'])['sold_quantity'].rolling(3, min_periods=3).mean().reset_index(drop=True)
febrero_test['promedio_7'] = febrero.groupby(['sku'])['sold_quantity'].rolling(7, min_periods=7).mean().reset_index(drop=True)
febrero_test['promedio_15'] = febrero.groupby(['sku'])['sold_quantity'].rolling(15, min_periods=15).mean().reset_index(drop=True)
febrero_test['promedio_20'] = febrero.groupby(['sku'])['sold_quantity'].rolling(20, min_periods=20).mean().reset_index(drop=True)
# Pivot and merge
subtest3 = febrero_test[['sku', 'day', 'promedio_3']]
subtest3= subtest3.pivot_table(index = 'sku', columns= 'day', values = 'promedio_3', dropna=False).add_prefix('promedio_3')
subtest4 = febrero_test[['sku', 'day', 'promedio_7']]
subtest4= subtest4.pivot_table(index = 'sku', columns= 'day', values = 'promedio_7', dropna=False).add_prefix('promedio_7')
subtest6 = febrero_test[['sku', 'day', 'promedio_15']]
subtest6= subtest6.pivot_table(index = 'sku', columns= 'day', values = 'promedio_15', dropna=False).add_prefix('promedio_15')
subtest7 = febrero_test[['sku', 'day', 'promedio_20']]
subtest7= subtest7.pivot_table(index = 'sku', columns= 'day', values = 'promedio_20', dropna=False).add_prefix('promedio_20')
final = pd.merge(final, subtest3, left_index=True, right_index=True)
final = pd.merge(final, subtest4, left_index=True, right_index=True)
final = pd.merge(final, subtest6, left_index=True, right_index=True)
final = pd.merge(final, subtest7, left_index=True, right_index=True)
final = final.dropna(axis=1, how='all')
del subtest3,subtest4,subtest6, subtest7
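# Illustrative refactoring sketch (editor's addition, not used by the original pipeline): the
# pivot-and-merge pattern above is repeated for every rolling feature below, so a small helper
# like this hypothetical `pivot_and_merge` could replace those blocks. It assumes the same
# sku-by-day wide layout produced by the pivot_table calls above.
def pivot_and_merge(base, source, value_col):
    """Pivot value_col by sku/day and join it onto base on the sku index."""
    wide = source[['sku', 'day', value_col]].pivot_table(
        index='sku', columns='day', values=value_col, dropna=False).add_prefix(value_col)
    return pd.merge(base, wide, left_index=True, right_index=True)
# Example, equivalent to the promedio_7 block above:
# final = pivot_and_merge(final, febrero_test, 'promedio_7')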
febrero_test['promedio_3_active_time'] = febrero.groupby(['sku'])['minutes_active'].rolling(3, min_periods=3).mean().reset_index(drop=True)
febrero_test['promedio_7_active_time'] = febrero.groupby(['sku'])['minutes_active'].rolling(7, min_periods=7).mean().reset_index(drop=True)
febrero_test['promedio_15_active_time'] = febrero.groupby(['sku'])['minutes_active'].rolling(15, min_periods=15).mean().reset_index(drop=True)
febrero_test['promedio_20_active_time'] = febrero.groupby(['sku'])['minutes_active'].rolling(20, min_periods=20).mean().reset_index(drop=True)
# Pivot and merge
subtest3 = febrero_test[['sku', 'day', 'promedio_3_active_time']]
subtest3= subtest3.pivot_table(index = 'sku', columns= 'day', values = 'promedio_3_active_time', dropna=False).add_prefix('promedio_3_active_time')
subtest4 = febrero_test[['sku', 'day', 'promedio_7_active_time']]
subtest4= subtest4.pivot_table(index = 'sku', columns= 'day', values = 'promedio_7_active_time', dropna=False).add_prefix('promedio_7_active_time')
subtest6 = febrero_test[['sku', 'day', 'promedio_15_active_time']]
subtest6= subtest6.pivot_table(index = 'sku', columns= 'day', values = 'promedio_15_active_time', dropna=False).add_prefix('promedio_15_active_time')
subtest7 = febrero_test[['sku', 'day', 'promedio_20_active_time']]
subtest7= subtest7.pivot_table(index = 'sku', columns= 'day', values = 'promedio_20_active_time', dropna=False).add_prefix('promedio_20_active_time')
final = pd.merge(final, subtest3, left_index=True, right_index=True)
final = pd.merge(final, subtest4, left_index=True, right_index=True)
final = pd.merge(final, subtest6, left_index=True, right_index=True)
final = pd.merge(final, subtest7, left_index=True, right_index=True)
final = final.dropna(axis=1, how='all')
del subtest3,subtest4,subtest6, subtest7
# Rolling sums of active time
febrero_test['suma_3_active_time'] = febrero.groupby(['sku'])['minutes_active'].rolling(3, min_periods=3).sum().reset_index(drop=True)
febrero_test['suma_7_active_time'] = febrero.groupby(['sku'])['minutes_active'].rolling(7, min_periods=7).sum().reset_index(drop=True)
febrero_test['suma_15_active_time'] = febrero.groupby(['sku'])['minutes_active'].rolling(15, min_periods=15).sum().reset_index(drop=True)
febrero_test['suma_20_active_time'] = febrero.groupby(['sku'])['minutes_active'].rolling(20, min_periods=20).sum().reset_index(drop=True)
# Pivot and merge
subtest3 = febrero_test[['sku', 'day', 'suma_3_active_time']]
subtest3= subtest3.pivot_table(index = 'sku', columns= 'day', values = 'suma_3_active_time', dropna=False).add_prefix('suma_3_active_time')
subtest4 = febrero_test[['sku', 'day', 'suma_7_active_time']]
subtest4= subtest4.pivot_table(index = 'sku', columns= 'day', values = 'suma_7_active_time', dropna=False).add_prefix('suma_7_active_time')
subtest6 = febrero_test[['sku', 'day', 'suma_15_active_time']]
subtest6= subtest6.pivot_table(index = 'sku', columns= 'day', values = 'suma_15_active_time', dropna=False).add_prefix('suma_15_active_time')
subtest7 = febrero_test[['sku', 'day', 'suma_20_active_time']]
subtest7= subtest7.pivot_table(index = 'sku', columns= 'day', values = 'suma_20_active_time', dropna=False).add_prefix('suma_20_active_time')
final = pd.merge(final, subtest3, left_index=True, right_index=True)
final = pd.merge(final, subtest4, left_index=True, right_index=True)
final = pd.merge(final, subtest6, left_index=True, right_index=True)
final = pd.merge(final, subtest7, left_index=True, right_index=True)
final = final.dropna(axis=1, how='all')
del subtest3,subtest4,subtest6, subtest7
# Rolling sums of sold quantity
febrero_test['sumas_3'] = febrero.groupby(['sku'])['sold_quantity'].rolling(3, min_periods=3).sum().reset_index(drop=True)
febrero_test['sumas_7'] = febrero.groupby(['sku'])['sold_quantity'].rolling(7, min_periods=7).sum().reset_index(drop=True)
febrero_test['sumas_15'] = febrero.groupby(['sku'])['sold_quantity'].rolling(15, min_periods=15).sum().reset_index(drop=True)
febrero_test['sumas_20'] = febrero.groupby(['sku'])['sold_quantity'].rolling(20, min_periods=20).sum().reset_index(drop=True)
# Pivot and merge
subtest3 = febrero_test[['sku', 'day', 'sumas_3']]
subtest3= subtest3.pivot_table(index = 'sku', columns= 'day', values = 'sumas_3', dropna=False).add_prefix('sumas_3')
subtest4 = febrero_test[['sku', 'day', 'sumas_7']]
subtest4= subtest4.pivot_table(index = 'sku', columns= 'day', values = 'sumas_7', dropna=False).add_prefix('sumas_7')
subtest6 = febrero_test[['sku', 'day', 'sumas_15']]
subtest6= subtest6.pivot_table(index = 'sku', columns= 'day', values = 'sumas_15', dropna=False).add_prefix('sumas_15')
subtest7 = febrero_test[['sku', 'day', 'sumas_20']]
subtest7= subtest7.pivot_table(index = 'sku', columns= 'day', values = 'sumas_20', dropna=False).add_prefix('sumas_20')
final = pd.merge(final, subtest3, left_index=True, right_index=True)
final = pd.merge(final, subtest4, left_index=True, right_index=True)
__author__ = 'brendan'
import main
import pandas as pd
import numpy as np
from datetime import datetime as dt
from matplotlib import pyplot as plt
import random
import itertools
import time
import dateutil
from datetime import timedelta
cols = ['BoP FA Net', 'BoP FA OI Net', 'BoP FA PI Net', 'CA % GDP']
raw_data = pd.read_csv('raw_data/BoP_UK.csv', index_col=0, parse_dates=True)
import pandas as pd
import numpy as np
from pandas._testing import assert_frame_equal
from NEMPRO import planner, units
def test_start_off_with_initial_down_time_of_zero():
forward_data = pd.DataFrame({
'interval': [0, 1, 2],
'nsw-energy': [200, 200, 200]})
p = planner.DispatchPlanner(dispatch_interval=60, planning_horizon=3)
u = units.GenericUnit(p, initial_dispatch=0.0)
u.set_service_region('energy', 'nsw')
u.add_to_market_energy_flow(capacity=100.0)
u.add_primary_energy_source(capacity=100.0)
u.add_unit_minimum_operating_level(min_loading=50.0, shutdown_ramp_rate=100.0, start_up_ramp_rate=100.0,
min_up_time=60, min_down_time=120, time_in_initial_state=0)
p.add_regional_market('nsw', 'energy', forward_data)
p.optimise()
dispatch = u.get_dispatch()
expect_dispatch = pd.DataFrame({
'interval': [0, 1, 2],
'net_dispatch': [0.0, 0.0, 100.0]
})
assert_frame_equal(expect_dispatch, dispatch)
def test_start_off_with_initial_down_time_less_than_min_down_time():
forward_data = pd.DataFrame({
'interval': [0, 1, 2],
'nsw-energy': [200, 200, 200]})
p = planner.DispatchPlanner(dispatch_interval=60, planning_horizon=3)
u = units.GenericUnit(p, initial_dispatch=0.0)
u.set_service_region('energy', 'nsw')
u.add_to_market_energy_flow(100.0)
u.add_primary_energy_source(100.0)
u.add_unit_minimum_operating_level(min_loading=50.0, shutdown_ramp_rate=100.0, start_up_ramp_rate=100.0,
min_up_time=60, min_down_time=120, time_in_initial_state=60)
p.add_regional_market('nsw', 'energy', forward_data)
p.optimise()
dispatch = u.get_dispatch()
expect_dispatch = pd.DataFrame({
'interval': [0, 1, 2],
'net_dispatch': [0.0, 100.0, 100.0]
})
assert_frame_equal(expect_dispatch, dispatch)
def test_start_off_with_initial_down_time_equal_to_min_down_time():
forward_data = pd.DataFrame({
'interval': [0, 1, 2],
'nsw-energy': [200, 200, 200]})
p = planner.DispatchPlanner(dispatch_interval=60, planning_horizon=3)
u = units.GenericUnit(p, initial_dispatch=0.0)
u.set_service_region('energy', 'nsw')
u.add_to_market_energy_flow(100.0)
u.add_primary_energy_source(100.0)
u.add_unit_minimum_operating_level(min_loading=50.0, shutdown_ramp_rate=100.0, start_up_ramp_rate=100.0,
min_up_time=60, min_down_time=120, time_in_initial_state=120)
p.add_regional_market('nsw', 'energy', forward_data)
p.optimise()
dispatch = u.get_dispatch()
expect_dispatch = pd.DataFrame({
'interval': [0, 1, 2],
'net_dispatch': [100.0, 100.0, 100.0]
})
assert_frame_equal(expect_dispatch, dispatch)
def test_start_on_with_initial_up_time_of_zero():
forward_data = pd.DataFrame({
'interval': [0, 1, 2],
'nsw-energy': [200, 200, 200]})
p = planner.DispatchPlanner(dispatch_interval=60, planning_horizon=3)
u = units.GenericUnit(p, initial_dispatch=50.0)
u.set_service_region('energy', 'nsw')
u.add_to_market_energy_flow(100.0)
u.add_primary_energy_source(100.0, cost=1000.0)
u.add_unit_minimum_operating_level(min_loading=50.0, shutdown_ramp_rate=100.0, start_up_ramp_rate=100.0,
min_up_time=120, min_down_time=120, time_in_initial_state=0)
p.add_regional_market('nsw', 'energy', forward_data)
p.optimise()
dispatch = u.get_dispatch()
expect_dispatch = pd.DataFrame({
'interval': [0, 1, 2],
'net_dispatch': [50.0, 50.0, 0.0]
})
assert_frame_equal(expect_dispatch, dispatch)
def test_start_on_with_initial_up_time_less_than_min_up_time():
forward_data = pd.DataFrame({
'interval': [0, 1, 2],
'nsw-energy': [200, 200, 200]})
p = planner.DispatchPlanner(dispatch_interval=60, planning_horizon=3)
u = units.GenericUnit(p, initial_dispatch=50.0)
u.set_service_region('energy', 'nsw')
u.add_to_market_energy_flow(100.0)
u.add_primary_energy_source(100.0, cost=1000.0)
u.add_unit_minimum_operating_level(min_loading=50.0, shutdown_ramp_rate=100.0, start_up_ramp_rate=100.0,
min_up_time=120, min_down_time=120, time_in_initial_state=60)
p.add_regional_market('nsw', 'energy', forward_data)
p.optimise()
dispatch = u.get_dispatch()
expect_dispatch = pd.DataFrame({
'interval': [0, 1, 2],
'net_dispatch': [50.0, 0.0, 0.0]
})
assert_frame_equal(expect_dispatch, dispatch)
def test_start_on_with_initial_up_time_equal_to_up_time():
forward_data = pd.DataFrame({
'interval': [0, 1, 2],
'nsw-energy': [200, 200, 200]})
p = planner.DispatchPlanner(dispatch_interval=60, planning_horizon=3)
u = units.GenericUnit(p, initial_dispatch=50.0)
u.set_service_region('energy', 'nsw')
u.add_to_market_energy_flow(100.0)
u.add_primary_energy_source(100.0, cost=1000.0)
u.add_unit_minimum_operating_level(min_loading=50.0, shutdown_ramp_rate=100.0, start_up_ramp_rate=100.0,
min_up_time=120, min_down_time=120, time_in_initial_state=120)
p.add_regional_market('nsw', 'energy', forward_data)
p.optimise()
dispatch = u.get_dispatch()
expect_dispatch = pd.DataFrame({
'interval': [0, 1, 2],
'net_dispatch': [0.0, 0.0, 0.0]
})
assert_frame_equal(expect_dispatch, dispatch)
def test_start_on_with_initial_up_time_less_than_min_up_time_check_stays_on():
forward_data = pd.DataFrame({
'interval': [0, 1, 2],
'nsw-energy': [200, 200, 200]})
p = planner.DispatchPlanner(dispatch_interval=60, planning_horizon=3)
u = units.GenericUnit(p, initial_dispatch=50.0)
u.set_service_region('energy', 'nsw')
u.add_to_market_energy_flow(100.0)
u.add_primary_energy_source(100.0, cost=-500.0)
u.add_unit_minimum_operating_level(min_loading=50.0, shutdown_ramp_rate=100.0, start_up_ramp_rate=100.0,
min_up_time=120, min_down_time=120, time_in_initial_state=60)
p.add_regional_market('nsw', 'energy', forward_data)
p.optimise()
dispatch = u.get_dispatch()
expect_dispatch = pd.DataFrame({
'interval': [0, 1, 2],
'net_dispatch': [100.0, 100.0, 100.0]
})
assert_frame_equal(expect_dispatch, dispatch)
def test_min_down_time_120_min_constraint():
forward_data = pd.DataFrame({
'interval': [0, 1, 2, 3, 4, 5],
'nsw-energy': [500, 500, 499, 0.0, 500, 500]})
p = planner.DispatchPlanner(dispatch_interval=60, planning_horizon=6)
u = units.GenericUnit(p, initial_dispatch=50.0)
u.set_service_region('energy', 'nsw')
u.add_to_market_energy_flow(100.0)
u.add_primary_energy_source(100.0, cost=400.0)
u.add_unit_minimum_operating_level(min_loading=50.0, shutdown_ramp_rate=100.0, start_up_ramp_rate=100.0,
min_up_time=120, min_down_time=120, time_in_initial_state=60)
p.add_regional_market('nsw', 'energy', forward_data)
p.optimise()
dispatch = u.get_dispatch()
expect_dispatch = pd.DataFrame({
'interval': [0, 1, 2, 3, 4, 5],
'net_dispatch': [100.0, 100.0, 0.0, 0.0, 100.0, 100.0]
})
assert_frame_equal(expect_dispatch, dispatch)
def test_min_down_time_60_min_constraint():
forward_data = pd.DataFrame({
'interval': [0, 1, 2, 3, 4, 5],
'nsw-energy': [500, 500, 499, 0.0, 500, 500]})
p = planner.DispatchPlanner(dispatch_interval=60, planning_horizon=6)
u = units.GenericUnit(p, initial_dispatch=50.0)
u.set_service_region('energy', 'nsw')
u.add_to_market_energy_flow(100.0)
u.add_primary_energy_source(100.0, cost=400.0)
u.add_unit_minimum_operating_level(min_loading=50.0, shutdown_ramp_rate=100.0, start_up_ramp_rate=100.0,
min_up_time=120, min_down_time=60, time_in_initial_state=60)
p.add_regional_market('nsw', 'energy', forward_data)
p.optimise()
dispatch = u.get_dispatch()
expect_dispatch = pd.DataFrame({
'interval': [0, 1, 2, 3, 4, 5],
'net_dispatch': [100.0, 100.0, 100.0, 0.0, 100.0, 100.0]
})
assert_frame_equal(expect_dispatch, dispatch)
def test_min_up_time_120_min_constraint():
forward_data = pd.DataFrame({
'interval': [0, 1, 2, 3, 4, 5],
'nsw-energy': [0.0, 0.0, 0.0, 500.0, 1.0, 0.0]})
p = planner.DispatchPlanner(dispatch_interval=60, planning_horizon=6)
u = units.GenericUnit(p, initial_dispatch=0.0)
u.set_service_region('energy', 'nsw')
u.add_to_market_energy_flow(100.0)
u.add_primary_energy_source(100.0, cost=50.0)
u.add_unit_minimum_operating_level(min_loading=50.0, shutdown_ramp_rate=100.0, start_up_ramp_rate=100.0,
min_up_time=120, min_down_time=60, time_in_initial_state=60)
p.add_regional_market('nsw', 'energy', forward_data)
p.optimise()
dispatch = u.get_dispatch()
expect_dispatch = pd.DataFrame({
'interval': [0, 1, 2, 3, 4, 5],
'net_dispatch': [0.0, 0.0, 0.0, 100.0, 50.0, 0.0]
})
    assert_frame_equal(expect_dispatch, dispatch)
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import scipy as sc
import pickle
import os
from . import preprocess
from scipy.sparse import vstack, csr_matrix, csc_matrix, lil_matrix
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import normalize
from . import builders
class Dataset(object):
@staticmethod
def load():
train = pd.read_csv('data/train_final.csv', delimiter='\t')
playlists = pd.read_csv('data/playlists_final.csv', delimiter='\t')
        target_playlists = pd.read_csv('data/target_playlists.csv', delimiter='\t')
import ast
import time
import numpy as np
import pandas as pd
from copy import deepcopy
from typing import Any
from matplotlib import dates as mdates
from scipy import stats
from aistac.components.aistac_commons import DataAnalytics
from ds_discovery.components.transitioning import Transition
from ds_discovery.components.commons import Commons
from aistac.properties.abstract_properties import AbstractPropertyManager
from ds_discovery.components.discovery import DataDiscovery
from ds_discovery.intent.abstract_common_intent import AbstractCommonsIntentModel
__author__ = '<NAME>'
class AbstractBuilderIntentModel(AbstractCommonsIntentModel):
_INTENT_PARAMS = ['self', 'save_intent', 'column_name', 'intent_order',
'replace_intent', 'remove_duplicates', 'seed']
def __init__(self, property_manager: AbstractPropertyManager, default_save_intent: bool=None,
default_intent_level: [str, int, float]=None, default_intent_order: int=None,
default_replace_intent: bool=None):
"""initialisation of the Intent class.
:param property_manager: the property manager class that references the intent contract.
:param default_save_intent: (optional) The default action for saving intent in the property manager
:param default_intent_level: (optional) the default level intent should be saved at
:param default_intent_order: (optional) if the default behaviour for the order should be next available order
:param default_replace_intent: (optional) the default replace existing intent behaviour
"""
default_save_intent = default_save_intent if isinstance(default_save_intent, bool) else True
default_replace_intent = default_replace_intent if isinstance(default_replace_intent, bool) else True
default_intent_level = default_intent_level if isinstance(default_intent_level, (str, int, float)) else 'A'
default_intent_order = default_intent_order if isinstance(default_intent_order, int) else 0
intent_param_exclude = ['size']
intent_type_additions = [np.int8, np.int16, np.int32, np.int64, np.float32, np.float64, pd.Timestamp]
super().__init__(property_manager=property_manager, default_save_intent=default_save_intent,
intent_param_exclude=intent_param_exclude, default_intent_level=default_intent_level,
default_intent_order=default_intent_order, default_replace_intent=default_replace_intent,
intent_type_additions=intent_type_additions)
def run_intent_pipeline(self, canonical: Any=None, intent_levels: [str, int, list]=None, run_book: str=None,
seed: int=None, simulate: bool=None, **kwargs) -> pd.DataFrame:
"""Collectively runs all parameterised intent taken from the property manager against the code base as
defined by the intent_contract. The whole run can be seeded though any parameterised seeding in the intent
contracts will take precedence
:param canonical: a direct or generated pd.DataFrame. see context notes below
:param intent_levels: (optional) a single or list of intent_level to run in order given
:param run_book: (optional) a preset runbook of intent_level to run in order
:param seed: (optional) a seed value that will be applied across the run: default to None
:param simulate: (optional) returns a report of the order of run and return the indexed column order of run
:return: a pandas dataframe
"""
simulate = simulate if isinstance(simulate, bool) else False
col_sim = {"column": [], "order": [], "method": []}
# legacy
if 'size' in kwargs.keys():
canonical = kwargs.pop('size')
canonical = self._get_canonical(canonical)
size = canonical.shape[0] if canonical.shape[0] > 0 else 1000
# test if there is any intent to run
if self._pm.has_intent():
# get the list of levels to run
if isinstance(intent_levels, (str, list)):
column_names = Commons.list_formatter(intent_levels)
elif isinstance(run_book, str) and self._pm.has_run_book(book_name=run_book):
column_names = self._pm.get_run_book(book_name=run_book)
else:
# put all the intent in order of model, get, correlate, associate
_model = []
_get = []
_correlate = []
_frame_start = []
_frame_end = []
for column in self._pm.get_intent().keys():
for order in self._pm.get(self._pm.join(self._pm.KEY.intent_key, column), {}):
for method in self._pm.get(self._pm.join(self._pm.KEY.intent_key, column, order), {}).keys():
if str(method).startswith('get_'):
if column in _correlate + _frame_start + _frame_end:
continue
_get.append(column)
elif str(method).startswith('model_'):
_model.append(column)
elif str(method).startswith('correlate_'):
if column in _get:
_get.remove(column)
_correlate.append(column)
elif str(method).startswith('frame_'):
if column in _get:
_get.remove(column)
if str(method).startswith('frame_starter'):
_frame_start.append(column)
else:
_frame_end.append(column)
column_names = Commons.list_unique(_frame_start + _get + _model + _correlate + _frame_end)
for column in column_names:
level_key = self._pm.join(self._pm.KEY.intent_key, column)
for order in sorted(self._pm.get(level_key, {})):
for method, params in self._pm.get(self._pm.join(level_key, order), {}).items():
try:
if method in self.__dir__():
if simulate:
col_sim['column'].append(column)
col_sim['order'].append(order)
col_sim['method'].append(method)
continue
result = []
params.update(params.pop('kwargs', {}))
if isinstance(seed, int):
params.update({'seed': seed})
_ = params.pop('intent_creator', 'Unknown')
if str(method).startswith('get_'):
result = eval(f"self.{method}(size=size, save_intent=False, **params)",
globals(), locals())
elif str(method).startswith('correlate_'):
result = eval(f"self.{method}(canonical=canonical, save_intent=False, **params)",
globals(), locals())
elif str(method).startswith('model_'):
canonical = eval(f"self.{method}(canonical=canonical, save_intent=False, **params)",
globals(), locals())
continue
elif str(method).startswith('frame_starter'):
canonical = self._get_canonical(params.pop('canonical', canonical), deep_copy=False)
size = canonical.shape[0]
canonical = eval(f"self.{method}(canonical=canonical, save_intent=False, **params)",
globals(), locals())
continue
elif str(method).startswith('frame_'):
canonical = eval(f"self.{method}(canonical=canonical, save_intent=False, **params)",
globals(), locals())
continue
if 0 < size != len(result):
raise IndexError(f"The index size of '{column}' is '{len(result)}', "
f"should be {size}")
canonical[column] = result
except ValueError as ve:
raise ValueError(f"intent '{column}', order '{order}', method '{method}' failed with: {ve}")
except TypeError as te:
raise TypeError(f"intent '{column}', order '{order}', method '{method}' failed with: {te}")
if simulate:
return pd.DataFrame.from_dict(col_sim)
return canonical
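    # Illustrative usage sketch (editor's addition): running the pipeline through a concrete
    # builder task. The SyntheticBuilder task, its `tools` property and the 'demo' task name are
    # assumptions, not defined in this module.
    #   builder = SyntheticBuilder.from_env('demo')
    #   report = builder.tools.run_intent_pipeline(canonical=1000, simulate=True)   # run-order report only
    #   df = builder.tools.run_intent_pipeline(canonical=1000)                      # execute the pipeline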
def _get_number(self, from_value: [int, float]=None, to_value: [int, float]=None, relative_freq: list=None,
precision: int=None, ordered: str=None, at_most: int=None, size: int=None,
seed: int=None) -> list:
""" returns a number in the range from_value to to_value. if only to_value given from_value is zero
:param from_value: (signed) integer to start from
:param to_value: optional, (signed) integer the number sequence goes to but not include
:param relative_freq: a weighting pattern or probability that does not have to add to 1
:param precision: the precision of the returned number. if None then assumes int value else float
:param ordered: order the data ascending 'asc' or descending 'dec', values accepted 'asc' or 'des'
:param at_most: the most times a selection should be chosen
:param size: the size of the sample
:param seed: a seed value for the random function: default to None
"""
if not isinstance(from_value, (int, float)) and not isinstance(to_value, (int, float)):
raise ValueError(f"either a 'range_value' or a 'range_value' and 'to_value' must be provided")
if not isinstance(from_value, (float, int)):
from_value = 0
if not isinstance(to_value, (float, int)):
(from_value, to_value) = (0, from_value)
if to_value <= from_value:
raise ValueError("The number range must be a positive different, found to_value <= from_value")
at_most = 0 if not isinstance(at_most, int) else at_most
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
precision = 3 if not isinstance(precision, int) else precision
if precision == 0:
from_value = int(round(from_value, 0))
to_value = int(round(to_value, 0))
is_int = True if (isinstance(to_value, int) and isinstance(from_value, int)) else False
if is_int:
precision = 0
# build the distribution sizes
if isinstance(relative_freq, list) and len(relative_freq) > 1:
freq_dist_size = self._freq_dist_size(relative_freq=relative_freq, size=size, seed=_seed)
else:
freq_dist_size = [size]
# generate the numbers
rtn_list = []
generator = np.random.default_rng(seed=_seed)
dtype = int if is_int else float
bins = np.linspace(from_value, to_value, len(freq_dist_size) + 1, dtype=dtype)
for idx in np.arange(1, len(bins)):
low = bins[idx - 1]
high = bins[idx]
if low >= high:
continue
elif at_most > 0:
sample = []
for _ in np.arange(at_most, dtype=dtype):
count_size = freq_dist_size[idx - 1] * generator.integers(2, 4, size=1)[0]
sample += list(set(np.linspace(bins[idx - 1], bins[idx], num=count_size, dtype=dtype,
endpoint=False)))
if len(sample) < freq_dist_size[idx - 1]:
raise ValueError(f"The value range has insufficient samples to choose from when using at_most."
f"Try increasing the range of values to sample.")
rtn_list += list(generator.choice(sample, size=freq_dist_size[idx - 1], replace=False))
else:
if dtype == int:
rtn_list += generator.integers(low=low, high=high, size=freq_dist_size[idx - 1]).tolist()
else:
choice = generator.random(size=freq_dist_size[idx - 1], dtype=float)
choice = np.round(choice * (high-low)+low, precision).tolist()
# make sure the precision
choice = [high - 10**(-precision) if x >= high else x for x in choice]
rtn_list += choice
# order or shuffle the return list
if isinstance(ordered, str) and ordered.lower() in ['asc', 'des']:
rtn_list.sort(reverse=True if ordered.lower() == 'asc' else False)
else:
generator.shuffle(rtn_list)
return rtn_list
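    # Illustrative usage sketch (editor's addition, hypothetical values): 1000 integers between 0
    # and 100 where the upper half of the range is drawn roughly six times more often than the lower half.
    #   values = self._get_number(from_value=0, to_value=100, relative_freq=[1, 6],
    #                             precision=0, size=1000, seed=31)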
def _get_category(self, selection: list, relative_freq: list=None, size: int=None, at_most: int=None,
seed: int=None) -> list:
""" returns a category from a list. Of particular not is the at_least parameter that allows you to
control the number of times a selection can be chosen.
:param selection: a list of items to select from
:param relative_freq: a weighting pattern that does not have to add to 1
:param size: an optional size of the return. default to 1
:param at_most: the most times a selection should be chosen
:param seed: a seed value for the random function: default to None
:return: an item or list of items chosen from the list
"""
if not isinstance(selection, list) or len(selection) == 0:
return [None]*size
_seed = self._seed() if seed is None else seed
select_index = self._get_number(len(selection), relative_freq=relative_freq, at_most=at_most, size=size,
seed=_seed)
rtn_list = [selection[i] for i in select_index]
return list(rtn_list)
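    # Illustrative usage sketch (editor's addition, hypothetical values): draw 100 gender codes
    # where 'M' is five times and 'F' four times as likely as 'U'.
    #   genders = self._get_category(selection=['M', 'F', 'U'], relative_freq=[5, 4, 1],
    #                                size=100, seed=31)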
def _get_datetime(self, start: Any, until: Any, relative_freq: list=None, at_most: int=None, ordered: str=None,
date_format: str=None, as_num: bool=None, ignore_time: bool=None, size: int=None,
seed: int=None, day_first: bool=None, year_first: bool=None) -> list:
""" returns a random date between two date and/or times. weighted patterns can be applied to the overall date
range.
if a signed 'int' type is passed to the start and/or until dates, the inferred date will be the current date
time with the integer being the offset from the current date time in 'days'.
if a dictionary of time delta name values is passed this is treated as a time delta from the start time.
for example if start = 0, until = {days=1, hours=3} the date range will be between now and 1 days and 3 hours
Note: If no patterns are set this will return a linearly random number between the range boundaries.
:param start: the start boundary of the date range can be str, datetime, pd.datetime, pd.Timestamp or int
:param until: up until boundary of the date range can be str, datetime, pd.datetime, pd.Timestamp, pd.delta, int
:param relative_freq: (optional) A pattern across the whole date range.
:param at_most: the most times a selection should be chosen
        :param ordered: order the data ascending 'asc' or descending 'des', values accepted 'asc' or 'des'
:param ignore_time: ignore time elements and only select from Year, Month, Day elements. Default is False
:param date_format: the string format of the date to be returned. if not set then pd.Timestamp returned
:param as_num: returns a list of Matplotlib date values as a float. Default is False
:param size: the size of the sample to return. Default to 1
:param seed: a seed value for the random function: default to None
:param year_first: specifies if to parse with the year first
If True parses dates with the year first, eg 10/11/12 is parsed as 2010-11-12.
            If both day_first and year_first are True, year_first takes precedence (same as dateutil).
:param day_first: specifies if to parse with the day first
If True, parses dates with the day first, eg %d-%m-%Y.
            If False, defaults to the preferred convention, normally %m-%d-%Y (but not strict)
:return: a date or size of dates in the format given.
"""
# pre check
if start is None or until is None:
raise ValueError("The start or until parameters cannot be of NoneType")
# Code block for intent
as_num = False if not isinstance(as_num, bool) else as_num
ignore_time = False if not isinstance(ignore_time, bool) else ignore_time
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
if isinstance(start, int):
start = (pd.Timestamp.now() + pd.Timedelta(days=start))
if isinstance(until, int):
until = (pd.Timestamp.now() + pd.Timedelta(days=until))
if isinstance(until, dict):
until = (start + pd.Timedelta(**until))
if start == until:
rtn_list = [self._convert_date2value(start, day_first=day_first, year_first=year_first)[0]] * size
else:
_dt_start = self._convert_date2value(start, day_first=day_first, year_first=year_first)[0]
_dt_until = self._convert_date2value(until, day_first=day_first, year_first=year_first)[0]
precision = 15
if ignore_time:
_dt_start = int(_dt_start)
_dt_until = int(_dt_until)
precision = 0
rtn_list = self._get_number(from_value=_dt_start, to_value=_dt_until, relative_freq=relative_freq,
at_most=at_most, ordered=ordered, precision=precision, size=size, seed=seed)
if not as_num:
rtn_list = mdates.num2date(rtn_list)
if isinstance(date_format, str):
rtn_list = pd.Series(rtn_list).dt.strftime(date_format).to_list()
else:
rtn_list = pd.Series(rtn_list).dt.tz_convert(None).to_list()
return rtn_list
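    # Illustrative usage sketch (editor's addition, hypothetical values): a signed int is a day
    # offset from now and a dict is a pd.Timedelta from `start`, so this draws 10 formatted dates
    # between 30 days ago and 1 day 3 hours after that point.
    #   dates = self._get_datetime(start=-30, until={'days': 1, 'hours': 3},
    #                              date_format='%Y-%m-%d', size=10, seed=31)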
def _get_intervals(self, intervals: list, relative_freq: list=None, precision: int=None, size: int=None,
seed: int=None) -> list:
""" returns a number based on a list selection of tuple(lower, upper) interval
:param intervals: a list of unique tuple pairs representing the interval lower and upper boundaries
:param relative_freq: a weighting pattern or probability that does not have to add to 1
:param precision: the precision of the returned number. if None then assumes int value else float
:param size: the size of the sample
:param seed: a seed value for the random function: default to None
:return: a random number
"""
# Code block for intent
size = 1 if size is None else size
if not isinstance(precision, int):
precision = 0 if all(isinstance(v[0], int) and isinstance(v[1], int) for v in intervals) else 3
_seed = self._seed() if seed is None else seed
if not all(isinstance(value, tuple) for value in intervals):
raise ValueError("The intervals list must be a list of tuples")
interval_list = self._get_category(selection=intervals, relative_freq=relative_freq, size=size, seed=_seed)
interval_counts = pd.Series(interval_list, dtype='object').value_counts()
rtn_list = []
for index in interval_counts.index:
size = interval_counts[index]
if size == 0:
continue
if len(index) == 2:
(lower, upper) = index
if index == 0:
closed = 'both'
else:
closed = 'right'
else:
(lower, upper, closed) = index
if lower == upper:
rtn_list += [round(lower, precision)] * size
continue
if precision == 0:
margin = 1
else:
margin = 10**(((-1)*precision)-1)
if str.lower(closed) == 'neither':
lower += margin
upper -= margin
elif str.lower(closed) == 'right':
lower += margin
elif str.lower(closed) == 'both':
upper += margin
# correct adjustments
if lower >= upper:
upper = lower + margin
rtn_list += self._get_number(lower, upper, precision=precision, size=size, seed=_seed)
np.random.default_rng(seed=_seed).shuffle(rtn_list)
return rtn_list
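    # Illustrative usage sketch (editor's addition, hypothetical values): draw from two intervals,
    # weighting the second twice as heavily; a third tuple element can set the closed side.
    #   values = self._get_intervals(intervals=[(0, 10), (10, 100, 'both')],
    #                                relative_freq=[1, 2], precision=2, size=500, seed=31)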
def _get_dist_normal(self, mean: float, std: float, size: int=None, seed: int=None) -> list:
"""A normal (Gaussian) continuous random distribution.
:param mean: The mean (“centre”) of the distribution.
:param std: The standard deviation (jitter or “width”) of the distribution. Must be >= 0
:param size: the size of the sample. if a tuple of intervals, size must match the tuple
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.normal(loc=mean, scale=std, size=size))
return rtn_list
def _get_dist_logistic(self, mean: float, std: float, size: int=None, seed: int=None) -> list:
"""A logistic continuous random distribution.
:param mean: The mean (“centre”) of the distribution.
:param std: The standard deviation (jitter or “width”) of the distribution. Must be >= 0
:param size: the size of the sample. if a tuple of intervals, size must match the tuple
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.logistic(loc=mean, scale=std, size=size))
return rtn_list
def _get_dist_exponential(self, scale: [int, float], size: int=None, seed: int=None) -> list:
"""An exponential continuous random distribution.
:param scale: The scale of the distribution.
:param size: the size of the sample. if a tuple of intervals, size must match the tuple
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.exponential(scale=scale, size=size))
return rtn_list
def _get_dist_gumbel(self, mean: float, std: float, size: int=None, seed: int=None) -> list:
"""An gumbel continuous random distribution.
The Gumbel (or Smallest Extreme Value (SEV) or the Smallest Extreme Value Type I) distribution is one of
a class of Generalized Extreme Value (GEV) distributions used in modeling extreme value problems.
The Gumbel is a special case of the Extreme Value Type I distribution for maximums from distributions
with “exponential-like” tails.
:param mean: The mean (“centre”) of the distribution.
:param std: The standard deviation (jitter or “width”) of the distribution. Must be >= 0
:param size: the size of the sample. if a tuple of intervals, size must match the tuple
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.gumbel(loc=mean, scale=std, size=size))
return rtn_list
def _get_dist_binomial(self, trials: int, probability: float, size: int=None, seed: int=None) -> list:
"""A binomial discrete random distribution. The Binomial Distribution represents the number of
successes and failures in n independent Bernoulli trials for some given value of n
:param trials: the number of trials to attempt, must be >= 0.
:param probability: the probability distribution, >= 0 and <=1.
:param size: the size of the sample. if a tuple of intervals, size must match the tuple
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.binomial(n=trials, p=probability, size=size))
return rtn_list
def _get_dist_poisson(self, interval: float, size: int=None, seed: int=None) -> list:
"""A Poisson discrete random distribution.
The Poisson distribution
.. math:: f(k; \lambda)=\frac{\lambda^k e^{-\lambda}}{k!}
For events with an expected separation :math:`\lambda` the Poisson
distribution :math:`f(k; \lambda)` describes the probability of
:math:`k` events occurring within the observed
interval :math:`\lambda`.
Because the output is limited to the range of the C int64 type, a
ValueError is raised when `lam` is within 10 sigma of the maximum
representable value.
:param interval: Expectation of interval, must be >= 0.
:param size: the size of the sample.
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
rtn_list = list(generator.poisson(lam=interval, size=size))
return rtn_list
def _get_dist_bernoulli(self, probability: float, size: int=None, seed: int=None) -> list:
"""A Bernoulli discrete random distribution using scipy
:param probability: the probability occurrence
:param size: the size of the sample
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
rtn_list = list(stats.bernoulli.rvs(p=probability, size=size, random_state=_seed))
return rtn_list
def _get_dist_bounded_normal(self, mean: float, std: float, lower: float, upper: float, precision: int=None,
size: int=None, seed: int=None) -> list:
"""A bounded normal continuous random distribution.
:param mean: the mean of the distribution
:param std: the standard deviation
:param lower: the lower limit of the distribution
:param upper: the upper limit of the distribution
:param precision: the precision of the returned number. if None then assumes int value else float
:param size: the size of the sample
:param seed: a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
precision = precision if isinstance(precision, int) else 3
_seed = self._seed() if seed is None else seed
rtn_list = stats.truncnorm((lower-mean)/std, (upper-mean)/std, loc=mean, scale=std).rvs(size).round(precision)
return rtn_list
def _get_distribution(self, distribution: str, package: str=None, precision: int=None, size: int=None,
seed: int=None, **kwargs) -> list:
"""returns a number based the distribution type.
:param distribution: The string name of the distribution function from numpy random Generator class
:param package: (optional) The name of the package to use, options are 'numpy' (default) and 'scipy'.
:param precision: (optional) the precision of the returned number
:param size: (optional) the size of the sample
:param seed: (optional) a seed value for the random function: default to None
:return: a random number
"""
size = 1 if size is None else size
_seed = self._seed() if seed is None else seed
precision = 3 if precision is None else precision
if isinstance(package, str) and package == 'scipy':
rtn_list = eval(f"stats.{distribution}.rvs(size=size, random_state=_seed, **kwargs)", globals(), locals())
else:
generator = np.random.default_rng(seed=_seed)
rtn_list = eval(f"generator.{distribution}(size=size, **kwargs)", globals(), locals())
rtn_list = list(rtn_list.round(precision))
return rtn_list
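    # Illustrative usage sketch (editor's addition, hypothetical values): kwargs are passed straight
    # to the named distribution, so parameter names must match the chosen package.
    #   gamma_np = self._get_distribution(distribution='gamma', shape=2.0, scale=1.5, size=100)
    #   beta_sp = self._get_distribution(distribution='beta', package='scipy', a=2, b=5, size=100)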
def _get_selection(self, canonical: Any, column_header: str, relative_freq: list=None, sample_size: int=None,
selection_size: int=None, size: int=None, at_most: bool=None, shuffle: bool=None,
seed: int=None) -> list:
""" returns a random list of values where the selection of those values is taken from a connector source.
:param canonical: a pd.DataFrame as the reference dataframe
:param column_header: the name of the column header to correlate
:param relative_freq: (optional) a weighting pattern of the final selection
:param selection_size: (optional) the selection to take from the sample size, normally used with shuffle
:param sample_size: (optional) the size of the sample to take from the reference file
:param at_most: (optional) the most times a selection should be chosen
:param shuffle: (optional) if the selection should be shuffled before selection. Default is true
:param size: (optional) size of the return. default to 1
:param seed: (optional) a seed value for the random function: default to None
:return: list
The canonical is normally a connector contract str reference or a set of parameter instructions on how to
generate a pd.Dataframe but can be a pd.DataFrame. the description of each is:
- pd.Dataframe -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.Dataframe with an index size of the int passed.
- dict -> use canonical2dict(...) to help construct a dict with a 'method' to build a pd.DataFrame
methods:
- model_*(...) -> one of the SyntheticBuilder model methods and parameters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
"""
canonical = self._get_canonical(canonical)
_seed = self._seed() if seed is None else seed
if isinstance(canonical, dict):
canonical = pd.DataFrame.from_dict(data=canonical)
if column_header not in canonical.columns:
raise ValueError(f"The column '{column_header}' not found in the canonical")
_values = canonical[column_header].iloc[:sample_size]
        if isinstance(selection_size, int) and shuffle:
_values = _values.sample(frac=1, random_state=_seed).reset_index(drop=True)
if isinstance(selection_size, int) and 0 < selection_size < _values.size:
_values = _values.iloc[:selection_size]
return self._get_category(selection=_values.to_list(), relative_freq=relative_freq, size=size, at_most=at_most,
seed=_seed)
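    # Illustrative usage sketch (editor's addition, hypothetical reference data): sample 200 values
    # from the 'city' column of a reference frame df_ref, shuffling the 10,000 sampled rows and
    # keeping the first 1,000 before the weighted draw.
    #   cities = self._get_selection(df_ref, column_header='city', sample_size=10000,
    #                                selection_size=1000, shuffle=True, size=200, seed=31)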
def _frame_starter(self, canonical: Any, selection: list=None, headers: [str, list]=None, drop: bool=None,
dtype: [str, list]=None, exclude: bool=None, regex: [str, list]=None, re_ignore_case: bool=None,
rename_map: dict=None, default_size: int=None, seed: int=None) -> pd.DataFrame:
""" Selects rows and/or columns changing the shape of the DatFrame. This is always run last in a pipeline
Rows are filtered before the column filter so columns can be referenced even though they might not be included
the final column list.
:param canonical: a pd.DataFrame as the reference dataframe
:param selection: a list of selections where conditions are filtered on, executed in list order
An example of a selection with the minimum requirements is: (see 'select2dict(...)')
[{'column': 'genre', 'condition': "=='Comedy'"}]
:param headers: a list of headers to drop or filter on type
:param drop: to drop or not drop the headers
:param dtype: the column types to include or exclusive. Default None else int, float, bool, object, 'number'
:param exclude: to exclude or include the dtypes
:param regex: a regular expression to search the headers. example '^((?!_amt).)*$)' excludes '_amt' columns
:param re_ignore_case: true if the regex should ignore case. Default is False
:param rename_map: a from: to dictionary of headers to rename
:param default_size: if the canonical fails return an empty dataframe with the default index size
:param seed: this is a place holder, here for compatibility across methods
:return: pd.DataFrame
The starter is a pd.DataFrame, a pd.Series or list, a connector contract str reference or a set of
parameter instructions on how to generate a pd.Dataframe. the description of each is:
- pd.Dataframe -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.Dataframe with an index size of the int passed.
- dict -> use canonical2dict(...) to help construct a dict with a 'method' to build a pd.DataFrame
methods:
- model_*(...) -> one of the SyntheticBuilder model methods and parameters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
Selections are a list of dictionaries of conditions and optional additional parameters to filter.
To help build conditions there is a static helper method called 'select2dict(...)' that has parameter
options available to build a condition.
An example of a condition with the minimum requirements is
[{'column': 'genre', 'condition': "=='Comedy'"}]
an example of using the helper method
selection = [inst.select2dict(column='gender', condition="=='M'"),
inst.select2dict(column='age', condition=">65", logic='XOR')]
Using the 'select2dict' method ensure the correct keys are used and the dictionary is properly formed. It also
helps with building the logic that is executed in order
"""
canonical = self._get_canonical(canonical, size=default_size)
# not used but in place form method consistency
_seed = self._seed() if seed is None else seed
if isinstance(selection, list):
selection = deepcopy(selection)
# run the select logic
select_idx = self._selection_index(canonical=canonical, selection=selection)
canonical = canonical.iloc[select_idx].reset_index(drop=True)
drop = drop if isinstance(drop, bool) else False
exclude = exclude if isinstance(exclude, bool) else False
re_ignore_case = re_ignore_case if isinstance(re_ignore_case, bool) else False
rtn_frame = Commons.filter_columns(canonical, headers=headers, drop=drop, dtype=dtype, exclude=exclude,
regex=regex, re_ignore_case=re_ignore_case)
if isinstance(rename_map, dict):
rtn_frame.rename(mapper=rename_map, axis='columns', inplace=True)
return rtn_frame
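    # Illustrative usage sketch (editor's addition, hypothetical columns): filter rows with the
    # select2dict helper, keep two headers and rename one of them.
    #   selection = [self.select2dict(column='age', condition='>=65')]
    #   df = self._frame_starter(df, selection=selection, headers=['age', 'gender'],
    #                            rename_map={'gender': 'sex'})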
def _frame_selection(self, canonical: Any, selection: list=None, headers: [str, list]=None,
drop: bool=None, dtype: [str, list]=None, exclude: bool=None, regex: [str, list]=None,
re_ignore_case: bool=None, seed: int=None) -> pd.DataFrame:
""" This method always runs at the start of the pipeline, taking a direct or generated pd.DataFrame,
see context notes below, as the foundation canonical of all subsequent steps of the pipeline.
:param canonical: a direct or generated pd.DataFrame. see context notes below
:param selection: a list of selections where conditions are filtered on, executed in list order
An example of a selection with the minimum requirements is: (see 'select2dict(...)')
[{'column': 'genre', 'condition': "=='Comedy'"}]
:param headers: a list of headers to drop or filter on type
:param drop: to drop or not drop the headers
:param dtype: the column types to include or exclusive. Default None else int, float, bool, object, 'number'
:param exclude: to exclude or include the dtypes
:param regex: a regular expression to search the headers. example '^((?!_amt).)*$)' excludes '_amt' columns
:param re_ignore_case: true if the regex should ignore case. Default is False
:param seed: this is a place holder, here for compatibility across methods
:return: pd.DataFrame
Selections are a list of dictionaries of conditions and optional additional parameters to filter.
To help build conditions there is a static helper method called 'select2dict(...)' that has parameter
options available to build a condition.
An example of a condition with the minimum requirements is
[{'column': 'genre', 'condition': "=='Comedy'"}]
an example of using the helper method
selection = [inst.select2dict(column='gender', condition="=='M'"),
inst.select2dict(column='age', condition=">65", logic='XOR')]
Using the 'select2dict' method ensure the correct keys are used and the dictionary is properly formed. It also
helps with building the logic that is executed in order
"""
return self._frame_starter(canonical=canonical, selection=selection, headers=headers, drop=drop, dtype=dtype,
exclude=exclude, regex=regex, re_ignore_case=re_ignore_case, seed=seed)
def _model_custom(self, canonical: Any, code_str: str, seed: int=None, **kwargs):
""" Commonly used for custom methods, takes code string that when executed changes the the canonical returning
the modified canonical. If the method passes returns a pd.Dataframe this will be returned else the assumption is
the canonical has been changed inplace and thus the modified canonical will be returned
When referencing the canonical in the code_str it should be referenced either by use parameter label 'canonical'
or the short cut '@' symbol. kwargs can also be passed into the code string but must be preceded by a '$' symbol
for example:
assume canonical['gender'] = ['M', 'F', 'U']
code_str ='''
\n@['new_gender'] = [True if x in $value else False for x in @[$header]]
\n@['value'] = [4, 5, 6]
'''
where kwargs are header="'gender'" and value=['M', 'F']
:param canonical: a pd.DataFrame as the reference dataframe
:param code_str: an action on those column values. to reference the canonical use '@'
:param seed: (optional) a seed value for the random function: default to None
:param kwargs: a set of kwargs to include in any executable function
:return: a list (optionally a pd.DataFrame
"""
canonical = self._get_canonical(canonical)
_seed = seed if isinstance(seed, int) else self._seed()
local_kwargs = locals()
for k, v in local_kwargs.pop('kwargs', {}).items():
local_kwargs.update({k: v})
code_str = code_str.replace(f'${k}', str(v))
code_str = code_str.replace('@', 'canonical')
df = exec(code_str, globals(), local_kwargs)
if df is None:
return canonical
return df
def _model_iterator(self, canonical: Any, marker_col: str=None, starting_frame: str=None, selection: list=None,
default_action: dict=None, iteration_actions: dict=None, iter_start: int=None,
iter_stop: int=None, seed: int=None) -> pd.DataFrame:
""" This method allows one to model repeating data subset that has some form of action applied per iteration.
The optional marker column must be included in order to apply actions or apply an iteration marker
An example of use might be a recommender generator where a cohort of unique users need to be selected, for
different recommendation strategies but users can be repeated across recommendation strategy
:param canonical: a pd.DataFrame as the reference dataframe
:param marker_col: (optional) the marker column name for the action outcome. default is to not include
:param starting_frame: (optional) a str referencing an existing connector contract name as the base DataFrame
:param selection: (optional) a list of selections where conditions are filtered on, executed in list order
An example of a selection with the minimum requirements is: (see 'select2dict(...)')
[{'column': 'genre', 'condition': "=='Comedy'"}]
:param default_action: (optional) a default action to take on all iterations. defaults to iteration value
:param iteration_actions: (optional) a dictionary of actions where the key is a specific iteration
:param iter_start: (optional) the start value of the range iteration default is 0
:param iter_stop: (optional) the stop value of the range iteration default is start iteration + 1
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: pd.DataFrame
The starting_frame can be a pd.DataFrame, a pd.Series, int or list, a connector contract str reference or a
set of parameter instructions on how to generate a pd.Dataframe. the description of each is:
- pd.Dataframe -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.Dataframe with an index size of the int passed.
- dict -> use canonical2dict(...) to help construct a dict with a 'method' to build a pd.DataFrame
methods:
- model_*(...) -> one of the SyntheticBuilder model methods and parameters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
Selections are a list of dictionaries of conditions and optional additional parameters to filter.
To help build conditions there is a static helper method called 'select2dict(...)' that has parameter
options available to build a condition.
An example of a condition with the minimum requirements is
[{'column': 'genre', 'condition': "=='Comedy'"}]
an example of using the helper method
selection = [inst.select2dict(column='gender', condition="=='M'"),
inst.select2dict(column='age', condition=">65", logic='XOR')]
Using the 'select2dict' method ensure the correct keys are used and the dictionary is properly formed. It also
helps with building the logic that is executed in order
Actions are the resulting outcome of the selection (or the default). An action can be just a value or a dict
that executes a intent method such as get_number(). To help build actions there is a helper function called
action2dict(...) that takes a method as a mandatory attribute.
With actions there are special keyword 'method' values:
@header: use a column as the value reference, expects the 'header' key
@constant: use a value constant, expects the key 'value'
@sample: use to get sample values, expected 'name' of the Sample method, optional 'shuffle' boolean
@eval: evaluate a code string, expects the key 'code_str' and any locals() required
An example of a simple action to return a selection from a list:
{'method': 'get_category', selection: ['M', 'F', 'U']}
This same action using the helper method would look like:
inst.action2dict(method='get_category', selection=['M', 'F', 'U'])
an example of using the helper method, in this example we use the keyword @header to get a value from another
column at the same index position:
inst.action2dict(method="@header", header='value')
We can even execute some sort of evaluation at run time:
inst.action2dict(method="@eval", code_str='sum(values)', values=[1,4,2,1])
"""
canonical = self._get_canonical(canonical)
rtn_frame = self._get_canonical(starting_frame)
_seed = self._seed() if seed is None else seed
iter_start = iter_start if isinstance(iter_start, int) else 0
iter_stop = iter_stop if isinstance(iter_stop, int) and iter_stop > iter_start else iter_start + 1
default_action = default_action if isinstance(default_action, dict) else 0
iteration_actions = iteration_actions if isinstance(iteration_actions, dict) else {}
for counter in range(iter_start, iter_stop):
df_count = canonical.copy()
# selection
df_count = self._frame_selection(df_count, selection=selection, seed=_seed)
# actions
if isinstance(marker_col, str):
if counter in iteration_actions.keys():
_action = iteration_actions.get(counter, None)
df_count[marker_col] = self._apply_action(df_count, action=_action, seed=_seed)
else:
default_action = default_action if isinstance(default_action, dict) else counter
df_count[marker_col] = self._apply_action(df_count, action=default_action, seed=_seed)
rtn_frame = pd.concat([rtn_frame, df_count], ignore_index=True)
return rtn_frame
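    # Illustrative usage sketch (editor's addition, hypothetical columns): build three cohorts of
    # female customers, tagging each iteration in 'strategy'; iteration 0 gets a named action and
    # the remaining iterations default to the iteration counter.
    #   df_cohorts = self._model_iterator(df, marker_col='strategy', iter_stop=3,
    #                                     selection=[self.select2dict(column='gender', condition="=='F'")],
    #                                     iteration_actions={0: self.action2dict(method='@constant', value='control')})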
def _model_group(self, canonical: Any, headers: [str, list], group_by: [str, list], aggregator: str=None,
list_choice: int=None, list_max: int=None, drop_group_by: bool=False, seed: int=None,
include_weighting: bool=False, freq_precision: int=None, remove_weighting_zeros: bool=False,
remove_aggregated: bool=False) -> pd.DataFrame:
""" returns the full column values directly from another connector data source. in addition the the
standard groupby aggregators there is also 'list' and 'set' that returns an aggregated list or set.
These can be using in conjunction with 'list_choice' and 'list_size' allows control of the return values.
if list_max is set to 1 then a single value is returned rather than a list of size 1.
:param canonical: a pd.DataFrame as the reference dataframe
:param headers: the column headers to apply the aggregation too
:param group_by: the column headers to group by
:param aggregator: (optional) the aggregator as a function of Pandas DataFrame 'groupby' or 'list' or 'set'
:param list_choice: (optional) used in conjunction with list or set aggregator to return a random n choice
:param list_max: (optional) used in conjunction with list or set aggregator restricts the list to a n size
:param drop_group_by: (optional) drops the group by headers
:param include_weighting: (optional) include a percentage weighting column for each aggregated row
:param freq_precision: (optional) a precision for the relative_freq values
:param remove_aggregated: (optional) if used in conjunction with the weighting then drops the aggregator column
:param remove_weighting_zeros: (optional) removes zero values
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
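An illustrative example (the 'inst' instance and the frame content are assumed for demonstration only):
    df = pd.DataFrame({'plan': ['A', 'A', 'B'], 'spend': [10, 20, 5]})
    inst._model_group(df, headers='spend', group_by='plan', aggregator='sum')
    # -> one row per 'plan' with 'spend' summed; aggregator='list' would instead return [10, 20] and [5]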
"""
canonical = self._get_canonical(canonical)
_seed = self._seed() if seed is None else seed
generator = np.random.default_rng(seed=_seed)
freq_precision = freq_precision if isinstance(freq_precision, int) else 3
aggregator = aggregator if isinstance(aggregator, str) else 'sum'
headers = Commons.list_formatter(headers)
group_by = Commons.list_formatter(group_by)
df_sub = Commons.filter_columns(canonical, headers=headers + group_by).dropna()
if aggregator.startswith('set') or aggregator.startswith('list'):
df_tmp = df_sub.groupby(group_by)[headers[0]].apply(eval(aggregator)).apply(lambda x: list(x))
df_tmp = df_tmp.reset_index()
for idx in range(1, len(headers)):
result = df_sub.groupby(group_by)[headers[idx]].apply(eval(aggregator)).apply(lambda x: list(x))
df_tmp = df_tmp.merge(result, how='left', left_on=group_by, right_index=True)
for idx in range(len(headers)):
header = headers[idx]
if isinstance(list_choice, int):
df_tmp[header] = df_tmp[header].apply(lambda x: generator.choice(x, size=list_choice))
if isinstance(list_max, int):
df_tmp[header] = df_tmp[header].apply(lambda x: x[0] if list_max == 1 else x[:list_max])
df_sub = df_tmp
else:
df_sub = df_sub.groupby(group_by, as_index=False).agg(aggregator)
if include_weighting:
df_sub['sum'] = df_sub.sum(axis=1, numeric_only=True)
total = df_sub['sum'].sum()
df_sub['weighting'] = df_sub['sum'].\
apply(lambda x: round((x / total), freq_precision) if isinstance(x, (int, float)) else 0)
df_sub = df_sub.drop(columns='sum')
if remove_weighting_zeros:
df_sub = df_sub[df_sub['weighting'] > 0]
df_sub = df_sub.sort_values(by='weighting', ascending=False)
if remove_aggregated:
df_sub = df_sub.drop(headers, axis=1)
if drop_group_by:
df_sub = df_sub.drop(columns=group_by, errors='ignore')
return df_sub
def _model_merge(self, canonical: Any, other: Any, left_on: str=None, right_on: str=None,
on: str=None, how: str=None, headers: list=None, suffixes: tuple=None, indicator: bool=None,
validate: str=None, seed: int=None) -> pd.DataFrame:
""" returns the full column values directly from another connector data source. The indicator parameter can be
used to mark the merged items.
:param canonical: a pd.DataFrame as the reference dataframe
:param other: a direct or generated pd.DataFrame. see context notes below
:param left_on: the canonical key column(s) to join on
:param right_on: the merging dataset key column(s) to join on
:param on: if the left and right join keys have the same header name this can replace left_on and right_on
:param how: (optional) One of 'left', 'right', 'outer', 'inner'. Defaults to inner. See below for more detailed
description of each method.
:param headers: (optional) a filter on the headers included from the right side
:param suffixes: (optional) A tuple of string suffixes to apply to overlapping columns. Defaults ('', '_dup').
:param indicator: (optional) Add a column to the output DataFrame called _merge with information on the source
of each row. _merge is Categorical-type and takes on a value of left_only for observations whose
merge key only appears in 'left' DataFrame or Series, right_only for observations whose merge key
only appears in 'right' DataFrame or Series, and both if the observation’s merge key is found
in both.
:param validate: (optional) validate : string, default None. If specified, checks if merge is of specified type.
“one_to_one” or “1:1”: checks if merge keys are unique in both left and right datasets.
“one_to_many” or “1:m”: checks if merge keys are unique in left dataset.
“many_to_one” or “m:1”: checks if merge keys are unique in right dataset.
“many_to_many” or “m:m”: allowed, but does not result in checks.
:param seed: this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
The other is a pd.DataFrame, a pd.Series, int or list, a connector contract str reference or a set of
parameter instructions on how to generate a pd.DataFrame. The description of each is:
- pd.DataFrame -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.DataFrame with an index size of the int passed.
- dict -> use canonical2dict(...) to help construct a dict with a 'method' to build a pd.DataFrame
methods:
- model_*(...) -> one of the SyntheticBuilder model methods and parameters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
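An illustrative example (the 'inst' instance and the frame content are assumed for demonstration only):
    left = pd.DataFrame({'pid': [1, 2, 3], 'name': ['a', 'b', 'c']})
    right = pd.DataFrame({'pid': [2, 3, 4], 'spend': [5, 7, 9]})
    inst._model_merge(left, right, on='pid', how='left', indicator=True)
    # -> 'name' and 'spend' joined on 'pid' with a '_merge' column marking 'left_only' or 'both'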
"""
# Code block for intent
canonical = self._get_canonical(canonical)
other = self._get_canonical(other, size=canonical.shape[0])
_seed = self._seed() if seed is None else seed
how = how if isinstance(how, str) and how in ['left', 'right', 'outer', 'inner'] else 'inner'
indicator = indicator if isinstance(indicator, bool) else False
suffixes = suffixes if isinstance(suffixes, tuple) and len(suffixes) == 2 else ('', '_dup')
# Filter on the columns
if isinstance(headers, list):
headers.append(right_on if isinstance(right_on, str) else on)
other = Commons.filter_columns(other, headers=headers)
df_rtn = pd.merge(left=canonical, right=other, how=how, left_on=left_on, right_on=right_on, on=on,
suffixes=suffixes, indicator=indicator, validate=validate)
return df_rtn
def _model_concat(self, canonical: Any, other: Any, as_rows: bool=None, headers: [str, list]=None,
drop: bool=None, dtype: [str, list]=None, exclude: bool=None, regex: [str, list]=None,
re_ignore_case: bool=None, shuffle: bool=None, seed: int=None) -> pd.DataFrame:
""" returns the full column values directly from another connector data source.
:param canonical: a pd.DataFrame as the reference dataframe
:param other: a direct or generated pd.DataFrame. see context notes below
:param as_rows: (optional) how to concatenate, True adds the connector dataset as rows, False as columns
:param headers: (optional) a filter of headers from the 'other' dataset
:param drop: (optional) to drop or not drop the headers if specified
:param dtype: (optional) a filter on data type for the 'other' dataset. int, float, bool, object
:param exclude: (optional) to exclude or include the data types if specified
:param regex: (optional) a regular expression to search the headers. example '^((?!_amt).)*$)' excludes '_amt'
:param re_ignore_case: (optional) true if the regex should ignore case. Default is False
:param shuffle: (optional) if the rows in the loaded canonical should be shuffled
:param seed: this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
The other is a pd.DataFrame, a pd.Series, int or list, a connector contract str reference or a set of
parameter instructions on how to generate a pd.DataFrame. The description of each is:
- pd.DataFrame -> a deep copy of the pd.DataFrame
- pd.Series or list -> creates a pd.DataFrame of one column with the 'header' name or 'default' if not given
- str -> instantiates a connector handler with the connector_name and loads the DataFrame from the connection
- int -> generates an empty pd.DataFrame with an index size of the int passed.
- dict -> use canonical2dict(...) to help construct a dict with a 'method' to build a pd.DataFrame
methods:
- model_*(...) -> one of the SyntheticBuilder model methods and parameters
- @empty -> generates an empty pd.DataFrame where size and headers can be passed
:size sets the index size of the dataframe
:headers any initial headers for the dataframe
- @generate -> generate a synthetic file from a remote Domain Contract
:task_name the name of the SyntheticBuilder task to run
:repo_uri the location of the Domain Product
:size (optional) a size to generate
:seed (optional) if a seed should be applied
:run_book (optional) if specific intent should be run only
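An illustrative example (the 'inst' instance and the frame content are assumed for demonstration only):
    df = pd.DataFrame({'A': [1, 2, 3]})
    other = pd.DataFrame({'B': ['x', 'y', 'z']})
    inst._model_concat(df, other, as_rows=False)
    # -> columns 'A' and 'B' side by side; as_rows=True would append 'other' as extra rows instead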
"""
canonical = self._get_canonical(canonical)
other = self._get_canonical(other, size=canonical.shape[0])
_seed = self._seed() if seed is None else seed
shuffle = shuffle if isinstance(shuffle, bool) else False
as_rows = as_rows if isinstance(as_rows, bool) else False
# Filter on the columns
df_rtn = Commons.filter_columns(df=other, headers=headers, drop=drop, dtype=dtype, exclude=exclude,
regex=regex, re_ignore_case=re_ignore_case, copy=False)
if shuffle:
df_rtn = df_rtn.sample(frac=1, random_state=_seed).reset_index(drop=True)
if canonical.shape[0] <= df_rtn.shape[0]:
df_rtn = df_rtn.iloc[:canonical.shape[0]]
axis = 'index' if as_rows else 'columns'
return pd.concat([canonical, df_rtn], axis=axis)
def _model_dict_column(self, canonical: Any, header: str, convert_str: bool=None, replace_null: Any=None,
seed: int=None) -> pd.DataFrame:
""" takes a column that contains dict and expands them into columns. Note, the column must be a flat dictionary.
Complex structures will not work.
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header of the column to be converted
:param convert_str: (optional) if the header has the dict as a string convert to dict using ast.literal_eval()
:param replace_null: (optional) after conversion, replace null values with this value
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: pd.DataFrame
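An illustrative example (the 'inst' instance and the frame content are assumed for demonstration only):
    df = pd.DataFrame({'id': [1, 2], 'attr': [{'x': 1, 'y': 2}, {'x': 3}]})
    inst._model_dict_column(df, header='attr', replace_null=0)
    # -> columns 'id', 'x', 'y' where the missing 'y' in the second row becomes 0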
"""
canonical = self._get_canonical(canonical)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
_seed = self._seed() if seed is None else seed
convert_str = convert_str if isinstance(convert_str, bool) else False
# replace NaN with '{}' if the column is strings, otherwise replace with {}
if convert_str:
canonical[header] = canonical[header].fillna('{}').apply(ast.literal_eval)
else:
canonical[header] = canonical[header].fillna({i: {} for i in canonical.index})
# convert the key/values into columns (this is the fastest approach)
result = pd.json_normalize(canonical[header])
if isinstance(replace_null, (int, float, str)):
result.replace(np.nan, replace_null, inplace=True)
return canonical.join(result).drop(columns=[header])
def _model_explode(self, canonical: Any, header: str, seed: int=None) -> pd.DataFrame:
""" takes a single column of list values and explodes the DataFrame so row is represented by each elements
in the row list
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header of the column to be exploded
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
The canonical is a pd.DataFrame, a pd.Series or list, a connector contract str reference or a set of
parameter instructions on how to generate a pd.DataFrame.
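An illustrative example (the 'inst' instance and the frame content are assumed for demonstration only):
    df = pd.DataFrame({'id': [1, 2], 'tags': [['a', 'b'], ['c']]})
    inst._model_explode(df, header='tags')
    # -> three rows: (1, 'a'), (1, 'b'), (2, 'c')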
"""
canonical = self._get_canonical(canonical)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
_seed = self._seed() if seed is None else seed
return canonical.explode(column=header, ignore_index=True)
def _model_sample(self, canonical: Any, sample: Any, columns_list: list=None, exclude_associate: list=None,
auto_transition: bool=None, detail_numeric: bool=None, strict_typing: bool=None,
category_limit: int=None, apply_bias: bool=None, seed: int = None) -> pd.DataFrame:
""" Takes a sample dataset and using analytics, builds a set of synthetic columns that are representative of
the sample but scaled to the size of the canonical
:param canonical: a pd.DataFrame as the reference dataframe the synthetic columns are scaled to
:param sample: the sample dataset the analytics are taken from
:param columns_list: (optional) a list of sample columns to include in the analysis. defaults to all columns
:param exclude_associate: (optional) passed to DataDiscovery.analyse_association(...) to exclude branches
:param auto_transition: (optional) if the sample should be auto transitioned (cleaned) before analysis. Default True
:param detail_numeric: (optional) passed to DataDiscovery.analyse_association(...) for detailed numeric analysis
:param strict_typing: (optional) passed to DataDiscovery.analyse_association(...) to strictly type the analysis
:param category_limit: (optional) passed to DataDiscovery.analyse_association(...) as a category limit
:param apply_bias: (optional) if dominant values have been excluded, re-include to maintain bias
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
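An illustrative example (the 'inst' instance, the frames and the connector name are assumed for demonstration only):
    inst._model_sample(canonical=df_large, sample='sample_connector', columns_list=['age', 'gender'])
    # -> df_large extended with synthetic 'age' and 'gender' columns representative of the sample's distributions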
"""
canonical = self._get_canonical(canonical)
sample = self._get_canonical(sample)
auto_transition = auto_transition if isinstance(auto_transition, bool) else True
columns_list = columns_list if isinstance(columns_list, list) else list(sample.columns)
sample = Commons.filter_columns(sample, headers=columns_list)
if auto_transition:
Transition.from_memory().cleaners.auto_transition(sample, inplace=True)
blob = DataDiscovery.analyse_association(sample, columns_list=columns_list, exclude_associate=exclude_associate,
detail_numeric=detail_numeric, strict_typing=strict_typing,
category_limit=category_limit)
return self._model_analysis(canonical=canonical, analytics_blob=blob, apply_bias=apply_bias, seed=seed)
def _model_script(self, canonical: Any, script_contract: str, seed: int = None) -> pd.DataFrame:
"""Takes a synthetic build script and using analytics, builds a set of synthetic columns that are that are
defined by the build script and scaled to the size of the canonical
:param canonical:
:param script_contract:
:param seed: (optional) this is a place holder, here for compatibility across methods
:return: a pd.DataFrame
"""
canonical = self._get_canonical(canonical)
script = self._get_canonical(script_contract)
type_options = {'number': '_get_number', 'date': '_get_datetime', 'category': 'get_category',
'selection': 'get_selection', 'intervals': 'get_intervals', 'distribution': 'get_distribution'}
script['params'] = script['params'].replace(['', ' '], np.nan)
script['params'].loc[script['params'].isna()] = '[]'
script['params'] = [ast.literal_eval(x) if isinstance(x, str) and x.startswith('[') and x.endswith(']')
else x for x in script['params']]
# replace all other items with list
script['params'] = [x if isinstance(x, list) else [x] for x in script['params']]
script['params'] = script['params'].astype('object')
for index, row in script.iterrows():
method = type_options.get(row['type'])
params = row['params']
canonical[row['name']] = eval(f"self.{method}(size={canonical.shape[0]}, **params)", globals(), locals())
return canonical
def _model_analysis(self, canonical: Any, analytics_blob: dict, apply_bias: bool=None,
seed: int=None) -> pd.DataFrame:
""" builds a set of columns based on an analysis dictionary of weighting (see analyse_association)
if a reference DataFrame is passed then, as the analysis is run, any column that already exists has its row
values taken as the reference to the sub-category rather than the random value. This allows an already
constructed association to be used as the reference for a sub-category.
:param canonical: a pd.DataFrame as the reference dataframe
:param analytics_blob: the analytics blob from DataDiscovery.analyse_association(...)
:param apply_bias: (optional) if dominant values have been excluded, re-include to maintain bias
:param seed: seed: (optional) a seed value for the random function: default to None
:return: a DataFrame
"""
def get_level(analysis: dict, sample_size: int, _seed: int=None):
_seed = self._seed(seed=_seed, increment=True)
for name, values in analysis.items():
if row_dict.get(name) is None:
row_dict[name] = list()
_analysis = DataAnalytics(analysis=values.get('insight', {}))
result_type = object
if str(_analysis.intent.dtype).startswith('cat'):
result_type = 'category'
result = self._get_category(selection=_analysis.intent.categories,
relative_freq=_analysis.patterns.get('relative_freq', None),
seed=_seed, size=sample_size)
elif str(_analysis.intent.dtype).startswith('num'):
result_type = 'int' if _analysis.params.precision == 0 else 'float'
result = self._get_intervals(intervals=[tuple(x) for x in _analysis.intent.intervals],
relative_freq=_analysis.patterns.get('relative_freq', None),
precision=_analysis.params.get('precision', None),
seed=_seed, size=sample_size)
elif str(_analysis.intent.dtype).startswith('date'):
result_type = 'object' if _analysis.params.is_element('data_format') else 'date'
result = self._get_datetime(start=_analysis.stats.lowest,
until=_analysis.stats.highest,
relative_freq=_analysis.patterns.get('relative_freq', None),
date_format=_analysis.params.get('data_format', None),
day_first=_analysis.params.get('day_first', None),
year_first=_analysis.params.get('year_first', None),
seed=_seed, size=sample_size)
else:
result = []
# if the analysis was done excluding dominant values then see if they should be added back
if apply_bias and _analysis.patterns.is_element('dominant_excluded'):
_dom_percent = _analysis.patterns.dominant_percent/100
_dom_values = _analysis.patterns.dominant_excluded
if len(_dom_values) > 0:
s_values = pd.Series(result, dtype=result_type)
non_zero = s_values[~s_values.isin(_dom_values)].index
choice_size = int((s_values.size * _dom_percent) - (s_values.size - len(non_zero)))
if choice_size > 0:
generator = np.random.default_rng(_seed)
_dom_choice = generator.choice(_dom_values, size=choice_size)
s_values.iloc[generator.choice(non_zero, size=choice_size, replace=False)] = _dom_choice
result = s_values.to_list()
# now add the result to the row_dict
row_dict[name] += result
if sum(_analysis.patterns.relative_freq) == 0:
unit = 0
else:
unit = sample_size / sum(_analysis.patterns.relative_freq)
if values.get('sub_category'):
leaves = values.get('branch', {}).get('leaves', {})
for idx in range(len(leaves)):
section_size = int(round(_analysis.patterns.relative_freq[idx] * unit, 0)) + 1
next_item = values.get('sub_category').get(leaves[idx])
get_level(next_item, section_size, _seed)
return
canonical = self._get_canonical(canonical)
apply_bias = apply_bias if isinstance(apply_bias, bool) else True
row_dict = dict()
seed = self._seed() if seed is None else seed
size = canonical.shape[0]
get_level(analytics_blob, sample_size=size, _seed=seed)
for key in row_dict.keys():
row_dict[key] = row_dict[key][:size]
return pd.concat([canonical, pd.DataFrame.from_dict(data=row_dict)], axis=1)
def _model_encoding(self, canonical: Any, headers: [str, list], encoding: bool=None, ordinal: dict=None,
prefix=None, dtype: Any=None, prefix_sep: str=None, dummy_na: bool=False,
drop_first: bool=False, seed: int=None) -> pd.DataFrame:
""" encodes categorical data types, by default, as dummy encoded but optionally can choose label
encoding
:param canonical: a pd.DataFrame as the reference dataframe
:param headers: the header(s) to apply the encoding to
:param encoding: the type of encoding to apply to the categories, types supported 'dummy', 'ordinal', 'label'
:param ordinal: a dictionary of ordinal encoding. encoding must be 'ordinal'; values not mapped return null
:param prefix : str, list of str, or dict of str, default None
String to append DataFrame column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
:param prefix_sep : str, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix`.
:param dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
:param drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
:param dtype : dtype, default np.uint8
Data type for new columns. Only a single dtype is allowed.
:param seed: seed: (optional) a seed value for the random function: default to None
:return: a pd.Dataframe
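An illustrative example (the 'inst' instance and the frame content are assumed for demonstration only):
    df = pd.DataFrame({'gender': ['M', 'F', 'U']})
    inst._model_encoding(df, headers='gender', encoding='dummy', prefix='gender')
    # -> adds uint8 columns 'gender_F', 'gender_M', 'gender_U'
    inst._model_encoding(df, headers='gender', encoding='ordinal', ordinal={'M': 1, 'F': 2})
    # -> maps 'M' to 1 and 'F' to 2, leaving unmapped values ('U') as null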
"""
# intend code block on the canonical
canonical = self._get_canonical(canonical)
headers = Commons.list_formatter(headers)
seed = self._seed() if seed is None else seed
encoding = encoding if isinstance(encoding, str) and encoding in ['label', 'ordinal'] else 'dummy'
prefix = prefix if isinstance(prefix, str) else None
prefix_sep = prefix_sep if isinstance(prefix_sep, str) else "_"
dummy_na = dummy_na if isinstance(dummy_na, bool) else False
drop_first = drop_first if isinstance(drop_first, bool) else False
dtype = dtype if dtype else np.uint8
for header in headers:
if canonical[header].dtype.name != 'category':
canonical[header] = canonical[header].astype('category')
if encoding == 'ordinal':
ordinal = ordinal if isinstance(ordinal, dict) else {}
canonical[header] = canonical[header].map(ordinal, na_action='ignore')
elif encoding == 'label':
canonical[f"{prefix}{prefix_sep}{header}"] = canonical[header].cat.codes
if encoding == 'dummy':
dummy_df = pd.get_dummies(canonical, columns=headers, prefix=prefix, prefix_sep=prefix_sep,
dummy_na=dummy_na, drop_first=drop_first, dtype=dtype)
for name in dummy_df.columns:
canonical[name] = dummy_df[name]
return canonical
def _correlate_selection(self, canonical: Any, selection: list, action: [str, int, float, dict],
default_action: [str, int, float, dict]=None, seed: int=None, rtn_type: str=None):
""" returns a value set based on the selection list and the action enacted on that selection. If
the selection criteria is not fulfilled then the default_action is taken if specified, else null value.
If a DataFrame is not passed, the values column is referenced by the header '_default'
:param canonical: a pd.DataFrame as the reference dataframe
:param selection: a list of selections where conditions are filtered on, executed in list order
An example of a selection with the minimum requirements is: (see 'select2dict(...)')
[{'column': 'genre', 'condition': "=='Comedy'"}]
:param action: a value or dict to act upon if the select is successful. see below for more examples
An example of an action as a dict: (see 'action2dict(...)')
{'method': 'get_category', 'selection': ['M', 'F', 'U']}
:param default_action: (optional) a default action to take if the selection is not fulfilled
:param seed: (optional) a seed value for the random function: default to None
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: value set based on the selection list and the action
Selections are a list of dictionaries of conditions and optional additional parameters to filter.
To help build conditions there is a static helper method called 'select2dict(...)' that has parameter
options available to build a condition.
An example of a condition with the minimum requirements is
[{'column': 'genre', 'condition': "=='Comedy'"}]
an example of using the helper method
selection = [inst.select2dict(column='gender', condition="=='M'"),
inst.select2dict(column='age', condition=">65", logic='XOR')]
Using the 'select2dict' method ensures the correct keys are used and the dictionary is properly formed. It also
helps with building the logic that is executed in order.
Actions are the resulting outcome of the selection (or the default). An action can be just a value or a dict
that executes an intent method such as get_number(). To help build actions there is a helper function called
action2dict(...) that takes a method as a mandatory attribute.
With actions there are special keyword 'method' values:
@header: use a column as the value reference, expects the 'header' key
@constant: use a value constant, expects the key 'value'
@sample: use to get sample values, expects the 'name' of the Sample method, optional 'shuffle' boolean
@eval: evaluate a code string, expects the key 'code_str' and any locals() required
An example of a simple action to return a selection from a list:
{'method': 'get_category', 'selection': ['M', 'F', 'U']}
This same action using the helper method would look like:
inst.action2dict(method='get_category', selection=['M', 'F', 'U'])
An example of using the helper method; in this example we use the keyword @header to get a value from another
column at the same index position:
inst.action2dict(method="@header", header='value')
We can even execute some sort of evaluation at run time:
inst.action2dict(method="@eval", code_str='sum(values)', values=[1,4,2,1])
"""
canonical = self._get_canonical(canonical)
if len(canonical) == 0:
raise TypeError("The canonical given is empty")
if not isinstance(selection, list):
raise ValueError("The 'selection' parameter must be a 'list' of 'dict' types")
if not isinstance(action, (str, int, float, dict)) or (isinstance(action, dict) and len(action) == 0):
raise TypeError("The 'action' parameter is not of an accepted format or is empty")
_seed = seed if isinstance(seed, int) else self._seed()
# prep the values to be a DataFrame if it isn't already
action = deepcopy(action)
selection = deepcopy(selection)
# run the logic
select_idx = self._selection_index(canonical=canonical, selection=selection)
if not isinstance(default_action, (str, int, float, dict)):
default_action = None
rtn_values = self._apply_action(canonical, action=default_action, seed=_seed)
# deal with categories
is_category = False
if rtn_values.dtype.name == 'category':
is_category = True
rtn_values = rtn_values.astype('object')
rtn_values.update(self._apply_action(canonical, action=action, select_idx=select_idx, seed=_seed))
if is_category:
rtn_values = rtn_values.astype('category')
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
rtn_values = rtn_values.astype(rtn_type)
return rtn_values
return rtn_values.to_list()
def _correlate_custom(self, canonical: Any, code_str: str, seed: int=None, **kwargs):
""" Commonly used for custom list comprehension, takes code string that when evaluated returns a list of values
When referencing the canonical in the code_str it should be referenced either by use parameter label 'canonical'
or the short cut '@' symbol.
for example:
code_str = "[x + 2 for x in @['A']]" # where 'A' is a header in the canonical
kwargs can also be passed into the code string but must be preceded by a '$' symbol
for example:
code_str = "[True if x == $v1 else False for x in @['A']]" # where 'v1' is a kwargs
:param canonical: a pd.DataFrame as the reference dataframe
:param code_str: an action on those column values. to reference the canonical use '@'
:param seed: (optional) a seed value for the random function: default to None
:param kwargs: a set of kwargs to include in any executable function
:return: a list (optionally a pd.DataFrame)
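An illustrative end-to-end example (the 'inst' instance and the frame content are assumed for demonstration only):
    df = pd.DataFrame({'A': [1, 2, 3]})
    inst._correlate_custom(df, code_str="[x * $factor for x in @['A']]", factor=10)
    # -> [10, 20, 30]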
"""
canonical = self._get_canonical(canonical)
_seed = seed if isinstance(seed, int) else self._seed()
local_kwargs = locals()
for k, v in local_kwargs.pop('kwargs', {}).items():
local_kwargs.update({k: v})
code_str = code_str.replace(f'${k}', str(v))
code_str = code_str.replace('@', 'canonical')
rtn_values = eval(code_str, globals(), local_kwargs)
if rtn_values is None:
return [np.nan] * canonical.shape[0]
return rtn_values
def _correlate_aggregate(self, canonical: Any, headers: list, agg: str, seed: int=None, precision: int=None,
rtn_type: str=None):
""" correlate two or more columns with each other through a finite set of aggregation functions. The
aggregation function names are limited to 'sum', 'prod', 'count', 'min', 'max' and 'mean' for numeric columns
and a special 'list' function name to combine the columns as a list
:param canonical: a pd.DataFrame as the reference dataframe
:param headers: a list of headers to correlate
:param agg: the aggregation function name to enact. The available functions are:
'sum', 'prod', 'count', 'min', 'max', 'mean' and 'list' which combines the columns as a list
:param precision: the value precision of the return values
:param seed: (optional) a seed value for the random function: default to None
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: a list of equal length to the one passed
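An illustrative example (the 'inst' instance and the frame content are assumed for demonstration only):
    df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
    inst._correlate_aggregate(df, headers=['A', 'B'], agg='sum')    # -> [4, 6]
    inst._correlate_aggregate(df, headers=['A', 'B'], agg='list')   # -> [[1, 3], [2, 4]]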
"""
canonical = self._get_canonical(canonical)
if not isinstance(headers, list) or len(headers) < 2:
raise ValueError("The headers value must be a list of at least two header str")
if agg not in ['sum', 'prod', 'count', 'min', 'max', 'mean', 'list']:
raise ValueError("The only allowed func values are 'sum', 'prod', 'count', 'min', 'max', 'mean', 'list'")
# Code block for intent
_seed = seed if isinstance(seed, int) else self._seed()
precision = precision if isinstance(precision, int) else 3
if agg == 'list':
return canonical.loc[:, headers].values.tolist()
rtn_values = eval(f"canonical.loc[:, headers].{agg}(axis=1)", globals(), locals()).round(precision)
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
rtn_values = rtn_values.astype(rtn_type)
return rtn_values
return rtn_values.to_list()
def _correlate_choice(self, canonical: Any, header: str, list_size: int=None, random_choice: bool=None,
replace: bool=None, shuffle: bool=None, convert_str: bool=None, seed: int=None,
rtn_type: str=None):
""" correlate a column where the elements of the columns contains a list, and a choice is taken from that list.
if the list_size == 1 then a single value is correlated otherwise a list is correlated
Null values are passed through but all other elements must be a list with at least 1 value in.
if 'random' is true then all returned values will be a random selection from the list and of equal length.
if 'random' is false then each list will not exceed the 'list_size'
Also if 'random' is true and 'replace' is False then all lists must have more elements than the list_size.
By default 'replace' is True and 'shuffle' is False.
In addition 'convert_str' allows lists that have been formatted as a string can be converted from a string
to a list using 'ast.literal_eval(x)'
:param canonical: a pd.DataFrame as the reference dataframe
:param header: The header containing a list to choose from.
:param list_size: (optional) the number of elements to return, if more than 1 then list
:param random_choice: (optional) if the choice should be a random choice.
:param replace: (optional) if the choice selection should be replaced or selected only once
:param shuffle: (optional) if the final list should be shuffled
:param convert_str: if the header has the list as a string convert to list using ast.literal_eval()
:param seed: (optional) a seed value for the random function: default to None
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: a list of equal length to the one passed
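An illustrative example (the 'inst' instance and the frame content are assumed for demonstration only):
    df = pd.DataFrame({'codes': [[1, 2, 3], [4, 5], None]})
    inst._correlate_choice(df, header='codes', list_size=1)
    # -> [1, 4, nan] taking the first element of each list, with nulls passed through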
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
# Code block for intent
list_size = list_size if isinstance(list_size, int) else 1
random_choice = random_choice if isinstance(random_choice, bool) else False
convert_str = convert_str if isinstance(convert_str, bool) else False
replace = replace if isinstance(replace, bool) else True
shuffle = shuffle if isinstance(shuffle, bool) else False
_seed = seed if isinstance(seed, int) else self._seed()
s_values = canonical[header].copy()
if s_values.empty:
return list()
s_idx = s_values.where(~s_values.isna()).dropna().index
if convert_str:
s_values.iloc[s_idx] = [ast.literal_eval(x) if isinstance(x, str) else x for x in s_values.iloc[s_idx]]
s_values.iloc[s_idx] = Commons.list_formatter(s_values.iloc[s_idx])
generator = np.random.default_rng(seed=_seed)
if random_choice:
try:
s_values.iloc[s_idx] = [generator.choice(x, size=list_size, replace=replace, shuffle=shuffle)
for x in s_values.iloc[s_idx]]
except ValueError:
raise ValueError(f"Unable to make a choice. Ensure {header} has all appropriate values for the method")
s_values.iloc[s_idx] = [x[0] if list_size == 1 else list(x) for x in s_values.iloc[s_idx]]
else:
s_values.iloc[s_idx] = [x[:list_size] if list_size > 1 else x[0] for x in s_values.iloc[s_idx]]
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
s_values = s_values.astype(rtn_type)
return s_values
return s_values.to_list()
def _correlate_join(self, canonical: Any, header: str, action: [str, dict], sep: str=None, seed: int=None,
rtn_type: str=None):
""" correlate a column and join it with the result of the action, This allows for composite values to be
build from. an example might be to take a forename and add the surname with a space separator to create a
composite name field, of to join two primary keys to create a single composite key.
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header of the column to join the action result to
:param action: (optional) a string or a single action whose outcome will be joined to the header value
:param sep: (optional) a separator between the values
:param seed: (optional) a seed value for the random function: default to None
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: a list of equal length to the one passed
Actions are the resulting outcome of the selection (or the default). An action can be just a value or a dict
that executes an intent method such as get_number(). To help build actions there is a helper function called
action2dict(...) that takes a method as a mandatory attribute.
With actions there are special keyword 'method' values:
@header: use a column as the value reference, expects the 'header' key
@constant: use a value constant, expects the key 'value'
@sample: use to get sample values, expects the 'name' of the Sample method, optional 'shuffle' boolean
@eval: evaluate a code string, expects the key 'code_str' and any locals() required
An example of a simple action to return a selection from a list:
{'method': 'get_category', 'selection': ['M', 'F', 'U']}
An example of using the helper method; in this example we use the keyword @header to get a value from another
column at the same index position:
inst.action2dict(method="@header", header='value')
We can even execute some sort of evaluation at run time:
inst.action2dict(method="@eval", code_str='sum(values)', values=[1,4,2,1])
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(action, (dict, str)):
raise ValueError(f"The action must be a dictionary of a single action or a string value")
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
# Code block for intent
_seed = seed if isinstance(seed, int) else self._seed()
sep = sep if isinstance(sep, str) else ''
s_values = canonical[header].copy()
if s_values.empty:
return list()
action = deepcopy(action)
null_idx = s_values[s_values.isna()].index
result = self._apply_action(canonical, action=action, seed=_seed)
s_values = pd.Series([f"{a}{sep}{b}" for (a, b) in zip(s_values, result)], dtype='object')
if null_idx.size > 0:
s_values.iloc[null_idx] = np.nan
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
s_values = s_values.astype(rtn_type)
return s_values
return s_values.to_list()
def _correlate_sigmoid(self, canonical: Any, header: str, precision: int=None, seed: int=None,
rtn_type: str=None):
""" logistic sigmoid a.k.a logit, takes an array of real numbers and transforms them to a value
between (0,1) and is defined as
f(x) = 1/(1+exp(-x)
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
:param precision: (optional) how many decimal places. default to 3
:param seed: (optional) the random seed. defaults to current datetime
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: an equal length list of correlated values
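An illustrative worked example (the 'inst' instance and the frame content are assumed for demonstration only):
    df = pd.DataFrame({'x': [-2, 0, 2]})
    inst._correlate_sigmoid(df, header='x')
    # -> approximately [0.119, 0.5, 0.881] since 1/(1+exp(0)) = 0.5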
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
s_values = canonical[header].copy()
if s_values.empty:
return list()
precision = precision if isinstance(precision, int) else 3
_seed = seed if isinstance(seed, int) else self._seed()
rtn_values = np.round(1 / (1 + np.exp(-s_values)), precision)
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
rtn_values = rtn_values.astype(rtn_type)
return rtn_values
return rtn_values.to_list()
def _correlate_polynomial(self, canonical: Any, header: str, coefficient: list, seed: int=None,
rtn_type: str=None, keep_zero: bool=None) -> list:
""" creates a polynomial using the reference header values and apply the coefficients where the
index of the list represents the degree of the term in reverse order.
e.g [6, -2, 0, 4] => f(x) = 4x**3 - 2x + 6
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
:param coefficient: the list of term coefficients, lowest degree first
:param seed: (optional) the random seed. defaults to current datetime
:param keep_zero: (optional) if True then zeros passed remain zero, Default is False
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: an equal length list of correlated values
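An illustrative worked example (the 'inst' instance and the frame content are assumed for demonstration only):
    df = pd.DataFrame({'x': [0, 1, 2]})
    inst._correlate_polynomial(df, header='x', coefficient=[6, -2, 0, 4])
    # f(x) = 4x**3 - 2x + 6, so the return is [6, 8, 34]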
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
s_values = canonical[header].copy()
if s_values.empty:
return list()
keep_zero = keep_zero if isinstance(keep_zero, bool) else False
_seed = seed if isinstance(seed, int) else self._seed()
def _calc_polynomial(x, _coefficient):
if keep_zero and x == 0:
return 0
res = 0
for index, coeff in enumerate(_coefficient):
res += coeff * x ** index
return res
rtn_values = s_values.apply(lambda x: _calc_polynomial(x, coefficient))
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
rtn_values = rtn_values.astype(rtn_type)
return rtn_values
return rtn_values.to_list()
def _correlate_missing(self, canonical: Any, header: str, granularity: [int, float]=None,
as_type: str=None, lower: [int, float]=None, upper: [int, float]=None, nulls_list: list=None,
exclude_dominant: bool=None, replace_zero: [int, float]=None, precision: int=None,
day_first: bool=None, year_first: bool=None, seed: int=None,
rtn_type: str=None):
""" imputes missing data with a weighted distribution based on the analysis of the other elements in the
column
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
:param granularity: (optional) the granularity of the analysis across the range. Default is 5
int passed - represents the number of periods
float passed - the length of each interval
list[tuple] - specific interval periods e.g []
list[float] - the percentile or quantities, All should fall between 0 and 1
:param as_type: (optional) specify the type to analyse
:param lower: (optional) the lower limit of the number value. Default min()
:param upper: (optional) the upper limit of the number value. Default max()
:param nulls_list: (optional) a list of nulls that should be considered null
:param exclude_dominant: (optional) if overly dominant are to be excluded from analysis to avoid bias (numbers)
:param replace_zero: (optional) with categories, a non-zero minimal chance relative frequency to replace zero
This is useful when the relative frequency of a category is so small the analysis returns zero
:param precision: (optional) by default set to 3.
:param day_first: (optional) if the date provided has day first
:param year_first: (optional) if the date provided has year first
:param seed: (optional) the random seed. defaults to current datetime
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: an equal length list of values with the nulls imputed
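An illustrative example (the 'inst' instance and the frame content are assumed for demonstration only):
    df = pd.DataFrame({'score': [1.0, 2.0, None, 2.0, None]})
    inst._correlate_missing(df, header='score')
    # -> the two nulls are imputed with values drawn from the analysed distribution of [1.0, 2.0, 2.0]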
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
s_values = canonical[header].copy()
if s_values.empty:
return list()
as_type = as_type if isinstance(as_type, str) else s_values.dtype.name
_seed = seed if isinstance(seed, int) else self._seed()
nulls_list = nulls_list if isinstance(nulls_list, list) else [np.nan, None, 'nan', '', ' ']
if isinstance(nulls_list, list):
s_values.replace(nulls_list, np.nan, inplace=True, regex=True)
null_idx = s_values[s_values.isna()].index
if as_type.startswith('int') or as_type.startswith('float') or as_type.startswith('num'):
_analysis = DataAnalytics(DataDiscovery.analyse_number(s_values, granularity=granularity, lower=lower,
upper=upper, detail_stats=False, precision=precision,
exclude_dominant=exclude_dominant))
s_values.iloc[null_idx] = self._get_intervals(intervals=[tuple(x) for x in _analysis.intent.intervals],
relative_freq=_analysis.patterns.relative_freq,
precision=_analysis.params.precision,
seed=_seed, size=len(null_idx))
elif as_type.startswith('cat'):
_analysis = DataAnalytics(DataDiscovery.analyse_category(s_values, replace_zero=replace_zero))
s_values.iloc[null_idx] = self._get_category(selection=_analysis.intent.categories,
relative_freq=_analysis.patterns.relative_freq,
seed=_seed, size=len(null_idx))
elif as_type.startswith('date'):
_analysis = DataAnalytics(DataDiscovery.analyse_date(s_values, granularity=granularity, lower=lower,
upper=upper, day_first=day_first,
year_first=year_first))
s_values.iloc[null_idx] = self._get_datetime(start=_analysis.intent.lowest,
until=_analysis.intent.highest,
relative_freq=_analysis.patterns.relative_freq,
date_format=_analysis.params.data_format,
day_first=_analysis.params.day_first,
year_first=_analysis.params.year_first,
seed=_seed, size=len(null_idx))
else:
raise ValueError(f"The data type '{as_type}' is not supported. Try using the 'as_type' parameter")
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
s_values = s_values.astype(rtn_type)
return s_values
return s_values.to_list()
def _correlate_numbers(self, canonical: Any, header: str, to_numeric: bool=None, standardize: bool=None,
normalize: tuple=None, offset: [int, float, str]=None, jitter: float=None,
jitter_freq: list=None, precision: int=None, replace_nulls: [int, float]=None,
seed: int=None, keep_zero: bool=None, min_value: [int, float]=None,
max_value: [int, float]=None, rtn_type: str=None):
""" returns a number that correlates to the value given. The jitter is based on a normal distribution
with the correlated value being the mean and the jitter its standard deviation from that mean
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
:param to_numeric: (optional) ensures numeric type. Non-convertible strings are set to null
:param standardize: (optional) if the column should be standardised
:param normalize: (optional) normalise the column between two values. the tuple is the lower and upper bounds
:param offset: (optional) a fixed value to offset or if str an operation to perform using @ as the header value.
:param jitter: (optional) a perturbation of the value where the jitter is a std. defaults to 0
:param jitter_freq: (optional) a relative freq with the pattern mid point the mid point of the jitter
:param precision: (optional) how many decimal places. default to 3
:param replace_nulls: (optional) a numeric value to replace nulls
:param seed: (optional) the random seed. defaults to current datetime
:param keep_zero: (optional) if True then zeros passed remain zero, Default is False
:param min_value: a minimum value not to go below
:param max_value: a max value not to go above
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: an equal length list of correlated values
The offset can be a numeric offset that is added to the value, e.g. passing 2 will add 2 to all values.
If a string is passed, the format should be a calculation with the '@' character used to represent the column
value. e.g.
'1-@' would subtract the column value from 1,
'@*0.5' would multiply the column value by 0.5
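An illustrative example (the 'inst' instance and the frame content are assumed for demonstration only):
    df = pd.DataFrame({'values': [1.0, 2.0, 3.0]})
    inst._correlate_numbers(df, header='values', offset='@*0.5', precision=1)   # -> [0.5, 1.0, 1.5]
    inst._correlate_numbers(df, header='values', jitter=0.1)   # adds small random noise around each value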
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
s_values = canonical[header].copy()
if s_values.empty:
return list()
if isinstance(to_numeric, bool) and to_numeric:
s_values = pd.to_numeric(s_values.apply(str).str.replace('[$£€, ]', '', regex=True), errors='coerce')
if not (s_values.dtype.name.startswith('int') or s_values.dtype.name.startswith('float')):
raise ValueError(f"The header column is of type '{s_values.dtype.name}' and not numeric. "
f"Use the 'to_numeric' parameter if appropriate")
keep_zero = keep_zero if isinstance(keep_zero, bool) else False
precision = precision if isinstance(precision, int) else 3
_seed = seed if isinstance(seed, int) else self._seed()
if isinstance(replace_nulls, (int, float)):
s_values[s_values.isna()] = replace_nulls
null_idx = s_values[s_values.isna()].index
zero_idx = s_values.where(s_values == 0).dropna().index if keep_zero else []
if isinstance(offset, (int, float)) and offset != 0:
s_values = s_values.add(offset)
elif isinstance(offset, str):
offset = offset.replace("@", 'x')
s_values = s_values.apply(lambda x: eval(offset))
if isinstance(jitter, (int, float)) and jitter != 0:
sample = self._get_number(-abs(jitter) / 2, abs(jitter) / 2, relative_freq=jitter_freq,
size=s_values.size, seed=_seed)
s_values = s_values.add(sample)
if isinstance(min_value, (int, float)):
if min_value < s_values.max():
min_idx = s_values.dropna().where(s_values < min_value).dropna().index
s_values.iloc[min_idx] = min_value
else:
raise ValueError(f"The min value {min_value} is greater than the max result value {s_values.max()}")
if isinstance(max_value, (int, float)):
if max_value > s_values.min():
max_idx = s_values.dropna().where(s_values > max_value).dropna().index
s_values.iloc[max_idx] = max_value
else:
raise ValueError(f"The max value {max_value} is less than the min result value {s_values.min()}")
if isinstance(standardize, bool) and standardize:
s_values = pd.Series(Commons.list_standardize(s_values.to_list()))
if isinstance(normalize, tuple):
if len(normalize) != 2 or normalize[0] >= normalize[1]:
raise ValueError("The normalize tuple must be of size 2 with the first value lower than the second")
s_values = pd.Series(Commons.list_normalize(s_values.to_list(), normalize[0], normalize[1]))
# reset the zero values if any
s_values.iloc[zero_idx] = 0
s_values = s_values.round(precision)
if precision == 0 and not s_values.isnull().any():
s_values = s_values.astype(int)
if null_idx.size > 0:
s_values.iloc[null_idx] = np.nan
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
s_values = s_values.astype(rtn_type)
return s_values
return s_values.to_list()
def _correlate_categories(self, canonical: Any, header: str, correlations: list, actions: dict,
default_action: [str, int, float, dict]=None, seed: int=None, rtn_type: str=None):
""" correlation of a set of values to an action, the correlations must map to the dictionary index values.
Note. to use the current value in the passed values as a parameter value pass an empty dict {} as the keys
value. If you want the action value to be the current value of the passed value then again pass an empty dict
action to be the current value
simple correlation list:
['A', 'B', 'C'] # if values is 'A' then action is 0 and so on
multiple choice correlation:
[['A','B'], 'C'] # if values is 'A' OR 'B' then action is 0 and so on
For more complex correlation the selection logic can be used, see notes below.
for actions also see notes below.
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
:param correlations: a list of categories (can also contain lists for multiple correlations)
:param actions: the correlated set of categories that should map to the index
:param default_action: (optional) a default action to take if the selection is not fulfilled
:param seed: a seed value for the random function: default to None
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: a list of equal length to the one passed
Selections are a list of dictionaries of conditions and optional additional parameters to filter.
To help build conditions there is a static helper method called 'select2dict(...)' that has parameter
options available to build a condition.
An example of a condition with the minimum requirements is
[{'column': 'genre', 'condition': "=='Comedy'"}]
an example of using the helper method
selection = [inst.select2dict(column='gender', condition="=='M'"),
inst.select2dict(column='age', condition=">65", logic='XOR')]
Using the 'select2dict' method ensures the correct keys are used and the dictionary is properly formed. It also
helps with building the logic that is executed in order.
Actions are the resulting outcome of the selection (or the default). An action can be just a value or a dict
that executes an intent method such as get_number(). To help build actions there is a helper function called
action2dict(...) that takes a method as a mandatory attribute.
With actions there are special keyword 'method' values:
@header: use a column as the value reference, expects the 'header' key
@constant: use a value constant, expects the key 'value'
@sample: use to get sample values, expects the 'name' of the Sample method, optional 'shuffle' boolean
@eval: evaluate a code string, expects the key 'code_str' and any locals() required
An example of a simple action to return a selection from a list:
{'method': 'get_category', 'selection': ['M', 'F', 'U']}
This same action using the helper method would look like:
inst.action2dict(method='get_category', selection=['M', 'F', 'U'])
An example of using the helper method; in this example we use the keyword @header to get a value from another
column at the same index position:
inst.action2dict(method="@header", header='value')
We can even execute some sort of evaluation at run time:
inst.action2dict(method="@eval", code_str='sum(values)', values=[1,4,2,1])
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
_seed = seed if isinstance(seed, int) else self._seed()
actions = deepcopy(actions)
correlations = deepcopy(correlations)
corr_list = []
for corr in correlations:
corr_list.append(Commons.list_formatter(corr))
if not isinstance(default_action, (str, int, float, dict)):
default_action = None
rtn_values = self._apply_action(canonical, action=default_action, seed=_seed)
# deal with categories
if rtn_values.dtype.name == 'category':
rtn_values = rtn_values.astype('object')
s_values = canonical[header].copy().astype(str)
for i in range(len(corr_list)):
action = actions.get(i, actions.get(str(i), -1))
if action == -1:
continue
if isinstance(corr_list[i][0], dict):
corr_idx = self._selection_index(canonical, selection=corr_list[i])
else:
corr_idx = s_values[s_values.isin(map(str, corr_list[i]))].index
rtn_values.update(self._apply_action(canonical, action=action, select_idx=corr_idx, seed=_seed))
if isinstance(rtn_type, str):
if rtn_type in ['category', 'object'] or rtn_type.startswith('int') or rtn_type.startswith('float'):
rtn_values = rtn_values.astype(rtn_type)
return rtn_values
return rtn_values.to_list()
def _correlate_dates(self, canonical: Any, header: str, offset: [int, dict]=None, jitter: int=None,
jitter_units: str=None, jitter_freq: list=None, now_delta: str=None, date_format: str=None,
min_date: str=None, max_date: str=None, fill_nulls: bool=None, day_first: bool=None,
year_first: bool=None, seed: int=None, rtn_type: str=None):
""" correlates dates to an existing date or list of dates. The return is a list of pd
:param canonical: a pd.DataFrame as the reference dataframe
:param header: the header in the DataFrame to correlate
:param offset: (optional) an offset to the date. if an int is passed it is assumed to be a 'days' offset
int or dictionary of unit values associated with pd.DateOffset, e.g. {'days': 1}
:param jitter: (optional) the random jitter or deviation in days
:param jitter_units: (optional) the units of the jitter, Options: 'W', 'D', 'h', 'm', 's'. default 'D'
:param jitter_freq: (optional) a relative freq with the pattern mid point the mid point of the jitter
:param now_delta: (optional) returns a delta from now as an int list, Options: 'Y', 'M', 'W', 'D', 'h', 'm', 's'
:param min_date: (optional) a minimum date not to go below
:param max_date: (optional) a max date not to go above
:param fill_nulls: (optional) if null date values should be left untouched or filled with the column's mode date
:param day_first: (optional) if the dates given are day first format. Default to True
:param year_first: (optional) if the dates given are year first. Default to False
:param date_format: (optional) the format of the output
:param seed: (optional) a seed value for the random function: default to None
:param rtn_type: (optional) changes the default return of a 'list' to a pd.Series
other than the int, float, category, string and object, passing 'as-is' will return as is
:return: a list of equal size to that given
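An illustrative example (the 'inst' instance and the frame content are assumed for demonstration only):
    df = pd.DataFrame({'start': ['2021-01-01', '2021-06-01']})
    inst._correlate_dates(df, header='start', offset={'days': 7}, date_format='%Y-%m-%d')
    # -> each date moved forward seven days, returned in the given format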
"""
canonical = self._get_canonical(canonical, header=header)
if not isinstance(header, str) or header not in canonical.columns:
raise ValueError(f"The header '{header}' can't be found in the canonical DataFrame")
values = canonical[header].copy()
if values.empty:
return list()
def _clean(control):
_unit_type = ['years', 'months', 'weeks', 'days', 'leapdays', 'hours', 'minutes', 'seconds']
_params = {}
if isinstance(control, int):
control = {'days': control}
if isinstance(control, dict):
for k, v in control.items():
if k not in _unit_type:
raise ValueError(f"The key '{k}' in 'offset', is not a recognised unit type for pd.DateOffset")
return control
_seed = self._seed() if seed is None else seed
fill_nulls = False if fill_nulls is None or not isinstance(fill_nulls, bool) else fill_nulls
offset = _clean(offset) if isinstance(offset, (dict, int)) else None
if isinstance(now_delta, str) and now_delta not in ['Y', 'M', 'W', 'D', 'h', 'm', 's']:
raise ValueError(f"the now_delta offset unit '{now_delta}' is not recognised "
f"use of of ['Y', 'M', 'W', 'D', 'h', 'm', 's']")
units_allowed = ['W', 'D', 'h', 'm', 's']
jitter_units = jitter_units if isinstance(jitter_units, str) and jitter_units in units_allowed else 'D'
jitter = pd.Timedelta(value=jitter, unit=jitter_units) if isinstance(jitter, int) else None
# set minimum date
_min_date = pd.to_datetime(min_date, errors='coerce', infer_datetime_format=True, utc=True)
if _min_date is None or _min_date is pd.NaT:
_min_date = pd.to_datetime(pd.Timestamp.min, utc=True)
# set max date
_max_date = pd.to_datetime(max_date, errors='coerce', infer_datetime_format=True, utc=True)
if _max_date is None or _max_date is pd.NaT:
_max_date = pd.to_datetime(pd.Timestamp.max, utc=True)
if _min_date >= _max_date:
raise ValueError(f"the min_date {min_date} must be less than max_date {max_date}")
# convert values into datetime
s_values = pd.Series(pd.to_datetime(values.copy(), errors='coerce', infer_datetime_format=True,
dayfirst=day_first, yearfirst=year_first, utc=True))
if jitter is not None:
if jitter_units in ['W', 'D']:
value = jitter.days
zip_units = 'D'
else:
value = int(jitter.to_timedelta64().astype(int) / 1000000000)
zip_units = 's'
zip_spread = self._get_number(-abs(value) / 2, (abs(value + 1) / 2), relative_freq=jitter_freq,
precision=0, size=s_values.size, seed=_seed)
zipped_dt = list(zip(zip_spread, [zip_units]*s_values.size))
s_values += np.array([pd.Timedelta(x, y).to_timedelta64() for x, y in zipped_dt])
if fill_nulls:
generator = np.random.default_rng(seed=_seed)
s_values = s_values.fillna(generator.choice(s_values.mode()))
null_idx = s_values[s_values.isna()].index
if isinstance(offset, dict) and offset:
s_values = s_values.add( | pd.DateOffset(**offset) | pandas.DateOffset |
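# A minimal sketch of pd.DateOffset, the API masked in the line above: shifting a
# tz-aware datetime Series by an offset dict of the kind validated by _clean().
# The dates below are illustrative, not taken from the record.
import pandas as pd

_demo_dates = pd.Series(pd.to_datetime(["2021-01-01", "2021-06-15"], utc=True))
_shifted = _demo_dates.add(pd.DateOffset(**{"days": 1}))  # same as _demo_dates + pd.DateOffset(days=1)
print(_shifted)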
#!/usr/bin/env python
# coding: utf-8
"""
Created on Mon November 10 14:13:20 2019
@author: <NAME>
takes the condition name as input (e.g. lik or int)
"""
def covariate(cond):
# data analysis and wrangling
import pandas as pd
import numpy as np
import os
from pathlib import Path
#addpath
home = str(Path.home())
#declare variables
GLM = ("GLM-10")
s = ("01", "02", "03", "04", "05", "06", "07", "09", "10", "11", "12", "13","14", "15", "16", "17","18", "20", "21", "22","23", "24","25", "26")
taskDIR = ("hedonic")
df1 = []
df2 = []
df3 = []
df5 = []
dfsubj = []
df01 = pd.DataFrame()
df02 = pd.DataFrame()
df03 = | pd.DataFrame() | pandas.DataFrame |
# creating my first module:
# libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from pandas import read_csv as csv
def Explore(file, column_names=None, title_line_number=100, head_line_number=20):
#df = pd.read_csv(file, header=None, names=column_names)
df = pd.read_csv(file);print(title_line_number*'*')
print('The dataset has been loaded from | {} | Successfully'.format(file))
print(title_line_number*'*'+'\n')
print(df.head());print(title_line_number*'*'+'\n');print('\n'+title_line_number*'=')
print('The data set has {} number of records, and {} number of columns'.format(df.shape[0],df.shape[1]))
print(title_line_number*'*'+'\n');print('\n'+title_line_number*'=')
print('The Datatypes are:');print(head_line_number*'-');
print(df.dtypes);print(title_line_number*'*'+'\n');print('\n'+title_line_number*'=')
print('Other info:');print(head_line_number*'-');
print(df.info());print(title_line_number*'*'+'\n');print('\n'+title_line_number*'=')
print('Statistical Summary:');print(head_line_number*'-');
print(df.describe());print(title_line_number*'*'+'\n');print('\n'+title_line_number*'=')
return df
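# A minimal usage sketch for Explore(), defined but never called here; it writes a
# throwaway CSV first so the call is self-contained. '_demo.csv' is a placeholder
# filename, not a file that ships with this module.
def _example_explore():
    pd.DataFrame({'a': [1, 2, 3], 'b': [4.0, 5.0, 6.0]}).to_csv('_demo.csv', index=False)
    return Explore('_demo.csv')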
def title(string, icon='-'):
print(string.center(100,icon))
def setJupyterNotebook(max_rows=500,max_cols=500):
pd.set_option('display.max_rows',max_rows)
pd.set_option('display.max_columns',max_cols)
np.set_printoptions(precision=3)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
np.random.seed(8)
import warnings
warnings.filterwarnings('ignore')
def Split(df,target='target',test_size=0.3,random_state=8):
'''
input: pandas dataframe, target='target', test_size=0.3,random_state=8
output: tuple of X_train, X_test, y_train, y_test
'''
X,y = df.drop([target], axis=1),df[target]
from sklearn.model_selection import train_test_split
return train_test_split(X, y, test_size=test_size, random_state=random_state)
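# A minimal usage sketch for Split(), defined but never called here; the column
# names are illustrative assumptions, only the 'target' column name matters.
def _example_split():
    demo = pd.DataFrame({'feature_a': range(10),
                         'feature_b': range(10, 20),
                         'target': [0, 1] * 5})
    X_train, X_test, y_train, y_test = Split(demo, target='target', test_size=0.3)
    return X_train.shape, X_test.shape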
def OHE(data,non_features,cat_features=None): # Use later OneHotEncoder of sklearn and fit_transform(X_train) and transform (X_test)
X_train, X_test, y_train, y_test = data
if cat_features is None:
cat_features = [col for col in X_train.select_dtypes('object').columns if col not in non_features]
X_train_cat, X_test_cat = tuple([pd.concat([pd.get_dummies(X_cat[col],drop_first=False,prefix=col,prefix_sep='_',)\
for col in cat_features],axis=1) for X_cat in data[:2]])
X_train = pd.concat([X_train,X_train_cat],axis=1).drop(cat_features,axis=1)
X_test = pd.concat([X_test,X_test_cat],axis=1).drop(cat_features,axis=1)
OHE_features = list(X_train_cat.columns)
return (X_train, X_test, y_train, y_test), OHE_features
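# A hedged sketch of the scikit-learn route hinted at in the comment on OHE():
# fit OneHotEncoder on the train split only, then reuse the fitted encoder on the
# test split. Assumes scikit-learn >= 1.2 (older releases spell sparse_output as sparse).
def OHE_sklearn_sketch(data, cat_features):
    from sklearn.preprocessing import OneHotEncoder
    X_train, X_test, y_train, y_test = data
    enc = OneHotEncoder(handle_unknown='ignore', sparse_output=False)
    train_arr = enc.fit_transform(X_train[cat_features])
    test_arr = enc.transform(X_test[cat_features])
    cols = list(enc.get_feature_names_out(cat_features))
    X_train = pd.concat([X_train.drop(columns=cat_features).reset_index(drop=True),
                         pd.DataFrame(train_arr, columns=cols)], axis=1)
    X_test = pd.concat([X_test.drop(columns=cat_features).reset_index(drop=True),
                        pd.DataFrame(test_arr, columns=cols)], axis=1)
    return (X_train, X_test, y_train, y_test), cols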
def Balance(data):
'''
input: data = tuple of X_train, X_test, y_train, y_test
target='target' # column name of the target variable
output: data = the balanced version of data
=> FUNCTION DOES BALANCING ONLY ON TRAIN DATASET
'''
X_train, X_test, y_train, y_test = data
target = y_train.name if y_train.name else 'target'  # fall back on 'target' when the Series is unnamed
print('Checking Imbalance');print(y_train.value_counts(normalize=True))
Input = input('Do You Want to Treat Data?\nPress "y" or "n" \n')
if Input.strip() == "y":
print('Treating Imbalance on Train Data')
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import NearMiss
SM = SMOTE(random_state=8, ratio=1.0)
X_train_SM, y_train_SM = SM.fit_sample(X_train, y_train)
X_train_SM = | pd.DataFrame(X_train_SM, columns=X_train.columns) | pandas.DataFrame |
import pandas as pd
import app.data.score_calculator as sc
def get_marks(df, subjects, terms=[1, 2, 3, 4, 5, 6, 7, 8]):
"""
Returns a data frame with marks for given subjects and terms for given schools
Parameters
----------
subjects : list of subjects ["History","Sinhala","English"]
terms : list of terms from 1 to 9 [1,2]
if not specified return marks for all the terms.
"""
columns = []
for subject in subjects:
for term in terms:
columns.append((subject + "_" + str(term)).rstrip())
df = df[columns]
return df
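# A minimal usage sketch for get_marks(), defined but never called here; the toy
# column names are assumptions following the "<subject>_<term>" convention above.
def _example_get_marks():
    toy = pd.DataFrame({'History_1': [55, 70], 'History_2': [60, 72],
                        'English_1': [40, 80], 'English_2': [45, 85]})
    return get_marks(toy, subjects=['History'], terms=[1, 2])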
def get_demographics(df, features=["scholarship", "f_edu", "m_edu", "s_num", "s_edu", "tution"]):
return df[features]
def get_lci(df):
features = []
for i in range(1, 21):
features.append("Lci_" + str(i))
df = df[features]
return df
def put_missing_values(dataframe):
dataframe.replace('-1', -1, inplace=True)
dataframe.replace('#N/A', -1, inplace=True)
dataframe.replace('', -1, inplace=True)
dataframe.fillna(-1, inplace=True)
return dataframe
def discretize_marks(dataframe, subjects, terms=[1, 2, 3, 4, 5, 6, 7, 8]):
dataframe = dataframe.apply(pd.to_numeric, errors='ignore')
columns = []
for subject in subjects:
for term in terms:
columns.append(subject + "_" + str(term))
for column in columns:
grades = []
marks = dataframe[column]
for val in marks:
if val >= 75:
grades.append(1)
elif 75 > val >= 65:
grades.append(2)
elif 65 > val >= 55:
grades.append(3)
elif 55 > val >= 40:
grades.append(4)
elif 40 > val >= 0:
grades.append(5)
else:
grades.append(-1)
grade_series = | pd.Series(grades) | pandas.Series |
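# A hedged alternative sketch (not the record's own code): the same grade
# thresholds as discretize_marks() expressed with pd.cut; -1 flags missing marks.
import numpy as np
import pandas as pd

_marks = pd.Series([82, 70, 60, 45, 10, -1])
_grades = pd.cut(_marks, bins=[0, 40, 55, 65, 75, np.inf],
                 labels=[5, 4, 3, 2, 1], right=False)
_grades = _grades.cat.add_categories([-1]).fillna(-1).astype(int)
print(_grades.tolist())  # [1, 2, 3, 4, 5, -1]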
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import pickle
class SummaryEvaluationPlotter:
def __init__(self):
pass
def load_tvsum_result(self):
results = []
for method in ['Random', 'Human', 'Model']:
df = pd.read_pickle(f'data/processed/tvsum_{method.lower()}_eval.pickle')
df_mean = pd.DataFrame(
{'F1 score': df['mean'],
'type': ['mean'] * len(df),
'dataset': df['ds_name'],
'summary_type': [method] * len(df)
})
df_max = pd.DataFrame(
{'F1 score': df['max'],
'type': ['max'] * len(df),
'dataset': df['ds_name'],
'summary_type': [method] * len(df)
})
df = pd.concat([df_mean, df_max])
results.append(df)
df_all = pd.concat(results)
return df_all
df_all = SummaryEvaluationPlotter().load_tvsum_result()
sns.set_style('white')
sns.set_context("paper", font_scale=1.8)
sns.set_palette('Paired')
g = sns.catplot(x='dataset',
y='F1 score',
order=['tvsum', 'summe', 'moviesum'],
hue='summary_type',
ci=90, col='type',
kind='bar',
data=df_all,
aspect=1.7,
legend=False)
g.set_xlabels("")
g.axes[0, 0].legend(loc='upper left', edgecolor='w')
g.axes[0, 0].set_title('(a) Avg.')
g.axes[0, 1].set_title('(b) Max.')
plt.show()
def load_summe_result(self):
df = pd.read_csv('data/processed/summe_random_eval.csv')
results = []
for metric in ['mean', 'max']:
df_sub = pd.DataFrame(
{'F1 score': df[metric],
'type': [metric] * len(df),
'segmentation': df['seg_type'],
'summary_type': ['Random'] * len(df)
})
results.append(df_sub)
df_rand = | pd.concat(results) | pandas.concat |
from typing import Any
import numpy as np
import pandas as pd
from resources.backend_scripts.feature_selection import FeatureSelection
from resources.backend_scripts.parameter_search import ParameterSearch
DataFrame = pd.DataFrame
NpArray = np.ndarray
class GlobalVariables:
_df: DataFrame = pd.DataFrame()
_fs: bool = False
_ps: bool = False
_fsm: FeatureSelection = None
_psm: ParameterSearch = None
_clf: Any = None
_prd_type: str = ""
_prm: dict = {}
_initial_value = {"data_frame": | pd.DataFrame() | pandas.DataFrame |
"""
Visualise landmarks on images for a particular set/scale or whole dataset
The expected structure for dataset is as follows
* DATASET/<tissue>/scale-<number>pc/<image>
* DATASET/<tissue>/scale-<number>pc/<csv-file>
EXAMPLE
-------
>> python run_visualise_landmarks.py -l dataset -i dataset -o output
>> python handlers/run_visualise_landmarks.py \
-l /datagrid/Medical/dataset_ANHIR/landmarks_annot \
-i /datagrid/Medical/dataset_ANHIR/images_private \
-o /local/borovec/Data/dataset-ANHIR-visu --nb_jobs 2
Copyright (C) 2014-2019 <NAME> <<EMAIL>>
"""
import argparse
import glob
import logging
import os
import sys
import matplotlib
if os.environ.get('DISPLAY', '') == '':
print('No display found. Using non-interactive Agg backend.')
matplotlib.use('Agg')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from birl.utilities.experiments import iterate_mproc_map
from birl.utilities.dataset import estimate_scaling
try:
import cv2 as cv
OPENCV = True
except ImportError:
print('Missing OpenCV, no image warping will be performed.')
OPENCV = False
sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root
from handlers.utilities import NB_THREADS, LANDMARK_COORDS
from handlers.utilities import (
parse_args, load_image, find_images, collect_triple_dir,
estimate_affine_transform, figure_pair_images_landmarks, figure_image_landmarks
)
NAME_FIGURE_PAIR = 'PAIR___%s___AND___%s.pdf'
NAME_FIGURE_PAIR_WARPED = 'PAIR___%s___AND___%s___WARPED.pdf'
def create_arg_parser():
""" argument parser from cmd
SEE: https://docs.python.org/3/library/argparse.html
:return {str: ...}:
"""
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--path_landmarks', type=str, required=False,
help='path to folder with landmarks (particular scale)',
default='dataset')
parser.add_argument('-i', '--path_dataset', type=str, required=False,
help='path to folder with dataset (images)',
default='dataset')
parser.add_argument('-o', '--path_output', type=str, required=False,
help='path to the output directory - visualisation',
default='output')
parser.add_argument('--scales', type=int, required=False, nargs='*',
help='select scales for visualization', default=None)
parser.add_argument('--nb_jobs', type=int, required=False, default=NB_THREADS,
help='number of processes in parallel')
return parser
def load_image_landmarks(lnds_img_pair):
""" load image and related landmarks
:param (str, str) lnds_img_pair: tuple with paths
:return (str, str, ndarray, ndarray): image folder and name, landmarks and image
"""
p_lnds, p_img = lnds_img_pair
name = os.path.splitext(os.path.basename(p_img))[0]
lnd = pd.read_csv(p_lnds)
img = load_image(p_img)
folder = os.path.basename(os.path.dirname(p_lnds))
return folder, name, lnd, img
def warp_affine(img1, img2, lnd1, lnd2):
""" estimate an affine transform and perform image and landmarks warping
:param ndarray img1: reference image
:param ndarray img2: moving image
:param ndarray lnd1: reference landmarks
:param ndarray lnd2: moving landmarks
:return (ndarray, ndarray): moving image and landmarks warped to reference
"""
nb = min(len(lnd1), len(lnd2))
pts1 = lnd1[list(LANDMARK_COORDS)].values[:nb]
pts2 = lnd2[list(LANDMARK_COORDS)].values[:nb]
_, matrix_inv, _, pts2_warp = estimate_affine_transform(pts1, pts2)
lnd2_warp = pd.DataFrame(pts2_warp, columns=LANDMARK_COORDS)
matrix_inv = matrix_inv[:2, :3].astype(np.float64)
try:
img2_warp = cv.warpAffine(img2, matrix_inv, img1.shape[:2][::-1])
except Exception:
logging.exception('fail transform for matrix: \n%r', matrix_inv)
img2_warp = img1
return img2_warp, lnd2_warp
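# A hedged sketch of one common way to estimate such an affine transform;
# estimate_affine_transform() (imported from handlers.utilities, not shown here)
# may differ in detail. Solves [x y 1] @ A = [x' y'] by least squares.
def fit_affine_lstsq(pts_src, pts_dst):
    """Return a 3x2 affine matrix mapping pts_src (N, 2) onto pts_dst (N, 2)."""
    ones = np.ones((len(pts_src), 1))
    A, _, _, _ = np.linalg.lstsq(np.hstack([pts_src, ones]), pts_dst, rcond=None)
    return A  # apply with: np.hstack([pts, ones]) @ A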
def _scale_large_images_landmarks(images, landmarks):
""" scale images and landmarks up to maximal image size
:param [ndarray] images: list of images
:param [ndarray] landmarks: list of landmarks
:return ([ndarray], [ndarray]): lists of images and landmarks
"""
if not images or not OPENCV:
return images, landmarks
scale = estimate_scaling(images)
images = [cv.resize(img, None, fx=scale, fy=scale, interpolation=cv.INTER_LINEAR)
for img in images]
landmarks = [lnds * scale for lnds in landmarks]
return images, landmarks
def export_visual_pairs(lnds_img_pair1, lnds_img_pair2, path_out):
""" export and visualise image/landmarks pair
:param (str, str) lnds_img_pair1: path to image and landmarks
:param (str, str) lnds_img_pair2: path to image and landmarks
:param path_out: output folder
"""
folder1, name1, lnd1, img1 = load_image_landmarks(lnds_img_pair1)
folder2, name2, lnd2, img2 = load_image_landmarks(lnds_img_pair2)
# scale images and landmarks
(img1, img2), (lnd1, lnd2) = _scale_large_images_landmarks((img1, img2), (lnd1, lnd2))
if img1 is None or img2 is None:
logging.warning('Fail to load one of required images.')
return
fig = figure_pair_images_landmarks((lnd1, lnd2), (img1, img2),
names=(name1, name2))
fig.savefig(os.path.join(path_out, NAME_FIGURE_PAIR % (name1, name2)))
plt.close(fig)
if not OPENCV:
return
img2_warp, lnd2_warp = warp_affine(img1, img2, lnd1, lnd2)
del img2, lnd2
fig = figure_pair_images_landmarks((lnd1, lnd2_warp), (img1, img2_warp),
names=(name1, name2 + ' [WARPED AFFINE]'))
fig.savefig(os.path.join(path_out, NAME_FIGURE_PAIR_WARPED % (name1, name2)))
plt.close(fig)
def export_visual_set_scale(d_paths):
""" export, visualise given set in particular scale
:param {str: str} d_paths: dictionary with path patterns
:return int: number of processed items
"""
list_lnds = sorted(glob.glob(os.path.join(d_paths['landmarks'], '*.csv')))
list_lnds_imgs = []
# find relevant images for the given landmarks
for p_lnds in list_lnds:
name_ = os.path.splitext(os.path.basename(p_lnds))[0]
p_imgs = find_images(d_paths['images'], name_)
if p_imgs:
list_lnds_imgs.append((p_lnds, sorted(p_imgs)[0]))
# if there are no images or landmarks, skip it...
if not list_lnds_imgs:
logging.debug('no image-landmarks to show...')
return 0
# create the output folder
if not os.path.isdir(d_paths['output']):
os.makedirs(d_paths['output'])
# draw and export image-landmarks
for p_lnds, p_img in list_lnds_imgs:
name_ = os.path.splitext(os.path.basename(p_img))[0]
img = load_image(p_img)
if img is None:
continue
fig = figure_image_landmarks( | pd.read_csv(p_lnds) | pandas.read_csv |
"""
Module: LMR_proxy_preprocess.py
Purpose: Takes proxy data in their native format (e.g. .pckl file for PAGES2k or collection of
NCDC-templated .txt files) and generates Pandas DataFrames stored in pickle files
containing metadata and actual data from proxy records. The "pickled" DataFrames
are used as input by the Last Millennium Reanalysis software.
Currently, the data is stored as *annual averages* for original records with
subannual data.
Originator : <NAME> | Dept. of Atmospheric Sciences, Univ. of Washington
| January 2016
(Based on code written by <NAME> (U. of Washington) to handle
PAGES(2013) proxies)
Revisions :
- Addition of proxy types corresponding to "deep-times" proxy records, which are
being included in the NCDC-templated proxy collection.
[R. Tardif, U. of Washington, March 2017]
- Addition of recognized time/age definitions used in "deep-times" proxy records
and improved conversion of time/age data to year CE (convention used in LMR).
[R. Tardif, U. of Washington, March 2017]
- Improved detection & treatment of missing data, now using tags found
(or not) in each data file.
[R. Tardif, U. of Washington, March 2017]
- Added functionalities related to the merging of proxies coming from two
sources (PAGES2k phase 2 data contained in a single compressed pickle file
and "in-house" collections contained in NCDC-templated text files).
The possibility to "gaussianize" records and to calculate annual averages
on "tropical year" (Apr to Mar) or calendar year have also been implemented.
[R. Tardif, U. of Washington, Michael Erb, USC, May 2017]
- Renamed the proxy databases to less-confusing convention.
'pages' renamed as 'PAGES2kv1' and 'NCDC' renamed as 'LMRdb'
[R. Tardif, U. of Washington, Sept 2017]
"""
import glob
import os
import os.path
import numpy as np
import pandas as pd
import time as clock
from copy import deepcopy
from scipy import stats
import string
import re
import six
import ast
from os.path import join
import pickle as pickle
import gzip
import calendar
# LMR imports
from LMR_utils import gaussianize
# =========================================================================================
class EmptyError(Exception):
print(Exception)
# =========================================================================================
# ---------------------------------------- MAIN -------------------------------------------
# =========================================================================================
def main():
# ********************************************************************************
# Section for User-defined options: begin
#
#proxy_data_source = 'PAGES2Kv1' # proxies from PAGES2k phase 1 (2013)
# --- *** --- *** --- *** --- *** --- *** --- *** --- *** --- *** --- *** ---
proxy_data_source = 'LMRdb' # proxies from PAGES2k phase 2 (2017) +
# "in-house" collection in NCDC-templated files
# Determine which dataset(s) (NCDC and/or PAGES2kv2) to include in the DF.
# - Both : include_NCDC = True, include_PAGES2kphase2 = True
# - Only NCDC : include_NCDC = True, include_PAGES2kphase2 = False
# - Only PAGES2kv2 : include_NCDC = False, include_PAGES2kphase2 = True
include_NCDC = True
include_PAGES2kphase2 = True
#PAGES2kphase2file = 'PAGES2k_v2.0.0_tempOnly.pklz' # compressed version of the file
PAGES2kphase2file = 'PAGES2k_v2.0.0_tempOnly.pckl'
# version of the LMRdb proxy db to process
# - first set put together, including PAGES2k2013 trees
#LMRdb_dbversion = 'v0.0.0'
# - PAGES2k2013 trees taken out, but with NCDC-templated records from PAGES2k phase 2, version 1.9.0
#LMRdb_dbversion = 'v0.1.0'
# - NCDC collection for LMR + published PAGES2k phase 2 proxies (version 2.0.0). stored in .pklz file
#LMRdb_dbversion = 'v0.2.0'
#LMRdb_dbversion = 'v0.3.0'
# LMRdb_dbversion = 'v0.4.0'
LMRdb_dbversion = 'v1.0.0'
# File containing info on duplicates in proxy records
infoDuplicates = 'Proxy_Duplicates_PAGES2kv2_NCDC_LMR'+LMRdb_dbversion+'.xlsx'
# This option transforms all data to a Gaussian distribution. It should only be used for
# linear regressions, not physically-based PSMs.
gaussianize_data = False
# Specify the type of year to use for data averaging. "calendar year" (Jan-Dec)
# or "tropical year" (Apr-Mar)
year_type = "calendar year"
#year_type = "tropical year"
eliminate_duplicates = True
# --- *** --- *** --- *** --- *** --- *** --- *** --- *** --- *** --- *** ---
#proxy_data_source = 'DTDA'
dtda_dbversion = 'v0.0.0'
# --- *** --- *** --- *** --- *** --- *** --- *** --- *** --- *** --- *** ---
# datadir: directory where the original proxy datafiles are located
datadir = '/home/katabatic/wperkins/data/LMR/data/proxies/'
# outdir: directory where the proxy database files will be created
# The piece before /data/proxies should correspond to your "lmr_path" set in LMR_config.py
outdir = '/home/katabatic/wperkins/data/LMR/data/proxies/'
#
# Section for User-defined options: end
# ***************************************************************
main_begin_time = clock.time()
# first checking that input and output directories exist on disk
if not os.path.isdir(datadir):
print('ERROR: Directory <<datadir>> does not exist. Please revise your'
' entry for this user-defined parameter.')
raise SystemExit(1)
else:
# check that datadir ends with '/' -> expected thereafter
if not datadir[-1] == '/':
datadir = datadir+'/'
if not os.path.isdir(outdir):
print('ERROR: Directory <<outdir>> does not exist. Please revise your'
' entry for this user-defined parameter.')
raise SystemExit(1)
else:
# check that outdir ends with '/' -> expected thereafter
if not outdir[-1] == '/':
outdir = outdir+'/'
if proxy_data_source == 'PAGES2Kv1':
# ============================================================================
# PAGES2Kv1 proxy data -------------------------------------------------------
# ============================================================================
take_average_out = False
fname = datadir + 'Pages2k_DatabaseS1-All-proxy-records.xlsx'
meta_outfile = outdir + 'Pages2kv1_Metadata.df.pckl'
outfile = outdir + 'Pages2kv1_Proxies.df.pckl'
pages_xcel_to_dataframes(fname, meta_outfile, outfile, take_average_out)
elif proxy_data_source == 'LMRdb':
# ============================================================================
# LMRdb proxy data -----------------------------------------------------------
# ============================================================================
datadir = datadir+'LMRdb/ToPandas_'+LMRdb_dbversion+'/'
infoDuplicates = datadir+infoDuplicates
# Some checks
if not os.path.isdir(datadir):
print('ERROR: Directory %s is not found. Directory structure'
' <<datadir>>/LMRdb/ToPandas_vX.Y.Z is expected.'
' Please revise your set-up.' %datadir)
raise SystemExit(1)
if eliminate_duplicates and not os.path.isfile(infoDuplicates):
print('ERROR: eliminate_duplicates parameter set to True but'
' required file %s not found! Please rectify.' %infoDuplicates)
raise SystemExit(1)
meta_outfile = outdir + 'LMRdb_'+LMRdb_dbversion+'_Metadata.df.pckl'
data_outfile = outdir + 'LMRdb_'+LMRdb_dbversion+'_Proxies.df.pckl'
# Specify all proxy types & associated proxy measurements to look for & extract from the data files
# This is to take into account all the possible different names found in the PAGES2kv2 and NCDC data files.
proxy_def = \
{
#old 'Tree Rings_WidthPages' : ['TRW','ERW','LRW'],\
'Tree Rings_WidthPages2' : ['trsgi'],\
'Tree Rings_WidthBreit' : ['trsgi'],\
'Tree Rings_WoodDensity' : ['max_d','min_d','early_d','earl_d','late_d','MXD','density'],\
'Tree Rings_Isotopes' : ['d18O'],\
'Corals and Sclerosponges_d18O' : ['d18O','delta18O','d18o','d18O_stk','d18O_int','d18O_norm','d18o_avg','d18o_ave','dO18','d18O_4'],\
'Corals and Sclerosponges_SrCa' : ['Sr/Ca','Sr_Ca','Sr/Ca_norm','Sr/Ca_anom','Sr/Ca_int'],\
'Corals and Sclerosponges_Rates' : ['ext','calc','calcification','calcification rate', 'composite'],\
'Ice Cores_d18O' : ['d18O','delta18O','delta18o','d18o','d18o_int','d18O_int','d18O_norm','d18o_norm','dO18','d18O_anom'],\
'Ice Cores_dD' : ['deltaD','delD','dD'],\
'Ice Cores_Accumulation' : ['accum','accumu'],\
'Ice Cores_MeltFeature' : ['MFP','melt'],\
'Lake Cores_Varve' : ['varve', 'varve_thickness', 'varve thickness', 'thickness'],\
'Lake Cores_BioMarkers' : ['Uk37', 'TEX86', 'tex86'],\
'Lake Cores_GeoChem' : ['Sr/Ca', 'Mg/Ca','Cl_cont'],\
'Lake Cores_Misc' : ['RABD660_670','X_radiograph_dark_layer','massacum'],\
'Marine Cores_d18O' : ['d18O'],\
'Marine Cores_tex86' : ['tex86'],\
'Marine Cores_uk37' : ['uk37','UK37'],\
'Speleothems_d18O' : ['d18O'],\
'Bivalve_d18O' : ['d18O'],\
# DADT proxies
# 'Marine Cores_d18Opachyderma' : ['d18O_pachyderma'],\
# 'Marine Cores_d18Obulloides' : ['d18O_bulloides'],\
# 'Marine Cores_tex86' : ['tex86'],\
# Proxy types present in the database but which should not be included/assimilated
# 'Corals and Sclerosponges_d14C' : ['d14C','d14c','ac_d14c'],\
# 'Corals and Sclerosponges_d13C' : ['d13C','d13c','d13c_ave','d13c_ann_ave','d13C_int'],\
# 'Corals and Sclerosponges_Sr' : ['Sr'],\
# 'Corals and Sclerosponges_BaCa' : ['Ba/Ca'],\
# 'Corals and Sclerosponges_CdCa' : ['Cd/Ca'],\
# 'Corals and Sclerosponges_MgCa' : ['Mg/Ca'],\
# 'Corals and Sclerosponges_UCa' : ['U/Ca','U/Ca_anom'],\
# 'Corals and Sclerosponges_Pb' : ['Pb'],\
# 'Speleothems_d13C' : ['d13C'],\
# 'Borehole_Temperature' : ['temperature'],\
# 'Hybrid_Temperature' : ['temperature'],\
# 'Documents_Temperature' : ['temperature'],\
# 'Tree Rings_Temperature' : ['temperature'],\
# 'Lake Cores_Temperature' : ['temperature'],\
# 'Marine Cores_Temperature' : ['temperature'],\
# 'Corals and Sclerosponges_Temperature' : ['temperature'],\
# 'Climate Reconstructions' : ['sst_ORSTOM','sss_ORSTOM','temp_anom'],\
}
# --- data from LMR's NCDC-templated files
if include_NCDC:
ncdc_dict = ncdc_txt_to_dict(datadir, proxy_def, year_type, gaussianize_data)
else:
ncdc_dict = []
# --- PAGES2k phase2 (2017) data
if include_PAGES2kphase2:
pages2kv2_dict = pages2kv2_pickle_to_dict(datadir, PAGES2kphase2file, proxy_def, year_type, gaussianize_data)
else:
pages2kv2_dict = []
# --- Merge datasets, scrub duplicates and write metadata & data to file
merge_dicts_to_dataframes(proxy_def, ncdc_dict, pages2kv2_dict, meta_outfile, data_outfile, infoDuplicates, eliminate_duplicates)
elif proxy_data_source == 'DTDA':
# ============================================================================
# DTDA project proxy data ----------------------------------------------------
# ============================================================================
take_average_out = False
datadir = datadir+'DTDA/'
fname = datadir + 'DTDA_proxies_'+dtda_dbversion+'.xlsx'
meta_outfile = outdir + 'DTDA_'+dtda_dbversion+'_Metadata.df.pckl'
outfile = outdir + 'DTDA_'+dtda_dbversion+'_Proxies.df.pckl'
DTDA_xcel_to_dataframes(fname, meta_outfile, outfile, take_average_out)
else:
raise SystemExit('ERROR: Unkown proxy data source! Exiting!')
elapsed_time = clock.time() - main_begin_time
print('Build of integrated proxy database completed in %s mins' %str(elapsed_time/60.))
# =========================================================================================
# ------------------------------------- END OF MAIN ---------------------------------------
# =========================================================================================
# =========================================================================================
def is_number(s):
try:
float(s)
return True
except ValueError:
pass
try:
import unicodedata
unicodedata.numeric(s)
return True
except (TypeError, ValueError):
pass
return False
# =========================================================================================
def compute_annual_means(time_raw,data_raw,valid_frac,year_type):
"""
Computes annual-means from raw data.
Inputs:
time_raw : Original time axis
data_raw : Original data
valid_frac : The fraction of sub-annual data necessary to create annual mean. Otherwise NaN.
year_type : "calendar year" (Jan-Dec) or "tropical year" (Apr-Mar)
Outputs: time_annual, data_annual
Authors: <NAME>, Univ. of Washington; <NAME>, Univ. of Southern California
"""
# Check if dealing with multiple chronologies in one data stream (for NCDC files)
array_shape = data_raw.shape
if len(array_shape) == 2:
nbtimes, nbvalid = data_raw.shape
elif len(array_shape) == 1:
nbtimes, = data_raw.shape
nbvalid = 1
else:
raise SystemExit('ERROR in compute_annual_means: Unrecognized shape of data input array.')
time_between_records = np.diff(time_raw, n=1)
# Temporal resolution of the data, calculated as the mode of time difference.
time_resolution = abs(stats.mode(time_between_records)[0][0])
# check if time_resolution = 0.0 !!! sometimes adjacent records are tagged at same time ...
if time_resolution == 0.0:
print('***WARNING! Found adjacent records with same times!')
inderr = np.where(time_between_records == 0.0)
print(inderr)
time_between_records = np.delete(time_between_records,inderr)
time_resolution = abs(stats.mode(time_between_records)[0][0])
max_nb_per_year = int(1.0/time_resolution)
if time_resolution <=1.0:
proxy_resolution = int(1.0) # coarse-graining to annual
else:
proxy_resolution = int(time_resolution)
# Get rounded integer values of all years present in record.
years_all = [int(np.floor(time_raw[k])) for k in range(0,len(time_raw))]
years = list(set(years_all)) # 'set' is used to get unique values in list
years = sorted(years) # sort the list
years = np.insert(years,0,years[0]-1) # <NAME>
# bounds, for calendar year : [years_beg,years_end[
years_beg = np.asarray(years,dtype=np.float64) # inclusive lower bound
years_end = years_beg + 1. # exclusive upper bound
# If some of the time values are floats (sub-annual resolution)
# and year_type is tropical_year, adjust the years to cover the
# tropical year (Apr-Mar).
if np.equal(np.mod(time_raw,1),0).all() == False and year_type == 'tropical year':
print("Tropical year averaging...")
# modify bounds defining the "year"
for i, yr in enumerate(years):
# beginning of interval
if calendar.isleap(yr):
years_beg[i] = float(yr)+((31+29+31)/float(366))
else:
years_beg[i] = float(yr)+((31+28+31)/float(365))
# end of interval
if calendar.isleap(yr+1):
years_end[i] = float(yr+1)+((31+29+31)/float(366))
else:
years_end[i] = float(yr+1)+((31+28+31)/float(365))
time_annual = np.asarray(years,dtype=np.float64)
data_annual = np.zeros(shape=[len(years),nbvalid], dtype=np.float64)
# fill with NaNs for default values
data_annual[:] = np.NAN
# Calculate the mean of all data points with the same year.
for i in range(len(years)):
ind = [j for j, year in enumerate(time_raw) if (year >= years_beg[i]) and (year < years_end[i])]
nbdat = len(ind)
# TODO: check nb of non-NaN values !!!!! ... ... ... ... ... ...
if time_resolution <= 1.0:
frac = float(nbdat)/float(max_nb_per_year)
if frac > valid_frac:
data_annual[i,:] = np.nanmean(data_raw[ind],axis=0)
else:
if nbdat > 1:
print('***WARNING! Found multiple records in same year in data with multiyear resolution!')
print(' year= %d %d' %(years[i], nbdat))
# Note: this calculates the mean if multiple entries found
data_annual[i,:] = np.nanmean(data_raw[ind],axis=0)
# check and modify time_annual array to reflect only the valid data present in the annual record
# for correct tagging of "Oldest" and "Youngest" data
indok = np.where(np.isfinite(data_annual))[0]
keep = np.arange(indok[0],indok[-1]+1,1)
return time_annual[keep], data_annual[keep,:], proxy_resolution
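# A minimal usage sketch for compute_annual_means(), defined but never called by
# this module: two years of synthetic monthly values averaged with a 0.5
# valid-fraction threshold on a calendar-year basis.
def _example_compute_annual_means():
    t_demo = np.arange(2000.0, 2002.0, 1.0 / 12.0)   # monthly time axis
    x_demo = np.sin(2.0 * np.pi * t_demo)            # synthetic proxy values
    years, annual, resolution = compute_annual_means(t_demo, x_demo, 0.5, 'calendar year')
    return years, annual.squeeze(), resolution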
# ===================================================================================
# For PAGES2k S1 proxy data ---------------------------------------------------------
# ===================================================================================
def pages_xcel_to_dataframes(filename, metaout, dataout, take_average_out):
"""
Takes in Pages2K CSV and converts it to dataframe storage. This increases
size on disk due to the joining along the time index (lots of null values).
Makes it easier to query and grab data for the proxy experiments.
:param filename:
:param metaout:
:param dataout:
:return:
Author: <NAME>, Univ. of Washington
"""
# check that file <<filename>> exists
if not os.path.isfile(filename):
print('ERROR: File %s does not exist. Please make sure'
' input file is located in right directory.' %filename)
raise SystemExit(1)
meta_sheet_name = 'Metadata'
metadata = pd.read_excel(filename, meta_sheet_name)
# rename 'PAGES ID' column header to more general 'Proxy ID'
metadata.rename(columns = {'PAGES ID':'Proxy ID'},inplace=True)
metadata.to_pickle(metaout)
record_sheet_names = ['AntProxies', 'ArcProxies', 'AsiaProxies',
'AusProxies', 'EurProxies', 'NAmPol', 'NAmTR',
'SAmProxies']
for i, sheet in enumerate(record_sheet_names):
tmp = pd.read_excel(filename, sheet)
# for key, series in tmp.iteritems():
# h5store[key] = series[series.notnull()]
if i == 0:
df = tmp
else:
# SQL like table join along index
df = df.merge(tmp, how='outer', on='PAGES 2k ID')
#fix index and column name
col0 = df.columns[0]
newcol0 = df[col0][0]
df.set_index(col0, drop=True, inplace=True)
df.index.name = newcol0
df = df.ix[1:]
df.sort_index(inplace=True)
if take_average_out:
# copy of dataframe
df_tmp = df
# fill dataframe with new values where temporal averages over proxy records are subtracted
df = df_tmp.sub(df_tmp.mean(axis=0), axis=1)
# TODO: make sure year index is consecutive
#write data to file
df = df.to_sparse()
df.to_pickle(dataout)
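# A minimal sketch (never called by this module) of why the outer join above
# inflates the table: records covering different years contribute NaNs elsewhere.
def _example_outer_join_nulls():
    a = pd.DataFrame({'Year C.E.': [1900, 1901], 'proxy_A': [0.1, 0.2]})
    b = pd.DataFrame({'Year C.E.': [1901, 1902], 'proxy_B': [1.5, 1.6]})
    return a.merge(b, how='outer', on='Year C.E.')  # proxy_A NaN in 1902, proxy_B NaN in 1900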
# ===================================================================================
# For PAGES2k v2.0.0 proxy data ---------------------------------------------------------
# ===================================================================================
def pages2kv2_pickle_to_dataframes(datadir, metaout, dataout, eliminate_duplicates, year_type, gaussianize_data):
"""
Takes in a Pages2k pckl file and converts it to dataframe storage.
Authors: <NAME>, Univ. of Washington, Jan 2016.
<NAME>, Univ. of Southern California, Feb 2017
"""
# ===============================================================================
# Upload proxy data from Pages2k v2 pickle file
# ===============================================================================
# Open the pickle file containing the Pages2k data
f = gzip.open(datadir+'PAGES2k_v2.0.0_tempOnly.pklz','rb')
pages2k_data = pickle.load(f)
f.close()
# ===============================================================================
# Produce a summary of uploaded proxy data &
# generate integrated database in pandas DataFrame format
# ===============================================================================
# Summary of the final_proxy_list
nbsites = len(pages2k_data)
print('----------------------------------------------------------------------')
print(' SUMMARY: ')
print(' Total nb of records : ', nbsites)
print(' ------------------------------------------------------')
tot = []
# Loop over proxy types specified in *main*
counter = 0
# Build up pandas DataFrame
metadf = pd.DataFrame()
headers = ['NCDC ID','Site name','Lat (N)','Lon (E)','Elev','Archive type','Proxy measurement','Resolution (yr)',\
'Oldest (C.E.)','Youngest (C.E.)','Location','climateVariable','Realm','Relation_to_climateVariable',\
'Seasonality', 'Databases']
nb = []
for counter in range(0,len(pages2k_data)):
#counter = 13 # An example of a sub-annual record
# Give each record a unique descriptive name
pages2k_data[counter]['siteID'] = "PAGES2kv2_"+pages2k_data[counter]['dataSetName']+"_"+pages2k_data[counter]['paleoData_pages2kID']+":"+pages2k_data[counter]['paleoData_variableName']
nb.append(pages2k_data[counter]['siteID'])
print("Processing metadata", counter+1, "/", len(pages2k_data), ":",
pages2k_data[counter]['paleoData_pages2kID'])
# If the time axis goes backwards (i.e. newer to older), reverse it.
if pages2k_data[counter]['year'][-1] - pages2k_data[counter]['year'][-2] < 0:
pages2k_data[counter]['year'].reverse()
pages2k_data[counter]['paleoData_values'].reverse()
# If subannual, average up to annual --------------------------------------------------------
time_raw = np.array(pages2k_data[counter]['year'],dtype=np.float)
data_raw = np.array(pages2k_data[counter]['paleoData_values'],dtype=np.float)
# Remove values where either time or data is nan.
nan_indices = np.isnan(time_raw)+np.isnan(data_raw)
time_raw = time_raw[~nan_indices]
data_raw = data_raw[~nan_indices]
valid_frac = 0.5
# Use the following function to make annual-means.
# Inputs: time_raw, data_raw, valid_frac, year_type. Outputs: time_annual, data_annual
time_annual, data_annual, proxy_resolution = compute_annual_means(time_raw,data_raw,valid_frac,year_type)
# If gaussianize_data is set to true, transform the proxy data to Gaussian.
# This option should only be used when using regressions, not physically-based PSMs.
if gaussianize_data == True:
data_annual = gaussianize(data_annual)
# Write the annual data to the dictionary, so they can use written to
# the data file outside of this loop.
pages2k_data[counter]['time_annual'] = time_annual
pages2k_data[counter]['data_annual'] = data_annual
# Rename the proxy types in the same convention as the NCDC dataset.
# Proxy types not renamed: bivalve, borehole, documents, hybrid
if (pages2k_data[counter]['archiveType'] == 'coral') or (pages2k_data[counter]['archiveType'] == 'sclerosponge'):
pages2k_data[counter]['archiveType'] = 'Corals and Sclerosponges'
elif pages2k_data[counter]['archiveType'] == 'glacier ice':
pages2k_data[counter]['archiveType'] = 'Ice Cores'
elif pages2k_data[counter]['archiveType'] == 'lake sediment':
pages2k_data[counter]['archiveType'] = 'Lake Cores'
elif pages2k_data[counter]['archiveType'] == 'marine sediment':
pages2k_data[counter]['archiveType'] = 'Marine Cores'
elif pages2k_data[counter]['archiveType'] == 'speleothem':
pages2k_data[counter]['archiveType'] = 'Speleothems'
elif pages2k_data[counter]['archiveType'] == 'tree':
pages2k_data[counter]['archiveType'] = 'Tree Rings'
# Rename some of the the proxy measurements to be more standard.
if (pages2k_data[counter]['archiveType'] == 'Ice Cores') and (pages2k_data[counter]['paleoData_variableName'] == 'd18O1'):
pages2k_data[counter]['paleoData_variableName'] = 'd18O'
elif (pages2k_data[counter]['archiveType'] == 'Lake Cores') and (pages2k_data[counter]['paleoData_variableName'] == 'temperature1'):
pages2k_data[counter]['paleoData_variableName'] = 'temperature'
elif (pages2k_data[counter]['archiveType'] == 'Lake Cores') and (pages2k_data[counter]['paleoData_variableName'] == 'temperature3'):
pages2k_data[counter]['paleoData_variableName'] = 'temperature'
# Not all records have data for elevation. In these cases, set elevation to nan.
if 'geo_meanElev' not in pages2k_data[counter]:
pages2k_data[counter]['geo_meanElev'] = np.nan
# Ensure lon is in [0,360] domain
if pages2k_data[counter]['geo_meanLon'] < 0.0:
pages2k_data[counter]['geo_meanLon'] = 360 + pages2k_data[counter]['geo_meanLon']
# Determine the seasonality of the record.
# Seasonal names were mapped the three-month climatological seasons.
# 'early summer' was mapped to the first two months of summer only. Is this right????????????
# 'growing season' was mapped to summer.
season_orig = pages2k_data[counter]['climateInterpretation_seasonality']
if any(char.isdigit() for char in season_orig):
pages2k_data_seasonality = list(map(int,season_orig.split(' ')))
elif season_orig == 'annual':
if year_type == 'tropical year': pages2k_data_seasonality = [4,5,6,7,8,9,10,11,12,13,14,15]
else: pages2k_data_seasonality = [1,2,3,4,5,6,7,8,9,10,11,12]
elif season_orig == 'summer':
if pages2k_data[counter]['geo_meanLat'] >= 0: pages2k_data_seasonality = [6,7,8]
else: pages2k_data_seasonality = [-12,1,2]
elif season_orig == 'winter':
if pages2k_data[counter]['geo_meanLat'] >= 0: pages2k_data_seasonality = [-12,1,2]
else: pages2k_data_seasonality = [6,7,8]
elif season_orig == 'winter/spring':
if pages2k_data[counter]['geo_meanLat'] >= 0: pages2k_data_seasonality = [-12,1,2,3,4,5]
else: pages2k_data_seasonality = [6,7,8,9,10,11]
elif season_orig == 'early summer':
if pages2k_data[counter]['geo_meanLat'] >= 0: pages2k_data_seasonality = [6,7]
else: pages2k_data_seasonality = [-12,1]
elif season_orig == 'growing season':
if pages2k_data[counter]['geo_meanLat'] >= 0: pages2k_data_seasonality = [6,7,8]
else: pages2k_data_seasonality = [-12,1,2]
else:
if year_type == 'tropical year': pages2k_data_seasonality = [4,5,6,7,8,9,10,11,12,13,14,15]
else: pages2k_data_seasonality = [1,2,3,4,5,6,7,8,9,10,11,12]
# Spell out the name of the interpretation variable.
if pages2k_data[counter]['climateInterpretation_variable'] == 'T':
pages2k_data[counter]['climateInterpretation_variable'] = 'temperature'
# Save to a dataframe
frame = pd.DataFrame({'a':pages2k_data[counter]['siteID'], 'b':pages2k_data[counter]['geo_siteName'], 'c':pages2k_data[counter]['geo_meanLat'], \
'd':pages2k_data[counter]['geo_meanLon'], 'e':pages2k_data[counter]['geo_meanElev'], \
'f':pages2k_data[counter]['archiveType'], 'g':pages2k_data[counter]['paleoData_variableName'], \
'h':proxy_resolution, 'i':pages2k_data[counter]['time_annual'][0], 'j':pages2k_data[counter]['time_annual'][-1], \
'k':pages2k_data[counter]['geo_pages2kRegion'], 'l':pages2k_data[counter]['climateInterpretation_variable'], \
'm':pages2k_data[counter]['climateInterpretation_variableDetail'], \
'n':pages2k_data[counter]['climateInterpretation_interpDirection'], 'o':None, 'p':None}, index=[counter])
# To get seasonality & databases *lists* into columns 'o' and 'p' of DataFrame
frame.set_value(counter,'o',pages2k_data_seasonality)
frame.set_value(counter,'p',['PAGES2kv2'])
# Append to main DataFrame
metadf = metadf.append(frame)
#print ' ', '{:40}'.format(key), ' : ', len(nb)
tot.append(len(nb))
nbtot = sum(tot)
print(' ------------------------------------------------------')
print(' ','{:40}'.format('Total:'), ' : ', nbtot)
print('----------------------------------------------------------------------')
print(' ')
# Redefine column headers
metadf.columns = headers
# Write metadata to file
print('Now writing metadata to file:', metaout)
metadf.to_pickle(metaout)
# -----------------------------------------------------
# Build the proxy **data** DataFrame and output to file
# -----------------------------------------------------
print(' ')
print('Now creating & loading the data in the pandas DataFrame...')
print(' ')
for counter in range(0,len(pages2k_data)):
print("Processing metadata", counter+1, "/", len(pages2k_data), ":",
pages2k_data[counter]['paleoData_pages2kID'])
nbdata = len(pages2k_data[counter]['time_annual'])
# Load data in numpy array
frame_data = np.zeros(shape=[nbdata,2])
frame_data[:,0] = pages2k_data[counter]['time_annual']
frame_data[:,1] = pages2k_data[counter]['data_annual']
if counter == 0:
# Build up pandas DataFrame
header = ['NCDC ID', pages2k_data[counter]['siteID']]
df = pd.DataFrame({'a':frame_data[:,0], 'b':frame_data[:,1]})
df.columns = header
else:
frame = pd.DataFrame({'NCDC ID':frame_data[:,0], pages2k_data[counter]['siteID']:frame_data[:,1]})
df = df.merge(frame, how='outer', on='NCDC ID')
# Fix DataFrame index and column name
col0 = df.columns[0]
df.set_index(col0, drop=True, inplace=True)
df.index.name = 'Year C.E.'
df.sort_index(inplace=True)
# Write data to file
print('Now writing to file:', dataout)
df.to_pickle(dataout)
print(' ')
print('Done!')
# ===================================================================================
# For DTDA project proxy data -------------------------------------------------------
# ===================================================================================
def DTDA_xcel_to_dataframes(filename, metaout, dataout, take_average_out):
"""
Takes in Pages2K CSV and converts it to dataframe storage. This increases
size on disk due to the joining along the time index (lots of null values).
Makes it easier to query and grab data for the proxy experiments.
:param filename:
:param metaout:
:param dataout:
:return:
Author: <NAME>, Univ. of Washington
Based on pages_xcel_to_dataframes function written by
<NAME> (Univ. of Washington)
"""
meta_sheet_name = 'Metadata'
metadata = pd.read_excel(filename, meta_sheet_name)
# add a "Databases" column and set to LMR
metadata.loc[:,'Databases'] = '[LMR]'
# add a "Seasonality" column and set to [1,2,3,4,5,6,7,8,9,10,11,12]
metadata.loc[:,'Seasonality'] = '[1,2,3,4,5,6,7,8,9,10,11,12]'
metadata.loc[:,'Elev'] = 0.0
nbrecords = len(metadata)
# One proxy record per sheet, all named as DataXXX
record_sheet_names = ['Data'+str("{0:03d}".format(i+1)) for i in range(nbrecords)]
for i, sheet in enumerate(record_sheet_names):
pdata = pd.read_excel(filename, sheet)
# rounding age data to nearest year (<NAME>, pers. comm.)
age = (pdata[pdata.columns[0]][1:]).astype('float').round()
pdata[pdata.columns[0]][1:] = age
# -- just for print out - looking into time axis for each record
# age difference between consecutive data
diff = np.diff(pdata[pdata.columns[0]][1:], 1)
print('{:10s}'.format(pdata.columns[1]), ' : temporal resolution : mean=', '{:7.1f}'.format(np.mean(diff)), ' median=', '{:7.1f}'.format(np.median(diff)),\
' min=', '{:7.1f}'.format(np.min(diff)), ' max=', '{:7.1f}'.format(np.max(diff)))
resolution = np.mean(diff) # take average difference as representative "resolution"
# update resolution info in the metadata
metadata.loc[i,'Resolution (yr)'] = int(resolution)
if i == 0:
df = pdata
else:
# SQL like table join along index
df = df.merge(pdata, how='outer', on='Proxy ID')
#fix index and column name
col0 = df.columns[0]
# check time definition and convert to year CE if needed
newcol0 = df[col0][0]
if newcol0 == 'Year C.E.' or newcol0 == 'Year CE':
# do nothing
pass
elif newcol0 == 'Year BP':
newcol0 = 'Year C.E.'
df[col0][1:] = 1950. - df[col0][1:]
else:
print('Unrecognized time definition...')
raise SystemExit()
df.set_index(col0, drop=True, inplace=True)
df.index.name = newcol0
df = df.ix[1:]
df.sort_index(inplace=True)
# Checking for duplicate ages in proxy record. If present, calculate average (<NAME>, pers. comm.)
df = df.astype(float)
df_f = df.groupby(df.index).mean()
if take_average_out:
# copy of dataframe
df_tmp = df_f
# fill dataframe with new values where temporal averages over proxy records are subtracted
df_f = df_tmp.sub(df_tmp.mean(axis=0), axis=1)
# TODO: make sure year index is consecutive
#write data to file
df_f.to_pickle(dataout)
# Make sure ...
metadata['Archive type'] = metadata['Archive type'].astype(str)
# Add 'Youngest (C.E.)', 'Oldest (C.E.)' 'Elev' and 'Seasonality' info to metadata
sites = list(df_f)
for s in sites:
# 'Youngest' and 'Oldest' info based on the age data
values = df_f[s]
values = values[values.notnull()]
times = values.index.values
meta_ind = metadata[metadata['Proxy ID'] == s].index
metadata.loc[meta_ind,'Oldest (C.E.)'] = np.min(times)
metadata.loc[meta_ind,'Youngest (C.E.)'] = np.max(times)
# write metadata to file
metadata.to_pickle(metaout)
# ===================================================================================
# For PAGES2k phase 2 (2017) proxy data ---------------------------------------------
# ===================================================================================
def pages2kv2_pickle_to_dict(datadir, pages2kv2_file, proxy_def, year_type, gaussianize_data):
"""
Takes in a Pages2k pickle (pklz) file and converts it to python dictionary storage.
Authors: <NAME>, Univ. of Washington, Jan 2016.
<NAME>, Univ. of Southern California, Feb 2017
Revisions:
- Modified output, storing proxy information in dictionary returned
by the function, instead of storing in pandas dataframe dumped to
pickle file, as done in the original version by <NAME>.
[<NAME>, U. of Washington, May 2017]
"""
valid_frac = 0.5
# ===============================================================================
# Upload proxy data from Pages2k v2 pickle file
# ===============================================================================
begin_time = clock.time()
# Open the pickle file containing the Pages2k data, if it exists in target directory
infile = os.path.join(datadir, pages2kv2_file)
if os.path.isfile(infile):
print('Data from PAGES2k phase 2:')
print(' Uploading data from %s ...' %infile)
try:
# try to read as a straight pckl file
pages2k_data = pd.read_pickle(infile)
# f = open(infile,'rb')
# pages2k_data = pickle.load(f)
# f.close()
except:
# failed to read so try as a compressed pckl (pklz) file
try:
f = gzip.open(infile,'rb')
pages2k_data = pickle.load(f)
f.close()
except:
raise SystemExit(('ERROR: Could not read the PAGES2kv2 proxy file {}'
' as a regular or compressed pickle file. Unrecognized format!').format(pages2kv2_file))
else:
raise SystemExit(('ERROR: Option to include PAGES2kv2 proxies enabled'
' but corresponding data file could not be found!'
' Please place file {} in directory {}').format(pages2kv2_file,datadir))
# Summary of the uploaded data
nbsites = len(pages2k_data)
proxy_dict_pagesv2 = {}
tot = []
nb = []
for counter in range(0,nbsites):
# Give each record a unique descriptive name
pages2k_data[counter]['siteID'] = "PAGES2kv2_"+pages2k_data[counter]['dataSetName']+\
"_"+pages2k_data[counter]['paleoData_pages2kID']+\
":"+pages2k_data[counter]['paleoData_variableName']
nb.append(pages2k_data[counter]['siteID'])
print(' Processing %s/%s : %s' %(str(counter+1), str(len(pages2k_data)), pages2k_data[counter]['paleoData_pages2kID']))
# Look for publication title & authors
if 'NEEDS A TITLE' not in pages2k_data[counter]['pub1_title']:
pages2k_data[counter]['pub_title'] = pages2k_data[counter]['pub1_title']
pages2k_data[counter]['pub_author'] = pages2k_data[counter]['pub1_author']
else:
if 'NEEDS A TITLE' not in pages2k_data[counter]['pub2_title']:
pages2k_data[counter]['pub_title'] = pages2k_data[counter]['pub2_title']
pages2k_data[counter]['pub_author'] = pages2k_data[counter]['pub2_author']
else:
pages2k_data[counter]['pub_title'] = 'Unknown'
pages2k_data[counter]['pub_author'] = 'Unknown'
# If the time axis goes backwards (i.e. newer to older), reverse it.
if pages2k_data[counter]['year'][-1] - pages2k_data[counter]['year'][-2] < 0:
pages2k_data[counter]['year'].reverse()
pages2k_data[counter]['paleoData_values'].reverse()
# If subannual, average up to annual --------------------------------------------------------
time_raw = np.array(pages2k_data[counter]['year'],dtype=np.float)
data_raw = np.array(pages2k_data[counter]['paleoData_values'],dtype=np.float)
# Remove values where either time or data is nan.
nan_indices = np.isnan(time_raw)+np.isnan(data_raw)
time_raw = time_raw[~nan_indices]
data_raw = data_raw[~nan_indices]
# Use the following function to make annual-means.
# Inputs: time_raw, data_raw, valid_frac, year_type. Outputs: time_annual, data_annual
time_annual, data_annual, proxy_resolution = compute_annual_means(time_raw,data_raw,valid_frac,year_type)
data_annual = np.squeeze(data_annual)
# If gaussianize_data is set to true, transform the proxy data to Gaussian.
# This option should only be used when using regressions, not physically-based PSMs.
if gaussianize_data == True:
data_annual = gaussianize(data_annual)
# Write the annual data to the dictionary, so they can use written to
# the data file outside of this loop.
pages2k_data[counter]['time_annual'] = time_annual
pages2k_data[counter]['data_annual'] = data_annual
# Rename the proxy types in the same convention as the LMR's NCDC dataset.
# Proxy types not renamed, except capitalizing 1st letter: bivalve, borehole, documents, hybrid
if (pages2k_data[counter]['archiveType'] == 'coral') or (pages2k_data[counter]['archiveType'] == 'sclerosponge'):
pages2k_data[counter]['archiveType'] = 'Corals and Sclerosponges'
elif pages2k_data[counter]['archiveType'] == 'glacier ice':
pages2k_data[counter]['archiveType'] = 'Ice Cores'
elif pages2k_data[counter]['archiveType'] == 'lake sediment':
pages2k_data[counter]['archiveType'] = 'Lake Cores'
elif pages2k_data[counter]['archiveType'] == 'marine sediment':
pages2k_data[counter]['archiveType'] = 'Marine Cores'
elif pages2k_data[counter]['archiveType'] == 'speleothem':
pages2k_data[counter]['archiveType'] = 'Speleothems'
elif pages2k_data[counter]['archiveType'] == 'tree':
pages2k_data[counter]['archiveType'] = 'Tree Rings'
elif pages2k_data[counter]['archiveType'] == 'bivalve':
pages2k_data[counter]['archiveType'] = 'Bivalve'
elif pages2k_data[counter]['archiveType'] == 'borehole':
pages2k_data[counter]['archiveType'] = 'Borehole'
elif pages2k_data[counter]['archiveType'] == 'documents':
pages2k_data[counter]['archiveType'] = 'Documents'
elif pages2k_data[counter]['archiveType'] == 'hybrid':
pages2k_data[counter]['archiveType'] = 'Hybrid'
# Rename some of the the proxy measurements to be more standard.
if (pages2k_data[counter]['archiveType'] == 'Ice Cores') and (pages2k_data[counter]['paleoData_variableName'] == 'd18O1'):
pages2k_data[counter]['paleoData_variableName'] = 'd18O'
elif (pages2k_data[counter]['archiveType'] == 'Tree Rings') and (pages2k_data[counter]['paleoData_variableName'] == 'temperature1'):
pages2k_data[counter]['paleoData_variableName'] = 'temperature'
elif (pages2k_data[counter]['archiveType'] == 'Lake Cores') and (pages2k_data[counter]['paleoData_variableName'] == 'temperature1'):
pages2k_data[counter]['paleoData_variableName'] = 'temperature'
elif (pages2k_data[counter]['archiveType'] == 'Lake Cores') and (pages2k_data[counter]['paleoData_variableName'] == 'temperature3'):
pages2k_data[counter]['paleoData_variableName'] = 'temperature'
# Not all records have data for elevation. In these cases, set elevation to nan.
if 'geo_meanElev' not in pages2k_data[counter]:
pages2k_data[counter]['geo_meanElev'] = np.nan
# Ensure lon is in [0,360] domain
if pages2k_data[counter]['geo_meanLon'] < 0.0:
pages2k_data[counter]['geo_meanLon'] = 360 + pages2k_data[counter]['geo_meanLon']
# Determine the seasonality of the record.
# Seasonal names were mapped the three-month climatological seasons.
# 'early summer' was mapped to the first two months of summer only. Is this right????????????
# 'growing season' was mapped to summer.
season_orig = pages2k_data[counter]['climateInterpretation_seasonality']
if any(char.isdigit() for char in season_orig):
pages2k_data_seasonality = list(map(int,season_orig.split(' ')))
elif season_orig == 'annual':
if year_type == 'tropical year': pages2k_data_seasonality = [4,5,6,7,8,9,10,11,12,13,14,15]
else: pages2k_data_seasonality = [1,2,3,4,5,6,7,8,9,10,11,12]
elif season_orig == 'summer':
if pages2k_data[counter]['geo_meanLat'] >= 0: pages2k_data_seasonality = [6,7,8]
else: pages2k_data_seasonality = [-12,1,2]
elif season_orig == 'winter':
if pages2k_data[counter]['geo_meanLat'] >= 0: pages2k_data_seasonality = [-12,1,2]
else: pages2k_data_seasonality = [6,7,8]
elif season_orig == 'winter/spring':
if pages2k_data[counter]['geo_meanLat'] >= 0: pages2k_data_seasonality = [-12,1,2,3,4,5]
else: pages2k_data_seasonality = [6,7,8,9,10,11]
elif season_orig == 'early summer':
if pages2k_data[counter]['geo_meanLat'] >= 0: pages2k_data_seasonality = [6,7]
else: pages2k_data_seasonality = [-12,1]
elif season_orig == 'growing season':
if pages2k_data[counter]['geo_meanLat'] >= 0: pages2k_data_seasonality = [6,7,8]
else: pages2k_data_seasonality = [-12,1,2]
else:
if year_type == 'tropical year': pages2k_data_seasonality = [4,5,6,7,8,9,10,11,12,13,14,15]
else: pages2k_data_seasonality = [1,2,3,4,5,6,7,8,9,10,11,12]
# If the year type is "tropical", change all records tagged as "annual" to be tropical-year.
if year_type == 'tropical year' and pages2k_data_seasonality == [1,2,3,4,5,6,7,8,9,10,11,12]:
pages2k_data_seasonality = [4,5,6,7,8,9,10,11,12,13,14,15]
# Some code to fix two erroneous seasonality metadata found in the PAGES2kv2 file:
# The data in the file itself should be fixed, but error dealt with here in the mean time.
if pages2k_data_seasonality == [6,7,2008]:
pages2k_data_seasonality = [6,7,8]
elif pages2k_data_seasonality == [7,8,2009]:
pages2k_data_seasonality = [7,8,9]
# Spell out the name of the interpretation variable.
if pages2k_data[counter]['climateInterpretation_variable'] == 'T':
pages2k_data[counter]['climateInterpretation_variable'] = 'temperature'
tot.append(len(nb))
# ----------------------------------------------------------------------
# Filter the records which correspond to the proxy types & measurements
# specified in proxy_def dictionary. For records retained, transfer
# a subset of the available information to elements used in
# the LMR proxy database.
# ----------------------------------------------------------------------
for key in sorted(proxy_def.keys()):
proxy_archive = key.split('_')[0]
if pages2k_data[counter]['archiveType'] == proxy_archive \
and pages2k_data[counter]['paleoData_variableName'] in proxy_def[key]:
proxy_name = pages2k_data[counter]['siteID']
proxy_dict_pagesv2[proxy_name] = {}
# metadata
proxy_dict_pagesv2[proxy_name]['Archive'] = pages2k_data[counter]['archiveType']
proxy_dict_pagesv2[proxy_name]['Measurement'] = pages2k_data[counter]['paleoData_variableName']
proxy_dict_pagesv2[proxy_name]['SiteName'] = pages2k_data[counter]['geo_siteName']
proxy_dict_pagesv2[proxy_name]['StudyName'] = pages2k_data[counter]['pub_title']
proxy_dict_pagesv2[proxy_name]['Investigators'] = pages2k_data[counter]['pub_author']
proxy_dict_pagesv2[proxy_name]['Location'] = pages2k_data[counter]['geo_pages2kRegion']
proxy_dict_pagesv2[proxy_name]['Resolution (yr)'] = proxy_resolution
proxy_dict_pagesv2[proxy_name]['Lat'] = pages2k_data[counter]['geo_meanLat']
proxy_dict_pagesv2[proxy_name]['Lon'] = pages2k_data[counter]['geo_meanLon']
proxy_dict_pagesv2[proxy_name]['Elevation'] = pages2k_data[counter]['geo_meanElev']
proxy_dict_pagesv2[proxy_name]['YearRange'] = (int('%.0f' %pages2k_data[counter]['time_annual'][0]),int('%.0f' %pages2k_data[counter]['time_annual'][-1]))
proxy_dict_pagesv2[proxy_name]['Databases'] = ['PAGES2kv2']
proxy_dict_pagesv2[proxy_name]['Seasonality'] = pages2k_data_seasonality
proxy_dict_pagesv2[proxy_name]['climateVariable'] = pages2k_data[counter]['climateInterpretation_variable']
proxy_dict_pagesv2[proxy_name]['Realm'] = pages2k_data[counter]['climateInterpretation_variableDetail']
proxy_dict_pagesv2[proxy_name]['climateVariableDirec'] = pages2k_data[counter]['climateInterpretation_interpDirection']
# data
proxy_dict_pagesv2[proxy_name]['Years'] = pages2k_data[counter]['time_annual']
proxy_dict_pagesv2[proxy_name]['Data'] = pages2k_data[counter]['data_annual']
nbtot = sum(tot)
print('----------------------------------------------------------------------')
print(' PAGES2kv2 SUMMARY: ')
print(' Total nb of records found in file : %d' %nbsites)
print(' Number of proxy chronologies included in df : %d' %(len(proxy_dict_pagesv2)))
print(' ------------------------------------------------------')
print(' ')
tot = []
for key in sorted(proxy_def.keys()):
proxy_archive = key.split('_')[0]
proxy_measurement = proxy_def[key]
        # change the association between proxy type and proxy measurement for Breitenmoser tree ring data
if key == 'Tree Rings_WidthBreit':
proxy_measurement = [item+'_breit' for item in proxy_measurement]
nb = []
for siteID in list(proxy_dict_pagesv2.keys()):
if proxy_dict_pagesv2[siteID]['Archive'] == proxy_archive and proxy_dict_pagesv2[siteID]['Measurement'] in proxy_measurement:
nb.append(siteID)
print((' %s : %d' %('{:40}'.format(key), len(nb))))
tot.append(len(nb))
nbtot = sum(tot)
print(' ------------------------------------------------------')
print((' %s : %d' %('{:40}'.format('Total:'), nbtot)))
print('----------------------------------------------------------------------')
print(' ')
elapsed_time = clock.time() - begin_time
print('PAGES2k phase2 data extraction completed in %s secs' %str(elapsed_time))
return proxy_dict_pagesv2
# ===================================================================================
# For NCDC-templated proxy data files -----------------------------------------------
# ===================================================================================
def contains_blankspace(s):
    return any(c in s for c in string.whitespace)
# ===================================================================================
def colonReader(string, fCon, fCon_low, end):
'''This function seeks a specified string (or list of strings) within
the transcribed file fCon (lowercase version fCon_low) until a specified
    character (typically end of the line) is found.
    If a list of strings is provided, make sure they encompass all possibilities.
From <NAME> (Univ. of Southern California)
'''
if isinstance(string, str):
lstr = string + ': ' # append the annoying stuff
Index = fCon_low.find(lstr)
Len = len(lstr)
if Index != -1:
endlIndex = fCon_low[Index:].find(end)
rstring = fCon[Index+Len:Index+endlIndex] # returned string
if rstring[-1:] == '\r': # strip the '\r' character if it appears
rstring = rstring[:-1]
return rstring.strip()
else:
return ""
else:
num_str = len(string)
rstring = "" # initialize returned string
for k in range(0,num_str): # loop over possible strings
lstr = string[k] + ': ' # append the annoying stuff
Index = fCon_low.find(lstr)
Len = len(lstr)
if Index != -1:
endlIndex = fCon_low[Index:].find(end)
rstring = fCon[Index+Len:Index+endlIndex]
if rstring[-1:] == '\r': # strip the '\r' character if it appears
rstring = rstring[:-1]
if rstring == "":
return ""
else:
return rstring.strip()
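# Usage sketch for colonReader (hypothetical file content, not taken from an actual NCDC file):
#   fileContent = "# Site_Name: Crystal Cave\n# Elevation: 1040 m\n"
#   colonReader('site_name', fileContent, fileContent.lower(), '\n')                -> 'Crystal Cave'
#   colonReader(['elevation', 'altitude'], fileContent, fileContent.lower(), '\n')  -> '1040 m'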
# ===================================================================================
# ===================================================================================
def read_proxy_data_NCDCtxt(site, proxy_def, year_type=None, gaussianize_data=False):
#====================================================================================
# Purpose: Reads data from a selected site (chronology) in NCDC proxy dataset
#
# Input :
# - site : Full name of proxy data file, including the directory where
# file is located.
# - proxy_def : Dictionary containing information on proxy types to look for
# and associated characteristics, such as possible proxy
# measurement labels for the specific proxy type
# (ex. ['d18O','d18o','d18o_stk','d18o_int','d18o_norm']
# for delta 18 oxygen isotope measurements)
#
# Returns :
# - id : Site id read from the data file
# - lat/lon : latitude & longitude of the site
# - alt : Elevation of the site
# - time : Array containing the time of uploaded data
# - value : Array of uploaded proxy data
#
# Author(s): <NAME>, Univ. of Washington, Dept. of Atmospheric Sciences
# based on "ncdc_file_parser.py" code from <NAME>
# (Univ. of Southern California)
#
# Date : March 2015
#
# Revision : None
#
#====================================================================================
# Possible header definitions of time in data files ...
time_defs = ['age', 'age_int', 'year', \
'y_ad','Age_AD','age_AD','age_AD_ass','age_AD_int','Midpt_year','AD',\
'age_yb1950','yb_1950','yrb_1950',\
'kyb_1950',\
'yb_1989','age_yb1989',\
'yb_2000','yr_b2k','yb_2k','ky_b2k','kyb_2000','kyb_2k','kab2k','ka_b2k','kyr_b2k',\
'ky_BP','kyr_BP','ka_BP','age_kaBP','yr_BP','calyr_BP','Age(yrBP)','age_calBP','cal yr BP']
filename = site
valid_frac = 0.5
if os.path.isfile(filename):
print(' ')
print('File: %s' % filename)
# Define root string for filename
file_s = filename.replace(" ", '_') # strip all whitespaces if present
fileroot = '_'.join(file_s.split('.')[:-1])
# Open the file and port content to a string object
# Changed assumed encoding to UTF-8, anything not readable replaced with
# a '?' --AP Jan 2018
filein = open(filename, encoding='utf-8', errors='replace')
fileContent = filein.read()
fileContent_low = fileContent.lower()
# Initialize empty dictionary
d = {}
# Assign default values to some metadata
d['ElevationUnit'] = 'm'
d['TimeUnit'] = 'y_ad'
# note: 8240/2030 ASCII code for "permil"
# ===========================================================================
# ===========================================================================
# Extract metadata from file ------------------------------------------------
# ===========================================================================
# ===========================================================================
try:
# 'Archive' is the proxy type
archive_tag = colonReader('archive', fileContent, fileContent_low, '\n')
            # to match definitions of records from original NCDC-templated files and those
# provided by <NAME> (U. of Arizona)
if archive_tag == 'Paleoceanography': archive_tag = 'Marine Cores'
d['Archive'] = archive_tag
# Other info
study_name = colonReader('study_name', fileContent, fileContent_low, '\n')
d['Title'] = study_name
investigators = colonReader('investigators', fileContent, fileContent_low, '\n')
            investigators = investigators.replace(';',' and') # take out the ; so that downstream parsing doesn't freak out.
d['Investigators'] = investigators
d['PubDOI'] = colonReader('doi', fileContent, fileContent_low, '\n')
# ===========================================================================
# Extract information from the "Site_Information" section of the file -------
# ===========================================================================
# Find beginning of block
sline_begin = fileContent.find('# Site_Information:')
if sline_begin == -1:
sline_begin = fileContent.find('# Site_Information')
if sline_begin == -1:
sline_begin = fileContent.find('# Site Information')
            # Find end of block: try the known header variants (with/without colon,
            # underscore vs. space, trailing whitespace) in the original order.
            end_markers = ['# Data_Collection:', '# Data_Collection\n',
                           '# Data_Collection \n', '# Data_Collection  \n',
                           '# Data_Collection   \n', '# Data_Collection    \n',
                           '# Data Collection\n', '# Data Collection \n',
                           '# Data Collection  \n', '# Data Collection   \n',
                           '# Data Collection    \n']
            sline_end = -1
            for marker in end_markers:
                sline_end = fileContent.find(marker)
                if sline_end != -1:
                    break
SiteInfo = fileContent[sline_begin:sline_end]
SiteInfo_low = SiteInfo.lower()
d['SiteName'] = colonReader('site_name', SiteInfo, SiteInfo_low, '\n')
d['Location'] = colonReader('location', SiteInfo, SiteInfo_low, '\n')
# get lat/lon info
try:
str_lst = ['northernmost_latitude', 'northernmost latitude'] # documented instances of this field property
d['NorthernmostLatitude'] = float(colonReader(str_lst, SiteInfo, SiteInfo_low, '\n'))
str_lst = ['southernmost_latitude', 'southernmost latitude'] # documented instances of this field property
d['SouthernmostLatitude'] = float(colonReader(str_lst, SiteInfo, SiteInfo_low, '\n'))
str_lst = ['easternmost_longitude', 'easternmost longitude'] # documented instances of this field property
d['EasternmostLongitude'] = float(colonReader(str_lst, SiteInfo, SiteInfo_low, '\n'))
str_lst = ['westernmost_longitude', 'westernmost longitude'] # documented instances of this field property
d['WesternmostLongitude'] = float(colonReader(str_lst, SiteInfo, SiteInfo_low, '\n'))
except (EmptyError,TypeError,ValueError) as err:
print('*** %s' % err.args)
print('*** WARNING ***: Valid values of lat/lon were not found! Skipping proxy record...')
return (None, None)
# get elevation info
elev = colonReader('elevation', SiteInfo, SiteInfo_low, '\n')
if 'nan' not in elev and len(elev)>0:
elev_s = elev.split(' ')
# is elevation negative (depth)?
if '-' in elev_s[0] or d['Archive'] == 'Marine Cores':
negative = True
sign = '-'
else:
negative = False
sign = ''
# is there a decimal in elev_s?
if '.' in elev_s[0]:
elev_s_split = elev_s[0].split('.')
elev_s_int = ''.join(c for c in elev_s_split[0] if c.isdigit())
elev_s_dec = ''.join(c for c in elev_s_split[1] if c.isdigit())
d['Elevation'] = float(sign+elev_s_int+'.'+elev_s_dec)
else:
d['Elevation'] = float(sign+''.join(c for c in elev_s[0] if c.isdigit())) # to only keep digits ...
else:
d['Elevation'] = float('NaN')
# ===========================================================================
# Extract information from the "Data_Collection" section of the file --------
# ===========================================================================
# Find beginning of block
sline_begin = fileContent.find('# Data_Collection:')
if sline_begin == -1:
sline_begin = fileContent.find('# Data_Collection')
if sline_begin == -1:
sline_begin = fileContent.find('# Data_Collection\n')
# Find end of block
sline_end = fileContent.find('# Variables:')
if sline_end == -1:
sline_end = fileContent.find('# Variables\n')
if sline_end == -1:
sline_end = fileContent.find('# Variables \n')
if sline_end == -1:
sline_end = fileContent.find('# Variables')
if sline_end == -1:
sline_end = fileContent.find('# Variables ')
DataColl = fileContent[sline_begin:sline_end]
DataColl_low = DataColl.lower()
d['CollectionName'] = colonReader('collection_name', DataColl, DataColl_low, '\n')
if not d['CollectionName']: d['CollectionName'] = filename.split('/')[-1].rstrip('.txt')
EarliestYearStr = colonReader('earliest_year', DataColl, DataColl_low, '\n')
MostRecentYearStr = colonReader('most_recent_year', DataColl, DataColl_low, '\n')
d['EarliestYear'] = None
d['MostRecentYear'] = None
if EarliestYearStr: d['EarliestYear'] = float(EarliestYearStr)
            if MostRecentYearStr: d['MostRecentYear'] = float(MostRecentYearStr)
d['TimeUnit'] = colonReader('time_unit', DataColl, DataColl_low, '\n')
if not d['TimeUnit']: d['TimeUnit'] = colonReader('time unit', DataColl, DataColl_low, '\n')
if d['TimeUnit'] not in time_defs:
print('***Time_Unit *%s* not in recognized time definitions! Exiting!' %d['TimeUnit'])
return (None, None)
# Get Notes: information, if it exists
notes = colonReader('notes', DataColl, DataColl_low, '\n')
if notes: # not empty
# database info is in form {"database":db1}{"database":db2} ...
# extract fields that are in {}. This produces a list.
                jsdata = re.findall(r'\{.*?\}', notes)
bad_chars = '{}"'
jsdata = [item.translate(str.maketrans("", "", bad_chars)) for item in jsdata]
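                # e.g. (hypothetical) notes = 'text {"database":"LMR"}{"duplicate":"site2.txt"}'
                # gives jsdata = ['database:LMR', 'duplicate:site2.txt'] after stripping the braces and quotes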
# Look for database information
# -----------------------------
# item in jsdata list with database info?
# TODO: ... think about using try/except instead ...
dbinfo = None
jsdata_db = [item for i, item in enumerate(jsdata) if 'database:' in item]
if jsdata_db:
db_lst = re.sub('database:', '', jsdata_db[0]).split(',')
if len(db_lst) > 1:
dbinfo = [item.split(':')[1] for item in db_lst]
else:
dbinfo = db_lst
# check if some db info exists
if dbinfo:
d['Databases'] = dbinfo
else:
# Set to default value if not found.
#d['Databases'] = None
d['Databases'] = ['LMR']
# Look for information on "climate interpretation" of proxy record
# ----------------------------------------------------------------
# Initialize metadata to be extracted
seasonality = [1,2,3,4,5,6,7,8,9,10,11,12] # annual (calendar)
climateVariable = None
climateVariableRealm = None
climateVariableDirec = None
jsdata_clim = [item for i, item in enumerate(jsdata) if 'climateInterpretation:' in item]
if jsdata_clim:
clim_lst = re.sub('climateInterpretation:', '', jsdata_clim[0])
clim_lst = clim_lst.replace('[','(').replace(']',')')
tmp = re.split(r',\s*(?![^()]*\))',clim_lst)
clim_elements = [item.replace('(','[').replace(')',']') for item in tmp]
seasonality = [item.split(':')[1] for item in clim_elements if 'seasonality:' in item][0]
climateVariable = [item.split(':')[1] for item in clim_elements if 'climateVariable:' in item][0]
climateVariableRealm = [item.split(':')[1] for item in clim_elements if 'climateVariableDetail:' in item][0]
climateVariableDirec = [item.split(':')[1] for item in clim_elements if 'interpDirection:' in item][0]
if len(seasonality) == 0: seasonality = [1,2,3,4,5,6,7,8,9,10,11,12]
if len(climateVariable) == 0: climateVariable = None
if len(climateVariableRealm) == 0: climateVariableRealm = None
if len(climateVariableDirec) == 0: climateVariableDirec = None
# Some translation...
if climateVariable == 'T': climateVariable = 'temperature'
if climateVariable == 'M': climateVariable = 'moisture'
# test whether seasonality is a string or already a list
# if a string, convert to list
if type(seasonality) is not list:
if isinstance(seasonality,six.string_types):
seasonality = ast.literal_eval(seasonality)
else:
print('Problem with seasonality metadata! Exiting!')
                        raise SystemExit(1)
d['Seasonality'] = seasonality
d['climateVariable'] = climateVariable
d['climateVariableRealm'] = climateVariableRealm
d['climateVariableDirec'] = climateVariableDirec
# Look for information about duplicate proxy records
# --------------------------------------------------
dup_lst = []
jsdata_dup = [item for i, item in enumerate(jsdata) if 'duplicate:' in item]
if jsdata_dup:
tmp = re.sub('duplicate:', '', jsdata_dup[0]).split(',')
if len(tmp) > 1:
dup_lst = [item.split(':')[1].rstrip('.txt') for item in tmp]
else:
dup_lst = [item.rstrip('.txt') for item in tmp]
d['Duplicates'] = dup_lst
"""
# Old code that worked for NCDC v0.0.0
# Look for information on relation to temperature
# -----------------------------------------------
clim_temp_relation = [item.split(':')[1] for item in jsdata if item.split(':')[0] == 'relationship']
if clim_temp_relation:
d['Relation_to_temp'] = clim_temp_relation[0]
else:
d['Relation_to_temp'] = None
# Look for information on the nature of sensitivity of the proxy data
# (i.e. temperature or moisture or etc.)
# -------------------------------------------------------------------
clim_sensitivity = [item.split(':')[1] for item in jsdata if item.split(':')[0] == 'sensitivity']
if clim_sensitivity:
d['Sensitivity'] = clim_sensitivity[0]
else:
d['Sensitivity'] = None
"""
d['Relation_to_temp'] = None
d['Sensitivity'] = None
else:
# Default values if not found.
#d['Databases'] = None
d['Databases'] = ['LMR']
d['Seasonality'] = [1,2,3,4,5,6,7,8,9,10,11,12]
d['climateVariable'] = None
d['climateVariableRealm'] = None
d['climateVariableDirec'] = None
d['Duplicates'] = []
d['Relation_to_temp'] = None
d['Sensitivity'] = None
# If the year type is "tropical", change all annual records to the tropical-year mean.
if year_type == 'tropical year' and d['Seasonality'] == [1,2,3,4,5,6,7,8,9,10,11,12]:
d['Seasonality'] = [4,5,6,7,8,9,10,11,12,13,14,15]
except EmptyError as e:
print(e)
return (None, None)
# ===========================================================================
# ===========================================================================
# Extract the data from file ------------------------------------------------
# ===========================================================================
# ===========================================================================
# ===========================================================================
# Extract information from the "Variables" section of the file --------------
# ===========================================================================
# Find beginning of block
sline_begin = fileContent.find('# Variables:')
if sline_begin == -1:
sline_begin = fileContent.find('# Variables')
# Find end of block
sline_end = fileContent.find('# Data:')
if sline_end == -1:
sline_end = fileContent.find('# Data\n')
VarDesc = fileContent[sline_begin:sline_end].splitlines()
nvar = 0 # counter for variable number
for line in VarDesc: # handle all the NCDC convention changes
# (TODO: more clever/general exception handling)
if line and line[0] != '' and line[0] != ' ' and line[0:2] != '#-' and line[0:2] != '# ' and line != '#':
nvar = nvar + 1
line2 = line.replace('\t',',') # clean up
sp_line = line2.split(',') # split line along commas
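                # each retained line is expected to carry at least 10 tab/comma-separated fields:
                # short name, long name, material, uncertainty, units, seasonality, archive, detail, method, char-or-num flag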
                if len(sp_line) < 10:  # need 10 fields (indices 0-9 are read below)
continue
else:
d['DataColumn' + format(nvar, '02') + '_ShortName'] = sp_line[0].strip('#').strip(' ')
d['DataColumn' + format(nvar, '02') + '_LongName'] = sp_line[1]
d['DataColumn' + format(nvar, '02') + '_Material'] = sp_line[2]
d['DataColumn' + format(nvar, '02') + '_Uncertainty'] = sp_line[3]
d['DataColumn' + format(nvar, '02') + '_Units'] = sp_line[4]
d['DataColumn' + format(nvar, '02') + '_Seasonality'] = sp_line[5]
d['DataColumn' + format(nvar, '02') + '_Archive'] = sp_line[6]
d['DataColumn' + format(nvar, '02') + '_Detail'] = sp_line[7]
d['DataColumn' + format(nvar, '02') + '_Method'] = sp_line[8]
d['DataColumn' + format(nvar, '02') + '_CharOrNum'] = sp_line[9].strip(' ')
print('Site ID: %s Archive: %s' %(d['CollectionName'], d['Archive']))
# Cross-reference "ShortName" entries with possible proxy measurements specified in proxy_def dictionary
proxy_types_all = list(proxy_def.keys())
# Restrict to those matching d['Archive']
proxy_types_keep = [s for s in proxy_types_all if d['Archive'] in s or d['Archive'] in s.lower()]
# Which columns contain the important data (time & proxy values) to be extracted?
        # Referencing variables (time/age & proxy data) with data column IDs
# Time/age
TimeColumn_ided = False
for ivar in range(nvar):
if d['DataColumn' + format(ivar+1, '02') + '_ShortName'] in time_defs:
TimeColumn_ided = True
TimeColumn_id = ivar
if TimeColumn_ided:
print(' Time/Age data in data column: %d' %TimeColumn_id)
else:
print(' ')
# Proxy data
# Dictionary containing info on proxy type and column ID where to find the data
DataColumns_ided = False
proxy_types_in_file = {}
for ivar in range(nvar):
proxy_types = [s for s in proxy_types_keep if d['DataColumn' + format(ivar+1, '02') + '_ShortName'] in proxy_def[s]]
if proxy_types: # if non-empty list
# Crude logic to distinguish between PAGES2kv2 vs Breitenmoser Tree Rings data at proxy type level
if len(proxy_types) > 1 and [item for item in proxy_types if 'Tree Rings' in item ]:
if 'Breitenmoser' in d['Investigators'].split(',')[0]:
treetag = '_WidthBreit'
else:
treetag = '_WidthPages2'
ind = [i for i, s in enumerate(proxy_types) if s.endswith(treetag)][0]
proxy_types_in_file[proxy_types[ind]] = (d['DataColumn' + format(ivar+1, '02') + '_ShortName'], ivar)
else:
proxy_types_in_file[proxy_types[0]] = (d['DataColumn' + format(ivar+1, '02') + '_ShortName'], ivar)
dkeys = list(proxy_types_in_file.keys())
nbvalid = len(dkeys)
if nbvalid > 0:
DataColumns_ided = True
print(' Found %d valid proxy variables:' %nbvalid)
for i in range(nbvalid):
print(' %d : %s %s' %(i,dkeys[i],proxy_types_in_file[dkeys[i]]))
# Check status of what has been found in the data file
# If nothing found, just return (exit function by returning None)
if not TimeColumn_ided or not DataColumns_ided:
print('*** WARNING *** Valid data was not found in file!')
return (None, None)
# -- Checking time/age definition --
tdef = d['TimeUnit']
# Crude sanity checks on make-up of tdef string
if contains_blankspace(tdef):
tdef = tdef.replace(' ', '_')
tdef_parsed = tdef.split('_')
        if len(tdef_parsed) != 2:
            # handle 3-token definitions such as 'cal_yr_BP' by merging the first two tokens
            if tdef_parsed[0] == 'cal' and tdef_parsed[1] == 'yr':
tdef = tdef_parsed[0]+tdef_parsed[1]+'_'+tdef_parsed[2]
tdef_parsed = tdef.split('_')
else:
print('*** WARNING *** Unrecognized time definition. Skipping proxy record...')
return (None, None)
# ===========================================================================
# Extract the numerical data from the "Data" section of the file ------------
# ===========================================================================
# Find line number at beginning of data block
sline = fileContent.find('# Data:')
if sline == -1:
sline = fileContent.find('# Data\n')
fileContent_datalines = fileContent[sline:].splitlines()
# Look for missing value info
missing_info_line= [line for line in fileContent_datalines if 'missing value' in line.lower()]
if len(missing_info_line) > 0:
missing_info = missing_info_line[0].split(':')[-1].replace(' ', '')
if len(missing_info) > 0:
missing_values = np.array([float(missing_info)])
else:
# Line present but no value found
missing_values = np.array([-999.0, np.nan])
else:
# Line not found
missing_values = np.array([-999.0, np.nan])
# Find where the actual data begin
start_line_index = 0
line_nb = 0
for line in fileContent_datalines: # skip lines without actual data
if not line or line[0]=='#' or line[0] == ' ':
start_line_index += 1
else:
start_line_index2 = line_nb
break
line_nb +=1
# Extract column descriptions (headers) of the data matrix
DataColumn_headers = fileContent_datalines[start_line_index].splitlines()[0].split('\t')
# Strip possible blanks in column headers
DataColumn_headers = [item.strip() for item in DataColumn_headers]
nc = len(DataColumn_headers)
# ---------------------
# -- Now the data !! --
# ---------------------
inds_to_extract = []
for dkey in dkeys:
inds_to_extract.append(proxy_types_in_file[dkey][1])
# from start of data block to end, in a list
datalist = fileContent_datalines[start_line_index+1:]
# Strip any empty lines
datalist = [x for x in datalist if x]
nbdata = len(datalist)
# into numpy arrays
time_raw = np.zeros(shape=[nbdata])
data_raw = np.zeros(shape=[nbdata,nbvalid])
# fill with NaNs for default values
data_raw[:] = np.NAN
for i in range(nbdata):
tmp = datalist[i].split('\t')
# any empty element replaced by NANs
tmp = ['NAN' if x == '' else x for x in tmp]
time_raw[i] = tmp[TimeColumn_id]
# strip possible "()" in data before conversion to float
# not sure why these are found sometimes ... sigh...
tmp = [tmp[j].replace('(','') for j in range(len(tmp))]
tmp = [tmp[j].replace(')','') for j in range(len(tmp))]
data_raw[i,:] = [float(tmp[j]) for j in inds_to_extract]
# -- Double check data validity --
# (time/age in particular as some records have entries w/ undefined age)
# Eliminate entries for which time/age is not defined (tagged as missing)
mask = np.in1d(time_raw, missing_values, invert=True)
time_raw = time_raw[mask]
data_raw = data_raw[mask,:]
# Making sure remaining entries in data array with missing values are converted to NaN.
ntime, ncols = data_raw.shape
for c in range(ncols):
data_raw[np.in1d(data_raw[:,c], missing_values), c] = np.NAN
# --- Modify "time" array into "years CE" if not already ---
# Here, tdef_parsed *should* have the expected structure
if len(tdef_parsed) == 2 and tdef_parsed[0] and tdef_parsed[1]:
if tdef_parsed[0] == 'yb' and is_number(tdef_parsed[1]):
time_raw = float(tdef_parsed[1]) - time_raw
elif tdef_parsed[0] == 'kyb' and is_number(tdef_parsed[1]):
time_raw = float(tdef_parsed[1]) - 1000.0*time_raw
elif tdef_parsed[0] == 'calyr' and tdef_parsed[1] == 'BP':
time_raw = 1950.0 - time_raw
elif tdef_parsed[0] == 'kyr' and tdef_parsed[1] == 'BP':
time_raw = 1950.0 - 1000.*time_raw
elif tdef_parsed[0] == 'kyr' and tdef_parsed[1] == 'b2k':
time_raw = 2000.0 - 1000.*time_raw
elif tdef_parsed[0] == 'y' and tdef_parsed[1] == 'ad':
pass # do nothing, time already in years_AD
else:
print('*** WARNING *** Unrecognized time definition. Skipping proxy record...')
return (None, None)
else:
print('*** WARNING *** Unexpected time definition. Skipping proxy record...')
return (None, None)
# Making sure the tagged earliest and most recent years of the record are consistent with the data,
# already transformed in year CE, common to all records before inclusion in the pandas DF.
d['EarliestYear'] = np.min(time_raw)
d['MostRecentYear'] = np.max(time_raw)
# Initial range in years for which data is available
yearRange = (int('%.0f' % d['EarliestYear']),int('%.0f' %d['MostRecentYear']))
# proxy identifier and geo location
id = d['CollectionName']
alt = d['Elevation']
        # Something crude in assignment of lat/lon:
if d['NorthernmostLatitude'] != d['SouthernmostLatitude']:
lat = (d['NorthernmostLatitude'] + d['SouthernmostLatitude'])/2.0
else:
lat = d['NorthernmostLatitude']
if d['EasternmostLongitude'] != d['WesternmostLongitude']:
lon = (d['EasternmostLongitude'] + d['WesternmostLongitude'])/2.0
else:
lon = d['EasternmostLongitude']
# Ensure lon is in [0,360] domain
if lon < 0.0:
lon = 360 + lon
# If subannual, average up to annual --------------------------------------------------------
time_annual, data_annual, proxy_resolution = compute_annual_means(time_raw,data_raw,valid_frac,year_type)
# If gaussianize_data is set to true, transform the proxy data to Gaussian.
# This option should only be used when using regressions, not physically-based PSMs.
if gaussianize_data == True:
data_annual = gaussianize(data_annual)
# update to yearRange given availability of annual data
yearRange = (int('%.0f' %time_annual[0]),int('%.0f' %time_annual[-1]))
# Define and fill list of dictionaries to be returned by function
returned_list = []
duplicate_list = []
for k in range(len(dkeys)):
key = dkeys[k]
ind = proxy_types_in_file[key][1]
proxy_units = d['DataColumn' + format(ind+1, '02') + '_Units']
proxy_archive = key.split('_')[0]
proxy_measurement = key.split('_')[1]
proxy_measurement = d['DataColumn' + format(ind+1, '02') + '_ShortName']
if key == 'Tree Rings_WidthBreit': proxy_measurement = proxy_measurement + '_breit'
proxy_name = d['CollectionName']+':'+proxy_measurement
proxydata_dict = {}
proxydata_dict[proxy_name] = {}
if d['Archive'] != proxy_archive: d['Archive'] = proxy_archive
proxydata_dict[proxy_name]['Archive'] = d['Archive']
proxydata_dict[proxy_name]['SiteName'] = d['SiteName']
proxydata_dict[proxy_name]['StudyName'] = d['Title']
proxydata_dict[proxy_name]['Investigators'] = d['Investigators']
proxydata_dict[proxy_name]['Location'] = d['Location']
proxydata_dict[proxy_name]['Resolution (yr)'] = proxy_resolution
proxydata_dict[proxy_name]['Lat'] = lat
proxydata_dict[proxy_name]['Lon'] = lon
proxydata_dict[proxy_name]['Elevation'] = alt
proxydata_dict[proxy_name]['YearRange'] = yearRange
proxydata_dict[proxy_name]['Measurement'] = proxy_measurement
proxydata_dict[proxy_name]['DataUnits'] = proxy_units
proxydata_dict[proxy_name]['Databases'] = d['Databases']
proxydata_dict[proxy_name]['Seasonality'] = d['Seasonality']
proxydata_dict[proxy_name]['climateVariable'] = d['climateVariable']
proxydata_dict[proxy_name]['Realm'] = d['climateVariableRealm']
proxydata_dict[proxy_name]['climateVariableDirec'] = d['climateVariableDirec']
# *** for v.0.1.0:
#proxydata_dict[proxy_name]['Relation_to_temp'] = d['Relation_to_temp']
#proxydata_dict[proxy_name]['Sensitivity'] = d['Sensitivity']
proxydata_dict[proxy_name]['Years'] = time_annual
proxydata_dict[proxy_name]['Data'] = data_annual[:, k]
if d['Duplicates']:
duplicate_list.extend(d['Duplicates'])
# append to list of dictionaries
returned_list.append(proxydata_dict)
else:
print('***File NOT FOUND: %s' % filename)
returned_list = []
duplicate_list = []
return returned_list, duplicate_list
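# Usage sketch (hypothetical path and proxy_def; keys follow the 'Archive_Measurement' naming used above):
#   proxy_def = {'Tree Rings_WidthPages2': ['trsgi'], 'Speleothems_d18O': ['d18O']}
#   proxy_list, duplicates = read_proxy_data_NCDCtxt('/path/to/site.txt', proxy_def, year_type='calendar year')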
# =========================================================================================
def ncdc_txt_to_dict(datadir, proxy_def, year_type, gaussianize_data):
"""
Read proxy data from collection of NCDC-templated text files and store the data in
a python dictionary.
    :param datadir          : directory containing the NCDC-templated text files
    :param proxy_def        : dictionary of proxy types & measurements to look for
    :param year_type        : 'calendar year' or 'tropical year' annual-averaging convention
    :param gaussianize_data : if True, transform the proxy data to a Gaussian distribution
    :return: dictionary of proxy chronologies (one entry per valid record)
Author: <NAME>, Univ. of Washington, Jan 2016.
"""
# ===============================================================================
# Upload proxy data from NCDC-formatted text files
# ===============================================================================
begin_time = clock.time()
print('Data from LMR NCDC-templated text files:')
valid_frac = 0.5
    # List filenames in the data directory (datadir)
    # sites_data is a python list containing the file names to be read
sites_data = glob.glob(datadir+"/*.txt")
nbsites = len(sites_data)
if nbsites == 0:
print('ERROR: NCDC-templated proxy data files not found in directory:'
' %s. Please revise your user-defined parameters or directory/'
' data set-up.' %datadir)
raise SystemExit(1)
# Master dictionary containing all proxy chronologies extracted from the data files.
proxy_dict_ncdc = {}
dupelist = []
# Loop over files
nbsites_valid = 0
for file_site in sites_data:
proxy_list, duplicate_list = read_proxy_data_NCDCtxt(file_site,proxy_def,year_type,gaussianize_data)
if proxy_list: # if returned list is not empty
# extract data from list and populate the master proxy dictionary
for item in proxy_list:
proxy_name = list(item.keys())[0]
# test if dict element already exists
if proxy_name in list(proxy_dict_ncdc.keys()):
dupelist.append(proxy_name)
else:
proxy_dict_ncdc[proxy_name] = item[proxy_name]
nbsites_valid = nbsites_valid + 1
else: # returned list is empty, just move to next site
pass
# ===============================================================================
# Produce a summary of uploaded proxy data &
# generate integrated database in pandas DataFrame format
# ===============================================================================
# Summary
nbchronol = len(proxy_dict_ncdc)
print(' ')
print(' ')
print('----------------------------------------------------------------------')
print(' NCDC SUMMARY: ')
print(' Total nb of files found & queried : %d' % nbsites)
print(' Total nb of files with valid data : %d' % nbsites_valid)
print(' Number of proxy chronologies included in df : %d' % nbchronol)
print(' ------------------------------------------------------')
print(' ')
tot = []
for key in sorted(proxy_def.keys()):
proxy_archive = key.split('_')[0]
proxy_measurement = proxy_def[key]
# change the association between proxy type and proxy measurement for Breitenmoser tree ring data
if key == 'Tree Rings_WidthBreit':
proxy_measurement = [item+'_breit' for item in proxy_measurement]
nb = []
for siteID in list(proxy_dict_ncdc.keys()):
if proxy_dict_ncdc[siteID]['Archive'] == proxy_archive and proxy_dict_ncdc[siteID]['Measurement'] in proxy_measurement:
nb.append(siteID)
print(' %s : %d' %('{:40}'.format(key), len(nb)))
tot.append(len(nb))
nbtot = sum(tot)
print(' ------------------------------------------------------')
print(' %s : %d' %('{:40}'.format('Total:'), nbtot))
print('----------------------------------------------------------------------')
print(' ')
if dupelist:
print('***WARNING***: Proxy records with these names were found multiple times:')
print(dupelist)
elapsed_time = clock.time() - begin_time
print('NCDC data extraction completed in %s secs' %str(elapsed_time))
return proxy_dict_ncdc
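# Usage sketch (hypothetical directory; see read_proxy_data_NCDCtxt above for the proxy_def convention):
#   ncdc_dict = ncdc_txt_to_dict('/path/to/ncdc_files', proxy_def, 'calendar year', gaussianize_data=False)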
# =========================================================================================
def merge_dicts_to_dataframes(proxy_def, ncdc_dict, pages2kv2_dict, meta_outfile, data_outfile, \
duplicates_file, eliminate_duplicates):
"""
Merges two dictionaries containing proxy metadata and data from two data sources
(PAGES2k phase 2 and NCDC-templated proxy data files) into one,
and writes out metadata and data into pickled pandas DataFrames.
Originator: <NAME>, Univ. of Washington, May 2017
"""
if len(ncdc_dict) > 0:
merged_dict = deepcopy(ncdc_dict)
if len(pages2kv2_dict) > 0:
merged_dict.update(pages2kv2_dict)
elif len(pages2kv2_dict) > 0:
merged_dict = deepcopy(pages2kv2_dict)
else:
raise SystemExit('No dataset has been selected for inclusion in the proxy database!')
totchronol = len(merged_dict)
dupecount = 0
if eliminate_duplicates:
print(' ')
print('Checking list of duplicate/bad records:')
# load info on duplicate records
dupes = pd.read_excel(duplicates_file,'ProxyDuplicates')
# numpy array containing names of proxy records to eliminate
toflush = dupes['Record_To_Eliminate'].values
for siteID in list(merged_dict.keys()):
if siteID in toflush:
try:
del merged_dict[siteID]
print(' -- deleting: %s' % siteID)
dupecount += 1
except KeyError:
print(' -- not found: %s' % siteID)
pass
print(' ')
print('----------------------------------------------------------------------')
print(' FINAL SUMMARY: ')
print(' Total number of merged proxy chronologies : %d' %totchronol)
print(' Total number of eliminated chronologies : %d' %dupecount)
print(' Number of proxy chronologies included in df : %d' %len(merged_dict))
print(' ------------------------------------------------------')
tot = []
for key in sorted(proxy_def.keys()):
proxy_archive = key.split('_')[0]
proxy_measurement = proxy_def[key]
# change the association between proxy type and proxy measurement for Breitenmoser tree ring data
if key == 'Tree Rings_WidthBreit':
proxy_measurement = [item+'_breit' for item in proxy_measurement]
nb = []
for siteID in list(merged_dict.keys()):
if merged_dict[siteID]['Archive'] == proxy_archive and merged_dict[siteID]['Measurement'] in proxy_measurement:
nb.append(siteID)
print(' %s : %d' %('{:40}'.format(key), len(nb)))
tot.append(len(nb))
nbtot = sum(tot)
print(' ------------------------------------------------------')
print(' %s : %d' %('{:40}'.format('Total:'), nbtot))
print('----------------------------------------------------------------------')
print(' ')
# ---------------------------------------------------------------------
# Preparing pandas DataFrames containing merged proxy metadata and data
# and output to pickle files
# ---------------------------------------------------------------------
# Loop over proxy types specified in *main*
counter = 0
# Build up pandas DataFrame
metadf = pd.DataFrame()
# headers = ['Proxy ID','Site name','Lat (N)','Lon (E)','Elev','Archive type','Proxy measurement','Resolution (yr)',\
# 'Oldest (C.E.)','Youngest (C.E.)','Location','climateVariable','Realm','Relation_to_climateVariable',\
# 'Seasonality', 'Databases']
headers = ['Proxy ID','Study name','Investigators','Site name','Lat (N)','Lon (E)','Elev','Archive type','Proxy measurement',\
'Resolution (yr)','Oldest (C.E.)','Youngest (C.E.)','Location','climateVariable','Realm','Relation_to_climateVariable',\
'Seasonality', 'Databases']
for key in sorted(proxy_def.keys()):
proxy_archive = key.split('_')[0]
        # change the association between proxy type and proxy measurement for Breitenmoser tree ring data
if key == 'Tree Rings_WidthBreit':
proxy_def[key] = [item+'_breit' for item in proxy_def[key]]
for siteID in list(merged_dict.keys()):
if merged_dict[siteID]['Archive'] == proxy_archive and merged_dict[siteID]['Measurement'] in proxy_def[key]:
frame = pd.DataFrame({'a':siteID, 'b':merged_dict[siteID]['StudyName'], 'c':merged_dict[siteID]['Investigators'], \
'd':merged_dict[siteID]['SiteName'], 'e':merged_dict[siteID]['Lat'], 'f':merged_dict[siteID]['Lon'], \
'g':merged_dict[siteID]['Elevation'], 'h':merged_dict[siteID]['Archive'], 'i':merged_dict[siteID]['Measurement'], \
'j':merged_dict[siteID]['Resolution (yr)'], 'k':merged_dict[siteID]['YearRange'][0], \
'l':merged_dict[siteID]['YearRange'][1], 'm':merged_dict[siteID]['Location'], \
'n':merged_dict[siteID]['climateVariable'], 'o':merged_dict[siteID]['Realm'], \
'p':merged_dict[siteID]['climateVariableDirec'], \
'q':None, 'r':None}, index=[counter])
# To get seasonality & databases *lists* into columns 'o' and 'p' of DataFrame
# To be deprecated - frame.set_value(counter,'q',merged_dict[siteID]['Seasonality'])
# To be deprecated - frame.set_value(counter,'r',merged_dict[siteID]['Databases'])
frame.at[counter,'q'] = merged_dict[siteID]['Seasonality']
frame.at[counter,'r'] = merged_dict[siteID]['Databases']
# Append to main DataFrame
metadf = metadf.append(frame)
counter = counter + 1
# Redefine column headers
metadf.columns = headers
# Write metadata to file
print('Now writing metadata to file: %s' %meta_outfile)
metadf.to_pickle(meta_outfile)
# -----------------------------------------------------
# Build the proxy **data** DataFrame and output to file
# -----------------------------------------------------
print(' ')
print('Now creating & loading the data in the pandas DataFrame...')
print(' ')
counter = 0
for siteID in list(merged_dict.keys()):
years = merged_dict[siteID]['Years']
data = merged_dict[siteID]['Data']
[nbdata,] = years.shape
# Load data in numpy array
frame_data = np.zeros(shape=[nbdata,2])
frame_data[:,0] = years
frame_data[:,1] = data
if counter == 0:
# Build up pandas DataFrame
header = ['Proxy ID', siteID]
df = pd.DataFrame({'a':frame_data[:,0], 'b':frame_data[:,1]})
df.columns = header
else:
            frame = pd.DataFrame({'Proxy ID':frame_data[:,0], siteID:frame_data[:,1]})
#!/usr/bin/env python3
import json
import math
import sys
import glob
import argparse
import os
from collections import namedtuple, defaultdict
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.lines import Line2D
from matplotlib.ticker import MaxNLocator
import pandas
RunConfig = namedtuple("RunConfig", "scheduler fec")
RunInfo = namedtuple("RunInfo", "count total durations interrupted_segments interrupt_times bitrates segment_bitrates segment_download_times segment_filenames initial_buffering")
PALETTE_5 = sns.color_palette("muted")
PALETTE_9 = sns.color_palette("muted")
PALETTE_9[4:9] = PALETTE_9[:5]
class FIGSIZE():
BOX_M = (5, 5)
WIDE_M = (12, 5)
WIDE_L = (15, 8)
def get_mean(l):
return sum(l) / len(l)
def get_stddev(l):
mean = get_mean(l)
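    # sample standard deviation: Bessel-corrected (n - 1) denominator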
return math.sqrt(sum([(x - mean)**2 for x in l]) / (len(l) - 1))
def get_median(l):
return sorted(l)[len(l) // 2]
def get_z_score(x, mean, stddev):
return abs((x - mean) / stddev)
def z_filter(l, cutoff = 2.5):
mean = get_mean(l)
stddev = get_stddev(l)
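    # keep only samples whose z-score (w.r.t. the sample mean/stddev) is below `cutoff`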
return list(filter(lambda x: get_z_score(x, mean, stddev) < cutoff, l))
def fixname(name):
name = name[:3].replace("IOD", "R-IOD") + name[3:]
name = name.replace("XOR4-1", "XOR 4")
name = name.replace("XOR16-1", "XOR 16")
return name.replace("LL", "LowRTT")
def get_population_stats(p):
return ", ".join([
f"mean: {round(get_mean(p), 2)}",
f"median: {round(get_median(p), 2)}",
f"stddev: {round(get_stddev(p), 2)}",
f"min: {round(min(p), 2)}",
f"max: {round(max(p), 2)}",
f"sum: {round(sum(p), 2)}",
])
def read_log(filename, slow_start_duration = 15):
with open(filename, 'rb') as fo:
log = json.load(fo)
conf = RunConfig(log['scheduler'], log['fecConfig'])
total = 0.0
start_time = log['playback_info']['start_time']
initial_buffering = float(log['playback_info']['initial_buffering_duration'])
count = 0
durations = []
interrupted_segments = []
interrupt_times = []
for event in log['playback_info']['interruptions']['events']:
seg_no = event['segment_number']
start = event['timeframe'][0]
end = event['timeframe'][1]
duration = end - start
if start < start_time + slow_start_duration:
# ignore first few seconds of stream
continue
# some interruptions are really short, ignore?
if duration < 1e-4:
continue
# some, on the other hand, are unrealistically long. this points
# towards a crash in the server and can be ignored
if duration > 10:
continue
count += 1
durations.append(duration)
total += duration
interrupted_segments.append(seg_no)
interrupt_times.append({
"start": start - start_time,
"end": end - start_time,
"duration": duration,
})
segment_filenames = [x[0] for x in log['segment_info']]
segment_bitrates = [int(x[1]) for x in log['segment_info']]
segment_download_times = [float(x[3]) for x in log['segment_info']]
bitrates = set(segment_bitrates)
return conf, RunInfo(count, total, durations, interrupted_segments,
interrupt_times, bitrates, segment_bitrates,
segment_download_times, segment_filenames, initial_buffering)
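# Usage sketch (hypothetical log path produced by the player):
#   conf, info = read_log("logs/run_0.json")
#   print(conf.scheduler, conf.fec, info.count, round(info.total, 2))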
def print_stats(allInfos):
for conf, infos in allInfos.items():
print(f"=== {conf.scheduler}, {conf.fec} ===")
print("> population size")
print(f" {len(infos)}")
print("> count")
counts = [x.count for x in infos]
print(f" {get_population_stats(counts)}")
print("> total")
totals = [x.total for x in infos]
print(f" {get_population_stats(totals)}")
print("> bitrates")
bitrates = []
for info in infos:
bitrates += info.segment_bitrates
print(f" {get_population_stats(bitrates)}")
print("> bitrate switching (up)")
bitrate_up = []
for info in infos:
count = 0
for prev, current in zip(info.segment_bitrates[:-1], info.segment_bitrates[1:]):
if prev < current:
count += 1
bitrate_up.append(count)
print(f" {get_population_stats(bitrate_up)}")
print("> bitrate switching (down)")
bitrate_down = []
for info in infos:
count = 0
for prev, current in zip(info.segment_bitrates[:-1], info.segment_bitrates[1:]):
if prev > current:
count += 1
bitrate_down.append(count)
print(f" {get_population_stats(bitrate_down)}")
def visualize_boxplot(allInfos):
plt.figure(figsize=FIGSIZE.WIDE_M)
sns.set(style="whitegrid", palette=PALETTE_9)
data = {}
for conf, infos in allInfos.items():
key = fixname(f"{conf.scheduler}\n{conf.fec}".upper())
data[key] = z_filter([x.count for x in infos])
# fill missing recordings with NaNs
maxlen = max([len(data[k]) for k in data.keys()])
for k, v in data.items():
data[k] = v + [float('nan')] * (maxlen - (len(v)))
    df = pandas.DataFrame.from_dict(data)
'''Scan the APIs defined in Java classes: collect the URL and request type of every
API exposed by the controller class files.
'''
import os
import re
import pandas as pd
header = ["controller", "url", "request", "对应菜单", "状态", "技术", "测试"] # 表格头,除了前三项,后面均可改动
FILE_ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
output = os.path.join(FILE_ROOT_PATH, "公有云加解密.xlsx") # output file
# source directories, one per service
# root_dir = "E:/randolph-zy/公有云加解密/srm-platform"
root_dir = "E:/randolph-zy/公有云加解密/srm-mall"
# root_dir = "E:/randolph-zy/公有云加解密/srm-source"
# root_dir = "E:/randolph-zy/公有云加解密/srm-basic-platform"
# root_dir = "E:/randolph-zy/公有云加解密/srm-mdm"
sheet_name = root_dir.split('/')[3] # the sheet is named after the service, e.g. srm-supplier
controller_f_paths = []
controller_f_names = []
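# NOTE: the two lists above are assumed to be filled by a directory walk elsewhere; a minimal sketch:
#   for dirpath, _, filenames in os.walk(root_dir):
#       for fname in filenames:
#           if fname.endswith('Controller.java'):
#               controller_f_paths.append(os.path.join(dirpath, fname))
#               controller_f_names.append(fname)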
def scan(paths: list, names: list) -> dict:
    '''Scan every controller class file under the directory and collect its endpoints.
    Return value format: {'file name': [[url1, url2, ...], [request1, ...]]}
    '''
res = dict()
for i, (path, name) in enumerate(zip(paths, names)):
print(i + 1, path)
        with open(path, 'r', encoding='utf8') as rfile: # open the file
            java_file = rfile.readlines() # read the whole content
            controller_file_content_list = [x.strip() for x in java_file if x.strip() != ''] # strip newlines and drop empty lines
            url = [] # request URLs found under this controller
            request = [] # request types found under this controller
for sen in controller_file_content_list:
if bool(re.search(r'@RequestMapping', sen)):
                    request_mapping = re.findall(r'"(.*)"', sen) # match the request path between double quotes
                    if len(request_mapping) == 0:
                        print('No request URL matched for this controller!!! Please review manually')
                    else:
                        request_mapping_url = str(request_mapping[0])
                        # class-level @RequestMapping of the controller
                # search for and filter the URL of each request
                if not bool(re.search(r'@RequestMapping', sen)): # skip the class-level @RequestMapping
                    if bool(re.search(r'@(.*)Mapping', sen)): # lines annotated with @*Mapping
                        child_url = re.findall(r'"(.*)"', sen)
                        # second column: request URL
                        req_type = re.findall(r'@(.*)Mapping', sen)[0] # request type
                        if len(child_url) == 0: # no sub-URL on the method annotation, so use the controller-level @RequestMapping directly
url.append(request_mapping_url)
request.append(req_type)
else:
url.append(request_mapping_url + str(child_url[0]))
request.append(req_type)
res[name] = [url, request]
return res
def read_file(path) -> pd.DataFrame:
is_exist = os.path.exists(path)
    if not is_exist: # check that the spreadsheet exists
        df = pd.DataFrame(columns=header)
        # create a new file and write the header row
        df.to_excel(path, index=False)
    df = pd.read_excel(path) # read the source file
return df
def save_file(res_dict: dict, path: str = None) -> None:
    '''Write the results into the spreadsheet.
    '''
    df = read_file(path) # open the target file
for i, (k, v) in enumerate(res_dict.items()):
for url, type in zip(v[0], v[1]):
df.loc[i] = [k, url, type, None, None, '蔡迎港', None]
print(df)
    # check whether a sheet with the same name already exists!!
check_df = pd.read_excel(path, sheet_name=None)
if sheet_name not in list(check_df.keys()):
        with pd.ExcelWriter(path, mode='a') as writer:
            df.to_excel(writer, sheet_name=sheet_name, index=False) # append as a new sheet named after the service
import joblib
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import autorch
from autorch.function import sp2wt
class F(object):
def __init__(self,config):
# simulation data model
self.icg_model = joblib.load(config['icg_model_path'])
self.c620_model = joblib.load(config['c620_model_path'])
self.c660_model = joblib.load(config['c660_model_path'])
self.c670_model = joblib.load(config['c670_model_path'])
# real data model
self.icg_real_data_model = joblib.load(config['icg_model_path_real_data'])
self.c620_real_data_model = joblib.load(config['c620_model_path_real_data'])
self.c660_real_data_model = joblib.load(config['c660_model_path_real_data'])
self.c670_real_data_model = joblib.load(config['c670_model_path_real_data'])
# real data linear model
self.c620_real_data_model_linear = joblib.load(config['c620_model_path_real_data_linear'])
self.c660_real_data_model_linear = joblib.load(config['c660_model_path_real_data_linear'])
self.c670_real_data_model_linear = joblib.load(config['c670_model_path_real_data_linear'])
# columns name
self.icg_col = joblib.load(config['icg_col_path'])
self.c620_col = joblib.load(config['c620_col_path'])
self.c660_col = joblib.load(config['c660_col_path'])
self.c670_col = joblib.load(config['c670_col_path'])
# simple op_col
self.c620_simple_op_col = joblib.load(config['c620_simple_op_col'])
self.c660_simple_op_col = joblib.load(config['c660_simple_op_col'])
self.c670_simple_op_col = joblib.load(config['c670_simple_op_col'])
# other infomation
self.c620_wt_always_same_split_factor_dict = joblib.load(config['c620_wt_always_same_split_factor_dict'])
self.c660_wt_always_same_split_factor_dict = joblib.load(config['c660_wt_always_same_split_factor_dict'])
self.c670_wt_always_same_split_factor_dict = joblib.load(config['c670_wt_always_same_split_factor_dict'])
self.index_9999 = joblib.load(config['index_9999_path'])
self.index_0001 = joblib.load(config['index_0001_path'])
self.V615_density = 0.8626
self.C820_density = 0.8731
self.T651_density = 0.8749
        # user-configurable mode flags
self.Recommended_mode = False
self.real_data_mode = False
self._Post_processing = True
self._linear_model = False
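        # Flag summary (as used in __call__ below):
        #   Recommended_mode : let ICG_loop pick the NA-in-benzene spec and benzene sidedraw target
        #   real_data_mode   : overlay predictions from the models fitted on real (plant) data
        #   _Post_processing : enforce the fixed split factors and the benzene-in-sidedraw spec on the outputs
        #   _linear_model    : use the linear real-data models instead of the nonlinear ones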
def ICG_loop(self,Input):
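        # Repeatedly relax the 'NA in Benzene' spec by 30 ppmw until the ICG model predicts a
        # positive C620 distillate rate, then return the prediction together with the (possibly
        # modified) input.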
while True:
if self.real_data_mode == True:
output = pd.DataFrame(self.icg_real_data_model.predict(Input[self.icg_col['x']].values),
index=Input.index,columns=['Simulation Case Conditions_C620 Distillate Rate_m3/hr'])
if self.real_data_mode == False:
output = pd.DataFrame(self.icg_model.predict(Input[self.icg_col['x']].values),
index=Input.index,columns=['Simulation Case Conditions_C620 Distillate Rate_m3/hr'])
dist_rate = output['Simulation Case Conditions_C620 Distillate Rate_m3/hr'].values[0]
na_in_benzene = Input['Simulation Case Conditions_Spec 2 : NA in Benzene_ppmw'].values[0]
print('current Distillate Rate_m3/hr:{} NA in Benzene_ppmw:{}'.format(dist_rate,na_in_benzene))
if output['Simulation Case Conditions_C620 Distillate Rate_m3/hr'].values[0] > 0:
return output,Input
else:
Input['Simulation Case Conditions_Spec 2 : NA in Benzene_ppmw'] -= 30
print('NA in Benzene_ppmw -= 30')
def __call__(self,icg_input,c620_feed,t651_feed):
# get index
idx = icg_input.index
# c620_case
c620_case = pd.DataFrame(index=idx,columns=self.c620_col['case'])
# c620_case(Receiver Temp_oC) = user input
c620_case['Tatoray Stripper C620 Operation_Specifications_Spec 1 : Receiver Temp_oC'] = icg_input['Tatoray Stripper C620 Operation_Specifications_Spec 1 : Receiver Temp_oC'].values
if self.Recommended_mode == True:
icg_input['Simulation Case Conditions_Spec 2 : NA in Benzene_ppmw'] = 980.0
icg_input['Simulation Case Conditions_Spec 1 : Benzene in C620 Sidedraw_wt%'] = 70.0
icg_output,icg_input = self.ICG_loop(icg_input)
print(icg_output)
c620_case['Tatoray Stripper C620 Operation_Specifications_Spec 2 : Distillate Rate_m3/hr'] = icg_output.values
c620_case['Tatoray Stripper C620 Operation_Specifications_Spec 3 : Benzene in Sidedraw_wt%'] = icg_input['Simulation Case Conditions_Spec 1 : Benzene in C620 Sidedraw_wt%'].values
if self.Recommended_mode == False:
c620_case['Tatoray Stripper C620 Operation_Specifications_Spec 2 : Distillate Rate_m3/hr'] = icg_input['Tatoray Stripper C620 Operation_Specifications_Spec 2 : Distillate Rate_m3/hr'].values
c620_case['Tatoray Stripper C620 Operation_Specifications_Spec 3 : Benzene in Sidedraw_wt%'] = icg_input['Simulation Case Conditions_Spec 1 : Benzene in C620 Sidedraw_wt%'].values
# c620_input(c620_case&c620_feed)
c620_input = c620_case.join(c620_feed)
# c620 output(op&wt)
c620_output = self.c620_model.predict(c620_input)
c620_sp,c620_op = c620_output.iloc[:,:41*4],c620_output.iloc[:,41*4:]
# update by c620 real data model?
if self.real_data_mode == True:
if self._linear_model == True:
c620_op_real = self.c620_real_data_model_linear.predict(c620_input)[:,41*4:]
c620_op_real = pd.DataFrame(c620_op_real,index=c620_input.index,columns=self.c620_simple_op_col)
c620_sp_real = self.c620_real_data_model_linear.predict(c620_input)[:,:41*4]
c620_sp_real = pd.DataFrame(c620_sp_real,index=c620_input.index,columns=c620_sp.columns)
if self._linear_model == False:
c620_op_real = self.c620_real_data_model.predict(c620_input).iloc[:,41*4:]
c620_sp_real = self.c620_real_data_model.predict(c620_input).iloc[:,:41*4]
c620_op.update(c620_op_real)
c620_sp.update(c620_sp_real)
        # c620 split-factor (sp) post-processing
if self._Post_processing:
for i in self.c620_wt_always_same_split_factor_dict.keys():
c620_sp[i] = self.c620_wt_always_same_split_factor_dict[i]
        # compute c620_wt (outlet compositions in wt%) from the split factors and the feed
s1,s2,s3,s4 = c620_sp.iloc[:,:41].values,c620_sp.iloc[:,41:41*2].values,c620_sp.iloc[:,41*2:41*3].values,c620_sp.iloc[:,41*3:41*4].values
w1,w2,w3,w4 = sp2wt(c620_feed,s1),sp2wt(c620_feed,s2),sp2wt(c620_feed,s3),sp2wt(c620_feed,s4)
wt = np.hstack((w1,w2,w3,w4))
c620_wt = pd.DataFrame(wt,index=idx,columns=self.c620_col['vent_gas_x']+self.c620_col['distillate_x']+self.c620_col['sidedraw_x']+self.c620_col['bottoms_x'])
        # in linear-model mode, update c620_wt once more with the linear model, before post-processing
if self._linear_model:
c620_wt_real = self.c620_real_data_model_linear.predict(c620_input)[:,:41*4]
c620_wt_real = pd.DataFrame(c620_wt_real,index=c620_input.index,columns=c620_wt.columns)
c620_wt.update(c620_wt_real)
        # c620_wt post-processing: enforce the client-specified constraints before the values are returned
if self._Post_processing:
bz_idx = c620_wt.columns.tolist().index('Tatoray Stripper C620 Operation_Sidedraw Production Rate and Composition_Benzene_wt%')
other_idx = [i for i in range(41*2,41*3,1) if i != bz_idx]
other_total = (100 - c620_input['Tatoray Stripper C620 Operation_Specifications_Spec 3 : Benzene in Sidedraw_wt%'].values).reshape(-1,1)
c620_wt.iloc[:,bz_idx] = c620_input['Tatoray Stripper C620 Operation_Specifications_Spec 3 : Benzene in Sidedraw_wt%'].values
c620_wt.iloc[:,other_idx] = (c620_wt.iloc[:,other_idx].values /
c620_wt.iloc[:,other_idx].values.sum(axis=1).reshape(-1,1))*other_total
# c620 input mass flow rate m3 to ton
V615_Btm_m3 = icg_input['Simulation Case Conditions_Feed Rate_Feed from V615 Btm_m3/hr'].values.reshape(-1,1)
C820_Dist_m3 = icg_input['Simulation Case Conditions_Feed Rate_Feed from C820 Dist_m3/hr'].values.reshape(-1,1)
V615_Btm_ton = V615_Btm_m3*self.V615_density
C820_Dist_ton = C820_Dist_m3*self.C820_density
c620_feed_rate_ton = V615_Btm_ton+C820_Dist_ton
# c620 output mass flow ton
c620_mf_side = np.sum(c620_feed_rate_ton*c620_feed.values*s3*0.01,axis=1,keepdims=True)
c620_mf_bot = np.sum(c620_feed_rate_ton*c620_feed.values*s4*0.01,axis=1,keepdims=True)
# t651 feed mass flow rate(ton)
t651_mf = (icg_input['Simulation Case Conditions_Feed Rate_Feed from T651_m3/hr']*self.T651_density).values.reshape(-1,1)
# c660 input mass flow(ton)
c660_mf = t651_mf + c620_mf_side
t651_mf_p ,c620_mf_side_p = t651_mf/c660_mf ,c620_mf_side/c660_mf
# c660 input(feed & case)
c660_feed = c620_wt[self.c620_col['sidedraw_x']].values*c620_mf_side_p + t651_feed.values*t651_mf_p
c660_feed = pd.DataFrame(c660_feed,index=idx,columns=self.c660_col['x41'])
        c660_case = pd.DataFrame(index=idx,columns=self.c660_col['case'])
import os
import random
from itertools import product
from unittest import mock
import arff
import pytest
import numpy as np
import pandas as pd
import scipy.sparse
from oslo_concurrency import lockutils
import openml
from openml import OpenMLDataset
from openml.exceptions import OpenMLCacheException, OpenMLHashException, \
OpenMLPrivateDatasetError
from openml.testing import TestBase
from openml.utils import _tag_entity, _create_cache_directory_for_id
from openml.datasets.functions import (create_dataset,
attributes_arff_from_df,
_get_cached_dataset,
_get_cached_dataset_features,
_get_cached_dataset_qualities,
_get_cached_datasets,
_get_dataset_arff,
_get_dataset_description,
_get_dataset_features,
_get_dataset_qualities,
_get_online_dataset_arff,
_get_online_dataset_format,
DATASETS_CACHE_DIR_NAME)
class TestOpenMLDataset(TestBase):
_multiprocess_can_split_ = True
def setUp(self):
super(TestOpenMLDataset, self).setUp()
def tearDown(self):
self._remove_pickle_files()
super(TestOpenMLDataset, self).tearDown()
def _remove_pickle_files(self):
cache_dir = self.static_cache_dir
for did in ['-1', '2']:
with lockutils.external_lock(
name='datasets.functions.get_dataset:%s' % did,
lock_path=os.path.join(openml.config.get_cache_directory(), 'locks'),
):
pickle_path = os.path.join(cache_dir, 'datasets', did,
'dataset.pkl')
try:
os.remove(pickle_path)
except (OSError, FileNotFoundError):
# Replaced a bare except. Not sure why either of these would be acceptable.
pass
def _get_empty_param_for_dataset(self):
return {
'name': None,
'description': None,
'creator': None,
'contributor': None,
'collection_date': None,
'language': None,
'licence': None,
'default_target_attribute': None,
'row_id_attribute': None,
'ignore_attribute': None,
'citation': None,
'attributes': None,
'data': None
}
def test__list_cached_datasets(self):
openml.config.cache_directory = self.static_cache_dir
cached_datasets = openml.datasets.functions._list_cached_datasets()
self.assertIsInstance(cached_datasets, list)
self.assertEqual(len(cached_datasets), 2)
self.assertIsInstance(cached_datasets[0], int)
@mock.patch('openml.datasets.functions._list_cached_datasets')
def test__get_cached_datasets(self, _list_cached_datasets_mock):
openml.config.cache_directory = self.static_cache_dir
_list_cached_datasets_mock.return_value = [-1, 2]
datasets = _get_cached_datasets()
self.assertIsInstance(datasets, dict)
self.assertEqual(len(datasets), 2)
self.assertIsInstance(list(datasets.values())[0], OpenMLDataset)
def test__get_cached_dataset(self, ):
openml.config.cache_directory = self.static_cache_dir
dataset = _get_cached_dataset(2)
features = _get_cached_dataset_features(2)
qualities = _get_cached_dataset_qualities(2)
self.assertIsInstance(dataset, OpenMLDataset)
self.assertTrue(len(dataset.features) > 0)
self.assertTrue(len(dataset.features) == len(features['oml:feature']))
self.assertTrue(len(dataset.qualities) == len(qualities))
def test_get_cached_dataset_description(self):
openml.config.cache_directory = self.static_cache_dir
description = openml.datasets.functions._get_cached_dataset_description(2)
self.assertIsInstance(description, dict)
def test_get_cached_dataset_description_not_cached(self):
openml.config.cache_directory = self.static_cache_dir
self.assertRaisesRegex(OpenMLCacheException,
"Dataset description for dataset id 3 not cached",
openml.datasets.functions._get_cached_dataset_description,
dataset_id=3)
def test_get_cached_dataset_arff(self):
openml.config.cache_directory = self.static_cache_dir
description = openml.datasets.functions._get_cached_dataset_arff(dataset_id=2)
self.assertIsInstance(description, str)
def test_get_cached_dataset_arff_not_cached(self):
openml.config.cache_directory = self.static_cache_dir
self.assertRaisesRegex(OpenMLCacheException,
"ARFF file for dataset id 3 not cached",
openml.datasets.functions._get_cached_dataset_arff,
dataset_id=3)
def _check_dataset(self, dataset):
self.assertEqual(type(dataset), dict)
self.assertGreaterEqual(len(dataset), 2)
self.assertIn('did', dataset)
self.assertIsInstance(dataset['did'], int)
self.assertIn('status', dataset)
self.assertIsInstance(dataset['status'], str)
self.assertIn(dataset['status'], ['in_preparation', 'active', 'deactivated'])
def _check_datasets(self, datasets):
for did in datasets:
self._check_dataset(datasets[did])
def test_tag_untag_dataset(self):
tag = 'test_tag_%d' % random.randint(1, 1000000)
all_tags = _tag_entity('data', 1, tag)
self.assertTrue(tag in all_tags)
all_tags = _tag_entity('data', 1, tag, untag=True)
self.assertTrue(tag not in all_tags)
def test_list_datasets(self):
# We can only perform a smoke test here because we test on dynamic
# data from the internet...
datasets = openml.datasets.list_datasets()
# 1087 as the number of datasets on openml.org
self.assertGreaterEqual(len(datasets), 100)
self._check_datasets(datasets)
def test_list_datasets_by_tag(self):
datasets = openml.datasets.list_datasets(tag='study_14')
self.assertGreaterEqual(len(datasets), 100)
self._check_datasets(datasets)
def test_list_datasets_by_size(self):
datasets = openml.datasets.list_datasets(size=10050)
self.assertGreaterEqual(len(datasets), 120)
self._check_datasets(datasets)
def test_list_datasets_by_number_instances(self):
datasets = openml.datasets.list_datasets(number_instances="5..100")
self.assertGreaterEqual(len(datasets), 4)
self._check_datasets(datasets)
def test_list_datasets_by_number_features(self):
datasets = openml.datasets.list_datasets(number_features="50..100")
self.assertGreaterEqual(len(datasets), 8)
self._check_datasets(datasets)
def test_list_datasets_by_number_classes(self):
datasets = openml.datasets.list_datasets(number_classes="5")
self.assertGreaterEqual(len(datasets), 3)
self._check_datasets(datasets)
def test_list_datasets_by_number_missing_values(self):
datasets = openml.datasets.list_datasets(number_missing_values="5..100")
self.assertGreaterEqual(len(datasets), 5)
self._check_datasets(datasets)
def test_list_datasets_combined_filters(self):
datasets = openml.datasets.list_datasets(tag='study_14',
number_instances="100..1000",
number_missing_values="800..1000")
self.assertGreaterEqual(len(datasets), 1)
self._check_datasets(datasets)
def test_list_datasets_paginate(self):
size = 10
max = 100
for i in range(0, max, size):
datasets = openml.datasets.list_datasets(offset=i, size=size)
self.assertEqual(size, len(datasets))
self._check_datasets(datasets)
def test_list_datasets_empty(self):
datasets = openml.datasets.list_datasets(tag='NoOneWouldUseThisTagAnyway')
if len(datasets) > 0:
raise ValueError('UnitTest Outdated, tag was already used (please remove)')
self.assertIsInstance(datasets, dict)
def test_check_datasets_active(self):
# Have to test on live because there is no deactivated dataset on the test server.
openml.config.server = self.production_server
active = openml.datasets.check_datasets_active([2, 17])
self.assertTrue(active[2])
self.assertFalse(active[17])
self.assertRaisesRegex(
ValueError,
'Could not find dataset 79 in OpenML dataset list.',
openml.datasets.check_datasets_active,
[79],
)
openml.config.server = self.test_server
def test_get_datasets(self):
dids = [1, 2]
datasets = openml.datasets.get_datasets(dids)
self.assertEqual(len(datasets), 2)
self.assertTrue(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "1", "description.xml")))
self.assertTrue(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "2", "description.xml")))
self.assertTrue(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "1", "dataset.arff")))
self.assertTrue(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "2", "dataset.arff")))
self.assertTrue(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "1", "features.xml")))
self.assertTrue(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "2", "features.xml")))
self.assertTrue(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "1", "qualities.xml")))
self.assertTrue(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "2", "qualities.xml")))
def test_get_datasets_lazy(self):
dids = [1, 2]
datasets = openml.datasets.get_datasets(dids, download_data=False)
self.assertEqual(len(datasets), 2)
self.assertTrue(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "1", "description.xml")))
self.assertTrue(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "2", "description.xml")))
self.assertTrue(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "1", "features.xml")))
self.assertTrue(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "2", "features.xml")))
self.assertTrue(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "1", "qualities.xml")))
self.assertTrue(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "2", "qualities.xml")))
self.assertFalse(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "1", "dataset.arff")))
self.assertFalse(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "2", "dataset.arff")))
datasets[0].get_data()
self.assertTrue(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "1", "dataset.arff")))
datasets[1].get_data()
self.assertTrue(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "2", "dataset.arff")))
def test_get_dataset(self):
# This is the only non-lazy load to ensure default behaviour works.
dataset = openml.datasets.get_dataset(1)
self.assertEqual(type(dataset), OpenMLDataset)
self.assertEqual(dataset.name, 'anneal')
self.assertTrue(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "1", "description.xml")))
self.assertTrue(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "1", "dataset.arff")))
self.assertTrue(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "1", "features.xml")))
self.assertTrue(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "1", "qualities.xml")))
self.assertGreater(len(dataset.features), 1)
self.assertGreater(len(dataset.qualities), 4)
# Issue324 Properly handle private datasets when trying to access them
openml.config.server = self.production_server
self.assertRaises(OpenMLPrivateDatasetError, openml.datasets.get_dataset, 45)
def test_get_dataset_lazy(self):
dataset = openml.datasets.get_dataset(1, download_data=False)
self.assertEqual(type(dataset), OpenMLDataset)
self.assertEqual(dataset.name, 'anneal')
self.assertTrue(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "1", "description.xml")))
self.assertTrue(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "1", "features.xml")))
self.assertTrue(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "1", "qualities.xml")))
self.assertFalse(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "1", "dataset.arff")))
self.assertGreater(len(dataset.features), 1)
self.assertGreater(len(dataset.qualities), 4)
dataset.get_data()
self.assertTrue(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "1", "dataset.arff")))
# Issue324 Properly handle private datasets when trying to access them
openml.config.server = self.production_server
self.assertRaises(OpenMLPrivateDatasetError, openml.datasets.get_dataset, 45, False)
def test_get_dataset_lazy_all_functions(self):
""" Test that all expected functionality is available without downloading the dataset. """
dataset = openml.datasets.get_dataset(1, download_data=False)
# We only tests functions as general integrity is tested by test_get_dataset_lazy
tag = 'test_lazy_tag_%d' % random.randint(1, 1000000)
dataset.push_tag(tag)
self.assertFalse(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "1", "dataset.arff")))
dataset.remove_tag(tag)
self.assertFalse(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "1", "dataset.arff")))
nominal_indices = dataset.get_features_by_type('nominal')
self.assertFalse(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "1", "dataset.arff")))
correct = [0, 1, 2, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 35, 36, 37, 38]
self.assertEqual(nominal_indices, correct)
classes = dataset.retrieve_class_labels()
self.assertEqual(classes, ['1', '2', '3', '4', '5', 'U'])
self.assertFalse(os.path.exists(os.path.join(
openml.config.get_cache_directory(), "datasets", "1", "dataset.arff")))
def test_get_dataset_sparse(self):
dataset = openml.datasets.get_dataset(102, download_data=False)
X = dataset.get_data(dataset_format='array')
self.assertIsInstance(X, scipy.sparse.csr_matrix)
def test_download_rowid(self):
# Smoke test which checks that the dataset has the row-id set correctly
did = 44
dataset = openml.datasets.get_dataset(did, download_data=False)
self.assertEqual(dataset.row_id_attribute, 'Counter')
def test__get_dataset_description(self):
description = _get_dataset_description(self.workdir, 2)
self.assertIsInstance(description, dict)
description_xml_path = os.path.join(self.workdir,
'description.xml')
self.assertTrue(os.path.exists(description_xml_path))
def test__getarff_path_dataset_arff(self):
openml.config.cache_directory = self.static_cache_dir
description = openml.datasets.functions._get_cached_dataset_description(2)
arff_path = _get_dataset_arff(description, cache_directory=self.workdir)
self.assertIsInstance(arff_path, str)
self.assertTrue(os.path.exists(arff_path))
def test__getarff_md5_issue(self):
description = {
'oml:id': 5,
'oml:md5_checksum': 'abc',
'oml:url': 'https://www.openml.org/data/download/61',
}
self.assertRaisesRegex(
OpenMLHashException,
'Checksum ad484452702105cbf3d30f8deaba39a9 of downloaded file '
'is unequal to the expected checksum abc. '
'Raised when downloading dataset 5.',
_get_dataset_arff,
description,
)
def test__get_dataset_features(self):
features = _get_dataset_features(self.workdir, 2)
self.assertIsInstance(features, dict)
features_xml_path = os.path.join(self.workdir, 'features.xml')
self.assertTrue(os.path.exists(features_xml_path))
def test__get_dataset_qualities(self):
# Only a smoke check
qualities = _get_dataset_qualities(self.workdir, 2)
self.assertIsInstance(qualities, list)
def test_deletion_of_cache_dir(self):
# Simple removal
did_cache_dir = _create_cache_directory_for_id(
DATASETS_CACHE_DIR_NAME, 1,
)
self.assertTrue(os.path.exists(did_cache_dir))
openml.utils._remove_cache_dir_for_id(
DATASETS_CACHE_DIR_NAME, did_cache_dir,
)
self.assertFalse(os.path.exists(did_cache_dir))
# Use _get_dataset_arff to load the description, trigger an exception in the
# test target and have a slightly higher coverage
@mock.patch('openml.datasets.functions._get_dataset_arff')
def test_deletion_of_cache_dir_faulty_download(self, patch):
patch.side_effect = Exception('Boom!')
self.assertRaisesRegex(Exception, 'Boom!', openml.datasets.get_dataset, dataset_id=1)
datasets_cache_dir = os.path.join(
self.workdir, 'org', 'openml', 'test', 'datasets'
)
self.assertEqual(len(os.listdir(datasets_cache_dir)), 0)
def test_publish_dataset(self):
# lazy loading not possible as we need the arff-file.
openml.datasets.get_dataset(3)
file_path = os.path.join(openml.config.get_cache_directory(),
"datasets", "3", "dataset.arff")
dataset = OpenMLDataset(
"anneal",
"test",
data_format="arff",
version=1,
licence="public",
default_target_attribute="class",
data_file=file_path,
)
dataset.publish()
self.assertIsInstance(dataset.dataset_id, int)
def test__retrieve_class_labels(self):
openml.config.cache_directory = self.static_cache_dir
labels = openml.datasets.get_dataset(2, download_data=False).retrieve_class_labels()
self.assertEqual(labels, ['1', '2', '3', '4', '5', 'U'])
labels = openml.datasets.get_dataset(2, download_data=False).retrieve_class_labels(
target_name='product-type')
self.assertEqual(labels, ['C', 'H', 'G'])
def test_upload_dataset_with_url(self):
dataset = OpenMLDataset(
"%s-UploadTestWithURL" % self._get_sentinel(),
"test",
data_format="arff",
version=1,
url="https://www.openml.org/data/download/61/dataset_61_iris.arff",
)
dataset.publish()
self.assertIsInstance(dataset.dataset_id, int)
def test_data_status(self):
dataset = OpenMLDataset(
"%s-UploadTestWithURL" % self._get_sentinel(),
"test", "ARFF",
version=1,
url="https://www.openml.org/data/download/61/dataset_61_iris.arff")
dataset.publish()
did = dataset.dataset_id
        # admin key for test server (only admins can activate datasets;
# all users can deactivate their own datasets)
openml.config.apikey = 'd488d8afd93b32331cf6ea9d7003d4c3'
openml.datasets.status_update(did, 'active')
# need to use listing fn, as this is immune to cache
result = openml.datasets.list_datasets(data_id=did, status='all')
self.assertEqual(len(result), 1)
self.assertEqual(result[did]['status'], 'active')
openml.datasets.status_update(did, 'deactivated')
# need to use listing fn, as this is immune to cache
result = openml.datasets.list_datasets(data_id=did, status='all')
self.assertEqual(len(result), 1)
self.assertEqual(result[did]['status'], 'deactivated')
openml.datasets.status_update(did, 'active')
# need to use listing fn, as this is immune to cache
result = openml.datasets.list_datasets(data_id=did, status='all')
self.assertEqual(len(result), 1)
self.assertEqual(result[did]['status'], 'active')
with self.assertRaises(ValueError):
openml.datasets.status_update(did, 'in_preparation')
# need to use listing fn, as this is immune to cache
result = openml.datasets.list_datasets(data_id=did, status='all')
self.assertEqual(len(result), 1)
self.assertEqual(result[did]['status'], 'active')
def test_attributes_arff_from_df(self):
# DataFrame case
df = pd.DataFrame(
[[1, 1.0, 'xxx', 'A', True], [2, 2.0, 'yyy', 'B', False]],
columns=['integer', 'floating', 'string', 'category', 'boolean']
)
df['category'] = df['category'].astype('category')
attributes = attributes_arff_from_df(df)
self.assertEqual(attributes, [('integer', 'INTEGER'),
('floating', 'REAL'),
('string', 'STRING'),
('category', ['A', 'B']),
('boolean', ['True', 'False'])])
# SparseDataFrame case
df = pd.SparseDataFrame([[1, 1.0],
[2, 2.0],
[0, 0]],
columns=['integer', 'floating'],
default_fill_value=0)
df['integer'] = df['integer'].astype(np.int64)
attributes = attributes_arff_from_df(df)
self.assertEqual(attributes, [('integer', 'INTEGER'),
('floating', 'REAL')])
def test_attributes_arff_from_df_mixed_dtype_categories(self):
        # liac-arff imposes categorical attributes to be of string dtype. We
        # raise an error if this is not the case.
        df = pd.DataFrame([[1], ['2'], [3.]])
#!/usr/bin/env python
from itertools import combinations
import random
import scanpy.api as sc
import matplotlib.pyplot as plt
import numpy as np
from granatum_sdk import Granatum
import pandas as pd
import seaborn as sns
def main():
gn = Granatum()
tb1 = gn.pandas_from_assay(gn.get_import('assay1'))
tb2 = gn.pandas_from_assay(gn.get_import('assay2'))
label1 = gn.get_arg('label1')
label2 = gn.get_arg('label2')
direction = gn.get_arg('direction')
normalization = gn.get_arg('normalization')
if direction == 'samples':
tb1 = tb1.T
tb2 = tb2.T
overlapped_index = set(tb1.index) & set(tb2.index)
tb1.index = [f"{label1}_{x}" if x in overlapped_index else x for x in tb1.index]
tb2.index = [f"{label2}_{x}" if x in overlapped_index else x for x in tb2.index]
if normalization == 'none':
tb = pd.concat([tb1, tb2], axis=0)
elif normalization == 'frobenius':
ntb1 = np.linalg.norm(tb1)
ntb2 = np.linalg.norm(tb2)
ntb = np.mean([ntb1, ntb2])
fct1 = ntb / ntb1
fct2 = ntb / ntb2
        tb = pd.concat([tb1 * fct1, tb2 * fct2], axis=0)
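# --- Hedged illustration (not part of the Granatum step above) ---
# A minimal sketch of what the 'frobenius' normalization branch does: each table is
# rescaled so its Frobenius norm matches the mean of the two norms before concatenation.
# The names below (a, b, target, scaled) are hypothetical and chosen only for clarity.
import numpy as np
import pandas as pd

a = pd.DataFrame(np.random.rand(5, 3))          # stands in for tb1
b = pd.DataFrame(np.random.rand(4, 3) * 10.0)   # stands in for tb2, on a larger scale

norm_a, norm_b = np.linalg.norm(a), np.linalg.norm(b)
target = np.mean([norm_a, norm_b])

scaled = pd.concat([a * (target / norm_a), b * (target / norm_b)], axis=0)

# Scaling a matrix by c scales its Frobenius norm by c, so both blocks now have norm == target.
assert np.isclose(np.linalg.norm(a * (target / norm_a)), target)
assert np.isclose(np.linalg.norm(b * (target / norm_b)), target)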
# -*- coding: utf-8 -*-
import os
import sys
from typing import List, NamedTuple
from datetime import datetime
from google.cloud import aiplatform, storage
from google.cloud.aiplatform import gapic as aip
from kfp.v2 import compiler, dsl
from kfp.v2.dsl import component, pipeline, Input, Output, Model, Metrics, Dataset, HTML
USERNAME = "<lowercase user name>" # @param username
BUCKET_NAME = "gs://<USED BUCKET>" # @param bucket name
REGION = "<REGION>" # @param region
PROJECT_ID = "<GCP PROJECT ID>" # @param project id
PROJECT_NUMBER = "<GCP PROJECT NUMBER>" # @param project number
PIPELINE_NAME = f"diamonds-predictor-serving-pipeline-{USERNAME}"
ARTIFACT_REGISTRY_NAME = "diamonds-predictor-repo"
SUPERWISE_CLIENT_ID = "<YOUR SUPERWISE ACCOUNT CLIENT ID>" # @param project number
SUPERWISE_SECRET = "<YOUR SUPERWISE ACCOUNT SECRET>" # @param project number
SUPERWISE_MODEL_NAME = "Regression - Diamonds Price Predictor"
aiplatform.init(project=PROJECT_ID, location=REGION, staging_bucket=BUCKET_NAME)
""" Vertex definitions """
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
PIPELINE_ROOT = "{}/{}_pipeline_root/workshop".format(BUCKET_NAME, USERNAME)
# Load the data Component
@component(packages_to_install=["pandas"])
def load_data(dataset: Output[Dataset]):
import pandas as pd
df = pd.read_csv("https://www.openml.org/data/get_csv/21792853/dataset")
df = df[df["price"] < 10000]
print("Load Data: ", df.head())
df.to_csv(dataset.path, index=False)
# Validate the data Component
@component(packages_to_install=["pandas"])
def validate_data(df: Input[Dataset], validated_df: Output[Dataset]):
import pandas as pd
df = pd.read_csv(df.path)
print("Validate_data: ", df.head())
BINARY_FEATURES = []
# List all column names for numeric features
NUMERIC_FEATURES = ["carat", "depth", "table", "x", "y", "z"]
# List all column names for categorical features
CATEGORICAL_FEATURES = ["cut", "color", "clarity"]
# ID column - needed to support predict() over numpy arrays
ID = ["record_id"]
TARGET = "price"
ALL_COLUMNS = ID + BINARY_FEATURES + NUMERIC_FEATURES + CATEGORICAL_FEATURES
# define the column name for the target
df = df.reset_index().rename(columns={"index": "record_id"})
for n in NUMERIC_FEATURES:
df[n] = pd.to_numeric(df[n], errors="coerce")
df = df.fillna(df.mean(numeric_only=True))
def data_selection(df: pd.DataFrame, selected_columns: List[str]):
selected_columns.append(TARGET)
data = df.loc[:, selected_columns]
return data
## Feature selection
df = data_selection(df, ALL_COLUMNS)
return df.to_csv(validated_df.path, index=False)
# Prepare data for training Component
@component(packages_to_install=["scikit-learn==1.0.2", "pandas"])
def prepare_data(
df: Input[Dataset],
X_train: Output[Dataset],
y_train: Output[Dataset],
X_test: Output[Dataset],
y_test: Output[Dataset],
):
import pandas as pd
from sklearn.model_selection import train_test_split
target = "price"
df = pd.read_csv(df.path)
print("Prepare data: ", df.head())
X, y = df.drop(columns=[target]), df[target]
X_train_data, X_test_data, y_train_data, y_test_data = train_test_split(
X, y, test_size=0.2, random_state=42
)
X_train_data.to_csv(X_train.path, index=False)
y_train_data.to_csv(y_train.path, index=False)
X_test_data.to_csv(X_test.path, index=False)
y_test_data.to_csv(y_test.path, index=False)
# Train model Component
@component(packages_to_install=["scikit-learn==1.0.2", "pandas", "joblib"])
def train_model(
X_train: Input[Dataset],
y_train: Input[Dataset],
model_artifact: Output[Model],
):
import joblib
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler, OneHotEncoder, OrdinalEncoder
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
# List all column names for numeric features
NUMERIC_FEATURES = ["carat", "depth", "table", "x", "y", "z"]
# List all column names for categorical features
CATEGORICAL_FEATURES = ["cut", "color", "clarity"]
# ID column - needed to support predict() over numpy arrays
ID = ["record_id"]
ALL_COLUMNS = ID + NUMERIC_FEATURES + CATEGORICAL_FEATURES
X, y = pd.read_csv(X_train.path), pd.read_csv(y_train.path)
X = X.loc[:, ALL_COLUMNS]
print("Trainning model X:", X.head(), "Y: ", y.head())
numeric_transformer = Pipeline(
steps=[
("imputer", SimpleImputer(strategy="median")),
("scaler", StandardScaler()),
]
)
categorical_transformer = Pipeline(
steps=[
("imputer", SimpleImputer(strategy="most_frequent")),
("cat", OneHotEncoder(handle_unknown="ignore")),
]
)
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, NUMERIC_FEATURES),
("cat", categorical_transformer, CATEGORICAL_FEATURES),
],
remainder="drop",
n_jobs=-1,
)
    # We now create a full pipeline for preprocessing and training.
    # For training we selected a RandomForestRegressor.
model_params = {
"max_features": "auto",
"n_estimators": 500,
"max_depth": 9,
"random_state": 42,
}
regressor = RandomForestRegressor()
regressor.set_params(**model_params)
# steps=[('i', SimpleImputer(strategy='median'))
pipeline = Pipeline(
steps=[("preprocessor", preprocessor), ("regressor", regressor)]
)
    # For workshop time efficiency we will use 2-fold cross-validation
score = cross_val_score(
pipeline, X, y, cv=2, scoring="neg_root_mean_squared_error", n_jobs=-1
).mean()
print("finished cross val")
# Now we fit all our data to the classifier.
pipeline.fit(X, y)
# Upload the model to GCS
joblib.dump(pipeline, model_artifact.path, compress=3)
model_artifact.metadata["train_score"] = score
# Evaluate the model Component
@component(
packages_to_install=["scikit-learn==1.0.2", "pandas", "seaborn", "matplotlib"]
)
def evaluate_model(
model_artifact: Input[Model],
x_test: Input[Dataset],
y_test: Input[Dataset],
model_performance: Output[Metrics],
html: Output[HTML],
):
import joblib
import io
import base64
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from math import sqrt
from sklearn.metrics import mean_squared_error, r2_score
model = joblib.load(model_artifact.path)
y_test = pd.read_csv(y_test.path)["price"]
y_pred = model.predict(pd.read_csv(x_test.path))
model_performance.metadata["rmse"] = sqrt(mean_squared_error(y_test, y_pred))
model_performance.metadata["r2"] = r2_score(y_test, y_pred)
model_performance.log_metric("r2", model_performance.metadata["r2"])
model_performance.log_metric("rmse", model_performance.metadata["rmse"])
df = pd.DataFrame({"predicted Price(USD)": y_pred, "actual Price(USD)": y_test})
def fig_to_base64(fig):
img = io.BytesIO()
fig.get_figure().savefig(img, format="png", bbox_inches="tight")
img.seek(0)
return base64.b64encode(img.getvalue())
encoded = fig_to_base64(
sns.scatterplot(data=df, x="predicted Price(USD)", y="actual Price(USD)")
)
encoded_html = "{}".format(encoded.decode("utf-8"))
html_content = '<html><head></head><body><h1>Predicted vs Actual Price</h1>\n<img src="data:image/png;base64, {}"></body></html>'.format(
encoded_html
)
with open(html.path, "w") as f:
f.write(html_content)
# Validate the model Component
@component(packages_to_install=["scikit-learn==1.0.2", "pandas"])
def validate_model(
new_model_metrics: Input[Metrics],
new_model: Input[Model],
dataset: Input[Dataset],
baseline: Output[Dataset],
model_metrics: Output[Metrics],
) -> NamedTuple("output", [("deploy", str)]):
import joblib
import pandas as pd
from math import sqrt
from sklearn.metrics import mean_squared_error, r2_score
target = "price"
validation_data = pd.read_csv(dataset.path)
X, y = validation_data.drop(columns=[target]), validation_data[target]
model = joblib.load(new_model.path)
y_pred = model.predict(X)
rmse = sqrt(mean_squared_error(y, y_pred))
r2 = r2_score(y, y_pred)
train_score = new_model.metadata["train_score"]
print("new model rmse cross validation mean score: ", train_score)
print("new model train rmse: ", new_model_metrics.metadata["rmse"])
print("new model train r2: ", new_model_metrics.metadata["r2"])
print("new model validation rmse: ", rmse)
print("new model validation r2: ", r2)
model_metrics.log_metric("rmse", rmse)
model_metrics.log_metric("r2", r2)
validation_data["predictions"] = y_pred
validation_data.to_csv(baseline.path, index=False)
if (
rmse <= new_model_metrics.metadata["rmse"]
and new_model_metrics.metadata["r2"] >= 0.95
and abs(train_score) < 1000
):
return ("true",)
return ("false",)
@component(packages_to_install=["superwise", "pandas"])
def register_model_to_superwise(
model_name: str,
superwise_client_id: str,
superwise_secret: str,
baseline: Input[Dataset],
timestamp: str,
) -> NamedTuple("output", [("superwise_model_id", int), ("superwise_version_id", int)]):
import pandas as pd
from datetime import datetime
from superwise import Superwise
from superwise.models.model import Model
from superwise.models.version import Version
from superwise.resources.superwise_enums import DataEntityRole
from superwise.controller.infer import infer_dtype
sw = Superwise(
client_id=superwise_client_id,
secret=superwise_secret,
)
first_version = False
# Check if model exists
models = sw.model.get_by_name(model_name)
if len(models) == 0:
print(f"Registering new model {model_name} to Superwise")
diamond_model = Model(name=model_name, description="Predicting Diamond Prices")
new_model = sw.model.create(diamond_model)
model_id = new_model.id
first_version = True
else:
print(f"Model {model_name} already exists in Superwise")
model_id = models[0].id
baseline_data = pd.read_csv(baseline.path).assign(
        ts=pd.Timestamp.now() - pd.Timedelta(30, "d")
# -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core import ops
from pandas.errors import NullFrequencyError
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = pd.timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
Series([2, 3, 4]) + tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_sub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser - Series([2, 3, 4])
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
with pytest.raises(TypeError):
Series([2, 3, 4]) - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_intlike(self, box):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box)
err = TypeError if box is not pd.Index else NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser):
if box is pd.DataFrame and isinstance(scalar, np.ndarray):
# raises ValueError
pytest.xfail(reason="DataFrame to broadcast incorrectly")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser):
if type(vec) is Series and not dtype.startswith('float'):
pytest.xfail(reason='GH#19123 integer interpreted as nanos')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
# TODO: parametrize over these four ops?
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with datetime-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype "
"instead of "
"datetime64[ns]",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdser = Series(pd.timedelta_range('1 day', periods=3))
expected = Series(pd.date_range('2012-01-02', periods=3))
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
tm.assert_equal(ts + tdser, expected)
tm.assert_equal(tdser + ts, expected)
expected2 = Series(pd.date_range('2011-12-31',
periods=3, freq='-1D'))
expected2 = tm.box_expected(expected2, box)
tm.assert_equal(ts - tdser, expected2)
tm.assert_equal(ts + (-tdser), expected2)
with pytest.raises(TypeError):
tdser - ts
# ------------------------------------------------------------------
# Operations with timedelta-like others (including DateOffsets)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly leading "
"to alignment error",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
@pytest.mark.parametrize('box', [pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - NaT
tm.assert_equal(res, expected)
class TestTimedeltaArraylikeMulDivOps(object):
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly returns "
"m8[ns] instead of f8",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()])
def test_td64arr_floordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = td1 // scalar_td
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Incorrectly casts to f8",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()])
def test_td64arr_rfloordiv_tdscalar(self, box, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box)
expected = tm.box_expected(expected, box)
result = scalar_td // td1
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns m8[ns] dtype "
"instead of f8",
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=5, seconds=4),
        Timedelta('5m4s')
"""
****************************************
* @author: <NAME>
* Date: 5/22/21
****************************************
"""
import time
import tensorflow.keras as keras
import pandas as pd
from tqdm import tqdm
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from random import sample
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import OneHotEncoder
import copy
activation_fcts = [
'relu', "sigmoid", "softmax", "softplus", "softsign", "tanh", "selu", "elu", "exponential"
]
optimizers = ["sgd", "rmsprop", "adam", "adadelta", "adagrad", "adamax", "nadam", "ftrl"]
losses = ["mae", "mape", "mse", "msle", "poisson", "categorical_crossentropy"]
class TimeHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.train_start_time = time.time()
self.epoch_times = []
self.batch_times = []
self.epoch_times_detail = []
self.batch_times_detail = []
def on_train_end(self, logs={}):
self.train_end_time = time.time()
def on_epoch_begin(self, epoch, logs={}):
self.epoch_time_start = time.time()
def on_epoch_end(self, epoch, logs={}):
epoch_time_end = time.time()
self.epoch_times.append(epoch_time_end - self.epoch_time_start)
self.epoch_times_detail.append((self.epoch_time_start, epoch_time_end))
def on_train_batch_begin(self, batch, logs={}):
        self.batch_time_start = time.time()
    def on_train_batch_end(self, batch, logs={}):
        batch_time_end = time.time()
        self.batch_times.append(batch_time_end - self.batch_time_start)
self.batch_times_detail.append((self.bacth_time_start, batch_time_end))
def relative_by_train_start(self):
self.epoch_times_detail = np.array(self.epoch_times_detail) - self.train_start_time
self.batch_times_detail = np.array(self.batch_times_detail) - self.train_start_time
self.train_end_time = np.array(self.train_end_time) - self.train_start_time
class gen_nn:
def __init__(
self,
hidden_layers_num_lower=5,
hidden_layers_num_upper=101,
hidden_layer_size_lower=1,
hidden_layer_size_upper=1001,
activation='random',
optimizer='random',
loss='random'
):
self.hidden_layers_num_lower = hidden_layers_num_lower
self.hidden_layers_num_upper = hidden_layers_num_upper
self.hidden_layer_size_lower = hidden_layer_size_lower
self.hidden_layer_size_upper = hidden_layer_size_upper
self.activation_pick = activation
self.optimizer_pick = optimizer
self.loss_pick = loss
self.activation_fcts = activation_fcts
self.optimizers = optimizers
self.losses = losses
@staticmethod
def nothing(x):
return x
@staticmethod
def build_dense_model(layer_sizes, activations, optimizer, loss):
model_dense = Sequential()
for index, size in enumerate(layer_sizes):
model_dense.add(Dense(size, activation=activations[index]))
model_dense.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
return model_dense
@staticmethod
def get_dense_model_features(keras_model):
layers = [
layer_info for layer_info in keras_model.get_config()['layers']
if layer_info['class_name'] == 'Dense'
]
layer_sizes = [l['config']['units'] for l in layers]
acts = [l['config']['activation'].lower() for l in layers]
return layer_sizes, acts
def generate_model(self):
hidden_layers_num = np.random.randint(
self.hidden_layers_num_lower, self.hidden_layers_num_upper
)
hidden_layer_sizes = np.random.randint(
self.hidden_layer_size_lower, self.hidden_layer_size_upper, hidden_layers_num
)
if self.activation_pick == 'random':
activations = np.random.choice(self.activation_fcts, hidden_layers_num)
else:
activations = np.random.choice([self.activation_pick], hidden_layers_num)
if self.optimizer_pick == 'random':
optimizer = np.random.choice(self.optimizers)
else:
optimizer = self.optimizer_pick
if self.loss_pick == 'random':
loss = np.random.choice(self.losses)
else:
loss = self.loss_pick
return {
'model': gen_nn.build_dense_model(hidden_layer_sizes, activations, optimizer, loss),
'layer_sizes': [int(i) for i in hidden_layer_sizes],
'activations': list(activations),
'optimizer': optimizer,
'loss': loss
}
def generate_model_configs(self, num_model_data=1000, progress=True):
model_configs = []
if progress:
loop_fun = tqdm
else:
loop_fun = gen_nn.nothing
for i in loop_fun(range(num_model_data)):
data = self.generate_model()
del data['model']
model_configs.append(data)
return model_configs
class model_train_data:
def __init__(
self,
model_configs,
input_dims=None,
batch_sizes=None,
epochs=None,
truncate_from=None,
trials=None,
batch_strategy='random',
input_dim_strategy='same'
):
"""
@param model_configs:
@param input_dims: input data number of features
@param batch_sizes:
@param epochs:
@param truncate_from:
@param trials:
@param input_dim_strategy: 'random' or 'same', same will be same size as first layer size
"""
self.model_configs = []
for info_dict in model_configs:
d2 = copy.deepcopy(info_dict)
self.model_configs.append(d2)
self.input_dims = input_dims if input_dims is not None else list(range(1, 1001))
self.batch_sizes = batch_sizes if batch_sizes is not None else [2**i for i in range(1, 9)]
self.epochs = epochs if epochs is not None else 10
self.truncate_from = truncate_from if truncate_from is not None else 2
self.trials = trials if trials is not None else 5
self.batch_strategy = batch_strategy
self.input_dim_strategy = input_dim_strategy
self.activation_fcts = activation_fcts
self.optimizers = optimizers
self.losses = losses
self.act_mapping = dict((act, index + 1) for index, act in enumerate(self.activation_fcts))
self.opt_mapping = dict((opt, index + 1) for index, opt in enumerate(self.optimizers))
self.loss_mapping = dict((loss, index + 1) for index, loss in enumerate(self.losses))
def get_train_data(self, progress=True):
model_data = []
model_configs = []
if progress:
loop_fun = tqdm
else:
loop_fun = gen_nn.nothing
for info_dict in self.model_configs:
d2 = copy.deepcopy(info_dict)
model_configs.append(d2)
for model_config in loop_fun(model_configs):
model = gen_nn.build_dense_model(
layer_sizes=model_config['layer_sizes'],
activations=model_config['activations'],
optimizer=model_config['optimizer'],
loss=model_config['loss']
)
if self.batch_strategy == 'all':
batch_sizes = self.batch_sizes.copy()
else:
batch_sizes = sample(self.batch_sizes, 1)
input_dim = sample(self.input_dims, 1)[0]
for batch_size in batch_sizes:
batch_size_data_batch = []
batch_size_data_epoch = []
if self.input_dim_strategy == 'same':
try:
input_shape = model.get_config()['layers'][0]['config']['units']
except:
input_shape = model.get_config(
)['layers'][0]['config']['batch_input_shape'][1]
else:
input_shape = input_dim
out_shape = model.get_config()['layers'][-1]['config']['units']
x = np.ones((batch_size, input_shape), dtype=np.float32)
y = np.ones((batch_size, out_shape), dtype=np.float32)
for _ in range(self.trials):
time_callback = TimeHistory()
model.fit(
x,
y,
epochs=self.epochs,
batch_size=batch_size,
callbacks=[time_callback],
verbose=False
)
times_batch = np.array(time_callback.batch_times) * 1000
times_epoch = np.array(time_callback.epoch_times) * 1000
batch_size_data_batch.extend(times_batch)
batch_size_data_epoch.extend(times_epoch)
batch_times_truncated = batch_size_data_batch[self.truncate_from:]
                epoch_times_truncated = batch_size_data_epoch[self.truncate_from:]
recovered_time = [
np.median(batch_times_truncated)
] * self.truncate_from + batch_times_truncated
model_config[f'batch_size_{batch_size}'] = {
'batch_time': np.median(batch_times_truncated),
                    'epoch_time': np.median(epoch_times_truncated),
'setup_time': np.sum(batch_size_data_batch) - sum(recovered_time),
'input_dim': input_dim
}
model_data.append(model_config)
return model_data
def convert_config_data(
self,
model_data,
layer_num_upper,
layer_na_fill=0,
act_na_fill=0,
opt_dummy=True,
loss_dummy=True,
min_max_scaler=True
):
data_rows = []
time_rows = []
for model_i_data in model_data:
layer_sizes = model_i_data['layer_sizes'] + [layer_na_fill] * layer_num_upper
layer_sizes = layer_sizes[:layer_num_upper]
activations = [self.act_mapping[i]
for i in model_i_data['activations']] + [act_na_fill] * layer_num_upper
activations = activations[:layer_num_upper]
if opt_dummy:
optimizer = model_i_data['optimizer']
else:
optimizer = self.opt_mapping[model_i_data['optimizer']]
if loss_dummy:
loss = model_i_data['loss']
else:
loss = self.loss_mapping[model_i_data['loss']]
batch_names = [k for k in model_i_data.keys() if k.startswith('batch_size')]
for batch_name in batch_names:
batch_value = int(batch_name.split('_')[-1])
batch_time = model_i_data[batch_name]['batch_time']
epoch_time = model_i_data[batch_name]['epoch_time']
setup_time = model_i_data[batch_name]['setup_time']
input_dim = model_i_data[batch_name]['input_dim']
data_rows.append(
layer_sizes + activations + [optimizer, loss, batch_value, input_dim]
)
time_rows.append([batch_time, epoch_time, setup_time])
layer_names = [f'layer_{i + 1}_size' for i in range(layer_num_upper)]
act_names = [f'layer_{i + 1}_activation' for i in range(layer_num_upper)]
temp_df = pd.DataFrame(
data_rows,
columns=layer_names + act_names + ['optimizer', 'loss', 'batch_size', 'input_dim']
)
if opt_dummy:
first_row = dict(temp_df.iloc[0])
for opt in self.optimizers:
first_row['optimizer'] = opt
temp_df = temp_df.append(first_row, ignore_index=True)
            temp_df = pd.get_dummies(temp_df, columns=['optimizer'])
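# --- Hedged usage sketch (not part of the original module) ---
# Shows how the classes above are meant to fit together: gen_nn produces random dense-model
# configurations, and model_train_data benchmarks their per-batch/per-epoch training times.
# The small numbers here (3 models, 2 epochs, 2 trials) are illustrative choices, and the
# call to convert_config_data is omitted because its return value is truncated above.
if __name__ == '__main__':
    generator = gen_nn(
        hidden_layers_num_lower=2,
        hidden_layers_num_upper=5,
        hidden_layer_size_lower=8,
        hidden_layer_size_upper=64,
    )
    configs = generator.generate_model_configs(num_model_data=3, progress=False)

    collector = model_train_data(
        configs,
        batch_sizes=[16, 32],
        epochs=2,
        trials=2,
        batch_strategy='all',       # benchmark every listed batch size for each model
        input_dim_strategy='same',  # synthetic inputs sized to the first hidden layer
    )
    timing_data = collector.get_train_data(progress=False)
    # Each entry in timing_data now carries, per batch size, the median batch time,
    # median epoch time, estimated setup time, and the input dimension that was used.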
from datetime import datetime, time, timedelta
from pandas.compat import range
import sys
import os
import nose
import numpy as np
from pandas import Index, DatetimeIndex, Timestamp, Series, date_range, period_range
import pandas.tseries.frequencies as frequencies
from pandas.tseries.tools import to_datetime
import pandas.tseries.offsets as offsets
from pandas.tseries.period import PeriodIndex
import pandas.compat as compat
from pandas.compat import is_platform_windows
import pandas.util.testing as tm
from pandas import Timedelta
def test_to_offset_multiple():
freqstr = '2h30min'
freqstr2 = '2h 30min'
result = frequencies.to_offset(freqstr)
assert(result == frequencies.to_offset(freqstr2))
expected = offsets.Minute(150)
assert(result == expected)
freqstr = '2h30min15s'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(150 * 60 + 15)
assert(result == expected)
freqstr = '2h 60min'
result = frequencies.to_offset(freqstr)
expected = offsets.Hour(3)
assert(result == expected)
freqstr = '15l500u'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(15500)
assert(result == expected)
freqstr = '10s75L'
result = frequencies.to_offset(freqstr)
expected = offsets.Milli(10075)
assert(result == expected)
freqstr = '2800N'
result = frequencies.to_offset(freqstr)
expected = offsets.Nano(2800)
assert(result == expected)
# malformed
try:
frequencies.to_offset('2h20m')
except ValueError:
pass
else:
assert(False)
def test_to_offset_negative():
freqstr = '-1S'
result = frequencies.to_offset(freqstr)
assert(result.n == -1)
freqstr = '-5min10s'
result = frequencies.to_offset(freqstr)
assert(result.n == -310)
def test_to_offset_leading_zero():
freqstr = '00H 00T 01S'
result = frequencies.to_offset(freqstr)
assert(result.n == 1)
freqstr = '-00H 03T 14S'
result = frequencies.to_offset(freqstr)
assert(result.n == -194)
def test_to_offset_pd_timedelta():
# Tests for #9064
td = Timedelta(days=1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(86401)
assert(expected==result)
td = Timedelta(days=-1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(-86399)
assert(expected==result)
td = Timedelta(hours=1, minutes=10)
result = frequencies.to_offset(td)
expected = offsets.Minute(70)
assert(expected==result)
td = Timedelta(hours=1, minutes=-10)
result = frequencies.to_offset(td)
expected = offsets.Minute(50)
assert(expected==result)
td = Timedelta(weeks=1)
result = frequencies.to_offset(td)
expected = offsets.Day(7)
assert(expected==result)
td1 = Timedelta(hours=1)
result1 = frequencies.to_offset(td1)
result2 = frequencies.to_offset('60min')
assert(result1 == result2)
td = Timedelta(microseconds=1)
result = frequencies.to_offset(td)
expected = offsets.Micro(1)
assert(expected == result)
td = Timedelta(microseconds=0)
tm.assertRaises(ValueError, lambda: frequencies.to_offset(td))
def test_anchored_shortcuts():
result = frequencies.to_offset('W')
expected = frequencies.to_offset('W-SUN')
assert(result == expected)
result1 = frequencies.to_offset('Q')
result2 = frequencies.to_offset('Q-DEC')
expected = offsets.QuarterEnd(startingMonth=12)
assert(result1 == expected)
assert(result2 == expected)
result1 = frequencies.to_offset('Q-MAY')
expected = offsets.QuarterEnd(startingMonth=5)
assert(result1 == expected)
def test_get_rule_month():
result = frequencies._get_rule_month('W')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Week())
assert(result == 'DEC')
result = frequencies._get_rule_month('D')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Day())
assert(result == 'DEC')
result = frequencies._get_rule_month('Q')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=12))
    assert(result == 'DEC')
result = frequencies._get_rule_month('Q-JAN')
assert(result == 'JAN')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=1))
assert(result == 'JAN')
result = frequencies._get_rule_month('A-DEC')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.YearEnd())
assert(result == 'DEC')
result = frequencies._get_rule_month('A-MAY')
assert(result == 'MAY')
result = frequencies._get_rule_month(offsets.YearEnd(month=5))
assert(result == 'MAY')
class TestFrequencyCode(tm.TestCase):
def test_freq_code(self):
self.assertEqual(frequencies.get_freq('A'), 1000)
self.assertEqual(frequencies.get_freq('3A'), 1000)
self.assertEqual(frequencies.get_freq('-1A'), 1000)
self.assertEqual(frequencies.get_freq('W'), 4000)
self.assertEqual(frequencies.get_freq('W-MON'), 4001)
self.assertEqual(frequencies.get_freq('W-FRI'), 4005)
for freqstr, code in compat.iteritems(frequencies._period_code_map):
result = frequencies.get_freq(freqstr)
self.assertEqual(result, code)
result = frequencies.get_freq_group(freqstr)
self.assertEqual(result, code // 1000 * 1000)
result = frequencies.get_freq_group(code)
self.assertEqual(result, code // 1000 * 1000)
def test_freq_group(self):
self.assertEqual(frequencies.get_freq_group('A'), 1000)
self.assertEqual(frequencies.get_freq_group('3A'), 1000)
self.assertEqual(frequencies.get_freq_group('-1A'), 1000)
self.assertEqual(frequencies.get_freq_group('A-JAN'), 1000)
self.assertEqual(frequencies.get_freq_group('A-MAY'), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd()), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=1)), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=5)), 1000)
self.assertEqual(frequencies.get_freq_group('W'), 4000)
self.assertEqual(frequencies.get_freq_group('W-MON'), 4000)
self.assertEqual(frequencies.get_freq_group('W-FRI'), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week()), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=1)), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=5)), 4000)
def test_get_to_timestamp_base(self):
tsb = frequencies.get_to_timestamp_base
self.assertEqual(tsb(frequencies.get_freq_code('D')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('W')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('M')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('S')[0]),
frequencies.get_freq_code('S')[0])
self.assertEqual(tsb(frequencies.get_freq_code('T')[0]),
frequencies.get_freq_code('S')[0])
self.assertEqual(tsb(frequencies.get_freq_code('H')[0]),
frequencies.get_freq_code('S')[0])
def test_freq_to_reso(self):
Reso = frequencies.Resolution
self.assertEqual(Reso.get_str_from_freq('A'), 'year')
self.assertEqual(Reso.get_str_from_freq('Q'), 'quarter')
self.assertEqual(Reso.get_str_from_freq('M'), 'month')
self.assertEqual(Reso.get_str_from_freq('D'), 'day')
self.assertEqual(Reso.get_str_from_freq('H'), 'hour')
self.assertEqual(Reso.get_str_from_freq('T'), 'minute')
self.assertEqual(Reso.get_str_from_freq('S'), 'second')
self.assertEqual(Reso.get_str_from_freq('L'), 'millisecond')
self.assertEqual(Reso.get_str_from_freq('U'), 'microsecond')
self.assertEqual(Reso.get_str_from_freq('N'), 'nanosecond')
for freq in ['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U', 'N']:
# check roundtrip
result = Reso.get_freq(Reso.get_str_from_freq(freq))
self.assertEqual(freq, result)
for freq in ['D', 'H', 'T', 'S', 'L', 'U']:
result = Reso.get_freq(Reso.get_str(Reso.get_reso_from_freq(freq)))
self.assertEqual(freq, result)
def test_get_freq_code(self):
# freqstr
self.assertEqual(frequencies.get_freq_code('A'),
(frequencies.get_freq('A'), 1))
self.assertEqual(frequencies.get_freq_code('3D'),
(frequencies.get_freq('D'), 3))
self.assertEqual(frequencies.get_freq_code('-2M'),
(frequencies.get_freq('M'), -2))
# tuple
self.assertEqual(frequencies.get_freq_code(('D', 1)),
(frequencies.get_freq('D'), 1))
self.assertEqual(frequencies.get_freq_code(('A', 3)),
(frequencies.get_freq('A'), 3))
self.assertEqual(frequencies.get_freq_code(('M', -2)),
(frequencies.get_freq('M'), -2))
# numeric tuple
self.assertEqual(frequencies.get_freq_code((1000, 1)), (1000, 1))
# offsets
self.assertEqual(frequencies.get_freq_code(offsets.Day()),
(frequencies.get_freq('D'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Day(3)),
(frequencies.get_freq('D'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Day(-2)),
(frequencies.get_freq('D'), -2))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd()),
(frequencies.get_freq('M'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(3)),
(frequencies.get_freq('M'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(-2)),
(frequencies.get_freq('M'), -2))
self.assertEqual(frequencies.get_freq_code(offsets.Week()),
(frequencies.get_freq('W'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Week(3)),
(frequencies.get_freq('W'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Week(-2)),
(frequencies.get_freq('W'), -2))
# monday is weekday=0
self.assertEqual(frequencies.get_freq_code(offsets.Week(weekday=1)),
(frequencies.get_freq('W-TUE'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Week(3, weekday=0)),
(frequencies.get_freq('W-MON'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Week(-2, weekday=4)),
(frequencies.get_freq('W-FRI'), -2))
_dti = DatetimeIndex
class TestFrequencyInference(tm.TestCase):
def test_raise_if_period_index(self):
index = PeriodIndex(start="1/1/1990", periods=20, freq="M")
self.assertRaises(TypeError, frequencies.infer_freq, index)
def test_raise_if_too_few(self):
index = _dti(['12/31/1998', '1/3/1999'])
self.assertRaises(ValueError, frequencies.infer_freq, index)
def test_business_daily(self):
index = _dti(['12/31/1998', '1/3/1999', '1/4/1999'])
self.assertEqual(frequencies.infer_freq(index), 'B')
def test_day(self):
self._check_tick(timedelta(1), 'D')
def test_day_corner(self):
index = _dti(['1/1/2000', '1/2/2000', '1/3/2000'])
self.assertEqual(frequencies.infer_freq(index), 'D')
def test_non_datetimeindex(self):
dates = | to_datetime(['1/1/2000', '1/2/2000', '1/3/2000']) | pandas.tseries.tools.to_datetime |
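# Hedged, standalone sketch (not part of the test file above): the public pandas
# helpers these tests exercise, assuming a pandas version where to_offset and
# infer_freq are exposed as below.
import pandas as pd
from pandas.tseries.frequencies import to_offset

assert to_offset('W').freqstr == 'W-SUN'            # unanchored 'W' resolves to Sunday
idx = pd.date_range('2000-01-01', periods=5, freq='D')
assert pd.infer_freq(idx) == 'D'                    # round-trip: build an index, infer its freq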
# pylint: disable-msg=W0612,E1101,W0141
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, cPickle,
product as cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'],
inplace=True)
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(
level='month').transform(np.sum)
expected = op(self.ymd['A'], broadcasted)
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
pickled = cPickle.dumps(frame)
unpickled = cPickle.loads(pickled)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEquals(result.index.names, self.frame.index.names)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assert_(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEquals(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assert_(isnull(s.values[42:65]).all())
self.assert_(notnull(s.values[:42]).all())
self.assert_(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assert_(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.ix[:4]
expected = self.frame[:4]
assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.ix[:4] = 0
self.assert_((cp.values[:4] == 0).all())
self.assert_((cp.values[4:] != 0).all())
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.ix[:, 'value']
assert_series_equal(df['value'], result)
result = df.ix[1:3, 'value']
assert_series_equal(df['value'][1:3], result)
result = df.ix[:, :]
assert_frame_equal(df, result)
result = df
df.ix[:, 'value'] = 10
result['value'] = 10
assert_frame_equal(df, result)
df.ix[:, :] = 10
assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'],
'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'],
['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
assert_frame_equal(cp['a'], cp['b'])
#----------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
self.assertTrue((df['A'].values == 0).all())
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
assert_series_equal(df['A', '1'], df['B', '1'])
assert_series_equal(df['A', '2'], df['B', '1'])
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.ix[(0, 0), :]
expected = idf.ix[0, 0]
expected2 = idf.xs((0, 0))
assert_series_equal(result, expected)
assert_series_equal(result, expected2)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.ix[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.ix[2000, 1, 6][['A', 'B', 'C']]
assert_series_equal(result, expected)
def test_getitem_multilevel_index_tuple_unsorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.ix[query_index, "data"]
xp = Series(['x'], index=MultiIndex.from_tuples([(0, 1, 0)]))
assert_series_equal(rs, xp)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.ix[('bar', 'two')]
assert_series_equal(xs, xs2)
assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
# missing values in returned index should be preserved
acc = [
('a','abcde',1),
('b','bbcde',2),
('y','yzcde',25),
('z','xbcde',24),
('z',None,26),
('z','zbcde',25),
('z','ybcde',26),
]
df = DataFrame(acc, columns=['a1','a2','cnt']).set_index(['a1','a2'])
expected = DataFrame({ 'cnt' : [24,26,25,26] }, index=Index(['xbcde',np.nan,'zbcde','ybcde'],name='a2'))
result = df.xs('z',level='a1')
assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.ix['foo']
expected = self.frame.T['foo'].T
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.ix[2000, 4]
assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.ix['foo', 'one']
assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'),
('p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.ix[20111201, :]
assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
self.assertEqual(len(result), 2)
assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.ix[2000, 5]['A']
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
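# Hedged, standalone sketch (illustrative, not part of the test suite): the
# MultiIndex cross-section behaviour the tests above rely on, shown on a tiny
# hand-built frame.
import numpy as np
import pandas as pd

mi = pd.MultiIndex.from_tuples([('foo', 'one'), ('foo', 'two'), ('bar', 'one')],
                               names=['first', 'second'])
demo = pd.DataFrame(np.arange(6).reshape(3, 2), index=mi, columns=['A', 'B'])
print(demo.xs('foo'))                  # partial selection on the first level
print(demo.xs('one', level='second'))  # cross-section on a named level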
import pandas as pd
from surprise import KNNWithMeans, SVD, SVDpp, NMF
from surprise.prediction_algorithms.slope_one import SlopeOne
from settings.config import user_label, NMF_LABEL, \
SVDpp_LABEL, SVD_LABEL, SLOPE_LABEL, ITEMKNN_LABEL, USERKNN_LABEL, item_label, value_label, K_NEIGHBOR
from conversions.pandas_to_models import transform_testset, user_transactions_df_to_item_mapping
from conversions.suprise_and_pandas import surprise_to_pandas_get_candidates_items
from settings.language_strings import LANGUAGE_USER_KNN_START, LANGUAGE_ITEM_KNN_START, \
LANGUAGE_SVD_START, LANGUAGE_SVD_STOP, LANGUAGE_SVDPP_START, LANGUAGE_SVDPP_STOP, \
LANGUAGE_NMF_START, LANGUAGE_NMF_STOP, LANGUAGE_SLOPE_ONE_START, LANGUAGE_SLOPE_ONE_STOP
from posprocessing.step import postprocessing_calibration
from processing.recommendation_average import users_results_mean
def recommendation_and_posprocessing(user_id, user_trainset_df, user_prefs_distr_df, user_testset_df, item_mapping,
instance, baseline_label):
keys_list = item_mapping.keys()
know_items = user_trainset_df[item_label].unique().tolist()
unknow_items = set(keys_list) - set(know_items)
data = {item_label: list(unknow_items)}
user_testset = pd.DataFrame.from_dict(data)
user_testset[user_label] = user_id
user_testset[value_label] = 0.0
candidates_items_prediction = instance.test(transform_testset(user_testset))
user_candidates_items_df = surprise_to_pandas_get_candidates_items(candidates_items_prediction)
user_candidates_items_df.sort_values(by=[value_label], ascending=False, inplace=True)
candidates_items_mapping = user_transactions_df_to_item_mapping(user_candidates_items_df, item_mapping)
result_df = postprocessing_calibration(user_prefs_distr_df=user_prefs_distr_df,
candidates_items_mapping=candidates_items_mapping,
test_items_ids=user_testset_df[item_label].tolist(),
baseline_label=baseline_label)
return result_df
# #################################################################################################################### #
# #################################################################################################################### #
# #################################################################################################################### #
def user_knn_recommender(trainset, users_prefs_distr_df, trainset_df, testset_df, item_mapping):
# Recommender Prediction
print(LANGUAGE_USER_KNN_START)
instance = KNNWithMeans(k=K_NEIGHBOR, sim_options={'name': 'pearson_baseline', 'user_based': True})
instance.fit(trainset)
print(LANGUAGE_USER_KNN_START)
evaluation_results_df = [
recommendation_and_posprocessing(user_id, trainset_df[trainset_df[user_label] == user_id], user_prefs_distr_df,
testset_df[testset_df[user_label] == user_id], item_mapping, instance,
USERKNN_LABEL)
for user_id, user_prefs_distr_df in users_prefs_distr_df.iterrows()]
evaluation_results_df = pd.concat(evaluation_results_df)
recommender_results_df = users_results_mean(evaluation_results_df, USERKNN_LABEL)
return recommender_results_df
def item_knn_recommender(trainset, users_prefs_distr_df, trainset_df, testset_df, item_mapping):
# Recommender Prediction
print(LANGUAGE_ITEM_KNN_START)
instance = KNNWithMeans(k=K_NEIGHBOR, sim_options={'name': 'pearson_baseline', 'user_based': False})
instance.fit(trainset)
print(LANGUAGE_ITEM_KNN_START)
evaluation_results_df = [
recommendation_and_posprocessing(user_id, trainset_df[trainset_df[user_label] == user_id], user_prefs_distr_df,
testset_df[testset_df[user_label] == user_id], item_mapping, instance,
ITEMKNN_LABEL)
for user_id, user_prefs_distr_df in users_prefs_distr_df.iterrows()]
evaluation_results_df = | pd.concat(evaluation_results_df) | pandas.concat |
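# Hedged usage sketch for the Surprise calls above (toy data and column names are
# illustrative, not taken from this project's settings module).
import pandas as pd
from surprise import Dataset, KNNWithMeans, Reader

toy = pd.DataFrame({'user': [1, 1, 2, 2], 'item': [10, 20, 10, 30],
                    'rating': [4.0, 3.0, 5.0, 2.0]})
data = Dataset.load_from_df(toy[['user', 'item', 'rating']], Reader(rating_scale=(1, 5)))
trainset = data.build_full_trainset()
algo = KNNWithMeans(k=2, sim_options={'name': 'pearson_baseline', 'user_based': True})
algo.fit(trainset)
preds = algo.test(trainset.build_testset())  # same fit/test pattern as instance.test(...) above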
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Several classes are defined here:
* a params class which stores and manipulates the parameters of our MRS fitting/simulation model
* a metabolite class which stores and can compute a modeled MRS signal for a single metabolite, based on the pyGAMMA library (for python 3!) using a specific MR sequence described by pulse flip angles and delays
* a metabolite_group class which contains several metabolites
* a metabolite_basis_set class which contains a whole database of metabolite chemical shifts, J-couplings, nuclei and computed signals. Useful to simulate all kinds of MRS data for various metabolites and concentrations acquired with various sequences...
@author: <NAME>
"""
import suspect
import numpy as np
import math as ma
import matplotlib.pylab as plt
import pandas as pd
import warnings
import pathlib
from IPython.display import display
from xlrd import open_workbook
from termcolor import cprint
from enum import Enum
from pastis import reco
from pastis import aliases as xxx
from pastis import log
from pastis import paths as default_paths
import copy as copy
import json
import pdb
try:
import pygamma as pg
GAMMA_LIB_LOADED = True
except ImportError:
GAMMA_LIB_LOADED = False
# GAMMA_LIB_LOADED forced to False for debug
# GAMMA_LIB_LOADED = False
class sequence_exc_type(Enum):
"""The enum sequence_exc_type describes the type of excitation scheme of the sequence. Can be usefull when comparing sequences."""
PULSE_ACQUIRE = 1
STIMULATED_ECHO = 2
SPIN_ECHO = 3
class gating_signal_source(Enum):
"""The enum gating_signal_source describes the type of gating used during the acquisition."""
NO_GATING = 0
CARDIAC_ECG = 2
CARDIAC_GATING = 4
RESP_GATING = 16
class params(np.ndarray):
"""A class that stores the parameters used to modelize a MR spectrum during simulation or fit."""
# frozen stuff: a technique to prevent creating new attributes
# (https://stackoverflow.com/questions/3603502/prevent-creating-new-attributes-outside-init)
__isfrozen = False
def __setattr__(self, key, value):
"""Overload of __setattr__ method to check that we are not creating a new attribute."""
if self.__isfrozen and not hasattr(self, key):
log.error_new_attribute(key)
object.__setattr__(self, key, value)
def __init__(self, meta_bs):
"""
Initialize a params object.
Parameters
----------
meta_bs: metabolite_basis_set
A metabolite_basis_set object to which this params object is linked to
"""
super().__init__()
# those parameters are related to a metabolite database
self._meta_bs = meta_bs
# the link-lock vector used to control the model
self._linklock = np.zeros(self.shape)
# the error vector
self._errors = np.zeros(self.shape)
# the corr vector
self._corr_mat = None
# freeze
self.__isfrozen = True
def __new__(cls, meta_bs):
"""
Construct a params object that inherits from numpy's array class. This class is used to deal with metabolite parameters.
Parameters
----------
meta_bs: metabolite_basis_set
A metabolite_basis_set object to which this params object is linked to
Returns
-------
obj : params numpy array [n,4]
Resulting constructed params object
"""
obj = super(params, cls).__new__(cls, [len(meta_bs), 4])
obj[:] = 0.0
return(obj)
def __array_finalize__(self, obj):
"""
Overload of special numpy array method called when playing around with stuff relative to object copy etc...
Parameters
----------
obj : params numpy array [n,4]
"""
# to begin, I followed online help and wrote:
# self._meta_bs = getattr(obj, 'meta_bs', None)
# self._linklock = getattr(obj, 'linklock', None)
# self._errors = getattr(obj, 'errors', None)
# self._corr_mat = getattr(obj, 'corr_mat', None)
# but that only works for some simple attribute types
# if the attributes are nd arrays, only the pointers will be copied...
# leading to terrible bugs
# for now, I could only find this ugly way:
self._meta_bs = getattr(obj, 'meta_bs', None)
if(self.meta_bs is not None):
self._meta_bs = obj.meta_bs.copy()
self._linklock = getattr(obj, 'linklock', None)
if(self.linklock is not None):
self._linklock = obj.linklock.copy()
self._errors = getattr(obj, 'errors', None)
if(self.errors is not None):
self._errors = obj.errors.copy()
self._corr_mat = getattr(obj, 'corr_mat', None)
if(self.corr_mat is not None):
self._corr_mat = obj.corr_mat.copy()
@property
def meta_bs(self):
"""Property method for meta_bs."""
return(self._meta_bs)
@property
def linklock(self):
"""Property method for linklock."""
return(self._linklock)
@property
def errors(self):
"""Property method for errors."""
return(self._errors)
@property
def corr_mat(self):
"""Property method for corr_mat."""
return(self._corr_mat)
def get_meta_names(self, LCModel_names=False):
"""
Return list of metabolite names controlled by this params object.
Returns
-------
list(self._meta_bs.keys()) : list
List of metabolite names
"""
if(LCModel_names):
meta_names_lcmodel_list = []
for this_metagroup_key, this_metagroup_entry in self._meta_bs.items():
meta_names_lcmodel_list.append(this_metagroup_entry["LCModel"])
return(meta_names_lcmodel_list)
else:
return(list(self._meta_bs.keys()))
def get_errors_prct(self):
"""
Return relative errors in percentage.
Returns
-------
errors_prct : numpy array
Errors in percentage (%)
"""
# calculate percentages
errors_prct = self.errors / self[:] * 100.0
return(errors_prct)
def check(self):
"""
Check if the linklock vector is consistent and ready to be used.
Returns
-------
all_right : boolean
True if everything is ok, False if the linklock vector is broken
"""
# by default, everything is ok
all_right = True
LL_list = np.unique(self.linklock)
LL_list = LL_list[LL_list >= +2]
for this_LL in LL_list:
# count number of master
tmp = (self.linklock == -this_LL)
n_masters = np.sum(tmp[:])
# count number of slaves
tmp = (self.linklock == +this_LL)
n_slaves = np.sum(tmp[:])
# master ratio values should be all equal to one
tmp = (self.linklock == -this_LL)
if(np.any(tmp[:]) != 1.0):
all_right = False
# now, we should have one single master
if(n_masters != 1):
all_right = False
# and several slaves
if(n_slaves < 1):
all_right = False
return(all_right)
def toFreeParams(self):
"""
Return free parameters, in other words, parameters for which LL=0 (free) or LL<0 (free and master).
Returns
-------
self[self.linklock<=0] : 1D numpy array of free parameters
Array of free parameters
"""
return(self[self.linklock <= 0])
def toFullParams(self, pFree):
"""
Convert an array of free parameters to a full array of parameters based on the link-lock array and the initial parameter values.
Parameters
----------
pFree : 1D numpy array of free parameters
Array of free parameters
Returns
-------
self.copy() : params object
Copy of this current object with the applied modification
"""
# init: we are working on a copy
p = self.copy()
# link-lock stuff
p[p.linklock <= 0] = pFree
# some of them are masters, copying values to slaves
# find unique LL values
LL_list = np.unique(p.linklock)
LL_list = LL_list[LL_list >= +2]
for this_LL in LL_list:
p[p.linklock == +this_LL] = p[p.linklock == -this_LL]
return(p)
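# Hedged usage sketch (comments only; p stands for a hypothetical params instance):
#   p_free = p.toFreeParams()        # keep entries with linklock <= 0 (free or master)
#   p_new  = p.toFullParams(p_free)  # rebuild the full array, copying master values to slaves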
def set_T2_weighting(self, te):
"""
Recalculate metabolite amplitude parameters by applying a T2 relaxation, useful for simulations.
Parameters
----------
te : float
Echo time in (ms)
Returns
-------
self.copy() : params object
Full array of parameters corrected in T2
"""
# init: we are working on a copy
p = self.copy()
# browse though the database and find T2s
params_T2s = []
for this_metagroup_key, this_metagroup_entry in self._meta_bs.items():
params_T2s.append(this_metagroup_entry["T2"])
# convert to np
params_T2s = np.array(params_T2s)
# apply T2w for given TE
multiplication_factor = np.exp(-te / params_T2s)
p[:, xxx.p_cm] = p[:, xxx.p_cm] * multiplication_factor
# to errors too
p.errors[:, xxx.p_cm] = p.errors[:, xxx.p_cm] * multiplication_factor
return(p)
def correct_T2s(self, te):
"""
Correct the concentration values of a parameter array depending on the TE and the common values of T2s for each metabolite.
Parameters
----------
te : float
Echo time in (ms)
Returns
-------
self.copy() : params object
Full array of parameters corrected in T2
"""
# init: we are working on a copy
p = self.copy()
# browse though the database and find T2s
params_T2s = []
for this_metagroup_key, this_metagroup_entry in self._meta_bs.items():
params_T2s.append(this_metagroup_entry["T2"])
# convert to np
params_T2s = np.array(params_T2s)
# finding real concentration values at TE=0ms
multiplication_factor = 1 / np.exp(-te / params_T2s)
p[:, xxx.p_cm] = p[:, xxx.p_cm] * multiplication_factor
# to errors too
p.errors[:, xxx.p_cm] = p.errors[:, xxx.p_cm] * multiplication_factor
return(p)
def correct_T1s(self, tr):
"""
Correct the concentration values of a parameter array depending on the TR and the common values of T1s for each metabolite.
Parameters
----------
tr : float
Repetition time in (ms)
Returns
-------
self.copy() : params object
Full array of parameters corrected in T1
"""
# init: we are working on a copy
p = self.copy()
# browse though the database and find T1s
params_T1s = []
for this_metagroup_key, this_metagroup_entry in self._meta_bs.items():
params_T1s.append(this_metagroup_entry["T1"])
# convert to np
params_T1s = np.array(params_T1s)
# finding real concentration values at TE=0ms
multiplication_factor = 1 / (1 - np.exp(-tr / params_T1s))
p[:, xxx.p_cm] = p[:, xxx.p_cm] * multiplication_factor
# to errors too
p.errors[:, xxx.p_cm] = p.errors[:, xxx.p_cm] * multiplication_factor
return(p)
def get_absolutes(self, mIndex=None, m_concentration_mmolkg=55000.0, params_ref=None):
"""
Calculate the metabolic concentration values relative to a metabolite. The metabolite relative concentration can be taken from the current params vector or another params vector (params_ref), assuming the absolute metabolite concentration value. Useful to get concentrations relative to water (so-called absolute concentrations).
Parameters
----------
mIndex : int
Index of metabolite of reference
m_concentration_mmolkg : float
Assumed concentration of the reference metabolite used to calculate absolute concentration estimates (mmol/kg); defaults to the water concentration (55000 mmol/kg)
params_ref : params object
Array of parameters used to get the concentration estimate of the reference metabolite
Returns
-------
self.copy() : params object
Full array of parameters
"""
# init: check ref params
if(params_ref is None):
params_ref = self.copy()
# use water by default
if(mIndex is None):
if("m_Water" in dir(xxx)):
log.warning("using default water concentration for absolute calculation!")
mIndex = xxx.m_Water
else:
log.error("no reference metabolite (mIndex argument) specified for absolute concentration calculation!")
# init: we are working on copies
p1 = self.copy()
p2 = self.copy()
# do the maths
multiplication_factor = m_concentration_mmolkg / params_ref[mIndex, xxx.p_cm]
p2[:, xxx.p_cm] = p1[:, xxx.p_cm] * multiplication_factor
# deal with errors too
# see paper http://dx.doi.org/10.1007/s10334-005-0018-7
# inspired from ratio, considering zero correlation between metabolite of interest and ref:
# relCRB(1/ref) = sqrt(relCRB1^2 + relCRB2^2)
# get numerator rel CRBs for cm
relCRBs_num = p1.get_errors_prct()[:, xxx.p_cm]
# get denominator rel CRBs for cm
relCRBs_den = p2.get_errors_prct()[mIndex, xxx.p_cm]
# calculate the final relCRB
rel_CRBs_ratio = np.sqrt(relCRBs_num**2 + relCRBs_den**2)
# back to absCRB
abs_CRBs_ratio = p2[:, xxx.p_cm] * rel_CRBs_ratio / 100.0
p2._errors[:, xxx.p_cm] = abs_CRBs_ratio
return(p2)
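# Quick numeric check of the uncorrelated error propagation used above (illustrative
# numbers): relCRB_num = 5 % and relCRB_den = 3 % give sqrt(5**2 + 3**2) ≈ 5.8 %.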
def get_ratios(self, mIndex):
"""
Calculate the metabolite ratios.
Parameters
----------
mIndex : int
Index of metabolite of reference used as the denominator
Returns
-------
self.copy() : params object
Full array of parameters
"""
# init: we are working on a copy
p1 = self.copy()
p2 = self.copy()
# ratio
p2[:, xxx.p_cm] = p1[:, xxx.p_cm] / p1[mIndex, xxx.p_cm]
# deal with errors too
# if correlation matrix available
if(self.corr_mat is not None):
# see paper http://dx.doi.org/10.1007/s10334-005-0018-7
# relCRB(1/2) = sqrt(relCRB1^2 + relCRB2^2 - 2*corr1_2*relCRB1*relCRB2)
# get numerator rel CRBs for cm
relCRBs_num = p1.get_errors_prct()[:, xxx.p_cm]
# get denominator rel CRBs for cm
relCRBs_den = p1.get_errors_prct()[mIndex, xxx.p_cm]
# get corr coeff between num and den cm
# first, get free param corr mat
free_param_corr_mat = self.corr_mat
# convert mIndex to free params index
p3 = self.copy()
# replace cm by index
p3[:, xxx.p_cm] = np.arange(0, p3.shape[0], 1)
# lock all other pars
p3.linklock[:, xxx.p_dd] = 1
p3.linklock[:, xxx.p_df] = 1
p3.linklock[:, xxx.p_dp] = 1
# convert to free params
p3_free = p3.toFreeParams()
# here we should get a list of indexes
# find where is mIndex and we did it
if(len(np.where(p3_free == mIndex)[0]) > 0):
ind_free_pars_mIndex = np.where(p3_free == mIndex)[0][0]
# extract corr vector
free_param_corr_vec = free_param_corr_mat[:, ind_free_pars_mIndex]
# convert it to full params
p3 = self.copy()
# lock all other pars
p3.linklock[:, xxx.p_dd] = 1
p3.linklock[:, xxx.p_df] = 1
p3.linklock[:, xxx.p_dp] = 1
# replace cm by cor coeffs
p3 = p3.toFullParams(free_param_corr_vec)
# get full param corr vector
full_param_corr_vec = p3[:, xxx.p_cm]
# make it absolute
rc_num_den_abs = np.abs(full_param_corr_vec)
# calculate the final relCRB
rel_CRBs_ratio = np.sqrt(relCRBs_num**2 + relCRBs_den**2 - 2 * rc_num_den_abs * relCRBs_num * relCRBs_den)
# back to absCRB
abs_CRBs_ratio = p2[:, xxx.p_cm] * rel_CRBs_ratio / 100.0
p2._errors[:, xxx.p_cm] = abs_CRBs_ratio
else:
# if we fall here, it probably means that the fit that generated this param object was done without this metabolite. Therefore, no correlation coefficient is available. So we switch to an approximate ratio error calculation: we take the error of the numerator only (which is actually what a lot of people do)...
log.warning("no correlation coefficient available for metabolite [%d]: cannot calculate the ratio error properly... Taking the numerator error instead. :(" % mIndex)
return(p2)
def set_default_min(self):
"""
Initialize the params object to the default minimum values, no macromolecules.
Returns
-------
self.copy() : params object
Copy of this current object with the applied modification
"""
# all to zero
self[:] = 0.0
# metabolites min values
self[xxx.m_All_MBs, xxx.p_cm] = 0.0
self[xxx.m_All_MBs, xxx.p_dd] = 5.0
self[xxx.m_All_MBs, xxx.p_df] = -10.0
self[xxx.m_All_MBs, xxx.p_dp] = -0.1
# link all to the NAA singlet
self.linklock[:] = np.tile([0, 2, 3, 4], (xxx.n_All, 1))
self.linklock[xxx.m_Ref_MB, :] = [0, -2, -3, -4]
# set water
if("m_Water" in dir(xxx)):
self.linklock[xxx.m_Water, :] = 0
# no MMs
self.linklock[xxx.m_All_MMs, :] = 1
return(self.copy())
def set_default_max(self):
"""
Initialize the params object to the default maximum values, no macromolecules.
Returns
-------
self.copy() : params object
Copy of this current object with the applied modification
"""
# all to zero
self[:] = 0.0
# metabolites min values
self[xxx.m_All_MBs, xxx.p_cm] = 50.0
self[xxx.m_All_MBs, xxx.p_dd] = 100.0
self[xxx.m_All_MBs, xxx.p_df] = +10.0
self[xxx.m_All_MBs, xxx.p_dp] = +0.1
# link all to the NAA singlet
self.linklock[:] = np.tile([0, 2, 3, 4], (xxx.n_All, 1))
self.linklock[xxx.m_Ref_MB, :] = [0, -2, -3, -4]
# set water
if("m_Water" in dir(xxx)):
self.linklock[xxx.m_Water, :] = 0
# no MMs
self.linklock[xxx.m_All_MMs, :] = 1
return(self.copy())
def set_default_water_min(self):
"""
Initialize the params object to the default minimum values for a water only fit.
Returns
-------
self.copy() : params object
Copy of this current object with the applied modification
"""
self.set_default_min()
# all concentrations to zero
self[:, xxx.p_cm] = 0.0
# water min values
self[xxx.m_All_MBs, xxx.p_df] = -20.0
# lock everything except water
self.linklock[:] = 1
# set water
if("m_Water" in dir(xxx)):
self.linklock[xxx.m_Water, :] = 0
return(self.copy())
def set_default_water_max(self):
"""
Initialize the params object to the default maximum values for a water only fit.
Returns
-------
self.copy() : params object
Copy of this current object with the applied modification
"""
self.set_default_max()
# all concentrations to zero
self[:, xxx.p_cm] = 0.0
# water max values
self[xxx.m_All_MBs, xxx.p_df] = +20.0
# set water
if("m_Water" in dir(xxx)):
self.linklock[xxx.m_Water, 0] = 100000.0
# lock everything except water
self.linklock[:] = 1
# set water
if("m_Water" in dir(xxx)):
self.linklock[xxx.m_Water, :] = 0
return(self.copy())
def add_macromolecules_min(self):
"""
Enable the macromolecules modelization, minimum values.
Returns
-------
self.copy() : params object
Copy of this current object with the applied modification
"""
# macromolecules minimum values
self[xxx.m_All_MMs, xxx.p_cm] = 0.001
self[xxx.m_All_MMs, xxx.p_dd] = 100
self[xxx.m_All_MMs, xxx.p_df] = -5
self[xxx.m_All_MMs, xxx.p_dp] = -0.1
# link all MM parameters to MM1
self.linklock[xxx.m_All_MMs, :] = np.tile([0, 2000, 3000, 4000], (xxx.n_MMs, 1))
self.linklock[xxx.m_Ref_MM, :] = [0, -2000, -3000, -4000]
return(self.copy())
def add_macromolecules_max(self):
"""
Enable the macromolecules modelization, maximum values.
Returns
-------
self.copy() : params object
Copy of this current object with the applied modification
"""
self[xxx.m_All_MMs, xxx.p_cm] = 100.0
self[xxx.m_All_MMs, xxx.p_dd] = 200.0
self[xxx.m_All_MMs, xxx.p_df] = +5
self[xxx.m_All_MMs, xxx.p_dp] = +0.1
# link all MM parameters to MM1
self.linklock[xxx.m_All_MMs, :] = np.tile([0, 2000, 3000, 4000], (xxx.n_MMs, 1))
self.linklock[xxx.m_Ref_MM, :] = [0, -2000, -3000, -4000]
return(self.copy())
def print(self, bMM=False, bLL=True):
"""
Display an array of parameters in the console.
Parameters
----------
bMM : boolean
Includes macromolecular parameters (True) or not (False)
bLL : boolean
Displays link-lock status for each parameter (True) or not (False)
"""
cell_nchar = 11
log.info("displaying parameters...")
log.info_line________________________()
if(bMM):
n = xxx.n_All
else:
n = xxx.n_MBs
meta_names = self.get_meta_names()
LLtransTab = str.maketrans("-+0123456789", "⁻⁺⁰¹²³⁴⁵⁶⁷⁸⁹")
LLcolors = ['white', 'grey', 'yellow', 'blue', 'magenta', 'cyan', 'white', 'grey', 'yellow', 'blue', 'magenta', 'cyan', 'green']
print("[#] " + "Metabolite".ljust(cell_nchar) + "[cm]".ljust(cell_nchar) + "[dd]".ljust(cell_nchar) + "[df]".ljust(cell_nchar) + "[dp]".ljust(cell_nchar))
for k in range(n):
print(("#%2d " % k) + meta_names[k].ljust(cell_nchar), end="")
for kp in range(4):
if(bLL):
thisLL = self.linklock[k, kp]
if(thisLL > 0):
thisLL_str = "+%1.0f" % thisLL
elif(thisLL == 0):
thisLL_str = "%1.0f " % thisLL
else:
thisLL_str = "%1.0f" % thisLL
thisLL_str = thisLL_str.translate(LLtransTab)
if(thisLL == +1.0):
thisLL_color = 'red'
else:
ind_color = -int(np.mod(np.abs(thisLL), len(LLcolors)))
thisLL_color = LLcolors[ind_color]
this_cell_str = ("(%4.1f)" % self[k, kp]) + thisLL_str
cprint(this_cell_str.ljust(cell_nchar), thisLL_color, attrs=['bold'], end="")
else:
print(("(%4.1f)" % self[k, kp]).ljust(cell_nchar), end="")
print("", flush=True)
log.info_line________________________()
def __reduce__(self):
"""Reduce internal pickling method used when dumping. Copied from MRSData2 class. Modified so that params attributes are not forgotten. See for more info: https://docs.python.org/3/library/pickle.html ."""
# get numpy reduce tuple
rd = super().__reduce__()
# add params attributes
rd2 = rd[2] + (self.__dict__,)
# return the new reduce tuple version
return(rd[0], rd[1], rd2)
def __setstate__(self, d):
"""Set new state to object. Internal pickling method used when loading. Copied from MRSData2 class. Modified so that params attributes are not forgotten. See for more info: https://docs.python.org/3/library/pickle.html ."""
# load params attributes
self.__dict__ = d[-1]
# load all the rest relative to numpy
super().__setstate__(d[0:-1])
return(self)
def to_dataframe(self, prefix_str="params_", single_line=True):
"""
Convert this params object to dataframe.
Parameters
----------
prefix_str : string
Prefix string to add to column names
single_line : boolean
Produces a dataframe with one line (True) or 2D (False)
Returns
-------
df : Dataframe
Containing this param object as a vector
"""
log.debug("converting to dataframe...")
df = pd.DataFrame.from_dict([vars(self)])
df = df.filter(regex=("^(?!_).*"))
meta_names = self.get_meta_names()
par_names = ["cm", "dd", "df", "dp"]
if(single_line):
# 1D dataframe for storage
# create columns
col_list = []
for im, m in enumerate(meta_names):
for ip, p in enumerate(par_names):
col_list.append(p + "|" + m + "|val")
col_list.append(p + "|" + m + "|err")
df = pd.DataFrame([], columns=col_list)
for im, m in enumerate(meta_names):
for ip, p in enumerate(par_names):
df.at[0, p + "|" + m + "|val"] = self[im, ip]
df.at[0, p + "|" + m + "|err"] = self._errors[im, ip]
else:
# 2D dataframe more human friendly
# create columns
col_list = []
for ip, p in enumerate(par_names):
col_list.append(p + "|val")
col_list.append(p + "|err")
df = pd.DataFrame([],
columns=col_list,
index=meta_names)
for im, m in enumerate(meta_names):
for ip, p in enumerate(par_names):
df.loc[m, p + "|val"] = self[im, ip]
df.loc[m, p + "|err"] = self._errors[im, ip]
# add prefix
df = df.add_prefix(prefix_str)
return(df)
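# Hedged usage sketch (comments only; p stands for a hypothetical fitted params object):
#   df_row  = p.to_dataframe(prefix_str="fit_", single_line=True)   # one row, one column per (parameter, metabolite, val/err)
#   df_view = p.to_dataframe(single_line=False)                     # 2D view: metabolites as rows, val/err columns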
class mrs_sequence:
"""A class that stores a sequence and all its parameters used for simulation. This is a generic sequence class that you need to overload. By default, the simulated sequence is a simple pulse-acquire NMR experiment."""
# frozen stuff: a technique to prevent creating new attributes
# (https://stackoverflow.com/questions/3603502/prevent-creating-new-attributes-outside-init)
__isfrozen = False
def __setattr__(self, key, value):
"""Overload of __setattr__ method to check that we are not creating a new attribute."""
if self.__isfrozen and not hasattr(self, key):
log.error_new_attribute(key)
object.__setattr__(self, key, value)
def __init__(self, te, tr=3500.0, na=128, ds=4, nuclei="1H", npts=4096 * 4, voxel_size=[10, 10, 10], fs=5000.0, f0=297.2062580, vref=250.0, shims=[], timestamp=np.nan, gating_mode=gating_signal_source.NO_GATING, eff_acquisition_time=np.nan, scaling_factor=1.0):
"""
Initialize the sequence.
Parameters
----------
te : float
Echo time (ms)
tr : float
Repetition time (ms)
na : int
Number of averages/excitations
ds : int
Number of dummy scans
nuclei : string
Observed nuclei. Examples: "1H", "31P", etc.
npts : int
Number of acquisition points
voxel_size : list
Dimensions of voxel (mm)
fs : float
Acquisition bandwidth (Hz)
f0 : float
Water Larmor frequency (MHz)
vref : float
Reference voltage (V)
shims : list of floats
List of shim voltages in volts
timestamp : float
Timestamp in ms
gating_mode : gating_signal_source
Acquisition triggering mode
eff_acquisition_time : float
Effective acquisition time (s)
scaling_factor : float
Scaling FID intensity factor
"""
# all sequences have those parameters
# name
self.name = "fid"
# type
self.exc_type = sequence_exc_type.PULSE_ACQUIRE
# echo time (ms)
self.te = te
# repetition time (ms)
self.tr = tr
# number of averages
self.na = na
# number of dummy scans
self.ds = ds
# which nuclei we are pulsing on/looking at (examples: '1H', '31P')
self.nuclei = nuclei
# number of acquired time points (int)
self.npts = npts
# voxel dimensions
self.voxel_size = voxel_size
# sampling frequency (Hz)
self.fs = fs
# larmor frequency of water (MHz)
self.f0 = f0
# reference voltage (V)
self.vref = vref
# shim vector
self.shims = shims
# start timestamp
self.timestamp = timestamp
# gating mode
self.gating_mode = gating_mode
# effective acquisition time
self.eff_acquisition_time = eff_acquisition_time
# kind of receiver gain
self.scaling_factor = scaling_factor
# ppm shift (ppm)
self.ppm_water = 4.7
# some 0th phase to add? (rd)
self.additional_phi0 = 0.0
# metabolite_basis_set object
self._meta_bs = metabolite_basis_set()
# try to simplify spin systems when possible to speed up simulations
self.allow_spin_system_simplification = True
# NMR simulation option: when hard zero-duration RF pulses are employed, should we take into account the duration of the real pulses in the evolution times or not? Experimentally, it looks like yes.
self.allow_evolution_during_hard_pulses = True
# band-pass filter signals
self.bandpass_filter_range_ppm = None
# in case GAMMA could not be loaded, we can load the sequence from a stored file
self.db_file = None
# pre-calculated stuff
# 'metabase': set of numerically computed metabolite FID signals
self._meta_signals = None
# time vector
self._t = []
# last parameter call
self._last_params = None
self._last_model = None
# initialized or not
self._ready = False
def copy(self):
"""Copy method."""
obj = copy.copy(self)
if(self._meta_signals is not None):
obj._meta_signals = self._meta_signals.copy()
if(self._t is not None):
obj._t = self._t.copy()
if(self._last_params is not None):
obj._last_params = self._last_params.copy()
obj._last_model = None
return(obj)
@property
def ready(self):
"""
Property get function for _ready.
Returns
-------
self._ready : bool
to tell if the object if initialized or not
"""
return(self._ready)
@property
def meta_bs(self):
"""
Property get function for meta_bs.
Returns
-------
self._meta_bs : metabolite_basis_set object
Metabolite database to use for simulation
"""
return(self._meta_bs)
def _init_pulses(self):
"""Virtual method which initialize RF pulse waveforms if any."""
def _prepare_spin_system(self, metabolite):
"""
Return the pyGAMMA spin system for a given metabolite, knowing all its properties. Simplify the system in simple cases like singlets (one single chemical shift, no J-couplings).
Parameters
----------
metabolite : dict
metabolite_basis_set entry for one single metabolite
Returns
-------
sys : pyGAMMA system object
Spin system object used for simulation
scaling_factor : float
Scaling factor after system simplification
"""
log.debug("preparing spin system")
# init
scaling_factor = 1.0
# extract metabolite properties needed to create spin system
ppm = metabolite["ppm"]
iso = metabolite["iso"]
j = metabolite["J"]
# check if we can simplify
if(self.allow_spin_system_simplification and len(ppm) > 1 and not np.any(j != 0.0) and len(np.unique(ppm)) == 1):
log.debug("simplifying the spin system")
# we have a non-coupled singulet with N spins here
# let's simplify to one single spin + amplification factor
scaling_factor = float(len(ppm))
ppm = np.array([ppm[0]])
iso = np.array([iso[0]])
j = np.array([j[0, 0]])
# init system
nSpins = len(ppm)
sys = pg.spin_system(nSpins)
sys.Omega(self.f0)
# for each spin
for i_spin in range(nSpins):
# set the nuclei
if(iso[i_spin] == 1):
sys.isotope(i_spin, '1H')
elif(iso[i_spin] == 14):
sys.isotope(i_spin, '14N')
elif(iso[i_spin] == 31):
sys.isotope(i_spin, '31P')
else:
pass
#log.error(str(iso[i_spin]) + ", that is weird nuclei!")
# set the ppm
sys.PPM(i_spin, ppm[i_spin])
# set the couplings
for icolJ in range(i_spin + 1, len(ppm)):
sys.J(i_spin, icolJ, j[i_spin, icolJ])
# water shift
sys.offsetShifts(self.ppm_water * self.f0, self.nuclei)
return(sys, scaling_factor)
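# Hedged illustration of the simplification above: an uncoupled singlet such as a
# CH3 group (e.g. creatine, 3 equivalent protons, no J-coupling) collapses to a
# single spin with scaling_factor = 3.0.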
def _run_sequence(self, metabolite):
"""
Simulate the NMR signal acquired using a single-pulse experiment using the GAMMA library via the pyGAMMA python wrapper for one metabolite (=one spin system).
Parameters
----------
metabolite : dict
metabolite_basis_set entry for one single metabolite
Returns
-------
s_MRSData2 : MRSData2 object
Containing the MRS signal simulated for this metabolite
"""
log.debug("acquiring pulse-acquire sequence: (90)...")
# create spin system
sys, amp_factor_spins = self._prepare_spin_system(metabolite)
# amplitude normalization
afactor = self.scaling_factor / sys.HS()
# coupling
H = pg.Hcs(sys) + pg.HJ(sys)
# detection stuff
D = pg.Fm(sys, self.nuclei)
dt = np.double(1 / self.fs)
# run the pulse-acquire experiment
sigma0 = pg.sigma_eq(sys)
te_real = 0.0
# excitation: hard 90 pulse
sigma1 = pg.Iypuls(sys, sigma0, self.nuclei, 90.0)
te_real += 0.0
# evolution
sigma0 = pg.evolve(sigma1, pg.prop(H, self.te / 1000.0)) # TE evolution
te_real += self.te / 1000.0
data = pg.FID(sigma0, pg.gen_op(D), H, dt, self.npts) # acquisition
# extract complex time data points
s = np.full([self.npts, ], np.nan, dtype=np.complex128)
for i in range(self.npts):
s[i] = -afactor * amp_factor_spins * (data.getRe(i) - 1j * data.getIm(i))
# convert to suspect
s_MRSData = suspect.MRSData(s, dt, self.f0)
s_MRSData2 = s_MRSData.view(reco.MRSData2)
log.debug("done. (real TE=%.2fms)" % (te_real * 1000.0))
return(s_MRSData2)
def _compute_metabolite_signals(self):
"""For each goup of metabolites, generate the simulated MRS signal."""
# clear metabase
self._meta_signals = []
# zero signal vector
s_full_meta = suspect.MRSData(np.zeros([self.npts, ]), 1 / self.fs, self.f0)
s_full_meta = s_full_meta.view(reco.MRSData2)
# browse though the database and display everything
for this_metagroup_key, this_metagroup_entry in self._meta_bs.items():
s_grp = s_full_meta.copy()
for this_meta_key, this_meta_entry in self._meta_bs[this_metagroup_key]["metabolites"].items():
log.info("simulating MRS signal for metabolite [%s/%s]..." % (this_metagroup_key, this_meta_key))
s = self._run_sequence(this_meta_entry)
# band pass filter?
if(self.bandpass_filter_range_ppm is not None):
#s = s.correct_bandpass_filtering_1d(self.bandpass_filter_range_ppm, np.ones)
# first any peaks outside of range?
if( (np.any(this_meta_entry["ppm"] > max(self.bandpass_filter_range_ppm))) or (np.any(this_meta_entry["ppm"] < min(self.bandpass_filter_range_ppm))) ):
# yes, so count them on the right side
n_peaks2remove = np.sum(this_meta_entry["ppm"] < min(self.bandpass_filter_range_ppm))
# and remove them
if(n_peaks2remove > 0):
s = s.correct_peak_removal_1d(n_peaks2remove * 10, [min(this_meta_entry["ppm"]) - 1, min(self.bandpass_filter_range_ppm)], False)
# now, count them on the left side
n_peaks2remove = np.sum(this_meta_entry["ppm"] > max(self.bandpass_filter_range_ppm))
# and remove them
if(n_peaks2remove > 0):
s = s.correct_peak_removal_1d(n_peaks2remove * 10, [max(self.bandpass_filter_range_ppm), max(this_meta_entry["ppm"] + 1)], False)
# build up the metabolite group
s_grp = s_grp + s
# append this metabolite group to the metabase
self._meta_signals.append(s_grp)
def _load_from_file(self, te_tol=5.0, f0_tol=5.0):
"""
Try to load the simulated metabolite signals from a stored PKL file.
Parameters
----------
te_tol : float
Maximum TE difference tolerated when looking for a sequence (ms)
f0_tol : float
Maximum f0 difference tolerated when looking for a sequence (MHz)
"""
if(self.db_file is None):
log.error("pyGAMMA library could not be loaded and no sequence database PKL file (db_file) was specified :(")
log.info("reading sequence database file...")
# load pickle file
df = pd.read_pickle(self.db_file)
# compare sequences
log.info("trying to find the right simulated sequence for you...")
# sequence excitation type
df_exc_type = df.loc[df["sequence_exc_type"] == self.exc_type]
n_exc_type = len(df_exc_type)
log.info("sequence type match: n=%d (%s)" % (n_exc_type, self.exc_type))
# sequence name
df_name = df.loc[df["sequence_name"] == self.name]
n_name = len(df_name)
log.info("sequence name match: n=%d (%s)" % (n_name, self.name))
# initialize final mask
# if cannot find exact sequence name, at least choose a similar excitation scheme (spin-echo or stim for example)
if(n_name > 0):
df = df_name
else:
df = df_exc_type
# nuclei
df = df.loc[df["sequence_nuclei"] == self.nuclei]
log.info("nuclei match: n=%d (%s)" % (len(df), self.nuclei))
# f0 (with tolerance)
df = df.loc[(df["sequence_f0"] > (self.f0 - f0_tol)) & (df["sequence_f0"] < (self.f0 + f0_tol))]
log.info("f0 match: need %.3fMHz, found n=%d sequences simulated at +/-%.3fMHz" % (self.f0, len(df), f0_tol))
# ppm water
df = df.loc[df["sequence_ppm_water"] == self.ppm_water]
log.info("water ppm match: n=%d (%.2f)" % (len(df), self.ppm_water))
# additional_phi0
df = df.loc[df["sequence_additional_phi0"] == self.additional_phi0]
log.info("0th order phase match: n=%d (%.2f)" % (len(df), self.additional_phi0))
# allow_evolution_during_hard_pulses
df = df.loc[df["sequence_allow_evolution_during_hard_pulses"] == self.allow_evolution_during_hard_pulses]
log.info("complicated stuff match: n=%d (%r)" % (len(df), self.allow_evolution_during_hard_pulses))
# metabolites
df["sequence_metabolite_basisset_obj_eq"] = [pd.json_normalize(mb).equals(pd.json_normalize(dict(self.meta_bs))) for mb in df["sequence_metabolite_basisset_obj"]]
df = df.loc[(df["sequence_metabolite_basisset_basis_set_name"] == self.meta_bs.basis_set_name) &
(df["sequence_metabolite_basisset_obj_eq"])]
log.info("metabolites match: n=%d" % len(df))
if(len(df) == 0):
log.error("sorry, there is no simulated sequence that matches with your acquisition criterias :(")
# te: look for closest
df["te_diff"] = (df["sequence_te"] - self.te).abs()
found_sequence = df.sort_values("te_diff").iloc[0]["sequence_obj"]
log.info("TE match: need %.2fms, found %.2fms" % (self.te, found_sequence.te))
# check if below tolerance
te_diff = np.abs(self.te - found_sequence.te)
if(te_diff > te_tol):
log.warning("TE match: the difference might seem a lot! That is fine if you do not want to consider J-coupling effects ;)")
# display
log.info("comparing what you asked/what you got...")
df_self = self.to_dataframe()
df_self["Index"] = "Current"
df_found = found_sequence.to_dataframe()
df_found["Index"] = "Found"
df_display = | pd.concat([df_self, df_found], axis=0) | pandas.concat |
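# Hedged, standalone sketch (illustrative, not part of the library): the
# "closest TE" lookup above reduces to an absolute-difference sort.
import pandas as pd

db = pd.DataFrame({'sequence_te': [5.0, 11.0, 30.0], 'sequence_obj': ['seqA', 'seqB', 'seqC']})
wanted_te = 12.0
db['te_diff'] = (db['sequence_te'] - wanted_te).abs()
closest = db.sort_values('te_diff').iloc[0]['sequence_obj']  # -> 'seqB'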
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
from datetime import timedelta
import pickle
from matplotlib import pyplot as plt
from matplotlib.dates import DateFormatter, MonthLocator
from matplotlib.ticker import MaxNLocator
import numpy as np
import pandas as pd
# fb-block 1 start
import pkg_resources
# fb-block 1 end
try:
import pystan
except ImportError:
print('You cannot run prophet without pystan installed')
raise
# fb-block 2
class Prophet(object):
def __init__(
self,
growth='linear',
changepoints=None,
n_changepoints=25,
yearly_seasonality=True,
weekly_seasonality=True,
holidays=None,
seasonality_prior_scale=10.0,
holidays_prior_scale=10.0,
changepoint_prior_scale=0.05,
mcmc_samples=0,
interval_width=0.80,
uncertainty_samples=1000,
):
self.growth = growth
self.changepoints = pd.to_datetime(changepoints)
if self.changepoints is not None:
self.n_changepoints = len(self.changepoints)
else:
self.n_changepoints = n_changepoints
self.yearly_seasonality = yearly_seasonality
self.weekly_seasonality = weekly_seasonality
if holidays is not None:
if not (
isinstance(holidays, pd.DataFrame)
and 'ds' in holidays
and 'holiday' in holidays
):
raise ValueError("holidays must be a DataFrame with 'ds' and "
"'holiday' columns.")
holidays['ds'] = pd.to_datetime(holidays['ds'])
self.holidays = holidays
self.seasonality_prior_scale = float(seasonality_prior_scale)
self.changepoint_prior_scale = float(changepoint_prior_scale)
self.holidays_prior_scale = float(holidays_prior_scale)
self.mcmc_samples = mcmc_samples
self.interval_width = interval_width
self.uncertainty_samples = uncertainty_samples
# Set during fitting
self.start = None
self.y_scale = None
self.t_scale = None
self.changepoints_t = None
self.stan_fit = None
self.params = {}
self.history = None
self.validate_inputs()
def validate_inputs(self):
if self.growth not in ('linear', 'logistic'):
raise ValueError(
"Parameter 'growth' should be 'linear' or 'logistic'.")
if self.holidays is not None:
has_lower = 'lower_window' in self.holidays
has_upper = 'upper_window' in self.holidays
if has_lower + has_upper == 1:
raise ValueError('Holidays must have both lower_window and ' +
'upper_window, or neither')
if has_lower:
if max(self.holidays['lower_window']) > 0:
raise ValueError('Holiday lower_window should be <= 0')
if min(self.holidays['upper_window']) < 0:
raise ValueError('Holiday upper_window should be >= 0')
for h in self.holidays['holiday'].unique():
if '_delim_' in h:
raise ValueError('Holiday name cannot contain "_delim_"')
if h in ['zeros', 'yearly', 'weekly', 'yhat', 'seasonal',
'trend']:
raise ValueError('Holiday name {} reserved.'.format(h))
@classmethod
def get_linear_model(cls):
# fb-block 3
# fb-block 4 start
model_file = pkg_resources.resource_filename(
'fbprophet',
'stan_models/linear_growth.pkl'
)
# fb-block 4 end
with open(model_file, 'rb') as f:
return pickle.load(f)
@classmethod
def get_logistic_model(cls):
# fb-block 5
# fb-block 6 start
model_file = pkg_resources.resource_filename(
'fbprophet',
'stan_models/logistic_growth.pkl'
)
# fb-block 6 end
with open(model_file, 'rb') as f:
return pickle.load(f)
def setup_dataframe(self, df, initialize_scales=False):
"""Create auxiliary columns 't', 't_ix', 'y_scaled', and 'cap_scaled'.
These columns are used during both fitting and prediction.
"""
if 'y' in df:
df['y'] = pd.to_numeric(df['y'])
df['ds'] = pd.to_datetime(df['ds'])
df = df.sort_values('ds')
df.reset_index(inplace=True, drop=True)
if initialize_scales:
self.y_scale = df['y'].max()
self.start = df['ds'].min()
self.t_scale = df['ds'].max() - self.start
df['t'] = (df['ds'] - self.start) / self.t_scale
if 'y' in df:
df['y_scaled'] = df['y'] / self.y_scale
if self.growth == 'logistic':
assert 'cap' in df
df['cap_scaled'] = df['cap'] / self.y_scale
return df
def set_changepoints(self):
"""Generate a list of changepoints.
Either:
1) the changepoints were passed in explicitly
A) they are empty
B) not empty, needs validation
2) we are generating a grid of them
3) the user prefers no changepoints to be used
"""
if self.changepoints is not None:
if len(self.changepoints) == 0:
pass
else:
too_low = min(self.changepoints) < self.history['ds'].min()
too_high = max(self.changepoints) > self.history['ds'].max()
if too_low or too_high:
raise ValueError('Changepoints must fall within training data.')
elif self.n_changepoints > 0:
# Place potential changepoints evenly through the first 80% of the history
max_ix = np.floor(self.history.shape[0] * 0.8)
cp_indexes = (
np.linspace(0, max_ix, self.n_changepoints + 1)
.round()
.astype(np.int)
)
self.changepoints = self.history.ix[cp_indexes]['ds'].tail(-1)
else:
# set empty changepoints
self.changepoints = []
if len(self.changepoints) > 0:
self.changepoints_t = np.sort(np.array(
(self.changepoints - self.start) / self.t_scale))
else:
self.changepoints_t = np.array([0]) # dummy changepoint
def get_changepoint_matrix(self):
A = np.zeros((self.history.shape[0], len(self.changepoints_t)))
for i, t_i in enumerate(self.changepoints_t):
A[self.history['t'].values >= t_i, i] = 1
return A
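# Editor's note (hedged illustration of the matrix built above): A has one row per
# history point and one column per changepoint, with A[i, s] = 1 exactly when point
# i falls at or after changepoint s, so A @ delta gives the cumulative slope
# adjustment at each point. For example:
#
#   # t = [0.1, 0.5, 0.9], changepoints_t = [0.25, 0.75]
#   # A = [[0, 0],
#   #      [1, 0],
#   #      [1, 1]]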
@staticmethod
def fourier_series(dates, period, series_order):
"""Generate a Fourier expansion for a fixed frequency and order.
Parameters
----------
dates: a pd.Series containing timestamps
period: number of days in one period (may be fractional, e.g. 365.25)
series_order: number of components to generate
Returns
-------
a 2-dimensional np.array with one row per row in `dates`
"""
# convert to days since epoch
t = np.array(
(dates - pd.datetime(1970, 1, 1))
.dt.days
.astype(np.float)
)
return np.column_stack([
fun((2.0 * (i + 1) * np.pi * t / period))
for i in range(series_order)
for fun in (np.sin, np.cos)
])
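# Editor's note: a hedged sketch of what the expansion above produces. For
# series_order N it returns 2*N columns (one sin/cos pair per order) and one row
# per timestamp; for example:
#
#   dates = pd.Series(pd.date_range('2017-01-01', periods=4, freq='D'))
#   feats = Prophet.fourier_series(dates, period=365.25, series_order=3)
#   # feats.shape == (4, 6); columns alternate sin and cos of increasing order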
@classmethod
def make_seasonality_features(cls, dates, period, series_order, prefix):
features = cls.fourier_series(dates, period, series_order)
columns = [
'{}_delim_{}'.format(prefix, i + 1)
for i in range(features.shape[1])
]
return pd.DataFrame(features, columns=columns)
def make_holiday_features(self, dates):
"""Generate a DataFrame with each column corresponding to a holiday.
"""
# A smaller prior scale will shrink holiday estimates more
scale_ratio = self.holidays_prior_scale / self.seasonality_prior_scale
# Holds columns of our future matrix.
expanded_holidays = defaultdict(lambda: np.zeros(dates.shape[0]))
# Makes an index so we can perform `get_loc` below.
row_index = pd.DatetimeIndex(dates)
for ix, row in self.holidays.iterrows():
dt = row.ds.date()
try:
lw = int(row.get('lower_window', 0))
uw = int(row.get('upper_window', 0))
except ValueError:
lw = 0
uw = 0
for offset in range(lw, uw + 1):
occurrence = dt + timedelta(days=offset)
try:
loc = row_index.get_loc(occurrence)
except KeyError:
loc = None
key = '{}_delim_{}{}'.format(
row.holiday,
'+' if offset >= 0 else '-',
abs(offset)
)
if loc is not None:
expanded_holidays[key][loc] = scale_ratio
else:
# Access key to generate value
expanded_holidays[key]
# This relies pretty importantly on pandas keeping the columns in order.
return pd.DataFrame(expanded_holidays)
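# Editor's note (hedged sketch): each (holiday, day offset) pair becomes a column
# named '<holiday>_delim_<sign><offset>' holding scale_ratio on the dates it covers
# and 0 elsewhere. For instance, if self.holidays were
#
#   pd.DataFrame({'ds': pd.to_datetime(['2017-01-01']), 'holiday': ['newyear'],
#                 'lower_window': [-1], 'upper_window': [0]})
#
# the returned frame would contain the columns 'newyear_delim_-1' and
# 'newyear_delim_+0'.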
def make_all_seasonality_features(self, df):
seasonal_features = [
# Add a column of zeros in case no seasonality is used.
pd.DataFrame({'zeros': np.zeros(df.shape[0])})
]
# Seasonality features
if self.yearly_seasonality:
seasonal_features.append(self.make_seasonality_features(
df['ds'],
365.25,
10,
'yearly',
))
if self.weekly_seasonality:
seasonal_features.append(self.make_seasonality_features(
df['ds'],
7,
3,
'weekly',
))
if self.holidays is not None:
seasonal_features.append(self.make_holiday_features(df['ds']))
return pd.concat(seasonal_features, axis=1)
@staticmethod
def linear_growth_init(df):
i0, i1 = df['ds'].idxmin(), df['ds'].idxmax()
T = df['t'].ix[i1] - df['t'].ix[i0]
k = (df['y_scaled'].ix[i1] - df['y_scaled'].ix[i0]) / T
m = df['y_scaled'].ix[i0] - k * df['t'].ix[i0]
return (k, m)
@staticmethod
def logistic_growth_init(df):
i0, i1 = df['ds'].idxmin(), df['ds'].idxmax()
T = df['t'].ix[i1] - df['t'].ix[i0]
# Force valid values, in case y > cap.
r0 = max(1.01, df['cap_scaled'].ix[i0] / df['y_scaled'].ix[i0])
r1 = max(1.01, df['cap_scaled'].ix[i1] / df['y_scaled'].ix[i1])
if abs(r0 - r1) <= 0.01:
r0 = 1.05 * r0
L0 = np.log(r0 - 1)
L1 = np.log(r1 - 1)
# Initialize the offset
m = L0 * T / (L0 - L1)
# And the rate
k = L0 / m
return (k, m)
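# Editor's note (hedged sketch of the algebra above): with the logistic trend
# y = cap / (1 + exp(-k * (t - m))) and r = cap_scaled / y_scaled, one gets
# log(r - 1) = -k * (t - m). Taking t = 0 at the first point (which holds during
# fitting, since t is scaled to start at 0) gives L0 = k * m and L1 = k * (m - T),
# so L0 - L1 = k * T, hence m = L0 * T / (L0 - L1) and k = L0 / m, as returned.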
# fb-block 7
def fit(self, df, **kwargs):
"""Fit the Prophet model to data.
Parameters
----------
df: pd.DataFrame containing history. Must have columns 'ds', 'y', and
if logistic growth, 'cap'.
kwargs: Additional arguments passed to Stan's sampling or optimizing
function, as appropriate.
Returns
-------
The fitted Prophet object.
"""
history = df[df['y'].notnull()].copy()
history = self.setup_dataframe(history, initialize_scales=True)
self.history = history
seasonal_features = self.make_all_seasonality_features(history)
self.set_changepoints()
A = self.get_changepoint_matrix()
dat = {
'T': history.shape[0],
'K': seasonal_features.shape[1],
'S': len(self.changepoints_t),
'y': history['y_scaled'],
't': history['t'],
'A': A,
't_change': self.changepoints_t,
'X': seasonal_features,
'sigma': self.seasonality_prior_scale,
'tau': self.changepoint_prior_scale,
}
if self.growth == 'linear':
kinit = self.linear_growth_init(history)
model = self.get_linear_model()
else:
dat['cap'] = history['cap_scaled']
kinit = self.logistic_growth_init(history)
model = self.get_logistic_model()
def stan_init():
return {
'k': kinit[0],
'm': kinit[1],
'delta': np.zeros(len(self.changepoints_t)),
'beta': np.zeros(seasonal_features.shape[1]),
'sigma_obs': 1,
}
if self.mcmc_samples > 0:
stan_fit = model.sampling(
dat,
init=stan_init,
iter=self.mcmc_samples,
**kwargs
)
for par in stan_fit.model_pars:
self.params[par] = stan_fit[par]
else:
params = model.optimizing(dat, init=stan_init, iter=1e4, **kwargs)
for par in params:
self.params[par] = params[par].reshape((1, -1))
# If no changepoints were requested, replace delta with 0s
if len(self.changepoints) == 0:
# Fold delta into the base rate k
self.params['k'] = self.params['k'] + self.params['delta']
self.params['delta'] = np.zeros(self.params['delta'].shape)
return self
# fb-block 8
def predict(self, df=None):
"""Predict historical and future values for y.
Note: you must only pass in future dates here.
Historical dates are prepended before predictions are made.
`df` can be None, in which case we predict only on history.
"""
if df is None:
df = self.history.copy()
else:
df = self.setup_dataframe(df)
df['trend'] = self.predict_trend(df)
seasonal_components = self.predict_seasonal_components(df)
intervals = self.predict_uncertainty(df)
df2 = pd.concat((df, intervals, seasonal_components), axis=1)
df2['yhat'] = df2['trend'] + df2['seasonal']
return df2
@staticmethod
def piecewise_linear(t, deltas, k, m, changepoint_ts):
# Intercept changes
gammas = -changepoint_ts * deltas
# Get cumulative slope and intercept at each t
k_t = k * np.ones_like(t)
m_t = m * np.ones_like(t)
for s, t_s in enumerate(changepoint_ts):
indx = t >= t_s
k_t[indx] += deltas[s]
m_t[indx] += gammas[s]
return k_t * t + m_t
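# Editor's note (hedged illustration): gammas = -t_s * delta_s keeps the trend
# continuous at each changepoint -- adding delta_s to the slope while adding
# -t_s * delta_s to the intercept leaves the value at t = t_s unchanged, e.g.
#
#   t = np.array([0.0, 0.25, 0.5, 0.75, 1.0])
#   y = Prophet.piecewise_linear(t, deltas=np.array([1.0]), k=1.0, m=0.0,
#                                changepoint_ts=np.array([0.5]))
#   # y -> [0.0, 0.25, 0.5, 1.0, 1.5]: the slope doubles at t = 0.5, with no jump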
@staticmethod
def piecewise_logistic(t, cap, deltas, k, m, changepoint_ts):
# Compute offset changes
k_cum = np.concatenate((np.atleast_1d(k), np.cumsum(deltas) + k))
gammas = np.zeros(len(changepoint_ts))
for i, t_s in enumerate(changepoint_ts):
gammas[i] = (
(t_s - m - np.sum(gammas))
* (1 - k_cum[i] / k_cum[i + 1])
)
# Get cumulative rate and offset at each t
k_t = k * np.ones_like(t)
m_t = m * np.ones_like(t)
for s, t_s in enumerate(changepoint_ts):
indx = t >= t_s
k_t[indx] += deltas[s]
m_t[indx] += gammas[s]
return cap / (1 + np.exp(-k_t * (t - m_t)))
def predict_trend(self, df):
k = np.nanmean(self.params['k'])
m = np.nanmean(self.params['m'])
deltas = np.nanmean(self.params['delta'], axis=0)
t = np.array(df['t'])
if self.growth == 'linear':
trend = self.piecewise_linear(t, deltas, k, m, self.changepoints_t)
else:
cap = df['cap_scaled']
trend = self.piecewise_logistic(
t, cap, deltas, k, m, self.changepoints_t)
return trend * self.y_scale
def predict_seasonal_components(self, df):
seasonal_features = self.make_all_seasonality_features(df)
lower_p = 100 * (1.0 - self.interval_width) / 2
upper_p = 100 * (1.0 + self.interval_width) / 2
components = pd.DataFrame({
'col': np.arange(seasonal_features.shape[1]),
'component': [x.split('_delim_')[0] for x in seasonal_features.columns],
})
# Remove the placeholder
components = components[components['component'] != 'zeros']
if components.shape[0] > 0:
X = seasonal_features.as_matrix()
data = {}
for component, features in components.groupby('component'):
cols = features.col.tolist()
comp_beta = self.params['beta'][:, cols]
comp_features = X[:, cols]
comp = (
np.matmul(comp_features, comp_beta.transpose())
* self.y_scale
)
data[component] = np.nanmean(comp, axis=1)
data[component + '_lower'] = np.nanpercentile(comp, lower_p,
axis=1)
data[component + '_upper'] = np.nanpercentile(comp, upper_p,
axis=1)
component_predictions = pd.DataFrame(data)
component_predictions['seasonal'] = (
component_predictions[components['component'].unique()].sum(1))
else:
component_predictions = pd.DataFrame(
{'seasonal': np.zeros(df.shape[0])})
return component_predictions
def predict_uncertainty(self, df):
n_iterations = self.params['k'].shape[0]
samp_per_iter = max(1, int(np.ceil(
self.uncertainty_samples / float(n_iterations)
)))
# Generate seasonality features once so we can re-use them.
seasonal_features = self.make_all_seasonality_features(df)
sim_values = {'yhat': [], 'trend': [], 'seasonal': []}
for i in range(n_iterations):
for j in range(samp_per_iter):
sim = self.sample_model(df, seasonal_features, i)
for key in sim_values:
sim_values[key].append(sim[key])
lower_p = 100 * (1.0 - self.interval_width) / 2
upper_p = 100 * (1.0 + self.interval_width) / 2
series = {}
for key, value in sim_values.items():
mat = np.column_stack(value)
series['{}_lower'.format(key)] = np.nanpercentile(mat, lower_p,
axis=1)
series['{}_upper'.format(key)] = np.nanpercentile(mat, upper_p,
axis=1)
return | pd.DataFrame(series) | pandas.DataFrame |
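# --- Editor's note: hedged usage sketch for the class above, not part of the
# original file. It assumes a history frame with 'ds' and 'y' columns and that the
# packaged Stan models are available:
#
#   history = pd.DataFrame({'ds': pd.date_range('2016-01-01', periods=100),
#                           'y': np.random.randn(100).cumsum()})
#   m = Prophet(weekly_seasonality=True, yearly_seasonality=False)
#   m.fit(history)
#   forecast = m.predict()  # df=None predicts on the history itself
#   # forecast[['ds', 'trend', 'seasonal', 'yhat']] holds the fitted components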
#!/usr/bin/python
from keras.models import load_model
import pandas as pd
import numpy as np
# Read data
test = pd.read_csv('test.csv')
X_test = (test.iloc[:, :].values).astype('float32')
# 28x28 pixels
X_test = X_test.reshape(X_test.shape[0], 28, 28,1)
# pre-processing: divide by max and subtract an offset
scale = 255
X_test /= scale
# note: the offset is computed with np.std here (as in the original script), despite the variable name
mean = np.std(X_test)
X_test -= mean
# load model
model = load_model('model.h5')
print("Loaded model from disk")
print('Predicting...')
prediction = model.predict(X_test)
#write_preds(prediction, "prediction.csv")
results = []
for i in range(0,len(prediction)):
# take the index of the highest-probability class for this sample
tmp_result = np.argmax(prediction[i])
results.append(tmp_result)
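# Editor's note (hedged): the loop above is equivalent to a single vectorised call
# for a (n_samples, n_classes) prediction array, returning an ndarray instead of a
# list:
#
#   results = np.argmax(prediction, axis=1)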
#results=kn.predict(tdata)
res = | pd.DataFrame() | pandas.DataFrame |
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
from pathlib import Path
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from pandas.compat import (
IS64,
is_platform_windows,
)
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
get_option,
option_context,
read_csv,
reset_option,
set_option,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
use_32bit_repr = is_platform_windows() or not IS64
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
"""
A fixture yielding test ids for filepath_or_buffer testing.
"""
return request.param
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
"""
A fixture yielding a string representing a filepath, a path-like object
and a StringIO buffer. Also checks that buffer is not closed.
"""
if filepath_or_buffer_id == "buffer":
buf = StringIO()
yield buf
assert not buf.closed
else:
assert isinstance(tmp_path, Path)
if filepath_or_buffer_id == "pathlike":
yield tmp_path / "foo"
else:
yield str(tmp_path / "foo")
@pytest.fixture
def assert_filepath_or_buffer_equals(
filepath_or_buffer, filepath_or_buffer_id, encoding
):
"""
Assertion helper for checking filepath_or_buffer.
"""
def _assert_filepath_or_buffer_equals(expected):
if filepath_or_buffer_id == "string":
with open(filepath_or_buffer, encoding=encoding) as f:
result = f.read()
elif filepath_or_buffer_id == "pathlike":
result = filepath_or_buffer.read_text(encoding=encoding)
elif filepath_or_buffer_id == "buffer":
result = filepath_or_buffer.getvalue()
assert result == expected
return _assert_filepath_or_buffer_equals
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split("\n")[0].startswith("<class")
c2 = r.split("\n")[0].startswith(r"&lt;class")  # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split("\n")) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == "...")[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
if not r.split()[cand_col] == "...":
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r"^[\.\ ]+$", row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split("\n"):
if line.endswith("\\"):
return True
return False
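# Editor's note (hedged illustration of the helpers above): a horizontally
# truncated repr replaces middle columns with a '...' column, a vertically
# truncated one inserts a row of dots, and an "expanded" repr wraps a wide frame
# across blocks whose header lines end with a backslash, e.g.
#
#   with option_context("display.max_columns", 4):
#       repr(DataFrame(np.ones((2, 10))))  # header row contains '...'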
@pytest.mark.filterwarnings("ignore::FutureWarning:.*format")
class TestDataFrameFormatting:
def test_eng_float_formatter(self, float_frame):
df = float_frame
df.loc[5] = 0
fmt.set_eng_float_format()
repr(df)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(df)
fmt.set_eng_float_format(accuracy=0)
repr(df)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(show_counts, result):
buf = StringIO()
df.info(buf=buf, show_counts=show_counts)
assert ("non-null" in buf.getvalue()) is result
with option_context(
"display.max_info_rows", 20, "display.max_info_columns", 20
):
check(None, True)
check(True, True)
check(False, False)
with option_context("display.max_info_rows", 5, "display.max_info_columns", 5):
check(None, False)
check(True, False)
check(False, False)
# GH37999
with tm.assert_produces_warning(
FutureWarning, match="null_counts is deprecated.+"
):
buf = StringIO()
df.info(buf=buf, null_counts=True)
assert "non-null" in buf.getvalue()
# GH37999
with pytest.raises(ValueError, match=r"null_counts used with show_counts.+"):
df.info(null_counts=True, show_counts=True)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame(
{
"A": np.random.randn(10),
"B": [
tm.rands(np.random.randint(max_len - 1, max_len + 1))
for i in range(10)
],
}
)
r = repr(df)
r = r[r.find("\n") + 1 :]
adj = fmt.get_adjustment()
for line, value in zip(r.split("\n"), df["B"]):
if adj.len(value) + 1 > max_len:
assert "..." in line
else:
assert "..." not in line
with option_context("display.max_colwidth", 999999):
assert "..." not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
def test_repr_deprecation_negative_int(self):
# TODO(2.0): remove in future version after deprecation cycle
# Non-regression test for:
# https://github.com/pandas-dev/pandas/issues/31532
width = get_option("display.max_colwidth")
with tm.assert_produces_warning(FutureWarning):
set_option("display.max_colwidth", -1)
set_option("display.max_colwidth", width)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
reset_option("display.chop_threshold") # default None
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
with option_context("display.chop_threshold", 0.2):
assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
with option_context("display.chop_threshold", 0.6):
assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
with option_context("display.chop_threshold", None):
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 -1.000000e-11\n"
"2 30.0 2.000000e-09\n"
"3 40.0 -2.000000e-11"
)
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (
" 0 1\n"
"0 10.0 0.000000e+00\n"
"1 20.0 0.000000e+00\n"
"2 30.0 0.000000e+00\n"
"3 40.0 0.000000e+00"
)
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 0.000000e+00\n"
"2 30.0 2.000000e-09\n"
"3 40.0 0.000000e+00"
)
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(list(range(1000)))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(list(range(1000)))) < 100
with option_context("display.max_seq_items", 1):
assert len(printing.pprint_thing(list(range(1000)))) < 9
def test_repr_set(self):
assert printing.pprint_thing({1}) == "{1}"
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(["a", "b"])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
cols = ["\u03c8"]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context("mode.sim_interactive", True):
df = DataFrame(np.random.randn(10, 4))
assert "\\" not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame("hello", index=[0], columns=[0])
df_wide = DataFrame("hello", index=[0], columns=range(10))
df_tall = DataFrame("hello", index=range(30), columns=range(5))
with option_context("mode.sim_interactive", True):
with option_context(
"display.max_columns",
10,
"display.width",
20,
"display.max_rows",
20,
"display.show_dimensions",
True,
):
with option_context("display.expand_frame_repr", True):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
assert has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert has_expanded_repr(df_tall)
with option_context("display.expand_frame_repr", False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame("hello", index=range(1000), columns=range(5))
with option_context(
"mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
def test_repr_truncates_terminal_size(self, monkeypatch):
# see gh-21180
terminal_size = (118, 96)
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
index = range(5)
columns = MultiIndex.from_tuples(
[
("This is a long title with > 37 chars.", "cat"),
("This is a loooooonger title with > 43 chars.", "dog"),
]
)
df = DataFrame(1, index=index, columns=columns)
result = repr(df)
h1, h2 = result.split("\n")[:2]
assert "long" in h1
assert "loooooonger" in h1
assert "cat" in h2
assert "dog" in h2
# regular columns
df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
result = repr(df2)
assert df2.columns[0] in result.split("\n")[0]
def test_repr_truncates_terminal_size_full(self, monkeypatch):
# GH 22984 ensure entire window is filled
terminal_size = (80, 24)
df = DataFrame(np.random.rand(1, 7))
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
assert "..." not in str(df)
def test_repr_truncation_column_size(self):
# dataframe with last column very wide -> check it is not used to
# determine size of truncation (...) column
df = DataFrame(
{
"a": [108480, 30830],
"b": [12345, 12345],
"c": [12345, 12345],
"d": [12345, 12345],
"e": ["a" * 50] * 2,
}
)
assert "..." in str(df)
assert " ... " not in str(df)
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
pytest.skip(f"terminal size too small, {term_width} x {term_height}")
def mkframe(n):
index = [f"{i:05d}" for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context("mode.sim_interactive", True):
with option_context("display.width", term_width * 2):
with option_context("display.max_rows", 5, "display.max_columns", 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
assert has_doubly_truncated_repr(df6)
with option_context("display.max_rows", 20, "display.max_columns", 10):
# Beyond the max_columns boundary, but no expanded repr
# since the width is not exceeded
assert not has_expanded_repr(df6)
assert not has_truncated_repr(df6)
with option_context("display.max_rows", 9, "display.max_columns", 10):
# exceeding vertical bounds cannot result in an expanded repr
assert not has_expanded_repr(df10)
assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context(
"display.max_columns",
100,
"display.max_rows",
term_width * 20,
"display.width",
None,
):
df = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
assert has_expanded_repr(df)
def test_repr_min_rows(self):
df = DataFrame({"a": range(20)})
# default setting no truncation even if above min_rows
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
df = DataFrame({"a": range(61)})
# default of max_rows 60 triggers truncation if above
assert ".." in repr(df)
assert ".." in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(df)
assert "2 " not in repr(df)
assert "..." in df._repr_html_()
assert "<td>2</td>" not in df._repr_html_()
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(df)
assert "<td>5</td>" in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when min_rows is set higher than max_rows, the smaller value (max_rows) is used
assert "5 5" not in repr(df)
assert "<td>5</td>" not in df._repr_html_()
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
def test_str_max_colwidth(self):
# GH 7856
df = DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "uncomfortably long line with lots of stuff",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably long line with lots of stuff 1\n"
"1 foo bar stuff 1"
)
with option_context("max_colwidth", 20):
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably lo... 1\n"
"1 foo bar stuff 1"
)
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context("mode.sim_interactive", True):
with option_context("display.max_rows", None):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", 0):
# Truncate with auto detection.
assert has_horizontally_truncated_repr(df)
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
# Truncate vertically
assert has_vertically_truncated_repr(df)
with option_context("display.max_rows", None):
with option_context("display.max_columns", 0):
assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = ["\u03c3"] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({"unicode": unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(["abc", "\u03c3a", "aegdvg"])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split("\n")
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except AttributeError:
pass
if not line.startswith("dtype:"):
assert len(line) == line_len
# it works even if sys.stdin in None
_stdin = sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_east_asian_unicode_false(self):
# not aligned properly because of east asian width
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あああああ あ\n"
"bb い いいい\nc う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\nあああ あああああ あ\n"
"いいいいいい い いいい\nうう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n0 あああああ ... さ\n"
".. ... ... ...\n3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\nあああ あああああ ... さ\n"
".. ... ... ...\naaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
def test_east_asian_unicode_true(self):
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\n"
"a あああああ あ\n"
"bb い いいい\n"
"c う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\n"
"あああ あああああ あ\n"
"いいいいいい い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n"
"0 あああああ ... さ\n"
".. ... ... ...\n"
"3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\n"
"あああ あああああ ... さ\n"
"... ... ... ...\n"
"aaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
# ambiguous unicode
df = DataFrame(
{"b": ["あ", "いいい", "¡¡", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "¡¡¡"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c ¡¡ 33333\n"
"¡¡¡ ええええええ 4"
)
assert repr(df) == expected
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({"c/\u03c3": Series(dtype=object)})
nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
assert c10 < c20 < c30
# GH 8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
assert len(with_header_row1) == len(no_header)
def test_to_string_with_column_specific_col_space_raises(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
msg = (
"Col_space length\\(\\d+\\) should match "
"DataFrame number of columns\\(\\d+\\)"
)
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40])
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40, 50, 60])
msg = "unknown column"
with pytest.raises(ValueError, match=msg):
df.to_string(col_space={"a": "foo", "b": 23, "d": 34})
def test_to_string_with_column_specific_col_space(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
result = df.to_string(col_space={"a": 10, "b": 11, "c": 12})
# 3 separating space + each col_space for (id, a, b, c)
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
result = df.to_string(col_space=[10, 11, 12])
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
def test_to_string_truncate_indices(self):
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeIntIndex,
tm.makeDateIndex,
tm.makePeriodIndex,
]:
for column in [tm.makeStringIndex]:
for h in [10, 20]:
for w in [10, 20]:
with option_context("display.expand_frame_repr", False):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
assert has_vertically_truncated_repr(df)
else:
assert not has_vertically_truncated_repr(df)
with option_context("display.max_columns", 15):
if w == 20:
assert has_horizontally_truncated_repr(df)
else:
assert not (has_horizontally_truncated_repr(df))
with option_context(
"display.max_rows", 15, "display.max_columns", 15
):
if h == 20 and w == 20:
assert has_doubly_truncated_repr(df)
else:
assert not has_doubly_truncated_repr(df)
def test_to_string_truncate_multilevel(self):
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = DataFrame(index=arrays, columns=arrays)
with option_context("display.max_rows", 7, "display.max_columns", 7):
assert has_doubly_truncated_repr(df)
def test_truncate_with_different_dtypes(self):
# 11594, 12045
# when truncated the dtypes of the splits can differ
# 11594
import datetime
s = Series(
[datetime.datetime(2012, 1, 1)] * 10
+ [datetime.datetime(1012, 1, 2)]
+ [datetime.datetime(2012, 1, 3)] * 10
)
with option_context("display.max_rows", 8):
result = str(s)
assert "object" in result
# 12045
df = DataFrame({"text": ["some words"] + [None] * 9})
with option_context("display.max_rows", 8, "display.max_columns", 3):
result = str(df)
assert "None" in result
assert "NaN" not in result
def test_truncate_with_different_dtypes_multiindex(self):
# GH#13000
df = DataFrame({"Vals": range(100)})
frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"])
result = repr(frame)
result2 = repr(frame.iloc[:5])
assert result.startswith(result2)
def test_datetimelike_frame(self):
# GH 12211
df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC")] + [NaT] * 5})
with option_context("display.max_rows", 5):
result = str(df)
assert "2013-01-01 00:00:00+00:00" in result
assert "NaT" in result
assert "..." in result
assert "[6 rows x 1 columns]" in result
dts = [Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [NaT] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00-05:00 1\n"
"1 2011-01-01 00:00:00-05:00 2\n"
".. ... ..\n"
"8 NaT 9\n"
"9 NaT 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [NaT] * 5 + [Timestamp("2011-01-01", tz="US/Eastern")] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 NaT 1\n"
"1 NaT 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
Timestamp("2011-01-01", tz="US/Eastern")
] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00+09:00 1\n"
"1 2011-01-01 00:00:00+09:00 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
df = DataFrame({"A": date_range(start=start_date, freq="D", periods=5)})
result = str(df)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
df = DataFrame({"A": range(5)}, index=dti)
result = str(df.index)
assert start_date in result
def test_nonunicode_nonascii_alignment(self):
df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
rep_str = df.to_string()
lines = rep_str.split("\n")
assert len(lines[1]) == len(lines[2])
def test_unicode_problem_decoding_as_ascii(self):
dm = DataFrame({"c/\u03c3": Series({"test": np.nan})})
str(dm.to_string())
def test_string_repr_encoding(self, datapath):
filepath = datapath("io", "parser", "data", "unicode_series.csv")
df = read_csv(filepath, header=None, encoding="latin1")
repr(df)
repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({"foo": [-np.inf, np.inf]})
repr(df)
def test_frame_info_encoding(self):
index = ["'Til There Was You (1997)", "ldum klaka (Cold Fever) (1994)"]
fmt.set_option("display.max_rows", 1)
df = DataFrame(columns=["a", "b", "c"], index=index)
repr(df)
repr(df.T)
fmt.set_option("display.max_rows", 200)
def test_wide_repr(self):
with option_context(
"mode.sim_interactive",
True,
"display.show_dimensions",
True,
"display.max_columns",
20,
):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
assert f"10 rows x {max_cols - 1} columns" in rep_str
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 120):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_columns(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
df = DataFrame(
np.random.randn(5, 3), columns=["a" * 90, "b" * 90, "c" * 90]
)
rep_str = repr(df)
assert len(rep_str.splitlines()) == 20
def test_wide_repr_named(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
df.index.name = "DataFrame Index"
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "DataFrame Index" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)), index=midx)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "Level 0 Level 1" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex_cols(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
mcols = MultiIndex.from_arrays(tm.rands_array(3, size=(2, max_cols - 1)))
df = DataFrame(
tm.rands_array(25, (10, max_cols - 1)), index=midx, columns=mcols
)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150, "display.max_columns", 20):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_unicode(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_long_columns(self):
with option_context("mode.sim_interactive", True):
df = DataFrame({"a": ["a" * 30, "b" * 30], "b": ["c" * 70, "d" * 80]})
result = repr(df)
assert "ccccc" in result
assert "ddddd" in result
def test_long_series(self):
n = 1000
s = Series(
np.random.randint(-50, 50, n),
index=[f"s{x:04d}" for x in range(n)],
dtype="int64",
)
import re
str_rep = str(s)
nmatches = len(re.findall("dtype", str_rep))
assert nmatches == 1
def test_index_with_nan(self):
# GH 2850
df = DataFrame(
{
"id1": {0: "1a3", 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: "78d", 1: "79d"},
"value": {0: 123, 1: 64},
}
)
# multi-index
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# index
y = df.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nd67 9h4 79d 64"
)
assert result == expected
# with append (this failed in 0.12)
y = df.set_index(["id1", "id2"]).set_index("id3", append=True)
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# all-nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nNaN 9h4 79d 64"
)
assert result == expected
# partial nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index(["id2", "id3"])
result = y.to_string()
expected = (
" id1 value\nid2 id3 \n"
"NaN 78d 1a3 123\n 79d 9h4 64"
)
assert result == expected
df = DataFrame(
{
"id1": {0: np.nan, 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: np.nan, 1: "79d"},
"value": {0: 123, 1: 64},
}
)
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"NaN NaN NaN 123\n9h4 d67 79d 64"
)
assert result == expected
def test_to_string(self):
# big mixed
biggie = DataFrame(
{"A": np.random.randn(200), "B": tm.makeStringIndex(200)},
index=np.arange(200),
)
biggie.loc[:20, "A"] = np.nan
biggie.loc[:20, "B"] = np.nan
s = biggie.to_string()
buf = StringIO()
retval = biggie.to_string(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, str)
# print in right order
result = biggie.to_string(
columns=["B", "A"], col_space=17, float_format="%.5f".__mod__
)
lines = result.split("\n")
header = lines[0].strip().split()
joined = "\n".join([re.sub(r"\s+", " ", x).strip() for x in lines[1:]])
recons = read_csv(StringIO(joined), names=header, header=None, sep=" ")
tm.assert_series_equal(recons["B"], biggie["B"])
assert recons["A"].count() == biggie["A"].count()
assert (np.abs(recons["A"].dropna() - biggie["A"].dropna()) < 0.1).all()
# expected = ['B', 'A']
# assert header == expected
result = biggie.to_string(columns=["A"], col_space=17)
header = result.split("\n")[0].strip().split()
expected = ["A"]
assert header == expected
biggie.to_string(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"})
biggie.to_string(columns=["B", "A"], float_format=str)
biggie.to_string(columns=["B", "A"], col_space=12, float_format=str)
frame = DataFrame(index=np.arange(200))
frame.to_string()
def test_to_string_no_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=False)
expected = "0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
def test_to_string_specified_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=["X", "Y"])
expected = " X Y\n0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
msg = "Writing 2 cols but got 1 aliases"
with pytest.raises(ValueError, match=msg):
df.to_string(header=["X"])
def test_to_string_no_index(self):
# GH 16839, GH 13032
df = DataFrame({"x": [11, 22], "y": [33, -44], "z": ["AAA", " "]})
df_s = df.to_string(index=False)
# Leading space is expected for positive numbers.
expected = " x y z\n11 33 AAA\n22 -44 "
assert df_s == expected
df_s = df[["y", "x", "z"]].to_string(index=False)
expected = " y x z\n 33 11 AAA\n-44 22 "
assert df_s == expected
def test_to_string_line_width_no_index(self):
# GH 13998, GH 22505
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 1 \n 2 \n 3 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n11 \n22 \n33 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 11 \n 22 \n-33 \n\n y \n 4 \n 5 \n-6 "
assert df_s == expected
def test_to_string_float_formatting(self):
tm.reset_display_options()
fmt.set_option(
"display.precision",
5,
"display.column_space",
12,
"display.notebook_repr_html",
False,
)
df = DataFrame(
{"x": [0, 0.25, 3456.000, 12e45, 1.64e6, 1.7e8, 1.253456, np.pi, -1e6]}
)
df_s = df.to_string()
if _three_digit_exp():
expected = (
" x\n0 0.00000e+000\n1 2.50000e-001\n"
"2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n"
"5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n"
"8 -1.00000e+006"
)
else:
expected = (
" x\n0 0.00000e+00\n1 2.50000e-01\n"
"2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n"
"5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n"
"8 -1.00000e+06"
)
assert df_s == expected
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string()
expected = " x\n0 3234.000\n1 0.253"
assert df_s == expected
tm.reset_display_options()
assert get_option("display.precision") == 6
df = DataFrame({"x": [1e9, 0.2512]})
df_s = df.to_string()
if _three_digit_exp():
expected = " x\n0 1.000000e+009\n1 2.512000e-001"
else:
expected = " x\n0 1.000000e+09\n1 2.512000e-01"
assert df_s == expected
def test_to_string_float_format_no_fixed_width(self):
# GH 21625
df = DataFrame({"x": [0.19999]})
expected = " x\n0 0.200"
assert df.to_string(float_format="%.3f") == expected
# GH 22270
df = DataFrame({"x": [100.0]})
expected = " x\n0 100"
assert df.to_string(float_format="%.0f") == expected
def test_to_string_small_float_values(self):
df = DataFrame({"a": [1.5, 1e-17, -5.5e-7]})
result = df.to_string()
# sadness per above
if _three_digit_exp():
expected = (
" a\n"
"0 1.500000e+000\n"
"1 1.000000e-017\n"
"2 -5.500000e-007"
)
else:
expected = (
" a\n"
"0 1.500000e+00\n"
"1 1.000000e-17\n"
"2 -5.500000e-07"
)
assert result == expected
# but not all exactly zero
df = df * 0
result = df.to_string()
expected = " 0\n0 0\n1 0\n2 -0"
def test_to_string_float_index(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.arange(5), index=index)
result = df.to_string()
expected = " 0\n1.5 0\n2.0 1\n3.0 2\n4.0 3\n5.0 4"
assert result == expected
def test_to_string_complex_float_formatting(self):
# GH #25514, 25745
with option_context("display.precision", 5):
df = DataFrame(
{
"x": [
(0.4467846931321966 + 0.0715185102060818j),
(0.2739442392974528 + 0.23515228785438969j),
(0.26974928742135185 + 0.3250604054898979j),
(-1j),
]
}
)
result = df.to_string()
expected = (
" x\n0 0.44678+0.07152j\n"
"1 0.27394+0.23515j\n"
"2 0.26975+0.32506j\n"
"3 -0.00000-1.00000j"
)
assert result == expected
def test_to_string_ascii_error(self):
data = [
(
"0 ",
" .gitignore ",
" 5 ",
" \xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2",
)
]
df = DataFrame(data)
# it works!
repr(df)
def test_to_string_int_formatting(self):
df = DataFrame({"x": [-15, 20, 25, -35]})
assert issubclass(df["x"].dtype.type, np.integer)
output = df.to_string()
expected = " x\n0 -15\n1 20\n2 25\n3 -35"
assert output == expected
def test_to_string_index_formatter(self):
df = DataFrame([range(5), range(5, 10), range(10, 15)])
rs = df.to_string(formatters={"__index__": lambda x: "abc"[x]})
xp = """\
0 1 2 3 4
a 0 1 2 3 4
b 5 6 7 8 9
c 10 11 12 13 14\
"""
assert rs == xp
def test_to_string_left_justify_cols(self):
tm.reset_display_options()
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string(justify="left")
expected = " x \n0 3234.000\n1 0.253"
assert df_s == expected
def test_to_string_format_na(self):
tm.reset_display_options()
df = DataFrame(
{
"A": [np.nan, -1, -2.1234, 3, 4],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0000 foo\n"
"2 -2.1234 foooo\n"
"3 3.0000 fooooo\n"
"4 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [np.nan, -1.0, -2.0, 3.0, 4.0],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0 foo\n"
"2 -2.0 foooo\n"
"3 3.0 fooooo\n"
"4 4.0 bar"
)
assert result == expected
def test_to_string_format_inf(self):
# Issue #24861
tm.reset_display_options()
df = DataFrame(
{
"A": [-np.inf, np.inf, -1, -2.1234, 3, 4],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0000 foo\n"
"3 -2.1234 foooo\n"
"4 3.0000 fooooo\n"
"5 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [-np.inf, np.inf, -1.0, -2.0, 3.0, 4.0],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0 foo\n"
"3 -2.0 foooo\n"
"4 3.0 fooooo\n"
"5 4.0 bar"
)
assert result == expected
def test_to_string_decimal(self):
# Issue #23614
df = DataFrame({"A": [6.0, 3.1, 2.2]})
expected = " A\n0 6,0\n1 3,1\n2 2,2"
assert df.to_string(decimal=",") == expected
def test_to_string_line_width(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
s = df.to_string(line_width=80)
assert max(len(line) for line in s.split("\n")) == 80
def test_show_dimensions(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
True,
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
False,
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
with option_context(
"display.max_rows",
2,
"display.max_columns",
2,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
def test_repr_html(self, float_frame):
df = float_frame
df._repr_html_()
fmt.set_option("display.max_rows", 1, "display.max_columns", 1)
df._repr_html_()
fmt.set_option("display.notebook_repr_html", False)
df._repr_html_()
tm.reset_display_options()
df = DataFrame([[1, 2], [3, 4]])
fmt.set_option("display.show_dimensions", True)
assert "2 rows" in df._repr_html_()
fmt.set_option("display.show_dimensions", False)
assert "2 rows" not in df._repr_html_()
tm.reset_display_options()
def test_repr_html_mathjax(self):
df = DataFrame([[1, 2], [3, 4]])
assert "tex2jax_ignore" not in df._repr_html_()
with option_context("display.html.use_mathjax", False):
assert "tex2jax_ignore" in df._repr_html_()
def test_repr_html_wide(self):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
wide_df = DataFrame(tm.rands_array(25, size=(10, max_cols + 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in wide_df._repr_html_()
def test_repr_html_wide_multiindex_cols(self):
max_cols = 20
mcols = MultiIndex.from_product(
[np.arange(max_cols // 2), ["foo", "bar"]], names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
reg_repr = df._repr_html_()
assert "..." not in reg_repr
mcols = MultiIndex.from_product(
(np.arange(1 + (max_cols // 2)), ["foo", "bar"]), names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_repr_html_long(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert str(41 + max_rows // 2) in reg_repr
h = max_rows + 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
long_repr = df._repr_html_()
assert ".." in long_repr
assert str(41 + max_rows // 2) not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_float(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert f"<td>{40 + h}</td>" in reg_repr
h = max_rows + 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
long_repr = df._repr_html_()
assert ".." in long_repr
assert "<td>31</td>" not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_long_multiindex(self):
max_rows = 60
max_L1 = max_rows // 2
tuples = list(itertools.product(np.arange(max_L1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(np.random.randn(max_L1 * 2, 2), index=idx, columns=["A", "B"])
with option_context("display.max_rows", 60, "display.max_columns", 20):
reg_repr = df._repr_html_()
assert "..." not in reg_repr
tuples = list(itertools.product(np.arange(max_L1 + 1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(
np.random.randn((max_L1 + 1) * 2, 2), index=idx, columns=["A", "B"]
)
long_repr = df._repr_html_()
assert "..." in long_repr
def test_repr_html_long_and_wide(self):
max_cols = 20
max_rows = 60
h, w = max_rows - 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
h, w = max_rows + 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_info_repr(self):
# GH#21746 For tests inside a terminal (i.e. not CI) we need to detect
# the terminal size to ensure that we try to print something "too big"
term_width, term_height = get_terminal_size()
max_rows = 60
max_cols = 20 + (max(term_width, 80) - 80) // 4
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_vertically_truncated_repr(df)
with option_context("display.large_repr", "info"):
assert has_info_repr(df)
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_horizontally_truncated_repr(df)
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert has_info_repr(df)
def test_info_repr_max_cols(self):
# GH #6939
df = DataFrame(np.random.randn(10, 5))
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
4,
):
assert has_non_verbose_info_repr(df)
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
5,
):
assert not has_non_verbose_info_repr(df)
# test verbose overrides
# fmt.set_option('display.max_info_columns', 4) # exceeded
def test_info_repr_html(self):
max_rows = 60
max_cols = 20
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert r"<class" not in df._repr_html_()
with option_context("display.large_repr", "info"):
assert r"<class" in df._repr_html_()
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert "<class" not in df._repr_html_()
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert "<class" in df._repr_html_()
def test_fake_qtconsole_repr_html(self, float_frame):
df = float_frame
def get_ipython():
return {"config": {"KernelApp": {"parent_appname": "ipython-qtconsole"}}}
repstr = df._repr_html_()
assert repstr is not None
fmt.set_option("display.max_rows", 5, "display.max_columns", 2)
repstr = df._repr_html_()
assert "class" in repstr # info fallback
tm.reset_display_options()
def test_pprint_pathological_object(self):
"""
If the test fails, it at least won't hang.
"""
class A:
def __getitem__(self, key):
return 3 # obviously simplified
df = DataFrame([A()])
repr(df) # just don't die
def test_float_trim_zeros(self):
vals = [
2.08430917305e10,
3.52205017305e10,
2.30674817305e10,
2.03954217305e10,
5.59897817305e10,
]
skip = True
for line in repr(DataFrame({"A": vals})).split("\n")[:-2]:
if line.startswith("dtype:"):
continue
if _three_digit_exp():
assert ("+010" in line) or skip
else:
assert ("+10" in line) or skip
skip = False
@pytest.mark.parametrize(
"data, expected",
[
(["3.50"], "0 3.50\ndtype: object"),
([1.20, "1.00"], "0 1.2\n1 1.00\ndtype: object"),
([np.nan], "0 NaN\ndtype: float64"),
([None], "0 None\ndtype: object"),
(["3.50", np.nan], "0 3.50\n1 NaN\ndtype: object"),
([3.50, np.nan], "0 3.5\n1 NaN\ndtype: float64"),
([3.50, np.nan, "3.50"], "0 3.5\n1 NaN\n2 3.50\ndtype: object"),
([3.50, None, "3.50"], "0 3.5\n1 None\n2 3.50\ndtype: object"),
],
)
def test_repr_str_float_truncation(self, data, expected):
# GH#38708
series = Series(data)
result = repr(series)
assert result == expected
@pytest.mark.parametrize(
"float_format,expected",
[
("{:,.0f}".format, "0 1,000\n1 test\ndtype: object"),
("{:.4f}".format, "0 1000.0000\n1 test\ndtype: object"),
],
)
def test_repr_float_format_in_object_col(self, float_format, expected):
# GH#40024
df = Series([1000.0, "test"])
with option_context("display.float_format", float_format):
result = repr(df)
assert result == expected
def test_dict_entries(self):
df = DataFrame({"A": [{"a": 1, "b": 2}]})
val = df.to_string()
assert "'a': 1" in val
assert "'b': 2" in val
def test_categorical_columns(self):
# GH35439
data = [[4, 2], [3, 2], [4, 3]]
cols = ["aaaaaaaaa", "b"]
df = DataFrame(data, columns=cols)
df_cat_cols = DataFrame(data, columns=pd.CategoricalIndex(cols))
assert df.to_string() == df_cat_cols.to_string()
def test_period(self):
# GH 12615
df = DataFrame(
{
"A": pd.period_range("2013-01", periods=4, freq="M"),
"B": [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02-01", freq="D"),
pd.Period("2011-03-01 09:00", freq="H"),
pd.Period("2011-04", freq="M"),
],
"C": list("abcd"),
}
)
exp = (
" A B C\n"
"0 2013-01 2011-01 a\n"
"1 2013-02 2011-02-01 b\n"
"2 2013-03 2011-03-01 09:00 c\n"
"3 2013-04 2011-04 d"
)
assert str(df) == exp
@pytest.mark.parametrize(
"length, max_rows, min_rows, expected",
[
(10, 10, 10, 10),
(10, 10, None, 10),
(10, 8, None, 8),
(20, 30, 10, 30), # max_rows > len(frame), hence max_rows
(50, 30, 10, 10), # max_rows < len(frame), hence min_rows
(100, 60, 10, 10), # same
(60, 60, 10, 60), # edge case
(61, 60, 10, 10), # edge case
],
)
def test_max_rows_fitted(self, length, min_rows, max_rows, expected):
"""Check that display logic is correct.
GH #37359
See description here:
https://pandas.pydata.org/docs/dev/user_guide/options.html#frequently-used-options
"""
formatter = fmt.DataFrameFormatter(
DataFrame(np.random.rand(length, 3)),
max_rows=max_rows,
min_rows=min_rows,
)
result = formatter.max_rows_fitted
assert result == expected
def gen_series_formatting():
s1 = Series(["a"] * 100)
s2 = Series(["ab"] * 100)
s3 = Series(["a", "ab", "abc", "abcd", "abcde", "abcdef"])
s4 = s3[::-1]
test_sers = {"onel": s1, "twol": s2, "asc": s3, "desc": s4}
return test_sers
class TestSeriesFormatting:
def setup_method(self, method):
self.ts = tm.makeTimeSeries()
def test_repr_unicode(self):
s = Series(["\u03c3"] * 10)
repr(s)
a = Series(["\u05d0"] * 1000)
a.name = "title1"
repr(a)
def test_to_string(self):
buf = StringIO()
s = self.ts.to_string()
retval = self.ts.to_string(buf=buf)
assert retval is None
assert buf.getvalue().strip() == s
# pass float_format
format = "%.4f".__mod__
result = self.ts.to_string(float_format=format)
result = [x.split()[1] for x in result.split("\n")[:-1]]
expected = [format(x) for x in self.ts]
assert result == expected
# empty string
result = self.ts[:0].to_string()
assert result == "Series([], Freq: B)"
result = self.ts[:0].to_string(length=0)
assert result == "Series([], Freq: B)"
# name and length
cp = self.ts.copy()
cp.name = "foo"
result = cp.to_string(length=True, name=True, dtype=True)
last_line = result.split("\n")[-1].strip()
assert last_line == (f"Freq: B, Name: foo, Length: {len(cp)}, dtype: float64")
def test_freq_name_separation(self):
s = Series(
np.random.randn(10), index=date_range("1/1/2000", periods=10), name=0
)
result = repr(s)
assert "Freq: D, Name: 0" in result
def test_to_string_mixed(self):
s = Series(["foo", np.nan, -1.23, 4.56])
result = s.to_string()
expected = "0 foo\n" + "1 NaN\n" + "2 -1.23\n" + "3 4.56"
assert result == expected
# but don't count NAs as floats
s = Series(["foo", np.nan, "bar", "baz"])
result = s.to_string()
expected = "0 foo\n" + "1 NaN\n" + "2 bar\n" + "3 baz"
assert result == expected
s = Series(["foo", 5, "bar", "baz"])
result = s.to_string()
expected = "0 foo\n" + "1 5\n" + "2 bar\n" + "3 baz"
assert result == expected
def test_to_string_float_na_spacing(self):
s = Series([0.0, 1.5678, 2.0, -3.0, 4.0])
s[::2] = np.nan
result = s.to_string()
expected = (
"0 NaN\n"
+ "1 1.5678\n"
+ "2 NaN\n"
+ "3 -3.0000\n"
+ "4 NaN"
)
assert result == expected
def test_to_string_without_index(self):
# GH 11729 Test index=False option
s = Series([1, 2, 3, 4])
result = s.to_string(index=False)
expected = "1\n" + "2\n" + "3\n" + "4"
assert result == expected
def test_unicode_name_in_footer(self):
s = Series([1, 2], name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf = fmt.SeriesFormatter(s, name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf._get_footer() # should not raise exception
def test_east_asian_unicode_series(self):
# not aligned properly because of east asian width
# unicode index
s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = "あ a\nいい bb\nううう CCC\nええええ D\ndtype: object"
assert repr(s) == expected
# unicode values
s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = "a あ\nbb いい\nc ううう\nddd ええええ\ndtype: object"
assert repr(s) == expected
# both
s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\nいいいい いい\nう ううう\nえええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Series(
["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"], name="おおおおおおお"
)
expected = (
"ああ あ\nいいいい いい\nう ううう\n"
"えええ ええええ\nName: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Series([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
expected = (
"1 1\nAB 22\nNaN 3333\nあああ 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Series(
[1, 22, 3333, 44444], index=[1, "AB", Timestamp("2011-01-01"), "あああ"]
)
expected = (
"1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# truncate
with option_context("display.max_rows", 3):
s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
expected = (
"0 あ\n ... \n"
"3 ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
s.index = ["ああ", "いいいい", "う", "えええ"]
expected = (
"ああ あ\n ... \n"
"えええ ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# unicode index
s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = (
"あ a\nいい bb\nううう CCC\n"
"ええええ D\ndtype: object"
)
assert repr(s) == expected
# unicode values
s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = (
"a あ\nbb いい\nc ううう\n"
"ddd ええええ\ndtype: object"
)
assert repr(s) == expected
# both
s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Series(
["あ", "いい", "ううう", "ええええ"],
index=["ああ", "いいいい", "う", "えええ"],
name="おおおおおおお",
)
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\n"
"Name: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Series([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\n"
"dtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
expected = (
"1 1\nAB 22\nNaN 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Series(
[1, 22, 3333, 44444],
index=[1, "AB", | Timestamp("2011-01-01") | pandas.Timestamp |
"""
Helper functions for dfds_ds_toolbox.analysis.plotting.
"""
from typing import List, Optional, Union
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
def _get_equally_grouped_data(
input_data: pd.DataFrame,
feature: str,
target_col: str,
bins: int,
    cuts: Optional[Union[int, List[int]]] = None,
):
"""Bins continuous features into equal sample size buckets and returns the target mean in each bucket.
Separates out nulls into another bucket.
Helper function for other plotting functions.
Args:
input_data: dataframe containing features and target column.
feature: feature column name
target_col: target column name
        bins: number of bins required
        cuts: optional pre-computed bin boundaries; pass cuts derived from train data to reuse them on test data
    Returns:
        If cuts are passed, only the grouped data is returned; otherwise both the cuts and the grouped data are returned
"""
has_null = pd.isnull(input_data[feature]).sum() > 0
    if has_null:
data_null = input_data[pd.isnull(input_data[feature])]
input_data = input_data[~ | pd.isnull(input_data[feature]) | pandas.isnull |
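# --- Illustrative sketch (not part of dfds_ds_toolbox): the equal-sample-size binning described
# in the docstring above can be approximated with pd.qcut. The helper name and the aggregation
# below are assumptions for demonstration, not the library's actual implementation.
def _sketch_equal_size_bins(df: pd.DataFrame, feature: str, target_col: str, bins: int) -> pd.DataFrame:
    non_null = df[~pd.isnull(df[feature])]
    # qcut yields (roughly) equal-count buckets; duplicates="drop" guards against repeated edges
    buckets = pd.qcut(non_null[feature], q=bins, duplicates="drop")
    return non_null.groupby(buckets)[target_col].agg(["count", "mean"]).reset_index()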
"""
Created on Thu Nov 7, 2019
@author: <NAME>
"""
import serial # `pyserial` package; NOT `serial` package
import warnings
import pandas as pd
import numpy as np
import time
import os
import sys
from datetime import datetime
try:
from serial.tools import list_ports
IMPORTED_LIST_PORTS = True
except ImportError:
IMPORTED_LIST_PORTS = False
from .options import SETTINGS_DICT
# link to usb-serial driver for macOS
_L1 = "http://www.prolific.com.tw/UserFiles/files/PL2303HXD_G_Driver_v2_0_0_20191204.zip"
# blog post explaining how to bypass blocked extensions
# need this because no Big Sur version of driver as of Jan 7 2020.
_L2 = "https://eclecticlight.co/2019/06/01/how-to-bypass-mojave-10-14-5s-new-kext-security/"
class LockInError(Exception):
"""named exception for LockIn serial port connection issues"""
pass
class LockIn(object):
"""
represents a usable connection with the lock-in amplifier
"""
SWEEP_HEADER = "{:>3} \t {:>15} \t {:>15} \t {:>15}"
SWEEP_BLANK = "{:>3d} \t {:>15,.2f} \t {:>15,.4e} \t {:>15,.4e}"
@staticmethod
def get_serial(comm_port):
return serial.Serial(comm_port,
baudrate=19200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=3)
DEFAULT_PORTS = {
'darwin': ['/dev/cu.usbserial-1410'],
'win32': ['COM5'],
'linux': ['/dev/ttyUSB0']
}
def __init__(self, comm_port: str = None):
# (detect os and) set communication port
self._comm = None
if comm_port is not None:
try:
self._comm = LockIn.get_serial(comm_port)
except serial.SerialException:
print("lockintools: could not connect to port: %s" % comm_port)
else:
print("lockintools: trying default ports for platform: %s" % sys.platform)
for cp in LockIn.DEFAULT_PORTS[sys.platform]:
try:
self._comm = LockIn.get_serial(cp)
break
except serial.SerialException:
print("lockintools: could not connect to port: %s" % cp)
if self._comm is None and IMPORTED_LIST_PORTS:
print("lockintools: tying to detect port and auto-connect...")
for cp_match in list_ports.grep("(usb|USB)"):
cp_str = str(cp_match).split('-')[0].strip()
try:
self._comm = LockIn.get_serial(cp_str)
break
except serial.SerialException:
print("lockintools: could not connect to port: %s" % cp_str)
if self._comm is None:
raise LockInError("lockintools: CONNECTION FAILED! Do you have a driver installed?")
print("lockintools: SUCCESS! Connection established.")
self.print_to_stdout = True
@property
def comm(self):
# `serial.Serial` object for handling communications
return self._comm
def close(self):
"""closes communication port"""
if self.comm.is_open:
self.comm.close()
def open(self):
"""(re)-opens communication port"""
if not self.comm.is_open:
self.comm.open()
def cmd(self, command):
"""execute arbitrary lockin command"""
self.comm.write(str.encode(command + '\n'))
self.comm.flush()
if '?' in command:
state = bytes.decode(self.comm.readline())
return state
else:
return
def set_input_mode(self, mode):
"""set lockin input configuration"""
if mode == "A":
self.cmd("ISRC0")
elif mode == "A-B":
self.cmd("ISRC1")
elif mode == "I":
self.cmd("ISRC2")
elif mode == "I100":
self.cmd("ISRC3")
else:
raise ValueError("invalid mode {}, valid values are 'A', 'A-B', 'I', or 'I100'"
.format(mode))
def set_coupling_mode(self, mode):
if mode == "AC":
self.cmd("ICPL0")
elif mode == "DC":
self.cmd("ICPL1")
else:
raise ValueError("invalid mode {}, valid values are 'AC' or 'DC'"
.format(mode))
def set_freq(self, freq):
"""set lock-in amp. frequency"""
command = 'FREQ' + str(freq)
return self.cmd(command)
def set_ampl(self, ampl):
"""set lock-in amp. voltage amplitude"""
if ampl > 5.:
raise ValueError("can not exceed amplitude of 5V")
command = 'SLVL' + str(ampl)
return self.cmd(command)
def set_sens(self, sens):
"""set lock-in amp. sensitivity"""
if 0 <= sens <= 26:
self.cmd('SENS' + str(sens))
else:
raise ValueError("sensitivity setting must be between 0 (1 nV) and "
"26 (1 V)")
def set_harm(self, harm):
"""set lock-in amp. detection harmonic"""
harm = int(harm)
if 1 <= harm <= 19999:
self.cmd('HARM' + str(harm))
else:
raise ValueError("harmonic must be between 1 and 19999")
def get_reading(self, ch, meas_time=0.1, stdev=False):
"""
read average value from channel `ch` over `meas_time` seconds
optionally, also return standard deviation (`stdev=True`)
"""
if not (ch == 1 or ch == 2):
raise ValueError("channel `ch` should be 1 or 2")
self.cmd("REST")
self.cmd("STRT")
time.sleep(meas_time)
self.cmd("PAUS")
N = self.cmd("SPTS?")
r_str = self.cmd("TRCA?" + str(ch) + ",0," + N)
r = [float(ri) for ri in r_str.split(',')[:-1]]
if stdev:
return np.mean(r), np.std(r)
return np.mean(r)
def get_x(self, meas_time=0.1, stdev=False):
return self.get_reading(ch=1, meas_time=meas_time, stdev=stdev)
def get_y(self, meas_time=0.1, stdev=False):
return self.get_reading(ch=2, meas_time=meas_time, stdev=stdev)
def sweep(self, label: str, freqs, ampls, sens: int, harm: int,
stb_time: float = 9.,
meas_time: float = 1.,
ampl_time: float = 5.,
L_MAX: int = 50):
"""
Conduct a frequency sweep measurement across one or more voltage
amplitudes.
:param label: (string) label for the sweep data
:param freqs: (scalar or array-like) freqs. to sweep over
:param ampls: (scalar or array-like) amplitudes to sweep over
:param sens: (int) integer indicating lock-in amp. sensitivity setting
:param harm: (int) detection harmonic
:param stb_time: (float) time (s) for stabilization at each freq.
:param meas_time: (float) time (s) for data collection at each freq.
:param ampl_time: (float) time (s) for stabilization at each voltage
:param L_MAX: (int) maximum data array size
:return: (lockin.SweepData) container of pandas `DataFrame`s for
in- and out-of-phase detected voltages, and variances thereof
"""
self.set_harm(harm)
self.set_sens(sens)
ampls = np.asarray(ampls)
freqs = np.asarray(freqs)
if ampls.ndim == 0:
ampls = ampls[None]
if freqs.ndim == 0:
freqs = freqs[None]
# buffer arrays for in- and out-of-phase data
X = np.full((len(ampls), len(freqs), L_MAX), fill_value=np.nan)
Y = np.full((len(ampls), len(freqs), L_MAX), fill_value=np.nan)
for i, V in enumerate(ampls):
self._print('V = {:.2f} volts'.format(V))
self._print('waiting for stabilization after amplitude change...')
self.set_ampl(V)
self.set_freq(freqs[0])
time.sleep(ampl_time)
self._print('')
self._print(LockIn.SWEEP_HEADER.format('', 'freq [Hz]', 'X [V]', 'Y [V]'))
for j, freq in enumerate(freqs):
# self._print("waiting for stabilization at f = {:.4f} Hz "
# "({:d}/{:d})".format(freq, j + 1, len(freqs)))
self.set_freq(freq)
self.cmd('REST')
time.sleep(stb_time)
# self._print('taking measurement')
# beep(repeat=1)
self.cmd('STRT')
time.sleep(meas_time)
self.cmd('PAUS')
# self._print('extracting values')
N = self.cmd('SPTS?')
x_str = self.cmd('TRCA?1,0,' + N)
y_str = self.cmd('TRCA?2,0,' + N)
# list of values measured at a single point
# last character is a newline character
x = np.array([float(_) for _ in x_str.split(',')[:-1]])
y = np.array([float(_) for _ in y_str.split(',')[:-1]])
try:
X[i, j][:len(x)] = x
Y[i, j][:len(x)] = y
except ValueError:
warnings.warn("buffer array overflow encountered at point "
"f = {:.1f} Hz, V = {:.1f} volts"
.format(freq, V))
X[i, j] = x[:L_MAX]
Y[i, j] = y[:L_MAX]
x_ = np.mean(x[~np.isnan(x)])
y_ = np.mean(y[~np.isnan(y)])
self._print(LockIn.SWEEP_BLANK.format(j + 1, freq, x_, y_))
self._print('')
return SweepData(X, Y, freqs, ampls, label, sens, harm)
def get_config(self):
raw_config = {}
for key in SETTINGS_DICT.keys():
if key != 'names':
raw_config[key] = self.cmd(key + '?')
return raw_config
def _print(self, s):
if self.print_to_stdout:
print(s)
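# --- Hedged usage sketch (not part of the original module); the port name and settings are
# assumptions, and only methods defined on LockIn above are used.
# lock = LockIn("COM5")            # or LockIn() to try platform defaults / auto-detection
# lock.set_input_mode("A")
# lock.set_harm(1)
# lock.set_sens(20)
# lock.set_freq(1000.0)
# lock.set_ampl(1.0)               # amplitude must not exceed 5 V
# x_mean, x_std = lock.get_x(meas_time=0.5, stdev=True)
# lock.close()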
class SweepData(object):
"""
Contains the data relevant to a single sweep.
i.e. the amplitude of the oscillations described by the `harm`th harmonic of
the voltage measured across the heater line or shunt, for a driving
voltage `V` in `Vs` at a frequency `freq` in `freqs`.
    The digested values (ex: `V_x[i]` and `dV_x[i]`) at each point are the
average of many measurements at that point and the variance of those
measurements.
"""
def __init__(self, X, Y, freqs, Vs, label, sens, harm):
dt1 = datetime.now()
dt = dt1.strftime("%d-%m-%Y_%H-%M")
self.ID = '_'.join([label, 'HARM' + str(harm), 'SENS' + str(sens), dt])
# frequency and voltage ranges
self.freqs = freqs
self.Vs = Vs
# full raw buffer output from lock-in (padded with NaNs)
self.X = X
self.Y = Y
n = len(freqs)
m = len(Vs)
        # initializing arrays for digests
V_x = np.zeros((m, n)) # in-phase amplitudes (left lockin display)
V_y = np.zeros((m, n)) # out-of-phase amplitudes (right lockin display)
dV_x = np.zeros((m, n)) # variances of buffer outputs over time
dV_y = np.zeros((m, n)) # variances of buffer output over time
for i in range(m):
for j in range(n):
_X_ = X[i, j]
_Y_ = Y[i, j]
_X_ = _X_[~np.isnan(_X_)]
_Y_ = _Y_[~np.isnan(_Y_)]
V_x[i, j] = np.mean(_X_)
V_y[i, j] = np.mean(_Y_)
dV_x[i, j] = np.std(_X_)
dV_y[i, j] = np.std(_Y_)
# converting to DataFrames for readability
self.V_x = pd.DataFrame(V_x.T, index=freqs, columns=Vs)
self.V_y = pd.DataFrame(V_y.T, index=freqs, columns=Vs)
self.dV_x = pd.DataFrame(dV_x.T, index=freqs, columns=Vs)
self.dV_y = | pd.DataFrame(dV_y.T, index=freqs, columns=Vs) | pandas.DataFrame |
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import scipy.stats as ss
import os
#import matplotlib.pyplot as plt
import matplotlib
#matplotlib.get_backend()
from matplotlib import pyplot as plt
import seaborn as sns
#import matplotlib.pyplot as plt
#import matplotlib
#matplotlib.use('TkAgg')
#import matplotlib.pyplot as plt
import json
import os
import ast
from scipy.spatial import distance
import argparse
from collections import Counter, OrderedDict
from operator import itemgetter
#from sklearn.metrics import jaccard_similarity_score
#from sklearn.metrics import jaccard_score
parser = argparse.ArgumentParser()  # e.g. --fields_path, --data_folder_name, --proto
parser.add_argument('--proto', type=str, default="dns")#, required=True)
parser.add_argument('--proto_folder', default=None)#, required=True)
parser.add_argument('--plot_root_dir', type=str, default="./qp_plots")#, required=True)
parser.add_argument('--qp_dir', type=str, default="./qps/out_DNS_10k_query_searchout_May11dns_sec")#, required=True)
parser.add_argument('--depth_pq_file', type=str, default="dnssec_DAG_QPS_depth_median.npy")#, required=True)
#parser.add_argument('--depths', type=str, default="dnssec_DAG_QPS_depth_median.npy")#, required=True)
parser.add_argument('--depth', nargs='+', type=int, help='<Required> depth flag', required=True)
parser.add_argument('--max_plot_similarity', action='store_true', default=False)
parser.add_argument('--all_plot_similarity', action='store_true', default=False)
parser.add_argument('--DNSSEC_True', action='store_true', default=False)
#parser.add_argument('--intermediate_data_folder', type=str, default="./intermediate_data")
#parser.add_argument('--aggregate_summary', default=False, action='store_true')
args = parser.parse_args()
print(args)
# CHANGE these names when generating new data/protocol/signature
# Queries_filename = 'out_dns1kdns_sec-1.csv'
plot_root_dir = args.plot_root_dir# "./qp_plots"
#qp_dir = "."#"/Users/soojin/Google Drive/Research/AmpMap/Eval_Current/MeasurementOut/QueryPattern/out_DNS_10k_query_searchout_May11dns_sec"
qp_dir = args.qp_dir # "./qps/out_DNS_10k_query_searchout_May11dns_sec"
#qp_dir = "./qps/out_dns1kdns_sec/"# "./qps/out_DNS_10k_query_searchout_May11dns_sec"
# PERCENTILE = 98
#######Relevant file
Queries_filename = os.path.join( qp_dir, "ALL_Queries.csv")
sig_filename = os.path.join(qp_dir, 'sigs.npz')
#depth_pq_file = os.path.join(qp_dir,"dnssec_DAG_QPS_depth_median.npy" )
depth_pq_file = os.path.join(qp_dir, args.depth_pq_file )
domain_dnssec = ['berkeley.edu', 'energy.gov', 'aetna.com', 'Nairaland.com']
depth_minus1_file = os.path.join( qp_dir, "Hamming_2.csv")
# Queries_filename = os.path.join( qp_dir, 'ALL_Queries.csv')
# sig_filename = os.path.join(qp_dir, 'sigs.npz')
# depth_pq_file = os.path.join(qp_dir,"dnssec_DAG_QPS_depth_median.npy" )
# domain_dnssec = ['berkeley.edu', 'energy.gov', 'aetna.com', 'Nairaland.com']
# depth_minus1_file = os.path.join( qp_dir, "Hamming_2.csv")
topK = 10
####flag
all_plot_similarity = args.all_plot_similarity
max_plot_similarity = args.max_plot_similarity
DNSSEC_True= args.DNSSEC_True # True
PROTO = args.proto
# if PROTO.lower() == "dns" and DNSSEC_True == True:
# PROTO = "dns-dnssec"
# elif PROTO.lower() == "dns" and DNSSEC_True == False:
# PROTO = "dns-nodnssec"
if args.proto_folder == None:
args.proto_folder = PROTO
print(" ", args.proto_folder )
proto_dir = os.path.join(plot_root_dir, args.proto_folder ) #"yucheng_plots/"+PROTO
SetCover_True = True
# load QPs
# depths = [-1] #6,5,4,3,2,1] #[-1]
depths = args.depth # [0,1,2,3,4,5,6,7,8,9]
########### Hamming QP ######################################
# out_dir = "yucheng_plots/"+PROTO+"/hamming"
# if not os.path.exists(out_dir):
# os.makedirs(out_dir)
# QPs = pd.read_csv(QP_filename)
# QPs.sort_values(by=['amp_fac'], ascending=False,inplace=True)
#############################################################
#plt.clf()
#sys.exit(1)
def compute_percentile(QP_AFs, PERCENTILE, outfile):
QP_AFs_percentile = {}
for key, value in QP_AFs.items():
QP_AFs_percentile[key] = np.percentile(value, PERCENTILE)
QP_AFs_percentile = OrderedDict(sorted(QP_AFs_percentile.items(), key = itemgetter(1), reverse = True))
fp = open(os.path.join(out_dir, outfile+"_PERCENTILE_"+str(PERCENTILE)+".csv"), 'w')
for key, value in QP_AFs_percentile.items():
fp.write("{},{}\n".format(key, value))
fp.close()
return QP_AFs_percentile
# In[6]:
# map one query (currow) to a list of QPs
# Input: one query
# Output: a list of matching QPs index
def map_query_to_QP(currow):
newrow=[]
for row_sig in signatures:
e1=row_sig[0]
if e1 in ['url']:
continue
e2=hc[e1]
# print(e1,e2)
if(e2!=2):
newrow.append(currow[e1])
else:
sigvals=col_info[e1]
# print(sigvals)
curval = currow[e1]
for i,rr in enumerate(sigvals):
if curval in rr[0]:
newrow.append(i)
break
# print(currow,newrow,new_sigs[0,:])
newrow=np.array(newrow)
# print(newrow.shape)
curmatches=[]
for signum,sig in enumerate(new_sigs):
match = 1
for k in range(0,newrow.shape[0]):
sigin=k
v1=newrow[k]
cur_col=sig[sigin]
# print(v1,cur_col)
if '-1' in cur_col:
continue
# match=1
# else:
if str(v1) not in cur_col:
match=0
break
if(match==1):
curmatches.append(signum)
# aux_array.
curAF=currow[0]
# print("ROW : ",newrow)
# print("curmatches: ", curmatches)
# for matches in new_sigs[curmatches]:
# print("matches: ", matches)
return curmatches
# In[7]:
# convert a list of tuples to dict
def merge_tuples(tuple_list):
results = {}
for item in tuple_list:
key = item[0]
value = item[1]
if key not in results:
results[key] = []
results[key].append(value)
return results
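# Quick illustration of merge_tuples (values chosen for demonstration):
# merge_tuples([("a", 1), ("a", 2), ("b", 3)]) -> {"a": [1, 2], "b": [3]}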
def read_from_json(filename):
with open(filename, 'r') as fp:
dict_ = json.load( fp )
return dict_
def output_dict_to_json(dict_, filename):
results = {}
results["children"] = []
for key, value in dict_.items():
result = {"Name": "QP "+str(key), "Count": round(value, 2)}
results["children"].append(result)
with open(filename, 'w') as fp:
json.dump(results, fp)
def output_dict_to_json_v2(dict_, filename):
with open(filename, 'w') as fp:
json.dump(dict_, fp)
# AForCount : 0: AF, 1: Count
def output_dict_to_csv(dict_, filename, AForCount):
with open(filename, 'w') as fp:
if AForCount == 0:
fp.write("QP_index,meanAF\n")
elif AForCount == 1:
fp.write("QP_index,count\n")
elif AForCount == 2:
fp.write("QP_index,medianAF\n")
for key, value in dict_.items():
fp.write("{},{}\n".format(key, value))
def output_json_to_html(infile, outfile, AForCount):
fr = open("bubble_plot.html", 'r')
fw = open(os.path.join(proto_dir, "depth_"+str(DEPTH)+"_"+outfile), 'w')
print(os.path.join(proto_dir, "depth_"+str(DEPTH)+"_"+outfile))
infile = "depth_"+str(DEPTH)+"/"+infile
for line in fr:
if (line.strip().startswith("d3.json")):
fw.write("\t\td3.json(\"%s\", function(dataset) {\n"%infile)
elif (line.strip().startswith("var diameter")):
if AForCount == 0:
fw.write("\t\t\tvar diameter = 800\n")
elif AForCount == 1:
fw.write("\t\t\tvar diameter = 600\n")
else:
fw.write(line)
fr.close()
fw.close()
def output_QP_stats(QP_AFs):
QP_mean_AF = {}
QP_occ = {}
QP_percent = {}
total_len = 0
for key, value in QP_AFs.items():
QP_mean_AF[key] = np.mean(value)
QP_occ[key] = len(value)
total_len += len(value)
for key, value in QP_occ.items():
QP_percent[key] = float(value)/float(total_len)
QP_mean_AF = OrderedDict(sorted(QP_mean_AF.items(), key = itemgetter(1), reverse = True))
QP_occ = OrderedDict(sorted(QP_occ.items(), key = itemgetter(1), reverse = True))
QP_percent = OrderedDict(sorted(QP_percent.items(), key = itemgetter(1), reverse = True))
return QP_mean_AF, QP_occ, QP_percent
# In[24]:
# box plot for top FIVE QPs
# pick TOP by MEAN AF
# box plot for top FIVE QPs
# pick TOP by MEAN AF
def QP_boxplot(QP_AFs, QP_mean_AF, topK, outfile, title, rank_by):
assert(len(QP_AFs) == len(QP_mean_AF))
top_index_num = min(len(QP_mean_AF), topK)
#print("top index num" , top_index_num)
#print("list ",list(QP_mean_AF.keys()))
top_index = list(QP_mean_AF.keys())[:top_index_num]
#print(top_index)
data = []
xlabels = []
nll=[]
plt.style.use(['seaborn-whitegrid', 'seaborn-paper'])
df = pd.DataFrame(columns=['QPs', 'value'])
rowlist=[]
# dict={}
for index in top_index:
values=QP_AFs[index]
for e1 in values:
curd={}
curd['QP']="QP"+str(index)
curd['AF'] = e1
rowlist.append(curd)
# print(rowlist)
df = pd.DataFrame(rowlist)
# print(df.head())
# ()+1
# data.append(QP_AFs[index])
# xlabels.append("QP "+str(index))
# curd
# nll.append
# print(xlabels)
# print(data)
plt.clf()
plt.figure(figsize=(20, 5))
ax = sns.boxplot(x="QP", y="AF", data=df, linewidth=4, palette="Set2",dodge=True,showmeans=True ) # figsize=(15,6))
#ax = sns.boxplot(x='mode',y='count',hue='design',data=df1,linewidth=4, palette="Set2",dodge=True,showmeans=True )
# plt.boxplot(data)
ax.set_xticks([i for i in range(top_index_num)], xlabels) #, fontsize=18)
#ax.set_xticklabels([i for i in range(top_index_num)], xlabels, fontsize=18)
ax.set_ylabel("Amplification Factor", fontsize=24)
ax.set_xlabel("Query Patterns (QP) ranked by {}".format(rank_by), fontsize=25, labelpad=20)
ax.tick_params(axis='x', labelsize=21)
ax.tick_params(axis='y', labelsize=23)
#plt.title(title)
plt.savefig(outfile,bbox_inches='tight')
for DEPTH in depths:
print("DEPTH: ", DEPTH)
########### DEPTH QP ######################################
proto_dir = os.path.join(plot_root_dir , args.proto_folder) # "yucheng_plots/"+ args.proto
if not os.path.exists(proto_dir):
os.makedirs(proto_dir)
#out_dir = plot_root_dir + "/" + PROTO + "/depth_"+str(DEPTH) # "yucheng_plots/"+ args.proto +"/depth_"+str(DEPTH)
out_dir = proto_dir + "/depth_"+str(DEPTH) # "yucheng_plots/"+ args.proto +"/depth_"+str(DEPTH)
#out_dir = plot_root_dir + "/" + args.proto_folder + "/depth_"+str(DEPTH)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
if SetCover_True == True:
print(depth_pq_file)
depth_QPs_dict = np.load( depth_pq_file, allow_pickle=True )
QPs_set = depth_QPs_dict.item()
QPs = QPs_set[DEPTH]
QPs = QPs.applymap(str)
else:
depth_QPs_dict = np.load( depth_pq_file , allow_pickle=True )
QPs = | pd.read_csv(depth_minus1_file) | pandas.read_csv |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.html
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from feeder import VarFeeder
import os
import argparse
import pandas as pd
import numpy
import logging
import yaml
import datetime
import math
import numpy as np
from typing import Tuple
import pickle
from predictor_dl_model.pipeline.util import get_dow
from typing import Tuple, Dict, Collection, List
from pyhive import hive
import json
log = logging.getLogger()
SHUFFLE_BUFFER = 100
def fill_isolated_zeros(x_in):
# x_out = x_in
for i in range(x_in.shape[0]):
ind = np.where(x_in[i,:]!=0)
s, e = ind[0][0], ind[0][-1]
ind_zero = np.where(x_in[i,s:e]==0)
if ind_zero:
for j in ind_zero[0]:
ind_here = s + j
prev = max(s, ind_here-1)
post = min(e, ind_here+1)
if x_in[i,prev]!=0 and x_in[i,post]!=0:
x_in[i, ind_here] = (x_in[i,prev] + x_in[i,post]) / 2
# x_out[i, ind_here] = (x_in[i,prev] + x_in[i,post]) / 2
return
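# Illustration of the in-place repair above (values chosen for demonstration): a zero strictly
# between two non-zero samples is replaced by their average; leading/trailing zeros are untouched.
# x = np.array([[0., 2., 0., 4., 0.]]); fill_isolated_zeros(x) -> x becomes [[0., 2., 3., 4., 0.]]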
def __data_parser(serialized_example):
features = tf.parse_single_example(serialized_example,
features={'page_ix': tf.FixedLenFeature([], tf.string),
'price_cat_1_n': tf.FixedLenFeature([], tf.float32),
'price_cat_2_n': tf.FixedLenFeature([], tf.float32),
'price_cat_3_n': tf.FixedLenFeature([], tf.float32),
# 'hour': tf.FixedLenFeature([], tf.int64),
'g_g_m_n': tf.FixedLenFeature([], tf.float32),
'g_g_f_n': tf.FixedLenFeature([], tf.float32),
'g_g_x_n': tf.FixedLenFeature([], tf.float32),
'g__n': tf.FixedLenFeature([], tf.float32),
'a__n': tf.FixedLenFeature([], tf.float32),
'a_1_n': tf.FixedLenFeature([], tf.float32),
'a_2_n': tf.FixedLenFeature([], tf.float32),
'a_3_n': tf.FixedLenFeature([], tf.float32),
'a_4_n': tf.FixedLenFeature([], tf.float32),
'a_5_n': tf.FixedLenFeature([], tf.float32),
'a_6_n': tf.FixedLenFeature([], tf.float32),
't_3G_n': tf.FixedLenFeature([], tf.float32),
't_4G_n': tf.FixedLenFeature([], tf.float32),
't_UNKNOWN_n': tf.FixedLenFeature([], tf.float32),
't_WIFI_n': tf.FixedLenFeature([], tf.float32),
't_2G_n': tf.FixedLenFeature([], tf.float32),
'si_vec_n': tf.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
'ts_n': tf.FixedLenSequenceFeature([], tf.float32, allow_missing=True),
'p_n': tf.FixedLenFeature([], tf.float32),
})
uckey = tf.cast(features['page_ix'], tf.string)
price_cat_1 = tf.cast(features['price_cat_1_n'], tf.float32)
price_cat_2 = tf.cast(features['price_cat_2_n'], tf.float32)
price_cat_3 = tf.cast(features['price_cat_3_n'], tf.float32)
gender_x = tf.cast(features['g_g_x_n'], tf.float32)
gender_f = tf.cast(features['g_g_f_n'], tf.float32)
gender_m = tf.cast(features['g_g_m_n'], tf.float32)
gender_no = tf.cast(features['g__n'], tf.float32)
age_no = tf.cast(features['a__n'], tf.float32)
age_1 = tf.cast(features['a_1_n'], tf.float32)
age_2 = tf.cast(features['a_2_n'], tf.float32)
age_3 = tf.cast(features['a_3_n'], tf.float32)
age_4 = tf.cast(features['a_4_n'], tf.float32)
age_5 = tf.cast(features['a_5_n'], tf.float32)
age_6 = tf.cast(features['a_6_n'], tf.float32)
t_3G = tf.cast(features['t_3G_n'], tf.float32)
t_4G = tf.cast(features['t_4G_n'], tf.float32)
t_2G =tf.cast(features['t_2G_n'], tf.float32)
t_UNKNOWN =tf.cast(features['t_UNKNOWN_n'], tf.float32)
t_WIFI =tf.cast(features['t_WIFI_n'], tf.float32)
si = tf.cast(features['si_vec_n'], tf.float32)
hits = tf.cast(features['ts_n'], tf.float32)
page_popularity = tf.cast(features['p_n'], tf.float32)
return uckey, price_cat_1, price_cat_2,price_cat_3, gender_no, gender_f, gender_m, gender_x, age_no,age_1, age_2, age_3, age_4,age_5, age_6, \
t_2G, t_3G,t_4G,t_UNKNOWN, t_WIFI, si,hits,page_popularity
def holiday_norm(day):
return math.sin(day), math.cos(day)
def lag_indexes(tf_stat)-> List[pd.Series]:
"""
Calculates indexes for 3, 6, 9, 12 months backward lag for the given date range
:param begin: start of date range
:param end: end of date range
:return: List of 4 Series, one for each lag. For each Series, index is date in range(begin, end), value is an index
of target (lagged) date in a same Series. If target date is out of (begin,end) range, index is -1
"""
date_range = pd.date_range(tf_stat['days'][0],tf_stat['days'][-1])
# key is date, value is day index
base_index = pd.Series(np.arange(0, len(date_range)),index=date_range)
def lag(offset):
dates = date_range - offset
return pd.Series(data=base_index[dates].fillna(-1).astype(np.int16).values, index=date_range)
return [lag(pd.DateOffset(months=m)) for m in (1, 2)]
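# Illustrative behaviour of lag_indexes (dates are assumptions): if tf_stat['days'] spans
# 2020-01-01 .. 2020-03-31, the 1-month-lag Series maps 2020-02-15 to the positional index of
# 2020-01-15, while dates whose lagged counterpart falls before 2020-01-01 are filled with -1.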
def run(cfg):
conn = hive.Connection(host='10.213.37.46', username='hive', password='<PASSWORD>', auth='CUSTOM')
cursor = conn.cursor()
cursor.execute('select * from dlpm_11092020_model_stat')
stat_model = cursor.fetchone()
model_info= json.loads(stat_model[0])
stat_info = json.loads(stat_model[1])
names = []
tfrecord_location = cfg['tfrecords_local_path']
for file in os.listdir(tfrecord_location):
if file.startswith("part"):
names.append(file)
file_paths = [os.path.join(tfrecord_location, name) for name in names]
# read and make the dataset from tfrecord
dataset = tf.data.TFRecordDataset(file_paths)
dataset = dataset.map(__data_parser)
batch_size = cfg['batch_size']
duration = cfg['duration']
dataset = dataset.batch(batch_size).shuffle(SHUFFLE_BUFFER)
iterator = dataset.make_one_shot_iterator()
next_el = iterator.get_next()
# lagged_ix = numpy.ones((duration, 4), dtype=float)
# lagged_ix = np.where(lagged_ix == 1, -1, lagged_ix)
lagged_ix = np.stack(lag_indexes(model_info), axis=-1)
# quarter_autocorr = numpy.ones((batch_size,), dtype=float)
date_list = model_info['days']
dow =get_dow(date_list)
holiday_list = cfg['holidays']
holidays = [1 if _ in holiday_list else 0 for _ in date_list]
a_list = []
b_list = []
for _ in holidays:
a,b =holiday_norm(_)
a_list.append(a)
b_list.append(b)
holiday = (a_list, b_list)
with tf.Session() as sess:
x = sess.run(next_el)
quarter_autocorr = numpy.ones((x[0].size,), dtype=float)
page_indx = list(x[0])
# fill_isolated_zeros(x[21])
tensors = dict(
hits=pd.DataFrame(x[21], index=page_indx, columns=date_list),
lagged_ix=lagged_ix,
page_ix=page_indx,
pf_age=pd.DataFrame(x[8:15], columns=page_indx, index = (1,2,3,4,5,6,7)).T,
pf_si=pd.DataFrame(x[20], index = page_indx ),
pf_network=pd.DataFrame(x[15:20], columns=page_indx, index=('2G','3G','4G', 'UNKNOWN','WIFI')).T,
pf_price_cat=pd.DataFrame(x[1:4], columns=page_indx, index=('pc1','pc2','pc3')).T,
pf_gender=pd.DataFrame(x[4:8], columns=page_indx, index= ('none','f','m','x')).T,
page_popularity=x[22],
# page_popularity = quarter_autocorr,
quarter_autocorr= quarter_autocorr,
dow= | pd.DataFrame(dow) | pandas.DataFrame |
"""
Routines for casting.
"""
from contextlib import suppress
from datetime import date, datetime, timedelta
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Set,
Sized,
Tuple,
Type,
Union,
)
import numpy as np
from pandas._libs import lib, tslib, tslibs
from pandas._libs.tslibs import (
NaT,
OutOfBoundsDatetime,
Period,
Timedelta,
Timestamp,
conversion,
iNaT,
ints_to_pydatetime,
ints_to_pytimedelta,
)
from pandas._libs.tslibs.timezones import tz_compare
from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj, Scalar, Shape
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
INT64_DTYPE,
POSSIBLY_CAST_DTYPES,
TD64NS_DTYPE,
ensure_int8,
ensure_int16,
ensure_int32,
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_categorical_dtype,
is_complex,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_sparse,
is_string_dtype,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype,
ExtensionDtype,
IntervalDtype,
PeriodDtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeArray,
ABCDatetimeIndex,
ABCExtensionArray,
ABCPeriodArray,
ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import is_list_like
from pandas.core.dtypes.missing import (
is_valid_nat_for_dtype,
isna,
na_value_for_dtype,
notna,
)
if TYPE_CHECKING:
from pandas import Series
from pandas.core.arrays import ExtensionArray
from pandas.core.indexes.base import Index
_int8_max = np.iinfo(np.int8).max
_int16_max = np.iinfo(np.int16).max
_int32_max = np.iinfo(np.int32).max
_int64_max = np.iinfo(np.int64).max
def maybe_convert_platform(values):
""" try to do platform conversion, allow ndarray or list here """
if isinstance(values, (list, tuple, range)):
values = construct_1d_object_array_from_listlike(values)
if getattr(values, "dtype", None) == np.object_:
if hasattr(values, "_values"):
values = values._values
values = lib.maybe_convert_objects(values)
return values
def is_nested_object(obj) -> bool:
"""
return a boolean if we have a nested object, e.g. a Series with 1 or
more Series elements
This may not be necessarily be performant.
"""
if isinstance(obj, ABCSeries) and is_object_dtype(obj.dtype):
if any(isinstance(v, ABCSeries) for v in obj._values):
return True
return False
def maybe_box_datetimelike(value: Scalar, dtype: Optional[Dtype] = None) -> Scalar:
"""
Cast scalar to Timestamp or Timedelta if scalar is datetime-like
and dtype is not object.
Parameters
----------
value : scalar
dtype : Dtype, optional
Returns
-------
scalar
"""
if dtype == object:
pass
elif isinstance(value, (np.datetime64, datetime)):
value = tslibs.Timestamp(value)
elif isinstance(value, (np.timedelta64, timedelta)):
value = tslibs.Timedelta(value)
return value
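# Examples (follow directly from the branches above): numpy datetime/timedelta scalars are
# wrapped into pandas scalars unless an object dtype is requested.
# maybe_box_datetimelike(np.datetime64("2021-01-01"))          -> Timestamp("2021-01-01 00:00:00")
# maybe_box_datetimelike(np.timedelta64(1, "D"))               -> Timedelta("1 days")
# maybe_box_datetimelike(np.datetime64("2021-01-01"), object)  -> np.datetime64("2021-01-01") (unchanged)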
def maybe_downcast_to_dtype(result, dtype: Union[str, np.dtype]):
"""
try to cast to the specified dtype (e.g. convert back to bool/int
or could be an astype of float64->float32
"""
do_round = False
if is_scalar(result):
return result
elif isinstance(result, ABCDataFrame):
# occurs in pivot_table doctest
return result
if isinstance(dtype, str):
if dtype == "infer":
inferred_type = lib.infer_dtype(ensure_object(result), skipna=False)
if inferred_type == "boolean":
dtype = "bool"
elif inferred_type == "integer":
dtype = "int64"
elif inferred_type == "datetime64":
dtype = "datetime64[ns]"
elif inferred_type == "timedelta64":
dtype = "timedelta64[ns]"
# try to upcast here
elif inferred_type == "floating":
dtype = "int64"
if issubclass(result.dtype.type, np.number):
do_round = True
else:
dtype = "object"
dtype = np.dtype(dtype)
elif dtype.type is Period:
from pandas.core.arrays import PeriodArray
with suppress(TypeError):
# e.g. TypeError: int() argument must be a string, a
# bytes-like object or a number, not 'Period
return PeriodArray(result, freq=dtype.freq)
converted = maybe_downcast_numeric(result, dtype, do_round)
if converted is not result:
return converted
# a datetimelike
# GH12821, iNaT is cast to float
if dtype.kind in ["M", "m"] and result.dtype.kind in ["i", "f"]:
if hasattr(dtype, "tz"):
# not a numpy dtype
if dtype.tz:
# convert to datetime and change timezone
from pandas import to_datetime
result = to_datetime(result).tz_localize("utc")
result = result.tz_convert(dtype.tz)
else:
result = result.astype(dtype)
return result
def maybe_downcast_numeric(result, dtype: DtypeObj, do_round: bool = False):
"""
Subset of maybe_downcast_to_dtype restricted to numeric dtypes.
Parameters
----------
result : ndarray or ExtensionArray
dtype : np.dtype or ExtensionDtype
do_round : bool
Returns
-------
ndarray or ExtensionArray
"""
if not isinstance(dtype, np.dtype):
# e.g. SparseDtype has no itemsize attr
return result
if isinstance(result, list):
# reached via groupby.agg._ohlc; really this should be handled earlier
result = np.array(result)
def trans(x):
if do_round:
return x.round()
return x
if dtype.kind == result.dtype.kind:
# don't allow upcasts here (except if empty)
if result.dtype.itemsize <= dtype.itemsize and result.size:
return result
if is_bool_dtype(dtype) or is_integer_dtype(dtype):
if not result.size:
# if we don't have any elements, just astype it
return trans(result).astype(dtype)
# do a test on the first element, if it fails then we are done
r = result.ravel()
arr = np.array([r[0]])
if isna(arr).any():
# if we have any nulls, then we are done
return result
elif not isinstance(r[0], (np.integer, np.floating, int, float, bool)):
# a comparable, e.g. a Decimal may slip in here
return result
if (
issubclass(result.dtype.type, (np.object_, np.number))
and notna(result).all()
):
new_result = trans(result).astype(dtype)
if new_result.dtype.kind == "O" or result.dtype.kind == "O":
# np.allclose may raise TypeError on object-dtype
if (new_result == result).all():
return new_result
else:
if np.allclose(new_result, result, rtol=0):
return new_result
elif (
issubclass(dtype.type, np.floating)
and not is_bool_dtype(result.dtype)
and not is_string_dtype(result.dtype)
):
return result.astype(dtype)
return result
def maybe_cast_result(
result: ArrayLike, obj: "Series", numeric_only: bool = False, how: str = ""
) -> ArrayLike:
"""
Try casting result to a different type if appropriate
Parameters
----------
result : array-like
Result to cast.
obj : Series
Input Series from which result was calculated.
numeric_only : bool, default False
Whether to cast only numerics or datetimes as well.
how : str, default ""
How the result was computed.
Returns
-------
result : array-like
result maybe casted to the dtype.
"""
dtype = obj.dtype
dtype = maybe_cast_result_dtype(dtype, how)
assert not is_scalar(result)
if (
is_extension_array_dtype(dtype)
and not is_categorical_dtype(dtype)
and dtype.kind != "M"
):
# We have to special case categorical so as not to upcast
# things like counts back to categorical
cls = dtype.construct_array_type()
result = maybe_cast_to_extension_array(cls, result, dtype=dtype)
elif numeric_only and is_numeric_dtype(dtype) or not numeric_only:
result = maybe_downcast_to_dtype(result, dtype)
return result
def maybe_cast_result_dtype(dtype: DtypeObj, how: str) -> DtypeObj:
"""
Get the desired dtype of a result based on the
input dtype and how it was computed.
Parameters
----------
dtype : DtypeObj
Input dtype.
how : str
How the result was computed.
Returns
-------
DtypeObj
The desired dtype of the result.
"""
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.integer import Int64Dtype
if how in ["add", "cumsum", "sum"] and (dtype == np.dtype(bool)):
return np.dtype(np.int64)
elif how in ["add", "cumsum", "sum"] and isinstance(dtype, BooleanDtype):
return Int64Dtype()
return dtype
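# Examples (follow directly from the rules above):
# maybe_cast_result_dtype(np.dtype(bool), "sum")  -> np.dtype("int64")    # bool sums accumulate as int64
# maybe_cast_result_dtype(np.dtype(float), "sum") -> np.dtype("float64")  # unchanged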
def maybe_cast_to_extension_array(
cls: Type["ExtensionArray"], obj: ArrayLike, dtype: Optional[ExtensionDtype] = None
) -> ArrayLike:
"""
Call to `_from_sequence` that returns the object unchanged on Exception.
Parameters
----------
cls : class, subclass of ExtensionArray
obj : arraylike
Values to pass to cls._from_sequence
dtype : ExtensionDtype, optional
Returns
-------
ExtensionArray or obj
"""
from pandas.core.arrays.string_ import StringArray
from pandas.core.arrays.string_arrow import ArrowStringArray
assert isinstance(cls, type), f"must pass a type: {cls}"
assertion_msg = f"must pass a subclass of ExtensionArray: {cls}"
assert issubclass(cls, ABCExtensionArray), assertion_msg
# Everything can be converted to StringArrays, but we may not want to convert
if (
issubclass(cls, (StringArray, ArrowStringArray))
and lib.infer_dtype(obj) != "string"
):
return obj
try:
result = cls._from_sequence(obj, dtype=dtype)
except Exception:
# We can't predict what downstream EA constructors may raise
result = obj
return result
def maybe_upcast_putmask(
result: np.ndarray, mask: np.ndarray, other: Scalar
) -> Tuple[np.ndarray, bool]:
"""
A safe version of putmask that potentially upcasts the result.
The result is replaced with the first N elements of other,
where N is the number of True values in mask.
If the length of other is shorter than N, other will be repeated.
Parameters
----------
result : ndarray
The destination array. This will be mutated in-place if no upcasting is
necessary.
mask : boolean ndarray
other : scalar
The source value.
Returns
-------
result : ndarray
changed : bool
Set to true if the result array was upcasted.
Examples
--------
>>> arr = np.arange(1, 6)
>>> mask = np.array([False, True, False, True, True])
>>> result, _ = maybe_upcast_putmask(arr, mask, False)
>>> result
array([1, 0, 3, 0, 0])
"""
if not isinstance(result, np.ndarray):
raise ValueError("The result input must be a ndarray.")
if not is_scalar(other):
# We _could_ support non-scalar other, but until we have a compelling
# use case, we assume away the possibility.
raise ValueError("other must be a scalar")
if mask.any():
# Two conversions for date-like dtypes that can't be done automatically
# in np.place:
# NaN -> NaT
# integer or integer array -> date-like array
if result.dtype.kind in ["m", "M"]:
if isna(other):
other = result.dtype.type("nat")
elif is_integer(other):
other = np.array(other, dtype=result.dtype)
def changeit():
# we are forced to change the dtype of the result as the input
# isn't compatible
r, _ = maybe_upcast(result, fill_value=other, copy=True)
np.place(r, mask, other)
return r, True
# we want to decide whether place will work
# if we have nans in the False portion of our mask then we need to
# upcast (possibly), otherwise we DON't want to upcast (e.g. if we
# have values, say integers, in the success portion then it's ok to not
# upcast)
new_dtype, _ = maybe_promote(result.dtype, other)
if new_dtype != result.dtype:
# we have a scalar or len 0 ndarray
# and its nan and we are changing some values
if isna(other):
return changeit()
try:
np.place(result, mask, other)
except TypeError:
# e.g. int-dtype result and float-dtype other
return changeit()
return result, False
def maybe_casted_values(
index: "Index", codes: Optional[np.ndarray] = None
) -> ArrayLike:
"""
Convert an index, given directly or as a pair (level, code), to a 1D array.
Parameters
----------
index : Index
codes : np.ndarray[intp] or None, default None
Returns
-------
ExtensionArray or ndarray
If codes is `None`, the values of `index`.
If codes is passed, an array obtained by taking from `index` the indices
contained in `codes`.
"""
values = index._values
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the codes, extract the values with a mask
if codes is not None:
mask: np.ndarray = codes == -1
if mask.size > 0 and mask.all():
# we can have situations where the whole mask is -1,
# meaning there is nothing found in codes, so make all nan's
dtype = index.dtype
fill_value = na_value_for_dtype(dtype)
values = construct_1d_arraylike_from_scalar(fill_value, len(mask), dtype)
else:
values = values.take(codes)
if mask.any():
if isinstance(values, np.ndarray):
values, _ = maybe_upcast_putmask(values, mask, np.nan)
else:
values[mask] = np.nan
return values
def maybe_promote(dtype, fill_value=np.nan):
"""
Find the minimal dtype that can hold both the given dtype and fill_value.
Parameters
----------
dtype : np.dtype or ExtensionDtype
fill_value : scalar, default np.nan
Returns
-------
dtype
Upcasted from dtype argument if necessary.
fill_value
Upcasted from fill_value argument if necessary.
"""
if not is_scalar(fill_value) and not is_object_dtype(dtype):
# with object dtype there is nothing to promote, and the user can
# pass pretty much any weird fill_value they like
raise ValueError("fill_value must be a scalar")
# if we passed an array here, determine the fill value by dtype
if isinstance(fill_value, np.ndarray):
if issubclass(fill_value.dtype.type, (np.datetime64, np.timedelta64)):
fill_value = fill_value.dtype.type("NaT", "ns")
else:
# we need to change to object type as our
# fill_value is of object type
if fill_value.dtype == np.object_:
dtype = np.dtype(np.object_)
fill_value = np.nan
if dtype == np.object_ or dtype.kind in ["U", "S"]:
# We treat string-like dtypes as object, and _always_ fill
# with np.nan
fill_value = np.nan
dtype = np.dtype(np.object_)
# returns tuple of (dtype, fill_value)
if issubclass(dtype.type, np.datetime64):
if isinstance(fill_value, datetime) and fill_value.tzinfo is not None:
# Trying to insert tzaware into tznaive, have to cast to object
dtype = np.dtype(np.object_)
elif is_integer(fill_value) or (is_float(fill_value) and not isna(fill_value)):
dtype = np.dtype(np.object_)
else:
try:
fill_value = Timestamp(fill_value).to_datetime64()
except (TypeError, ValueError):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.timedelta64):
if (
is_integer(fill_value)
or (is_float(fill_value) and not np.isnan(fill_value))
or isinstance(fill_value, str)
):
# TODO: What about str that can be a timedelta?
dtype = np.dtype(np.object_)
else:
try:
fv = Timedelta(fill_value)
except ValueError:
dtype = np.dtype(np.object_)
else:
if fv is NaT:
# NaT has no `to_timedelta64` method
fill_value = np.timedelta64("NaT", "ns")
else:
fill_value = fv.to_timedelta64()
elif is_datetime64tz_dtype(dtype):
if isna(fill_value):
fill_value = NaT
elif not isinstance(fill_value, datetime):
dtype = np.dtype(np.object_)
elif fill_value.tzinfo is None:
dtype = np.dtype(np.object_)
elif not tz_compare(fill_value.tzinfo, dtype.tz):
# TODO: sure we want to cast here?
dtype = np.dtype(np.object_)
elif is_extension_array_dtype(dtype) and isna(fill_value):
fill_value = dtype.na_value
elif is_float(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.integer):
dtype = np.dtype(np.float64)
elif dtype.kind == "f":
mst = np.min_scalar_type(fill_value)
if mst > dtype:
# e.g. mst is np.float64 and dtype is np.float32
dtype = mst
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
elif is_bool(fill_value):
if not issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif is_integer(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, np.integer):
if not np.can_cast(fill_value, dtype):
# upcast to prevent overflow
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
if dtype.kind == "f":
# Case where we disagree with numpy
dtype = np.dtype(np.object_)
elif is_complex(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.dtype(np.object_)
elif issubclass(dtype.type, (np.integer, np.floating)):
mst = np.min_scalar_type(fill_value)
dtype = np.promote_types(dtype, mst)
elif dtype.kind == "c":
mst = np.min_scalar_type(fill_value)
if mst > dtype:
# e.g. mst is np.complex128 and dtype is np.complex64
dtype = mst
elif fill_value is None:
if is_float_dtype(dtype) or is_complex_dtype(dtype):
fill_value = np.nan
elif is_integer_dtype(dtype):
dtype = np.float64
fill_value = np.nan
elif is_datetime_or_timedelta_dtype(dtype):
fill_value = dtype.type("NaT", "ns")
else:
dtype = np.dtype(np.object_)
fill_value = np.nan
else:
dtype = np.dtype(np.object_)
# in case we have a string that looked like a number
if is_extension_array_dtype(dtype):
pass
elif issubclass(np.dtype(dtype).type, (bytes, str)):
dtype = np.dtype(np.object_)
fill_value = _ensure_dtype_type(fill_value, dtype)
return dtype, fill_value
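# Examples (derived from the promotion rules above):
# maybe_promote(np.dtype("int64"), np.nan) -> (np.dtype("float64"), np.nan)  # NaN forces a float dtype
# maybe_promote(np.dtype("bool"), 1)       -> (np.dtype("object"), 1)        # an integer cannot be held by bool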
def _ensure_dtype_type(value, dtype: DtypeObj):
"""
Ensure that the given value is an instance of the given dtype.
    e.g. if our dtype is np.complex64, we should have an instance of that
    as opposed to a Python complex object.
Parameters
----------
value : object
dtype : np.dtype or ExtensionDtype
Returns
-------
object
"""
# Start with exceptions in which we do _not_ cast to numpy types
if is_extension_array_dtype(dtype):
return value
elif dtype == np.object_:
return value
elif isna(value):
# e.g. keep np.nan rather than try to cast to np.float32(np.nan)
return value
return dtype.type(value)
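# A minimal illustrative sketch (not part of the pandas source): plain Python
# scalars are wrapped in the matching NumPy scalar type, while NA values and
# object dtype are passed through untouched. The helper name is ours.
def _ensure_dtype_type_examples():
    assert isinstance(_ensure_dtype_type(1, np.dtype(np.int64)), np.int64)
    # np.nan is kept as-is rather than being coerced to e.g. np.float32(nan).
    assert _ensure_dtype_type(np.nan, np.dtype(np.float32)) is np.nan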
def infer_dtype_from(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, Any]:
"""
Interpret the dtype from a scalar or array.
Parameters
----------
val : object
    pandas_dtype : bool, default False
        Whether to infer dtype including pandas extension types.
        If False, a scalar/array belonging to a pandas extension type is
        inferred as object.
"""
if is_scalar(val):
return infer_dtype_from_scalar(val, pandas_dtype=pandas_dtype)
return infer_dtype_from_array(val, pandas_dtype=pandas_dtype)
def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, Any]:
"""
Interpret the dtype from a scalar.
Parameters
----------
    val : object
    pandas_dtype : bool, default False
        Whether to infer dtype including pandas extension types.
        If False, a scalar belonging to a pandas extension type is
        inferred as object.
"""
dtype: DtypeObj = np.dtype(object)
    # a 0-dimensional (scalar) ndarray
if isinstance(val, np.ndarray):
msg = "invalid ndarray passed to infer_dtype_from_scalar"
if val.ndim != 0:
raise ValueError(msg)
dtype = val.dtype
val = val.item()
elif isinstance(val, str):
        # If we create an empty array using a string to infer
        # the dtype, NumPy will only allocate one character per entry,
        # which is not what we want. Alternatively we could use np.repeat
        # instead of np.empty (but then you still don't want things
        # coming out as np.str_!).
dtype = np.dtype(object)
elif isinstance(val, (np.datetime64, datetime)):
val = Timestamp(val)
if val is NaT or val.tz is None:
dtype = np.dtype("M8[ns]")
else:
if pandas_dtype:
dtype = DatetimeTZDtype(unit="ns", tz=val.tz)
else:
# return datetimetz as object
return np.dtype(object), val
val = val.value
elif isinstance(val, (np.timedelta64, timedelta)):
val = Timedelta(val).value
dtype = np.dtype("m8[ns]")
elif is_bool(val):
dtype = np.dtype(np.bool_)
elif is_integer(val):
if isinstance(val, np.integer):
dtype = np.dtype(type(val))
else:
dtype = np.dtype(np.int64)
try:
np.array(val, dtype=dtype)
except OverflowError:
dtype = np.array(val).dtype
elif is_float(val):
if isinstance(val, np.floating):
dtype = np.dtype(type(val))
else:
dtype = np.dtype(np.float64)
elif is_complex(val):
dtype = np.dtype(np.complex_)
elif pandas_dtype:
if lib.is_period(val):
dtype = PeriodDtype(freq=val.freq)
elif lib.is_interval(val):
subtype = infer_dtype_from_scalar(val.left, pandas_dtype=True)[0]
dtype = IntervalDtype(subtype=subtype)
return dtype, val
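# A minimal illustrative sketch (not part of the pandas source): concrete
# outcomes of the branches above; the helper name is ours.
def _infer_dtype_from_scalar_examples():
    assert infer_dtype_from_scalar(1)[0] == np.dtype(np.int64)
    assert infer_dtype_from_scalar(1.5)[0] == np.dtype(np.float64)
    # Strings are deliberately inferred as object rather than a fixed-width
    # NumPy string dtype.
    assert infer_dtype_from_scalar("foo")[0] == np.dtype(object)
    # A tz-aware Timestamp keeps its extension dtype only with
    # pandas_dtype=True; by default it is reported as object.
    ts = Timestamp("2020-01-01", tz="UTC")
    assert infer_dtype_from_scalar(ts)[0] == np.dtype(object)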
def dict_compat(d: Dict[Scalar, Scalar]) -> Dict[Scalar, Scalar]:
"""
Convert datetimelike-keyed dicts to a Timestamp-keyed dict.
Parameters
----------
    d : dict-like object
Returns
-------
dict
"""
return {maybe_box_datetimelike(key): value for key, value in d.items()}
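# A minimal illustrative sketch (not part of the pandas source): datetime keys
# are boxed as Timestamps so they compare consistently with pandas indexes.
def _dict_compat_example():
    d = {datetime(2020, 1, 1): 1.0}
    assert dict_compat(d) == {Timestamp("2020-01-01"): 1.0}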
def infer_dtype_from_array(
arr, pandas_dtype: bool = False
) -> Tuple[DtypeObj, ArrayLike]:
"""
Infer the dtype from an array.
Parameters
----------
arr : array
    pandas_dtype : bool, default False
        Whether to infer dtype including pandas extension types.
        If False, an array belonging to a pandas extension type
        is inferred as object.
    Returns
    -------
    tuple (numpy-compat/pandas-compat dtype, array)
    Notes
    -----
    If pandas_dtype=False, these infer to numpy dtypes
    exactly, with the exception that mixed / object dtypes
    are not coerced by stringifying or conversion.
    If pandas_dtype=True, datetime64tz-aware/categorical
    types will retain their character.
Examples
--------
>>> np.asarray([1, '1'])
array(['1', '1'], dtype='<U21')
>>> infer_dtype_from_array([1, '1'])
(dtype('O'), [1, '1'])
"""
if isinstance(arr, np.ndarray):
return arr.dtype, arr
if not is_list_like(arr):
arr = [arr]
if pandas_dtype and is_extension_array_dtype(arr):
return arr.dtype, arr
elif isinstance(arr, ABCSeries):
return arr.dtype, np.asarray(arr)
# don't force numpy coerce with nan's
inferred = lib.infer_dtype(arr, skipna=False)
if inferred in ["string", "bytes", "mixed", "mixed-integer"]:
return (np.dtype(np.object_), arr)
arr = np.asarray(arr)
return arr.dtype, arr
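# A minimal illustrative sketch (not part of the pandas source): ndarrays
# short-circuit and come back with their own dtype; the mixed-list case is
# covered by the doctest in the docstring above.
def _infer_dtype_from_array_example():
    dt, arr = infer_dtype_from_array(np.array([1.0, 2.0]))
    assert dt == np.dtype(np.float64)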
def maybe_infer_dtype_type(element):
"""
Try to infer an object's dtype, for use in arithmetic ops.
Uses `element.dtype` if that's available.
Objects implementing the iterator protocol are cast to a NumPy array,
and from there the array's type is used.
Parameters
----------
element : object
Possibly has a `.dtype` attribute, and possibly the iterator
protocol.
Returns
-------
tipo : type
Examples
--------
>>> from collections import namedtuple
>>> Foo = namedtuple("Foo", "dtype")
>>> maybe_infer_dtype_type(Foo(np.dtype("i8")))
dtype('int64')
"""
tipo = None
if hasattr(element, "dtype"):
tipo = element.dtype
elif is_list_like(element):
element = np.asarray(element)
tipo = element.dtype
return tipo
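# A minimal illustrative sketch (not part of the pandas source): list-likes
# without a ``dtype`` attribute are passed through np.asarray first.
def _maybe_infer_dtype_type_example():
    assert maybe_infer_dtype_type([1.0, 2.0]) == np.dtype(np.float64)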
def maybe_upcast(
values: ArrayLike,
fill_value: Scalar = np.nan,
dtype: Dtype = None,
copy: bool = False,
) -> Tuple[ArrayLike, Scalar]:
"""
Provide explicit type promotion and coercion.
Parameters
----------
values : ndarray or ExtensionArray
The array that we want to maybe upcast.
    fill_value : the value we want to fill with
    dtype : if None, then use the dtype of the values, else coerce to this type
    copy : bool, default False
        If True, always make a copy even if no upcast is required.
Returns
-------
values: ndarray or ExtensionArray
the original array, possibly upcast
fill_value:
the fill value, possibly upcast
"""
if not is_scalar(fill_value) and not is_object_dtype(values.dtype):
# We allow arbitrary fill values for object dtype
raise ValueError("fill_value must be a scalar")
if is_extension_array_dtype(values):
if copy:
values = values.copy()
else:
if dtype is None:
dtype = values.dtype
new_dtype, fill_value = maybe_promote(dtype, fill_value)
if new_dtype != values.dtype:
values = values.astype(new_dtype)
elif copy:
values = values.copy()
return values, fill_value
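# A minimal illustrative sketch (not part of the pandas source): filling an
# int64 array with NaN forces an upcast of the values via maybe_promote above.
def _maybe_upcast_example():
    values, fv = maybe_upcast(np.array([1, 2, 3], dtype=np.int64), fill_value=np.nan)
    assert values.dtype == np.dtype(np.float64) and np.isnan(fv)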
def invalidate_string_dtypes(dtype_set: Set[DtypeObj]):
"""
    Change string-like dtypes to object for
``DataFrame.select_dtypes()``.
"""
non_string_dtypes = dtype_set - {np.dtype("S").type, np.dtype("<U").type}
if non_string_dtypes != dtype_set:
raise TypeError("string dtypes are not allowed, use 'object' instead")
def coerce_indexer_dtype(indexer, categories):
""" coerce the indexer input array to the smallest dtype possible """
length = len(categories)
if length < _int8_max:
return ensure_int8(indexer)
elif length < _int16_max:
return ensure_int16(indexer)
elif length < _int32_max:
return ensure_int32(indexer)
return ensure_int64(indexer)
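# A minimal illustrative sketch (not part of the pandas source): the code width
# tracks the number of categories, so a tiny categorical gets int8 codes.
def _coerce_indexer_dtype_example():
    codes = coerce_indexer_dtype(np.array([0, 1, 2, 0]), categories=["a", "b", "c"])
    assert codes.dtype == np.dtype(np.int8)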
def astype_nansafe(
arr, dtype: DtypeObj, copy: bool = True, skipna: bool = False
) -> ArrayLike:
"""
    Cast the elements of an array to a given dtype in a nan-safe manner.
Parameters
----------
arr : ndarray
dtype : np.dtype
copy : bool, default True
        If False, a view will be attempted but may fail if,
        e.g., the item sizes don't align.
    skipna : bool, default False
Whether or not we should skip NaN when casting as a string-type.
Raises
------
ValueError
The dtype was a datetime64/timedelta64 dtype, but it had no unit.
"""
# dispatch on extension dtype if needed
if is_extension_array_dtype(dtype):
return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, copy=copy)
if not isinstance(dtype, np.dtype):
dtype = pandas_dtype(dtype)
if issubclass(dtype.type, str):
return lib.ensure_string_array(
arr.ravel(), skipna=skipna, convert_na_value=False
).reshape(arr.shape)
elif is_datetime64_dtype(arr):
if is_object_dtype(dtype):
return ints_to_pydatetime(arr.view(np.int64))
elif dtype == np.int64:
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
# allow frequency conversions
if dtype.kind == "M":
return arr.astype(dtype)
raise TypeError(f"cannot astype a datetimelike from [{arr.dtype}] to [{dtype}]")
elif is_timedelta64_dtype(arr):
if is_object_dtype(dtype):
return ints_to_pytimedelta(arr.view(np.int64))
elif dtype == np.int64:
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
if dtype not in [INT64_DTYPE, TD64NS_DTYPE]:
# allow frequency conversions
# we return a float here!
if dtype.kind == "m":
                mask = isna(arr)  # target API: pandas.core.dtypes.missing.isna
import sys
import numpy as np
from skimage import measure
sys.path.append("../")
def test_clustering_widget(make_napari_viewer):
import napari_clusters_plotter as ncp
viewer = make_napari_viewer(strict_qt=True)
widget_list = ncp.napari_experimental_provide_dock_widget()
n_wdgts = len(viewer.window._dock_widgets)
for widget in widget_list:
_widget = widget(viewer)
if isinstance(
_widget, ncp._dimensionality_reduction.DimensionalityReductionWidget
):
plot_widget = _widget
viewer.window.add_dock_widget(plot_widget)
assert len(viewer.window._dock_widgets) == n_wdgts + 1
def test_bad_measurements(qtbot, make_napari_viewer):
from napari_clusters_plotter._dimensionality_reduction import (
DimensionalityReductionWidget,
)
from napari_clusters_plotter._utilities import set_features
label = np.array(
[
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 2, 2],
[0, 0, 0, 0, 2, 2, 2],
[3, 3, 0, 0, 0, 0, 0],
[0, 0, 4, 4, 0, 0, 0],
            [6, 6, 6, 6, 0, 5, 0],  # <- single-pixel label leading to NaN measurements
[0, 7, 7, 0, 0, 0, 0],
]
)
viewer = make_napari_viewer(strict_qt=True)
labels_layer = viewer.add_labels(label)
# Add NaNs to data
measurements = measure.regionprops_table(
label, properties=(["label", "area", "perimeter"])
)
for key in list(measurements.keys())[1:]:
measurements[key] = measurements[key].astype(float)
measurements[key][4] = np.nan
set_features(labels_layer, measurements)
widget = DimensionalityReductionWidget(napari_viewer=viewer)
widget.run(
viewer=viewer,
labels_layer=labels_layer,
selected_measurements_list=list(measurements.keys()),
n_neighbours=2,
perplexity=5,
selected_algorithm="UMAP",
standardize=False,
n_components=2,
explained_variance=95.0,
pca_components=0,
)
blocker = qtbot.waitSignal(widget.worker.finished, timeout=1000000)
blocker.wait()
# def test_call_to_function(qtbot, make_napari_viewer):
# viewer = make_napari_viewer(strict_qt=True)
# label = np.array(
# [
# [0, 0, 0, 0, 0, 0, 0],
# [0, 1, 1, 0, 0, 2, 2],
# [0, 0, 0, 0, 2, 2, 2],
# [3, 3, 0, 0, 0, 0, 0],
# [0, 0, 4, 4, 0, 5, 5],
# [6, 6, 6, 6, 0, 5, 0],
# [0, 7, 7, 0, 0, 0, 0],
# ]
# )
# props = measure.regionprops_table(
# label, properties=(["label", "area", "perimeter"])
# )
# label_layer = viewer.add_labels(label, properties=props)
# from napari_clusters_plotter._dimensionality_reduction import (
# DimensionalityReductionWidget,
# )
# from napari_clusters_plotter._utilities import get_layer_tabular_data
# widget = DimensionalityReductionWidget(napari_viewer=viewer)
# widget.run(
# viewer=viewer,
# labels_layer=label_layer,
# selected_measurements_list=["area", "perimeter"],
# n_neighbours=2,
# perplexity=5,
# selected_algorithm="UMAP",
# standardize=False,
# n_components=2,
# explained_variance=95.0,
# pca_components=0,
# )
# # waiting till the thread worker finished
# blocker = qtbot.waitSignal(widget.worker.finished, timeout=1000000)
# blocker.wait()
# # additional waiting so the return_func_umap gets the returned embedding
# # from the thread, and writes the results into properties/features of the labels layer
# time.sleep(5)
# result = get_layer_tabular_data(label_layer)
# assert "UMAP_0" in result.columns
# assert "UMAP_1" in result.columns
# widget.run(
# viewer=viewer,
# labels_layer=label_layer,
# selected_measurements_list=["area", "perimeter"],
# n_neighbours=2,
# perplexity=5,
# selected_algorithm="t-SNE",
# standardize=False,
# n_components=2,
# explained_variance=95.0,
# pca_components=0,
# )
# blocker = qtbot.waitSignal(widget.worker.finished, timeout=1000000)
# blocker.wait()
# time.sleep(5)
# result = get_layer_tabular_data(label_layer)
# assert "t-SNE_0" in result.columns
# assert "t-SNE_1" in result.columns
# widget.run(
# viewer=viewer,
# labels_layer=label_layer,
# selected_measurements_list=["area", "perimeter"],
# n_neighbours=2,
# perplexity=5,
# selected_algorithm="PCA",
# standardize=False,
# n_components=2,
# explained_variance=95.0,
# pca_components=2,
# )
# blocker = qtbot.waitSignal(widget.worker.finished, timeout=10000000)
# blocker.wait()
# time.sleep(10)
# result = get_layer_tabular_data(label_layer)
# assert "PC_0" in result.columns
def test_umap():
import pandas as pd
from napari_clusters_plotter._dimensionality_reduction import umap
X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
n_comp = 2
    # umap returns (str, np.ndarray), where the first item is the algorithm name
result = umap(pd.DataFrame(X), n_neigh=2, n_components=n_comp)
assert result[1].shape[-1] == n_comp
def test_tsne():
X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
n_comp = 2
import pandas as pd
from napari_clusters_plotter._dimensionality_reduction import tsne
    result = tsne(pd.DataFrame(X), ...)  # arguments truncated in the source; target API: pandas.DataFrame
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
        # Remove lzo if it's not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
        # Remove bzip2 if it's not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
            # this is allowed, but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
                # this is allowed, but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
                # dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
            # see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
        not is_platform_little_endian(), reason="platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (string are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like invalid
        # combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
            # store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
                assert (
                    getattr(store.get_storer(key).table.description, name).itemsize
                    == size
                )
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them, though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block......
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_same_name_scoping(self, setup_path):
with ensure_clean_store(setup_path) as store:
import pandas as pd
df = DataFrame(
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20)
)
store.put("df", df, format="table")
expected = df[df.index > pd.Timestamp("20130105")]
import datetime # noqa
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_series(self, setup_path):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
self._check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(self, setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
@td.xfail_non_writeable
def test_tuple_index(self, setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self, setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(
l, r, check_dtype=True, check_index_type=True, check_series_type=True
)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
self._check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(self, setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
pytest.skip("known failure on some windows platforms")
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_frame(self, compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
self._check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"]
)
def test_empty_series(self, dtype, setup_path):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(self, setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self, setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self, setup_path):
# GH 13884
df = pd.DataFrame({"A": [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(self, table_format, setup_path):
# GH #13492
idx = pd.Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = pd.Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(self, setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_store_mixed(self, compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["int1"] = 1
df["int2"] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
tm.assert_frame_equal(store["obj"], df2)
# check that can store Series of all of these types
self._check_roundtrip(
df1["obj1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["bool1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["int1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self, setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
def test_select(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all a data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
dict(ts=bdate_range("2012-01-01", periods=300), A=np.random.randn(300))
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
dict(
A=np.random.rand(20),
B=np.random.rand(20),
index=np.arange(20, dtype="f8"),
)
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# not in first position float with NaN ok too
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
dict(
ts=bdate_range("2012-01-01", periods=300),
A=np.random.randn(300),
B=range(300),
users=["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ ["a{i:03d}".format(i=i) for i in range(100)],
)
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
"df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']"
)
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + ["a{i:03d}".format(i=i) for i in range(60)]
result = store.select(
"df", "ts>=Timestamp('2012-02-01') and users=selector"
)
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(self, setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '{end_dt}'".format(end_dt=end_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = "index > '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(self, setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = int(1e4)
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be 1, is 10
assert len(results) == 1
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom I suggest this should
# return [] e.g. `for e in []: print(True)` never prints
# True.
where = "index <= '{beg_dt}' & index >= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes(self, setup_path):
# GH 3499, losing frequency info on index recreation
df = DataFrame(
dict(A=Series(range(3), index=date_range("2000-1-1", periods=3, freq="H")))
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "data")
store.put("data", df, format="table")
result = store.get("data")
tm.assert_frame_equal(df, result)
for attr in ["freq", "tz", "name"]:
for idx in ["index", "columns"]:
assert getattr(getattr(df, idx), attr, None) == getattr(
getattr(result, idx), attr, None
)
# try to append a table with a different frequency
with catch_warnings(record=True):
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("data", df2)
assert store.get_storer("data").info["index"]["freq"] is None
# this is ok
_maybe_remove(store, "df2")
df2 = DataFrame(
dict(
A=Series(
range(3),
index=[
Timestamp("20010101"),
Timestamp("20010102"),
Timestamp("20020101"),
],
)
)
)
store.append("df2", df2)
df3 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("df2", df3)
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes2(self, setup_path):
with ensure_clean_path(setup_path) as path:
with catch_warnings(record=True):
df = DataFrame(
dict(
A=Series(
range(3), index=date_range("2000-1-1", periods=3, freq="H")
)
)
)
df.to_hdf(path, "data", mode="w", append=True)
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
df2.to_hdf(path, "data", append=True)
idx = date_range("2000-1-1", periods=3, freq="H")
idx.name = "foo"
df = DataFrame(dict(A=Series(range(3), index=idx)))
df.to_hdf(path, "data", mode="w", append=True)
assert read_hdf(path, "data").index.name == "foo"
with catch_warnings(record=True):
idx2 = date_range("2001-1-1", periods=3, freq="H")
idx2.name = "bar"
df2 = DataFrame(dict(A=Series(range(3), index=idx2)))
df2.to_hdf(path, "data", append=True)
assert read_hdf(path, "data").index.name is None
def test_frame_select(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
crit1 = Term("index>=date")
assert crit1.env.scope["date"] == date
crit2 = "columns=['A', 'D']"
crit3 = "columns=A"
result = store.select("frame", [crit1, crit2])
expected = df.loc[date:, ["A", "D"]]
tm.assert_frame_equal(result, expected)
result = store.select("frame", [crit3])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append("df_time", df)
with pytest.raises(ValueError):
store.select("df_time", "index>0")
# can't select if not written as table
# store['frame'] = df
# with pytest.raises(ValueError):
# store.select('frame', [crit1, crit2])
def test_frame_select_complex(self, setup_path):
# select via complex criteria
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", data_columns=["string"])
# empty
result = store.select("df", 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select("df", 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")]
tm.assert_frame_equal(result, expected)
# or
result = store.select("df", 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select(
"df", "(index>df.index[3] & " 'index<=df.index[6]) | string="bar"'
)
expected = df.loc[
((df.index > df.index[3]) & (df.index <= df.index[6]))
| (df.string == "bar")
]
tm.assert_frame_equal(result, expected)
# invert
result = store.select("df", 'string!="bar"')
expected = df.loc[df.string != "bar"]
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
with pytest.raises(NotImplementedError):
store.select("df", '~(string="bar")')
# invert ok for filters
result = store.select("df", "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(["A", "B"])]
tm.assert_frame_equal(result, expected)
# in
result = store.select("df", "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(self, setup_path):
with ensure_clean_path(["parms.hdf", "hist.hdf"]) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({"A": [1, 1, 2, 2, 3]})
parms.to_hdf(pp, "df", mode="w", format="table", data_columns=["A"])
selection = read_hdf(pp, "df", where="A=[2,3]")
hist = DataFrame(
np.random.randn(25, 1),
columns=["data"],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5) for j in range(5)], names=["l1", "l2"]
),
)
hist.to_hdf(hh, "df", mode="w", format="table")
expected = | read_hdf(hh, "df", where="l1=[2, 3, 4]") | pandas.io.pytables.read_hdf |
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
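# Illustrative sketch (not part of the original module): the factories above
# exist so that a DataSet subclass can be created per "quarters out" setting,
# e.g. something like
#
#     q1 = QuartersEstimates(1)
#     q2 = QuartersEstimates(2)
#     pipe = Pipeline({"q1_est": q1.estimate.latest,
#                      "q2_est": q2.estimate.latest})
#
# where the `num_announcements` class attribute tells the estimates loader
# how many announcements ahead (or back) to look when surfacing a quarter.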
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex( | pd.date_range(start_date, end_date) | pandas.date_range |
import os
import os.path as osp
import shutil
import json
from tqdm.auto import tqdm as tq
from itertools import repeat, product
import numpy as np
import pandas as pd
import torch
from torch_geometric.data import Data, InMemoryDataset, extract_zip
from torch_geometric.io import read_txt_array
import torch_geometric.transforms as T
from torch_points3d.core.data_transform import SaveOriginalPosId
from torch_points3d.metrics.shapenet_part_tracker import (
ShapenetPartTracker,
)
from torch_points3d.metrics.segmentation_tracker import (
SegmentationTracker,
)
from torch_points3d.datasets.base_dataset import (
BaseDataset,
save_used_properties,
)
from torch_points3d.utils.download import download_url
from plyfile import PlyData, PlyElement, PlyProperty, PlyListProperty
from sklearn.preprocessing import (
StandardScaler,
MinMaxScaler,
MaxAbsScaler,
)
########################################################################################
# #
# UTILS #
# #
########################################################################################
def scale_data(col: pd.Series, scaler) -> pd.Series:
"""
Scale a single column with the given scikit-learn scaler and return the
scaled values as a flat Series.
"""
X = col.values.reshape(-1, 1).copy()
scaled_array = scaler.fit_transform(X)
scaled_column = pd.Series(scaled_array.tolist()).explode()
return scaled_column
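# A minimal usage sketch (assumes a DataFrame `df` with a "mean_curv" column):
#
#     scaler = MinMaxScaler()
#     df["mean_curv"] = scale_data(df["mean_curv"], scaler)
#
# Each call fits the scaler on that single column, so features are rescaled
# independently of one another.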
def my_global_scaler(X, r_min, r_max):
t_min = 0
t_max = 1
return ((X - r_min) / (r_max - r_min)) * (t_max - t_min) + t_min
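# my_global_scaler linearly maps a value from a fixed range [r_min, r_max]
# onto [0, 1]. Worked example: my_global_scaler(0.0, -1.0, 1.0) == 0.5, and
# values clipped beforehand to r_min or r_max map to exactly 0 or 1.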
def convert_mesh_to_dataframe(
meshply, feat_dict, scaler_type="global"
):
"""
Convert mesh values into a dataframe and add feature_names according
to the dictionary passed and scale them between 0 and 1.
Args:
meshply (meshply obj): Mesh obj that should be converted to a
dataframe.
feat_dict (dict, optional): Custom features to be used to make
the Dataset. Values should be True or False (or 1 or 0) for
each key.
Accepted keys - "mean_curvature", "gauss_curvature", "fpfh",
"shot", "rf", "ones"
scaler_type (str, optional): "global" clips each feature to a
fixed global range before rescaling to [0, 1]; otherwise a
per-column MinMaxScaler is applied where enabled.
"""
df = pd.DataFrame()
# Scaler for the dataframe
min_max_scaler = MinMaxScaler()
df["x"] = pd.Series(meshply.elements[0].data["x"])
df["y"] = pd.Series(meshply.elements[0].data["y"])
df["z"] = pd.Series(meshply.elements[0].data["z"])
############################################################
# FEATURE: MEAN CURVATURE
############################################################
if [v for k, v in feat_dict.items() if k == "mean_curvature"][0]:
df["mean_curv"] = pd.Series(
meshply.elements[0].data["mean_curv"]
)
col = "mean_curv"
# # Scaler for the feature
if scaler_type == "global":
r_min = -0.001
r_max = 0.0008
# clip only the feature column (not whole rows) to the global range
df.loc[df[col] < r_min, col] = r_min
df.loc[df[col] > r_max, col] = r_max
df[col] = df[col].apply(
lambda x: my_global_scaler(x, r_min, r_max)
)
# else:
# df[col] = scale_data(df[col], min_max_scaler)
############################################################
# FEATURE: GAUSSIAN CURVATURE
############################################################
if [v for k, v in feat_dict.items() if k == "gauss_curvature"][0]:
df["gauss_curv"] = pd.Series(
meshply.elements[0].data["gauss_curv"]
)
col = "gauss_curv"
# # Scaler for the feature
if scaler_type == "global":
r_min = -11.025
r_max = 158.026
df.loc[df[col] < r_min, col] = r_min
df.loc[df[col] > r_max, col] = r_max
df[col] = df[col].apply(
lambda x: my_global_scaler(x, r_min, r_max)
)
# else:
# df["gauss_curv"] = scale_data(
# df["gauss_curv"], min_max_scaler
# )
############################################################
# FEATURE: FPFH DESCRIPTOR
############################################################
if [v for k, v in feat_dict.items() if k == "fpfh"][0]:
df[["fpfh_1", "fpfh_2"]] = pd.DataFrame(
meshply.elements[0].data["fpfh"].tolist()
)
col = "fpfh_1"
# Scaler for the feature
if scaler_type == "global":
r_min = -112.58
r_max = 191.905
df.loc[df[col] < r_min, col] = r_min
df.loc[df[col] > r_max, col] = r_max
df[col] = df[col].apply(
lambda x: my_global_scaler(x, r_min, r_max)
)
else:
df["fpfh_1"] = scale_data(df["fpfh_1"], min_max_scaler)
col = "fpfh_2"
if scaler_type == "global":
r_min = -85.80
r_max = 144.51
df.loc[df[col] < r_min, col] = r_min
df.loc[df[col] > r_max, col] = r_max
df[col] = df[col].apply(
lambda x: my_global_scaler(x, r_min, r_max)
)
else:
df["fpfh_2"] = scale_data(df["fpfh_2"], min_max_scaler)
############################################################
# FEATURE: SHOT DESCRIPTOR #
############################################################
if [v for k, v in feat_dict.items() if k == "shot"][0]:
df[["shot_1", "shot_2", "shot_3"]] = pd.DataFrame(
meshply.elements[0].data["shot"].tolist()
)
col = "shot_1"
# Scaler for the feature
if scaler_type == "global":
r_min = -0.68
r_max = 0.854
df.loc[df[col] < r_min, col] = r_min
df.loc[df[col] > r_max, col] = r_max
df[col] = df[col].apply(
lambda x: my_global_scaler(x, r_min, r_max)
)
else:
df["shot_1"] = scale_data(df["shot_1"], min_max_scaler)
col = "shot_2"
# Scaler for the feature
if scaler_type == "global":
r_min = -0.63
r_max = 0.84
df.loc[df[col] < r_min, col] = r_min
df.loc[df[col] > r_max, col] = r_max
df[col] = df[col].apply(
lambda x: my_global_scaler(x, r_min, r_max)
)
else:
df["shot_2"] = scale_data(df["shot_2"], min_max_scaler)
col = "shot_3"
# Scaler for the feature
if scaler_type == "global":
r_min = -0.67
r_max = 0.76
df.loc[df[col] < r_min, col] = r_min
df.loc[df[col] > r_max, col] = r_max
df[col] = df[col].apply(
lambda x: my_global_scaler(x, r_min, r_max)
)
else:
df["shot_3"] = scale_data(df["shot_3"], min_max_scaler)
############################################################
# FEATURE: SHOT_RF DESCRIPTOR #
############################################################
if [v for k, v in feat_dict.items() if k == "rf"][0]:
df[["rf_1", "rf_2", "rf_3"]] = pd.DataFrame(
meshply.elements[0].data["rf"].tolist()
)
# Scaler for the feature
df["rf_1"] = scale_data(df["rf_1"], min_max_scaler)
df["rf_2"] = scale_data(df["rf_2"], min_max_scaler)
df["rf_3"] = scale_data(df["rf_3"], min_max_scaler)
############################################################
# FEATURE: ADD ONES #
############################################################
if [v for k, v in feat_dict.items() if k == "ones"][0]:
df["ones"] = int(1)
############################################################
# LABEL: WSS - FOR PREDICTION #
############################################################
df["WSS"] = | pd.Series(meshply.elements[0].data["WSS"]) | pandas.Series |
from datetime import datetime, timedelta
import operator
import pickle
import unittest
import numpy as np
from pandas.core.index import Index, Factor, MultiIndex, NULL_INDEX
from pandas.util.testing import assert_almost_equal
import pandas.util.testing as tm
import pandas._tseries as tseries
class TestIndex(unittest.TestCase):
def setUp(self):
self.strIndex = tm.makeStringIndex(100)
self.dateIndex = tm.makeDateIndex(100)
self.intIndex = tm.makeIntIndex(100)
self.empty = Index([])
self.tuples = Index(zip(['foo', 'bar', 'baz'], [1, 2, 3]))
def test_hash_error(self):
self.assertRaises(TypeError, hash, self.strIndex)
def test_deepcopy(self):
from copy import deepcopy
copy = deepcopy(self.strIndex)
self.assert_(copy is self.strIndex)
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertRaises(Exception, idx._verify_integrity)
def test_sort(self):
self.assertRaises(Exception, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(Exception, self.strIndex.__setitem__, 5, 0)
self.assertRaises(Exception, self.strIndex.__setitem__, slice(1,5), 0)
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = arr.view(Index)
tm.assert_contains_all(arr, index)
self.assert_(np.array_equal(self.strIndex, index))
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(Exception, Index, 0)
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assert_(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse( | Index(['a', 'b', 'c']) | pandas.core.index.Index |
# -*- coding: utf-8 -*-
import argparse
import json
from os import listdir
from os.path import join
import numpy as np
import pandas as pd
from src.utilities import mkdir_if_needed
def read_presentation_type(sequence):
"""
This function extracts the presentation_type variable from a sequence dictionary.
"""
if sequence["alternatives"][0] in [0, 1]:
return "alternatives"
elif sequence["attributes"][0] in ["p", "m"]:
return "attributes"
def compute_durations(sequence, alternative=None, attribute=None):
"""Computes the relative presentation duration of alternatives, attributes, or combinations of both for a given sequence.
Args:
sequence (dict): Sequence dictionary with keys "attributes", "alternatives" and "durations", each containing a list.
alternative (int, optional): Index of alternative for which overall relative duration should be computed. Defaults to None.
attribute (str, optional): Attribute for which overall relative duration should be computed. For example "p" or "m". Defaults to None.
Returns:
float: Relative duration measure.
"""
if alternative is not None:
alt_mask = np.array(
[alt in [alternative, "all"] for alt in sequence["alternatives"]]
)
else:
alt_mask = np.ones(len(sequence["alternatives"])).astype(bool)
if attribute is not None:
att_mask = np.array(
[att in [attribute, "all"] for att in sequence["attributes"]]
)
else:
att_mask = np.ones(len(sequence["attributes"])).astype(bool)
g = np.sum(np.array(sequence["durations"])[alt_mask & att_mask]) / np.sum(
np.array(sequence["durations"])
)
return g
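# Illustrative usage (not part of the original pipeline): a hand-made sequence dict
# with invented durations, just to show what compute_durations returns.
def _example_compute_durations():
    sequence = {
        "alternatives": [0, 1, 0, "all"],
        "attributes": ["p", "m", "p", "m"],
        "durations": [100, 200, 100, 100],
    }
    print(compute_durations(sequence, alternative=0))  # 300 / 500 = 0.6
    print(compute_durations(sequence, attribute="p"))  # 200 / 500 = 0.4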
def add_duration_vars(df):
"""Adds variables for relative durations towards alernatives and attributes.
Args:
df (pandas.DataFrame): Dataframe with `sequence` variable containing the presentation sequence.
Returns:
pandas.DataFrame: The DataFrame with added variables.
"""
for alt in [0, 1]:
df[f"g{alt}r"] = df.apply(
lambda x: compute_durations(json.loads(x["sequence"]), alternative=alt),
axis=1,
)
for att in ["p", "m"]:
df[f"g{att}r"] = df.apply(
lambda x: compute_durations(json.loads(x["sequence"]), attribute=att),
axis=1,
)
# Normalize durations to 1 in each trial
df["g0"] = df["g0r"] / df[["g0r", "g1r"]].sum(axis=1)
df["g1"] = df["g1r"] / df[["g0r", "g1r"]].sum(axis=1)
df["gm"] = df["gmr"] / df[["gmr", "gpr"]].sum(axis=1)
df["gp"] = df["gpr"] / df[["gmr", "gpr"]].sum(axis=1)
return df.drop(["g0r", "g1r", "gmr", "gpr"], axis=1)
def add_last_stage_favours_var(df):
"""Adds variable that describes which alternative is favoured by the last presentation step in the sequence.
Args:
df (pandas.DataFrame): DataFrame with conditions. Must contain columns `presentation`, `targetFirst`, `target`, `other`, `p0`, `p1`, `m0`, `m1`.
Returns:
pandas.DataFrame: The DataFrame with added `lastFavours` column.
"""
df["last_stage_favours"] = np.where(
df["presentation"] == "alternatives",
df["sequence"].apply(lambda x: json.loads(x)["alternatives"][-1]),
np.where(
df["presentation"] == "attributes",
np.where(
df["sequence"].apply(lambda x: json.loads(x)["attributes"][-1] == "p"),
df["higher_p"],
df["higher_m"],
),
np.nan,
),
).astype(float)
return df
def add_duration_favours_var(choices):
# Add target variable, coding which alternative is favoured by hypothesized duration effect
choices["duration_favours"] = np.where(
choices["condition"].str.startswith("exp_"),
np.where(
choices["presentation"] == "alternatives",
choices[["g0", "g1"]].idxmax(axis=1).str[1],
np.where(
choices[["gp", "gm"]].idxmax(axis=1).str[1] == "p",
choices["higher_p"],
choices["higher_m"],
),
),
np.nan,
).astype(float)
return choices
def add_misc_variables(choices):
# Add necessary variables
choices["label0"] = np.where(
choices["condition"].str.startswith("catch"),
"dominated",
np.where(choices["higher_p"] == 0, "higher_p", "higher_m"),
)
choices["label1"] = np.where(
choices["condition"].str.startswith("catch"),
"dominant",
np.where(choices["higher_p"] == 1, "higher_p", "higher_m"),
)
choices["duration_favours_str"] = np.where(
choices["duration_favours"] == 0,
choices["label0"],
np.where(choices["duration_favours"] == 1, choices["label1"], np.nan),
)
choices["last_stage_favours_str"] = np.where(
choices["last_stage_favours"] == 0,
choices["label0"],
np.where(choices["last_stage_favours"] == 1, choices["label1"], np.nan),
)
choices["ev0"] = choices["p0"] * choices["m0"]
choices["ev1"] = choices["p1"] * choices["m1"]
choices["delta_ev"] = choices["ev0"] - choices["ev1"]
choices["delta_ev_z"] = (
choices["delta_ev"] - choices["delta_ev"].mean()
) / choices["delta_ev"].std(ddof=1)
choices["choose_higher_p"] = choices["choice"] == choices["higher_p"]
choices["by_attribute"] = choices["presentation"] == "attributes"
choices["left_alternative"] = np.where(
choices["pL"] == choices["p0"],
0,
np.where(choices["pL"] == choices["p1"], 1, np.nan),
)
return choices
def preprocess_choice_data(raw_data):
"""
This function extracts and processes choice data from raw single subject jsPsych data.
"""
# Extract only choice data
choices = (
raw_data.loc[
(raw_data["trial_type"] == "two-gamble-sequence")
& ~(raw_data["condition.1"].str.startswith("practice_"))
][
[
"condition.1",
"rt",
"key_press",
"choice",
"p0",
"p1",
"m0",
"m1",
"pL",
"sequence",
"webgazer_data",
]
]
.rename({"condition.1": "condition"}, axis=1)
.reset_index(drop=True)
.astype({"p0": float, "p1": float, "m0": float, "m1": float, "pL": float})
)
# Adjust outcome values
choices[["m0", "m1"]] *= 10
# Handle missing responses, recode choice to integer
for var in ["choice", "rt"]:
choices[var] = np.where(choices[var] == '"', np.nan, choices[var])
choices = choices.astype({var: float})
# Identify options with higher P and higher M in each trial
choices["higher_p"] = (
choices[["p0", "p1"]].idxmax(axis=1).apply(lambda x: int(x[-1]))
)
choices["higher_m"] = (
choices[["m0", "m1"]].idxmax(axis=1).apply(lambda x: int(x[-1]))
)
# Add presentation variable
choices["presentation"] = choices.apply(
lambda x: read_presentation_type(json.loads(x["sequence"])), axis=1
)
# Add numerical `presentation` variable for pyDDM
choices["presentation01"] = np.where(
choices["presentation"] == "alternatives",
0,
np.where(choices["presentation"] == "attributes", 1, np.nan),
)
# Add dependent variable
choices["choose_higher_p"] = choices["choice"] == choices["higher_p"]
# Add variables for relative presentation durations
choices = add_duration_vars(choices)
# Add variable coding which alternative is favoured by the last presentation stage
choices = add_last_stage_favours_var(choices)
choices = add_duration_favours_var(choices)
choices = add_misc_variables(choices)
return choices
def main():
# Process choice data
choices = []
# Read keys mapping PIDs and run_id to subject keys
subject_summary = | pd.read_csv(args.subject_summary, index_col=0) | pandas.read_csv |
import pandas as pd
import sqlite3
class Co2:
    # ind_name -> industry name
def ind_name(self,ind):
con = sqlite3.connect('./sorting.db')
df = | pd.read_sql_query('select * from sorting',con) | pandas.read_sql_query |
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 13 21:37:34 2021
@author: <NAME>
"""
"""
Functions of Question 1
"""
"""
1.1 Get the list of animes
"""
# import modules
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import time
import os
import random
import datetime
import re
import numpy as np
import pandas as pd
import nltk
nltk.download('punkt')
import math
import warnings
warnings.filterwarnings('ignore')
# 1.1 input a url, and get all urls in the page we input.
def getlink(url): #get all links in one web page
link = []
try:
# Open the URL and read the whole page
red = urllib.request.urlopen(url,timeout=30)
html = red.read()
red.close()
soup = BeautifulSoup(html, 'html.parser')
tags = soup('a')
for tag in tags:
link.append(tag.get('href', None))
except:
        link = getlink(url)  # retry: sometimes the server does not return data
return link
"""
1.2 Crawl animes
"""
# 1.2 input a url, return the html
def getHtml(url): # get the html from url
global html
try:
red = urllib.request.urlopen(url,timeout=30)
html = red.read()
red.close()
except:
getHtml(url)
return html
# 1.2 input file name(path) and html, save it to local disk
def saveHtml(file_name, file_content): # save the html
with open(file_name.replace('/', '_') + ".html", "wb") as f:
f.write(file_content)
f.close
"""
1.3 Parse downloaded pages
"""
# 1.3 remove unwanted characters from the type strings we extract
def type_clean(anime_type): # clean the string we got which includes the type info
new_name = ''
point = 0
if anime_type != []:
for c in anime_type[0]:
if point == 1:
new_name = new_name + c
if c == '>':
point = 1
return new_name
#1.3 clean the date strings and return time type data.
def date_clean(animeDate): # Parse the date string we extracted and convert it into ReleaseDate and EndDate
dateset = animeDate[0].split('to')
if len(dateset) == 2 and ',' in dateset[1] and ',' in dateset[0]:
re_date = date_transfer(dateset[0])
re_date = datetime.datetime.strptime(re_date,'%m %d %Y')
re_date = re_date.date()
en_date = date_transfer(dateset[1])
en_date = datetime.datetime.strptime(en_date,'%m %d %Y')
en_date = en_date.date()
else:
if len(dateset) == 2 and ',' not in dateset[1] and ',' not in dateset[0]:
re_date = dateset[0]
en_date = dateset[1]
else:
if len(dateset) == 2 and ',' not in dateset[1]:
re_date = date_transfer(dateset[0])
re_date = datetime.datetime.strptime(re_date,'%m %d %Y')
re_date = re_date.date()
en_date = ' '
else:
if len(dateset) == 1 and ',' in dateset[0]:
re_date = date_transfer(dateset[0])
re_date = datetime.datetime.strptime(re_date,'%m %d %Y')
re_date = re_date.date()
en_date = ' '
else:
re_date = dateset[0]
en_date = ' '
return re_date,en_date
#1.3 # transfer the date format (e.g. 'Mar 01, 2021' -> '3 01 2021')
def date_transfer(date_str):
    month = {'Jan': '1', 'Feb': '2', 'Mar': '3', 'Apr': '4', 'May': '5', 'Jun': '6',
             'Jul': '7', 'Aug': '8', 'Sep': '9', 'Oct': '10', 'Nov': '11', 'Dec': '12'}
newdate = date_str.split()
newdate[0] = month[newdate[0]]
day = newdate[1]
newdate[1] = day[0:-1]
returndate = str(newdate[0])+' '+str(newdate[1])+' '+str(newdate[2])
return returndate
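# Illustrative usage (not part of the original scraper): what date_transfer produces.
def _example_date_transfer():
    # 'Mar 01, 2021' -> '3 01 2021', which date_clean then parses with '%m %d %Y'
    print(date_transfer('Mar 01, 2021'))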
#1.3 # Main function used to extract all the info we need from the html with regular expressions
def getinfo(html_path):
with open(html_path,'r',encoding = 'utf-8') as f:
text = f.read()
f.close
name_res = r'<span itemprop="name">\n(.*?)\n' #1.Title
animeTitle = re.findall(name_res,text)
animeTitle = str(animeTitle[-1])
quadtest = 0
newtitle = ''
for k in animeTitle:
if k != ' ':
quadtest = 1
if quadtest == 1:
newtitle = newtitle + k
animeTitle = newtitle
print(animeTitle)
type_res = r'<span class="dark_text">Type:</span>\n(.*?)</a>'#2.Type
anime_type = re.findall(type_res,text)
animeType = type_clean(anime_type)
#animetype = animeType[0]
Episode_res = r'<span class="dark_text">Episodes:</span>\n(.*?)\n'#3.NumEpisode
animeNumEpisode = re.findall(Episode_res,text)
animeNumEpisode = animeNumEpisode[0]
Date_res = r'<span class="dark_text">Aired:</span>\n(.*?)\n'#4.Date
animeDate = re.findall(Date_res,text)
releaseDate,endDate = date_clean(animeDate)
Member_res = r'<span class="dark_text">Members:</span>\n(.*?)\n'#5.Member
animeNumMembers = re.findall(Member_res,text)
newmember =''
for item in animeNumMembers[0]:
if item !=" " and item != ',':
newmember =newmember + item
animeNumMembers = int(newmember)
score_res = r'<span class="dark_text">Score:</span>\n(.*?)</span>'#6.score
strscore = re.findall(score_res,text)
newscore = ''
scoretest = 0
for item in strscore[0]:
if scoretest == 1:
newscore = newscore + item
if item == '>':
scoretest = 1
if newscore != 'N/A':
animeScore = float(newscore)
else:
animeScore = newscore
user_res = r'<span itemprop="ratingCount" style="display: none">(.*?)</span>'#7.users
struser = re.findall(user_res,text)
if struser != []:
animeUsers = int(struser[0])
else:
animeUsers = ' '
rank_res = r'<span class="dark_text">Ranked:</span>\n(.*?)<sup>'#8.rank
strRank = re.findall(rank_res,text)
newrank = ''
for item in strRank[0]:
if item != '#':
newrank = newrank + item
animeRank = newrank
print(animeRank)
popu_res = r'<span class="dark_text">Popularity:</span>\n(.*?)\n'#9.popularity
strpopu= re.findall(popu_res,text)
newpopu = ''
for item in strpopu[0]:
if item != '#':
newpopu = newpopu + item
animePopularity = int(newpopu)
des_res = r'<meta property="og:description" content="(.*?)>'#10.animeDescription
animeDescription = re.findall(des_res,text)
animeDescription = animeDescription[0]
rel_res = r'<td nowrap="" valign="top" class="ar fw-n borderClass">(.*?)<' #11.Related
animeRe = re.findall(rel_res,text)
lated_res = r'<td width="100%" class="borderClass">(.*?)</td>'
animelated = re.findall(lated_res,text)
for i in range(len(animelated)):
newanimelated = ''
latedtest = 0
for c in animelated[i]:
if c == '<':
latedtest = 0
if latedtest == 1:
newanimelated = newanimelated + c
if c == '>':
latedtest = 1
animelated[i] = newanimelated
animeRelated = []
for j in range(len(animeRe)):
animeRelated.append(animeRe[j]+animelated[j])
cha_res = r'<h3 class="h3_characters_voice_actors">(.*?)</a></h3>' #12.Character
animeCharacters = re.findall(cha_res,text)
for i in range(len(animeCharacters)):
chartest = 0
newchar = ''
for k in animeCharacters[i]:
if chartest == 1:
newchar = newchar+k
if k == '>':
chartest = 1
animeCharacters[i] = newchar
voice_res = r'<td class="va-t ar pl4 pr4">\n(.*?)</a><br>' #13.voicer
animeVoices = re.findall(voice_res,text)
for i in range(len(animeVoices)):
voicetest = 0
newvoice = ''
for k in animeVoices[i]:
if voicetest == 1:
newvoice = newvoice+k
if k == '>':
voicetest = 1
animeVoices[i] = newvoice
staff_res = r'<td valign="top" class="borderClass">\n(.*?)</a>' #14.staff
animeStaff = re.findall(staff_res,text)
for i in range(len(animeStaff)):
stafftest = 0
newstaff = ''
for k in animeStaff[i]:
if stafftest == 1:
newstaff = newstaff+k
if k == '>':
stafftest = 1
animeStaff[i] = newstaff
renewstaff = []
for item in animeStaff:
if item[0] != '<':
renewstaff.append(item)
animeStaff = renewstaff
tsv = ' \t '
output_str = animeTitle+tsv+animeType+tsv+str(animeNumEpisode)+tsv+str(releaseDate)+tsv+str(endDate)+tsv+str(animeNumMembers)+tsv+str(animeScore)+tsv+str(animeUsers)+tsv+str(animeRank)+tsv+str(animePopularity)+tsv+animeDescription+tsv+str(animeRelated)+tsv+str(animeCharacters)+tsv+str(animeVoices)+tsv+str(animeStaff)
return output_str # Return a string with all info that we need.
"""
Question 2: Search Engine
"""
"""
2.1 Conjunctive query
"""
#2.1 input a string, return a list of words,no other characters
def clean_des(des):
import re
des = re.sub('\[\w+\s+\w*\s+\w+\s+\w+].','', des)
des = re.sub('[^a-zA-Z\s]','',des)
tokens = nltk.word_tokenize(des)
return tokens
#2.1
"""
If you want to use this SearchEngine,
you need to load [all_words,read_dictionary,anime_name,anime_des,link] before using it.
"""
def SearchEngine():
all_words = pd.read_csv('./words_set.csv')
print("Please input the key words:")
key_words = input().split()
Numlist = []
for item in key_words:
if item in all_words:
key_num = all_words.index(item)
Numlist.append(read_dictionary[key_num])
if Numlist != []:
all_output_num = Numlist[0]
else:
all_output_num = []
for n in range(len(Numlist)):
all_output_num=set(all_output_num) & set(Numlist[n])
# get the number list of conjunctive queries
all_output_num = list(set(all_output_num))
output_name = []
output_des = []
output_link = []
for m in all_output_num:
output_name.append(anime_name[m])
output_des.append(anime_des[m])
output_link.append(link[m])
final_output = {'animeTitle':output_name,'animeDescription': output_des,'Url':output_link}
output_df=pd.DataFrame(final_output)
output_df.index += 1
return output_df
"""
2.2 If you want to use this SearchEngine,
you need to load [all_words,read_dictionary,anime_name,anime_des,link] before using it.
"""
#2.2 tfIdf score function
# you need to load read_dictionay, all_des, all_words before use it
def tfidf(wordNum):
Key_Num = read_dictionary[wordNum]
tfid = []
idf = math.log(19131/len(Key_Num))
for item in Key_Num:
ni = des_words[item].count(all_words[wordNum])
tf = ni/len(des_words[item])
tfi = idf*tf
tfid.append(tfi)
return tfid
#2.2 Similarity Calculation
def get_word_vector(input_list,des_list):
all_element = set(input_list)|set(des_list)
all_element = list(all_element)
v1 = [0]*len(all_element)
v2 = [0]*len(all_element)
for i in range(len(all_element)):
v1[i] = input_list.count(all_element[i])
v2[i] = des_list.count(all_element[i])
return v1,v2
def cos_dist(vec1,vec2):
dist1=float(np.dot(vec1,vec2)/(np.linalg.norm(vec1)*np.linalg.norm(vec2)))
return dist1
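# Illustrative usage (not part of the search engine itself): cosine similarity
# between a toy query and a toy description using the two helpers above.
def _example_cosine_similarity():
    query = ['magic', 'school']
    description = ['a', 'boy', 'goes', 'to', 'a', 'magic', 'school']
    v1, v2 = get_word_vector(query, description)
    print(cos_dist(v1, v2))  # values closer to 1 mean more similar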
#2.2 -----------Execute the query 2-------------
def SearchEngine2(key_words):
Numlist = []
for item in key_words:
if item in all_words:
key_num = all_words.index(item)
Numlist.append(read_dictionary[key_num])
if Numlist != []:
all_output_num = Numlist[0]
else:
all_output_num = []
for n in range(len(Numlist)):
all_output_num=set(all_output_num) & set(Numlist[n])
# get the number list of conjunctive queries
all_output_num = list(set(all_output_num))
output_name = []
output_des = []
output_link = []
similarity_des = []
for m in all_output_num:
output_name.append(anime_name[m])
output_des.append(anime_des[m])
output_link.append(link[m])
similarity_des.append(des_words[m])
# compute the similarity
similarity = []
for o in range(len(similarity_des)):
vec1,vec2=get_word_vector(key_words,similarity_des[o])
dist1=cos_dist(vec1,vec2)
similarity.append(dist1)
# output
final_output = {'animeTitle':output_name,'animeDescription': output_des,'Url':output_link,'Similarity':similarity}
output_df=pd.DataFrame(final_output)
output_df = output_df.sort_values(by='Similarity',ascending = False)
output_df = output_df[0:10] # get top k animes; k = 10
output_df.index = range(len(output_df))
output_df.index += 1
return output_df
"""
Question 3: Define a new Score
"""
"""
"""
import string
def convert_dtype_float(x):
ans = []
for item in x:
item = item.strip()
try:
ans.append(np.float64(item))
except:
ans.append(None)
return pd.Series(ans)
def convert_dtype_int(x):
ans = []
for item in x:
item = item.strip()
try:
ans.append(np.int64(item))
except:
ans.append(None)
return | pd.Series(ans) | pandas.Series |
# -*- coding: utf-8 -*-
import geopandas as gpd
import multiprocessing as mp
import numpy as np
import os
import pandas as pd
import re
import seaborn as sns
import sys
import time
from tqdm import tqdm
from matplotlib import pyplot as plt
import warnings
from hs_process.utilities import defaults
from hs_process.utilities import hsio
from hs_process.segment import segment
from hs_process.spec_mod import spec_mod
from hs_process.spatial_mod import spatial_mod
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import as_completed
class batch(object):
'''
Class for batch processing hyperspectral image data. Makes use of
`segment`_, `spatial_mod`_, and `spec_mod`_ to batch process many
datacubes in a given directory. Supports options to save full
datacubes, geotiff renders, as well as summary statistics and/or
reports for the various tools.
Note:
It may be a good idea to review and understand the `defaults`_,
`hsio`_, `hstools`_, `segment`_, `spatial_mod`_, and `spec_mod`_
classes prior to using the ``batch`` module.
.. _defaults: hs_process.defaults.html
.. _hsio: hs_process.hsio.html
.. _hstools: hs_process.hstools.html
.. _segment: hs_process.segment.html
.. _spatial_mod: hs_process.spatial_mod.html
.. _spec_mod: hs_process.spec_mod.html
'''
def __init__(self, base_dir=None, search_ext='.bip', dir_level=0,
lock=None, progress_bar=False):
'''
Parameters:
base_dir (``str``, optional): directory path to search for files to
spectrally clip; if ``fname_list`` is not ``None``, ``base_dir`` will
be ignored (default: ``None``).
search_ext (``str``): file format/extension to search for in all
directories and subdirectories to determine which files to
process; if ``fname_list`` is not ``None``, ``search_ext`` will
be ignored (default: 'bip').
dir_level (``int``): The number of directory levels to search; if
``None``, searches all directory levels (default: 0).
lock (``multiprocessing.Lock``): Can be passed to ensure lock is in
place when writing to a file during multiprocessing.
'''
self.base_dir = base_dir
self.search_ext = search_ext
self.dir_level = dir_level
self.lock = lock
self.progress_bar = progress_bar
self.fname_list = None
if base_dir is not None:
self.fname_list = self._recurs_dir(base_dir, search_ext, dir_level)
self.io = hsio()
self.my_spectral_mod = None
self.my_spatial_mod = None
self.my_segment = None
def _try_spat_crop_col_key(self, key, df_row):
'''
Gets value of ``key`` (column name) from ``df_row``; returns
``None`` if there is a KeyError
This is tricky for crop_X and buf_X columns, because we must decipher
whether to get these values from the default pool or not. If we get a
KeyError, our first instinct is to gather the default, but we must
check the "inverse" first (the "inverse" of crop_e_pix is crop_e_m) to
avoid overwriting a value passed in df_row unintentionally. Therefore,
this function handles keys differently if "crop" or "buf" are part of
``key`` than if they are not part of ``key``
Adds ``key`` to batch.io.defaults.spat_crop_cols if it does not yet
exist, but then of course the ``value`` that is returned will be
``None``
'''
if key not in self.io.defaults.spat_crop_cols.keys():
print('Adding key "{0}" to defaults.spat_crop_cols dictionary'
''.format(key))
self.io.defaults.spat_crop_cols[key] = key
try:
value = df_row[self.io.defaults.spat_crop_cols[key]]
except KeyError: # try to retrieve a default value
            # decide whether to get default or not.. how?
# check the inverse to see if it is accesible
# try:
# value = self.io.defaults.crop_defaults[key]
# except KeyError:
# value = None
if 'crop' in key or 'buf' in key:
key_base = key[:key.find('_', key.rfind('_'))]
key_unit = key[key.find('_', key.rfind('_')):]
if key_unit == '_m':
key_unit_inv = '_pix'
elif key_unit == '_pix':
key_unit_inv = '_m'
try:
value_inv = df_row[self.io.defaults.spat_crop_cols[key_base+key_unit_inv]] # exists; set to NaN and carry on
value = None
except KeyError: # neither exist, gather default
try:
value = self.io.defaults.crop_defaults[key]
except KeyError:
value = None
else: # proceed as normal
try:
value = self.io.defaults.crop_defaults[key]
except KeyError:
value = None
# if key in ['crop_e_m', 'crop_n_m', 'crop_e_pix', 'crop_n_pix']:
# print('Key: {0} Value: {1}'.format(key, value))
return value
def _check_processed(self, fname_list, base_dir_out, folder_name,
name_append, append_extra=None, ext=None):
'''
Checks if any files in fname_list have already (presumably) undergone
processing. This is determined by checking if a file exists with a
particular name based on the filename in fname_list and naming
parameters (i.e,. ``folder_name`` and ``name_append``).
Parameters:
ext (``str``): e.g., '.spec'
'''
if append_extra is None:
append_extra = ''
fname_list_final = fname_list.copy()
for fname in fname_list:
if base_dir_out is None:
base_dir = os.path.split(fname)[0]
dir_out, name_append = self._save_file_setup(
base_dir, folder_name, name_append)
else:
dir_out, name_append = self._save_file_setup(
base_dir_out, folder_name, name_append)
name_print = self._get_name_print(fname)
if ext is None:
name_label = (name_print + name_append + append_extra + '.' +
self.io.defaults.envi_write.interleave)
else:
name_label = (name_print + name_append + append_extra + ext)
if os.path.isfile(os.path.join(dir_out, name_label)):
fname_list_final.remove(fname)
msg1 = ('There are no files to process. Please check if files have '
'already undergone processing. If existing files should be '
'overwritten, be sure to set the ``out_force`` parameter.\n')
msg2 = ('Processing {0} files. If existing files should be '
'overwritten, be sure to set the ``out_force`` parameter.\n'
''.format(len(fname_list_final)))
if not len(fname_list_final) > 0:
warnings.warn(msg1, UserWarning, stacklevel=0)
# else:
# print(msg2)
time.sleep(0.2) # when using progress bar, this keeps from splitting lines
return fname_list_final
def _crop_read_sheet(self, row):
'''
Reads the necessary information from the spreadsheet and saves it
to a dictionary
If this function causes an error, try checking
``batch.io.defaults.spat_crop_col`` - these should be adjusted
according to the default column names of the input (i.e.,
``fname_sheet``).
'''
crop_specs = {
'directory': self._try_spat_crop_col_key('directory', row),
'fname': self._try_spat_crop_col_key('fname', row),
'name_short': self._try_spat_crop_col_key('name_short', row),
'name_long': self._try_spat_crop_col_key('name_long', row),
'ext': self._try_spat_crop_col_key('ext', row),
'plot_id_ref': self._try_spat_crop_col_key('plot_id_ref', row),
'pix_e_ul': self._try_spat_crop_col_key('pix_e_ul', row),
'pix_n_ul': self._try_spat_crop_col_key('pix_n_ul', row),
'alley_size_e_m': self._try_spat_crop_col_key('alley_size_e_m', row),
'alley_size_n_m': self._try_spat_crop_col_key('alley_size_n_m', row),
'alley_size_e_pix': self._try_spat_crop_col_key('alley_size_e_pix', row),
'alley_size_n_pix': self._try_spat_crop_col_key('alley_size_n_pix', row),
'buf_e_m': self._try_spat_crop_col_key('buf_e_m', row),
'buf_n_m': self._try_spat_crop_col_key('buf_n_m', row),
'buf_e_pix': self._try_spat_crop_col_key('buf_e_pix', row),
'buf_n_pix': self._try_spat_crop_col_key('buf_n_pix', row),
'crop_e_m': self._try_spat_crop_col_key('crop_e_m', row),
'crop_n_m': self._try_spat_crop_col_key('crop_n_m', row),
'crop_e_pix': self._try_spat_crop_col_key('crop_e_pix', row),
'crop_n_pix': self._try_spat_crop_col_key('crop_n_pix', row),
'gdf_shft_e_pix': self._try_spat_crop_col_key('gdf_shft_e_pix', row),
'gdf_shft_n_pix': self._try_spat_crop_col_key('gdf_shft_n_pix', row),
'gdf_shft_e_m': self._try_spat_crop_col_key('gdf_shft_e_m', row),
'gdf_shft_n_m': self._try_spat_crop_col_key('gdf_shft_n_m', row),
'n_plots_x': self._try_spat_crop_col_key('n_plots_x', row),
'n_plots_y': self._try_spat_crop_col_key('n_plots_y', row),
'n_plots': self._try_spat_crop_col_key('n_plots', row)}
if crop_specs['fname'] is None:
try:
crop_specs['fname'] = (crop_specs['name_short'] +
crop_specs['name_long'] +
crop_specs['ext'])
except TypeError:
crop_specs['fname'] = None
if crop_specs['fname'] is not None:
base_name = os.path.basename(crop_specs['fname'])
if crop_specs['name_short'] is None:
crop_specs['name_short'] = base_name[
:base_name.find('-', base_name.rfind('_'))]
if crop_specs['name_long'] is None:
crop_specs['name_long'] = base_name[
base_name.find('-', base_name.rfind('_')):]
if crop_specs['ext'] is None:
crop_specs['ext'] = os.path.splitext(crop_specs['fname'])[1]
for col_name in row.index:
if col_name not in self.io.defaults.spat_crop_cols.keys():
crop_specs[col_name] = row[col_name]
if not pd.notnull(crop_specs['name_long']):
crop_specs['name_long'] = None
if not pd.notnull(crop_specs['plot_id_ref']):
crop_specs['plot_id_ref'] = None
if not pd.notnull(crop_specs['name_short']):
crop_specs['name_short'] = None
self.crop_specs = crop_specs
return crop_specs
def _pix_to_mapunit(self, crop_specs, spyfile=None):
'''
Looks over specifications of ``crop_specs``, and converts betweeen pixel
units and map units if one is populated and the other is ``None``
'''
cs = crop_specs.copy()
if spyfile is None:
spyfile = self.io.spyfile
spy_ps_e = float(spyfile.metadata['map info'][5])
spy_ps_n = float(spyfile.metadata['map info'][6])
# Crop size
# if cs['crop_e_pix'] is None and cs['crop_e_m'] is not None:
if pd.isnull(cs['crop_e_pix']) and pd.notnull(cs['crop_e_m']):
cs['crop_e_pix'] = int(cs['crop_e_m'] / spy_ps_e)
        elif pd.notnull(cs['crop_e_pix']) and pd.isnull(cs['crop_e_m']):
            cs['crop_e_m'] = cs['crop_e_pix'] * spy_ps_e
import os
import pytest
import pandas as pd
import numpy as np
from scripts.national_load import (
filter_outliers,
_interpolate_gaps,
_fill_29th_feb,
_countries_with_missing_data_in_model_year,
_get_index_of_missing_data,
_ignore_feb_29th,
clean_load_data
)
THIS_DIR = os.path.dirname(__file__)
class TestLoadHelperFunctions:
@pytest.fixture
def foobar_df(self):
def _foobar_df(foo, bar):
return | pd.DataFrame({"foo": foo, "bar": bar}) | pandas.DataFrame |
###############################################################################
# PCAAnomalyDetector
import numpy as np
import pandas
from nimbusml.datasets import get_dataset
from nimbusml.decomposition import PcaAnomalyDetector
from sklearn.model_selection import train_test_split
# use 'iris' data set to create test and train data
# Sepal_Length Sepal_Width Petal_Length Petal_Width Label Species Setosa
# 0 5.1 3.5 1.4 0.2 0 setosa 1.0
# 1 4.9 3.0 1.4 0.2 0 setosa 1.0
df = get_dataset("iris").as_df()
df.drop(['Label', 'Setosa', 'Species'], axis=1, inplace=True)
X_train, X_test = train_test_split(df)
# homogenous values for labels, y-column required by scikit
y_train = np.ones(len(X_train))
svm = PcaAnomalyDetector(rank=3)
svm.fit(X_train)
# add additional non-iris data to the test data set
not_iris = pandas.DataFrame(data=dict(Sepal_Length=[2.5, 2.6],
Sepal_Width=[.75, .9],
Petal_Length=[2.5, 2.5],
Petal_Width=[.8, .7]))
merged_test = | pandas.concat([X_test, not_iris], sort=False) | pandas.concat |
"""
A non-blending lightGBM model that incorporates portions and ideas from various public kernels.
"""
DEBUG = False
WHERE = 'kaggle'
FILENO = 4
NCHUNK = 32000000
OFFSET = 75000000
VAL_RUN = False
MISSING32 = 999999999
MISSING8 = 255
PUBLIC_CUTOFF = 4032690
if WHERE=='kaggle':
inpath = '../input/talkingdata-adtracking-fraud-detection/'
pickle_path ='../input/training-and-validation-data-pickle/'
suffix = ''
outpath = ''
savepath = ''
oofpath = ''
cores = 4
elif WHERE=='gcloud':
inpath = '../.kaggle/competitions/talkingdata-adtracking-fraud-detection/'
pickle_path = '../data/'
suffix = '.zip'
outpath = '../sub/'
oofpath = '../oof/'
savepath = '../data/'
cores = 7
import pandas as pd
import time
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
import lightgbm as lgb
import gc
import matplotlib.pyplot as plt
import os
def do_count( df, group_cols, agg_name, agg_type='uint32', show_max=False, show_agg=True ):
if show_agg:
print( "Aggregating by ", group_cols , '...' )
gp = df[group_cols][group_cols].groupby(group_cols).size().rename(agg_name).to_frame().reset_index()
df = df.merge(gp, on=group_cols, how='left')
del gp
if show_max:
print( agg_name + " max value = ", df[agg_name].max() )
df[agg_name] = df[agg_name].astype(agg_type)
gc.collect()
return( df )
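# Illustrative usage (not part of the competition pipeline): do_count on a toy
# frame appends, for every row, how many rows share the same ('ip', 'app') pair.
def _example_do_count():
    toy = pd.DataFrame({'ip': [1, 1, 2], 'app': [5, 5, 9]})
    toy = do_count(toy, ['ip', 'app'], 'ip_app_count', show_max=True)
    print(toy)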
def do_countuniq( df, group_cols, counted, agg_name, agg_type='uint32', show_max=False, show_agg=True ):
if show_agg:
print( "Counting unqiue ", counted, " by ", group_cols , '...' )
gp = df[group_cols+[counted]].groupby(group_cols)[counted].nunique().reset_index().rename(columns={counted:agg_name})
df = df.merge(gp, on=group_cols, how='left')
del gp
if show_max:
print( agg_name + " max value = ", df[agg_name].max() )
df[agg_name] = df[agg_name].astype(agg_type)
gc.collect()
return( df )
def do_cumcount( df, group_cols, counted, agg_name, agg_type='uint32', show_max=False, show_agg=True ):
if show_agg:
print( "Cumulative count by ", group_cols , '...' )
gp = df[group_cols+[counted]].groupby(group_cols)[counted].cumcount()
df[agg_name]=gp.values
del gp
if show_max:
print( agg_name + " max value = ", df[agg_name].max() )
df[agg_name] = df[agg_name].astype(agg_type)
gc.collect()
return( df )
def do_mean( df, group_cols, counted, agg_name, agg_type='float32', show_max=False, show_agg=True ):
if show_agg:
print( "Calculating mean of ", counted, " by ", group_cols , '...' )
gp = df[group_cols+[counted]].groupby(group_cols)[counted].mean().reset_index().rename(columns={counted:agg_name})
df = df.merge(gp, on=group_cols, how='left')
del gp
if show_max:
print( agg_name + " max value = ", df[agg_name].max() )
df[agg_name] = df[agg_name].astype(agg_type)
gc.collect()
return( df )
def do_var( df, group_cols, counted, agg_name, agg_type='float32', show_max=False, show_agg=True ):
if show_agg:
print( "Calculating variance of ", counted, " by ", group_cols , '...' )
gp = df[group_cols+[counted]].groupby(group_cols)[counted].var().reset_index().rename(columns={counted:agg_name})
df = df.merge(gp, on=group_cols, how='left')
del gp
if show_max:
print( agg_name + " max value = ", df[agg_name].max() )
df[agg_name] = df[agg_name].astype(agg_type)
gc.collect()
return( df )
debug = DEBUG
if debug:
print('*** debug parameter set: this is a test run for debugging purposes ***')
if VAL_RUN:
nrows=122071522
outpath = oofpath
else:
nrows=184903890
nchunk=NCHUNK
val_size=2500000
frm=nrows-OFFSET
if debug:
frm=0
nchunk=100000
val_size=10000
to=frm+nchunk
fileno = FILENO
dtypes = {
'ip' : 'uint32',
'app' : 'uint16',
'device' : 'uint16',
'os' : 'uint16',
'channel' : 'uint16',
'is_attributed' : 'uint8',
'click_id' : 'uint32',
}
if VAL_RUN:
print('loading train data...',frm,to)
train_df = pd.read_pickle( pickle_path+"training.pkl.gz" )[frm:to]
train_df['click_time'] = pd.to_datetime( train_df.click_time )
print('loading test data...')
if debug:
public_cutoff = 10000
test_df = pd.read_pickle( pickle_path+"validation.pkl.gz" )[:30000]
test_df['click_time'] = pd.to_datetime( test_df.click_time )
y_test = test_df['is_attributed'].values
test_df.drop(['is_attributed'],axis=1,inplace=True)
else:
public_cutoff = PUBLIC_CUTOFF
test_df = pd.read_pickle( pickle_path+"validation.pkl.gz" )
test_df['click_time'] = pd.to_datetime( test_df.click_time )
y_test = test_df['is_attributed'].values
test_df.drop(['is_attributed'],axis=1,inplace=True)
else:
print('loading train data...',frm,to)
train_df = pd.read_csv(inpath+"train.csv", parse_dates=['click_time'], skiprows=range(1,frm), nrows=to-frm, dtype=dtypes, usecols=['ip','app','device','os', 'channel', 'click_time', 'is_attributed'])
print('loading test data...')
if debug:
test_df = | pd.read_csv(inpath+"test.csv", nrows=100000, parse_dates=['click_time'], dtype=dtypes, usecols=['ip','app','device','os', 'channel', 'click_time', 'click_id']) | pandas.read_csv |
"""
Generate ensemble submission by majority vote.
Authors:
<NAME> and <NAME>
"""
import argparse
import glob
import pandas as pd
parser = argparse.ArgumentParser('Get args for ensemble script')
parser.add_argument('--split',
type=str,
default='dev',
choices=('dev', 'test'),
help='Split to use for ensembling.')
parser.add_argument('--sub_file',
type=str,
default='val_submission.csv',
help='Name for submission file.')
parser.add_argument('--out_dir',
type=str,
default='',
help='Name for out directory')
parser.add_argument('--file_to_omit',
type=str,
default='none',
help='Allow specification of file to omit')
parser.add_argument('--metric_name',
type=str,
default='F1',
choices=('EM', 'F1'),
help='Name of metric to determine tie breaking')
parser.add_argument('--threshold',
type=float,
default=65.0,
help='Threshold for models to include in ensemble')
parser.add_argument('--models_to_include',
type=str,
default=None,
help='Optional file specifying exact models to include')
args = parser.parse_args()
source_folder = './save/' + f'{args.split}' + '_submissions/'
stats_file = 'sub_stats.csv'
stats = pd.read_csv(stats_file)
# Either read in models to include in ensemble from provided txt file, or use metric and threshold
mods_to_include = []
if args.models_to_include is not None:
filename = './save/' + f'{args.split}' + '_submissions/' + f'{args.models_to_include}'
with open(filename, 'r') as fh:
lines = fh.read().splitlines()
for line in lines:
mods_to_include.append(line)
stats_sub = stats[stats['TestName'].isin(mods_to_include)]
else:
stats_sub = stats[(stats[args.metric_name] >= args.threshold) & (stats['TestName'] != 'none') &
(stats['TestName'] != args.file_to_omit)]
# Get best models by given metric for tie breaking
by_best_metric = stats_sub.sort_values(by=args.metric_name, ascending=False)
file_best_metric = source_folder + by_best_metric['TestName'].iloc[0] + '.csv'
file_2nd_best_metric = source_folder + by_best_metric['TestName'].iloc[1] + '.csv'
# Get list of filenames for for-loop
filenames = list(stats_sub['TestName'])
filenames = [source_folder + file + '.csv' for file in filenames]
# Combine model outputs into one dataframe
data = []
is_first_file = True
for filename in glob.glob(source_folder + '*.csv'):
if filename in filenames:
df = pd.read_csv(filename, keep_default_na=False)
if is_first_file:
df = df.rename(columns={'Predicted': filename})
is_first_file = False
else:
df = df.rename(columns={'Predicted': filename})
df = df[filename]
data.append(df)
df_all = | pd.concat(data, axis=1) | pandas.concat |
import pandas as pd
import numpy as np
import os, csv
from collections import defaultdict
import logging
class CityInfo:
def __init__(self):
# Make dict
self.cities_data = {}
self.cities_data_ascii_names = {}
with open('worldcities.csv', encoding='utf-8') as csvDataFile:
csvReader = csv.reader(csvDataFile)
for row in csvReader:
self.cities_data[row[0]] = row[2:]
self.cities_data_ascii_names[row[1]] = row[2:]
def get_city_coord(self, city: str):
city = city.title()
city = city.split(',')[0]
if city == "Cracow" or city == "Krakow":
city = "Kraków"
elif city == "Warszawa":
city = "Warsaw"
elif city == "Wroclaw":
city = "Wrocław"
elif city == "Helsingfors":
city = "Helsinki"
try:
city_data = self.cities_data[city]
return city_data[0], city_data[1]
except:
city_data = self.cities_data_ascii_names[city]
return city_data[0], city_data[1]
def to_eur(money, currency):
    if currency == 'EUR' or currency == '€':
        return money
    elif currency == 'USD' or currency == '$':
        return money / 1.08
    elif currency == 'A$' or currency == 'AUD':
return money / 1.80
elif currency == 'PLN':
return money / 4.58
elif currency == 'kr':
return money / 11.00
elif currency == 'GBP' or currency == '£':
return money / 0.88
elif currency == 'CHF':
return money / 1.06
elif currency == 'CAD' or currency == 'C$':
return money / 1.53
elif currency == 'HUF':
return money / 367.93
elif currency == 'CZK':
return money / 27.78
elif currency == '₹' or currency == 'JPY':
return money / 117.25
else:
        return None
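# Illustrative usage (not part of the original script): spot-checking to_eur with
# the hard-coded rates above; unknown currencies fall through to None.
def _example_to_eur():
    print(to_eur(1080, 'USD'))  # ~1000
    print(to_eur(4580, 'PLN'))  # ~1000
    print(to_eur(10, 'XYZ'))    # None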
if __name__ == "__main__":
ci = CityInfo()
min_low_salaries = {}
max_high_salaries = {}
with open('DATABASE.csv', encoding='utf-8') as csvDataFile:
csvReader = csv.reader(csvDataFile)
next(csvReader)
for row in csvReader:
salary = row[-2].strip()
cities = row[-1]
salary_high = row[-4]
salary_low = row[-3]
salary_high = int(float(salary_high))
salary_low = int(float(salary_low))
if salary_high == 0 or salary_low == 0:
continue
if row[-2] == 'PLN':
# Per hour
if salary_low <= 500:
salary_low *= 160
if salary_high <= 500:
                    salary_high *= 160
# Per day
if salary_low > 500 and salary_low <= 2000:
salary_low *= 20
if salary_high > 500 and salary_high <= 2000:
salary_high *= 20
# To year
salary_high *= 12
salary_low *= 12
if row[-2] == '$':
# To year salary
if salary_high < 1000:
salary_high *= 160 * 12
if salary_low < 1000:
salary_low *= 160 * 12
salary_high = to_eur(salary_high, row[-2])
salary_low = to_eur(salary_low, row[-2])
if salary_high == None or salary_low == None:
continue
for c in row[-6].split(','):
c = c.strip()
try:
latitude, longitude = ci.get_city_coord(c)
try:
if min_low_salaries[(latitude, longitude)] > salary_low:
min_low_salaries[(latitude, longitude)] = salary_low
except:
min_low_salaries[(latitude, longitude)] = salary_low
try:
if max_high_salaries[(latitude, longitude)] < salary_high:
max_high_salaries[(latitude, longitude)] = salary_high
except:
max_high_salaries[(latitude, longitude)] = salary_high
except KeyError as ex:
pass
except Exception as ex:
#logging.exception("Something awful happened!")
pass
db = defaultdict(list)
for k in min_low_salaries.keys():
db['latitude'].append(k[0])
db['longitude'].append(k[1])
db['salary_low'].append(min_low_salaries[k])
df = pd.DataFrame.from_dict(db)
df.to_csv(f'kepler_low.csv', index=False)
db = defaultdict(list)
for k in max_high_salaries.keys():
db['latitude'].append(k[0])
db['longitude'].append(k[1])
db['salary_high'].append(max_high_salaries[k])
df = | pd.DataFrame.from_dict(db) | pandas.DataFrame.from_dict |
import sys
import os
import json
import argparse
import urllib.request
import multiprocessing
import pandas as pd
# download abstract text and NER annotation in pubtator format
def download_abs(X):
_id_s, tar_dir, url_prefix = X
file_path = tar_dir+_id_s
url_s = url_prefix+_id_s
    # only retrieve gene/disease
url_s += '&concepts=gene,disease'
FLAG = 0
err_stat = ""
#print(url_s)
try:
txt = urllib.request.urlopen(url_s).read()
#print(txt)
if txt == '':
err_stat = "loaded empty"
FLAG = 2
else:
try:
with open(file_path, 'wb') as F:
F.write(txt)
FLAG = 1
err_stat = "succeed"
except Exception as e:
err_stat = "abs loads or writting wrror"
FLAG = 0
except Exception as e:
err_stat = "request error"
FLAG = 0
return (FLAG, err_stat, _id_s)
def write_json_file(f_name, data):
with open(f_name, 'w') as f:
json.dump(data, f)
# download full-text and NER annotation in biojson format
def download_doc(X):
_id_s, tar_dir, url_prefix = X
file_path = tar_dir+_id_s
url_s = url_prefix+_id_s
    # only retrieve gene/disease
url_s += '&concepts=gene,disease'
FLAG = 0
err_stat = ""
try:
url = urllib.request.urlopen(url_s)
doc_dec = url.read().decode()
if doc_dec == '':
err_stat = "loaded empty"
FLAG = 2
else:
# print(doc_dec)
try:
data = json.loads(doc_dec)
write_json_file(file_path, data)
FLAG = 1
err_stat = "succeed"
except Exception as e:
err_stat = "json loads or writting wrror"
FLAG = 0
except Exception as e:
err_stat = "request error"
FLAG = 0
return (FLAG, err_stat, _id_s)
def download_from_lst_hd(tar_id_lst, tar_dir, url_prefix, _type='json', cores=3):
rst_rec = []
try:
pool = multiprocessing.Pool(processes=cores)
_i = 0
end_i = len(tar_id_lst)
while _i < end_i:
_n = min(end_i, _i + cores) - _i
_input = [(tar_id_lst[i], tar_dir, url_prefix) for i in range(_i, _i+_n)]
_i += _n
# print(batch_input)
tmp_i = 0
tar_downloader = download_doc if _type == 'json' else download_abs
for FLAG, err_stat, _id_s in pool.imap(tar_downloader, _input):
print('%s' % (['/', '-', '\\'][int((_i / _n) % 3)]), end='\r')
if (_i + tmp_i) % 100 == 0:
print((_i + tmp_i), url_prefix+_id_s, err_stat)
tmp_i += 1
rst_rec.append((_id_s, FLAG))
if FLAG == 0:
print(_id_s, err_stat)
pass
except Exception as e:
print(e)
#print(rst_rec)
_hit_n = sum([_r[1] == 1 for _r in rst_rec])
_miss_n = sum([_r[1] == 0 for _r in rst_rec])
_empty_n = sum([_r[1] == 2 for _r in rst_rec])
print('download ended, hit {} | empty {} | miss {}'.format(_hit_n, _empty_n, _miss_n))
hit_rec = [_r[0] for _r in rst_rec if _r[1] == 1]
return hit_rec
def download_from_lst_abs(tar_pmid_lst, tar_dir, cores = 3):
rst_rec = []
try:
# download abstract on 'pubtator' format
# www.ncbi.nlm.nih.gov/CBBresearch/Lu/Demo/tmTools/Format.html
if len(tar_pmid_lst) > 0:
print('begin requesting abs data, {}'.format(len(tar_pmid_lst)))
url_prefix = "https://www.ncbi.nlm.nih.gov/research/pubtator-api/publications/export/pubtator?pmids="
rst_rec = download_from_lst_hd(tar_pmid_lst, tar_dir, url_prefix, 'abs', cores)
except Exception as e:
print(e)
return rst_rec
def download_from_lst_ft(tar_pmcid_lst, tar_dir, cores = 3):
rst_rec = []
try:
if len(tar_pmcid_lst) > 0:
print('begin requesting full-text data, {}'.format(len(tar_pmcid_lst)))
url_prefix = "https://www.ncbi.nlm.nih.gov/research/pubtator-api/publications/export/biocjson?pmcids="
rst_rec = download_from_lst_hd(tar_pmcid_lst, tar_dir, url_prefix, 'json', cores)
except Exception as e:
print(e)
return rst_rec
def download_data(config):
id_f = config.id_f
is_ft = True if config.type == "ft" else False
tar_dir = config.dir
tmp_f = config.tmp_hit_f
if tar_dir[-1] != '/':
tar_dir = tar_dir + '/'
cores = multiprocessing.cpu_count()
cores = min(config.process_n, cores)
try:
id_df = pd.read_csv(id_f, header=0, dtype=str)
id_df = id_df.drop_duplicates()
if is_ft:
tar_pmcid_lst = list(id_df.pmcid.values)
rst_rec = download_from_lst_ft(tar_pmcid_lst, tar_dir, cores)
else:
tar_pmid_lst = list(id_df.pmid.values)
rst_rec = download_from_lst_abs(tar_pmid_lst, tar_dir, cores)
print('hit records at {}'.format(tmp_f))
        pd.DataFrame(rst_rec, columns=['pmcid' if is_ft else 'pmid']).to_csv(tmp_f, index=False)
import pandas
import math
import csv
import random
import numpy
from sklearn import linear_model
from sklearn.model_selection import cross_val_score
# When a team has no Elo rating yet, assign it the base Elo rating
base_elo = 1600
team_elos = {}
team_stats = {}
x = []
y = []
folder = 'data'
# Initialize from each team's Miscellaneous, Opponent and Team per-game statistics CSV files
def initialize_data(miscellaneous_stats, opponent_per_game_stats, team_per_game_stats):
miscellaneous_stats.drop(['Rk', 'Arena'], axis=1, inplace=True)
opponent_per_game_stats.drop(['Rk', 'G', 'MP'], axis=1, inplace=True)
team_per_game_stats.drop(['Rk', 'G', 'MP'], axis=1, inplace=True)
team_stats = pandas.merge(miscellaneous_stats, opponent_per_game_stats, how='left', on='Team')
team_stats = pandas.merge(team_stats, team_per_game_stats, how='left', on='Team')
return team_stats.set_index('Team', inplace=False, drop=True)
def get_elo(team):
try:
return team_elos[team]
except:
        # If a team has no Elo yet, assign it the initial base_elo
team_elos[team] = base_elo
return team_elos[team]
# Update the Elo ratings of both teams after a game
def calc_elo(win_team, lose_team):
winner_rank = get_elo(win_team)
loser_rank = get_elo(lose_team)
rank_diff = winner_rank - loser_rank
exp = (rank_diff * -1) / 400
odds = 1 / (1 + math.pow(10, exp))
    # adjust the K-factor according to the rating level
if winner_rank < 2100:
k = 32
elif winner_rank >= 2100 and winner_rank < 2400:
k = 24
else:
k = 16
new_winner_rank = round(winner_rank + (k * (1 - odds)))
new_rank_diff = new_winner_rank - winner_rank
new_loser_rank = loser_rank - new_rank_diff
return new_winner_rank, new_loser_rank
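# Illustrative usage (not part of the model code): a single Elo update. Two new
# teams both start at the base rating of 1600 via get_elo(), so at K = 32 the
# winner takes 16 points from the loser.
def _example_calc_elo():
    print(calc_elo('Demo Winner', 'Demo Loser'))  # (1616, 1584)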
def build_season_data(all_data):
print("Building season data.")
X = []
skip = 0
for index, row in all_data.iterrows():
# Get starter or previous elos.
Wteam = row['WTeam']
Lteam = row['LTeam']
        # Get the current Elo, or the base Elo for a team seen for the first time
team1_elo = get_elo(Wteam)
team2_elo = get_elo(Lteam)
        # Give the home team a 100-point Elo bonus
if row['WVenue'] == 'Home':
team1_elo += 100
else:
team2_elo += 100
        # Use Elo as the first feature of each team
team1_features = [team1_elo]
team2_features = [team2_elo]
        # Append each team's statistics obtained from basketball-reference.com
for key, value in team_stats.loc[Wteam].iteritems():
team1_features.append(value)
for key, value in team_stats.loc[Lteam].iteritems():
team2_features.append(value)
        # Randomly assign the two teams' features to the left/right side of each game record
        # and assign the corresponding 0/1 label to y
if random.random() > 0.5:
X.append(team1_features + team2_features)
y.append(0)
else:
X.append(team2_features + team1_features)
y.append(1)
if skip == 0:
print(X)
skip = 1
        # Update both teams' Elo ratings based on this game
new_winner_rank, new_loser_rank = calc_elo(Wteam, Lteam)
team_elos[Wteam] = new_winner_rank
team_elos[Lteam] = new_loser_rank
return numpy.nan_to_num(X), y
def predict_winner(team_1, team_2, model):
features = []
# team 1, Away team
features.append(get_elo(team_1))
for key, value in team_stats.loc[team_1].iteritems():
features.append(value)
# team 2, Home team
features.append(get_elo(team_2) + 100)
for key, value in team_stats.loc[team_2].iteritems():
features.append(value)
features = numpy.nan_to_num(features)
return model.predict_proba([features])
if __name__ == '__main__':
miscellaneous_stats = | pandas.read_csv('data/MiscellaneousStats.csv') | pandas.read_csv |
# AUTOGENERATED! DO NOT EDIT! File to edit: 07_location_history_parse.ipynb (unless otherwise specified).
__all__ = ['load_json_file', 'parse_activities', 'parse_json_file', 'parse_json_file_as_rows', 'parse_json_data',
'parse_data_point', 'parse_activity', 'filter_json_data', 'sort_json_data', 'rowify_json_data',
'parse_timestamp', 'check_against_filters', 'd', 'create_timestamp', 't',
'parse_google_location_history_json', 'parsed_loc_hist_to_df', 'google_location_history_to_df', 'haversine',
'coords_to_m_from_hpsc', 'get_dt', 'add_dt_and_velocity', 'get_is_trip_bool', 'get_trip_time_bools',
'split_trips_into_lines', 'get_big_bay_lims', 'get_small_bay_lims', 'spatial_filter_df']
# Cell
import json
from datetime import datetime
import pandas as pd
import numpy as np
import math
# Cell
def load_json_file(filename):
with open(filename) as json_file:
return json.load(json_file)
# Cell
def parse_activities(activity_recs):
flat_activities = {}
for activity_rec in activity_recs:
for activity in activity_rec['activity']:
flat_activities[activity['type']] = activity['confidence']
return flat_activities
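# Cell
# Illustrative usage (record shape assumed from the parser above, not taken from a
# real Takeout export): nested activity records are flattened into a single
# {type: confidence} dict, keeping the last confidence seen for each type.
def _example_parse_activities():
    recs = [{'timestampMs': '1546300800000',
             'activity': [{'type': 'STILL', 'confidence': 75},
                          {'type': 'ON_FOOT', 'confidence': 15}]},
            {'timestampMs': '1546300860000',
             'activity': [{'type': 'STILL', 'confidence': 90}]}]
    print(parse_activities(recs))  # {'STILL': 90, 'ON_FOOT': 15}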
# Cell
def parse_json_file(filename, filters = {}):
raw_json_data = load_json_file(filename)
parsed_json_data = parse_json_data(raw_json_data)
if filters != {}:
filtered_json_data = filter_json_data(parsed_json_data, filters=filters)
else:
filtered_json_data = parsed_json_data
sorted_json_data = sort_json_data(filtered_json_data, 'timestamp')
return sorted_json_data
# Cell
def parse_json_file_as_rows(filename, filters = {}):
sorted_json_data = parse_json_file(filename, filters)
rows = rowify_json_data(sorted_json_data)
return rows
# Cell
def parse_json_data(raw_json_data):
parsed_data_points = []
for data_point in raw_json_data["locations"]:
parsed_data_points.append(parse_data_point(data_point))
return {"locations": parsed_data_points}
# Cell
def parse_data_point(data_point):
data_point["timestamp"] = parse_timestamp(data_point.get("timestampMs"))
data_point["lat"] = data_point.get("latitudeE7") / 1e7
data_point["lon"] = data_point.get("longitudeE7") / 1e7
data_point["accuracy"] = data_point.get("accuracy", np.nan)
data_point["velocity"] = data_point.get("velocity", np.nan)
data_point["heading"] = data_point.get("heading", np.nan)
data_point["altitude"] = data_point.get("altitude", np.nan)
data_point["vertical_accuracy"] = data_point.get("verticalAccuracy", np.nan)
# data_point["activity"], data_point["activity_confidence"] = parse_activity(data_point.get("activity", ""))
return data_point
# Cell
def parse_activity(activity):
if activity == "":
return ["", ""]
first_activity = activity[0]
classifications = first_activity["activity"]
best_classification = classifications[0]
return [best_classification["type"], best_classification["confidence"]]
# Cell
def filter_json_data(parsed_json_data, filters={}):
filtered_data_points = []
for data_point in parsed_json_data["locations"]:
if check_against_filters(data_point, filters):
filtered_data_points.append(data_point)
return {"locations": filtered_data_points}
# Cell
def sort_json_data(filtered_json_data, sort_key="timestamp"):
filtered_json_data["locations"].sort(key=lambda data_point: data_point[sort_key])
return filtered_json_data
# Cell
def rowify_json_data(sorted_json_data):
headers = [
"timestamp",
"latitude",
"longitude",
"accuracy",
"velocity",
"heading",
"altitude",
"vertical_accuracy",
"activity",
"activity_confidence",
]
rows = [headers]
for data_point in sorted_json_data["locations"]:
row = [
str(data_point.get("timestamp")),
data_point["lat"],
data_point["lon"],
data_point["accuracy"],
data_point["velocity"],
data_point["heading"],
data_point["altitude"],
data_point["vertical_accuracy"],
data_point["activity"],
data_point["activity_confidence"],
]
rows.append(row)
return rows
# Cell
def parse_timestamp(t):
return datetime.utcfromtimestamp(int(t) / 1000)
# Cell
def check_against_filters(data_point, filters):
start = filters.get("start", False)
end = filters.get("end", False)
bbox = filters.get("bbox", False)
# Skip data from before the provided start datetime
if start and (data_point["timestamp"] <= start):
return False
# Skip data from after the provided end datetime
if end and (end < data_point["timestamp"]):
return False
# Skip data_points outside of bounding box
if (bbox
and ((data_point["lat"] < bbox["min_lat"])
or (data_point["lat"] > bbox["max_lat"])
or (data_point["lon"] < bbox["min_lon"])
or (data_point["lon"] > bbox["max_lon"]))):
return False
# Skip data that hasn't been assigned an activity category
if len(data_point["activity"]) == 0:
return False
# If data_point passes all filters, return True
return True
# Cell
def d(timestamp):
return str(parse_timestamp(timestamp))
# Cell
def create_timestamp(datetime):
return datetime.strftime("%s%f")
# Cell
def t(datetime):
return create_timestamp(datetime)
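# Cell
# Illustrative usage (not part of the parser): parse_timestamp converts the
# millisecond epoch strings used by the export into naive UTC datetimes; d()
# returns the same value as a string.
def _example_parse_timestamp():
    print(parse_timestamp("1546300800000"))  # 2019-01-01 00:00:00
    print(d("1546300800000"))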
# Cell
def parse_google_location_history_json(filepath):
raw = load_json_file(filepath)
raw = raw['locations']
parsed_data_points = []
for data_point in raw:
parsed_data_point = parse_data_point(data_point)
if 'activity' in data_point.keys():
parsed_activities = parse_activities(data_point['activity'])
parsed_data_point.update(parsed_activities)
parsed_data_point.pop('activity')
parsed_data_points.append(parsed_data_point)
return parsed_data_points
# Cell
def parsed_loc_hist_to_df(parsed_data_points):
    df = pd.DataFrame.from_records(parsed_data_points)
#calculate odds of being moving
df['MOVING'] = 100 - df.loc[:,['STILL','UNKNOWN']].sum(axis=1)
df.loc[df['STILL'].isna() & df['UNKNOWN'].isna(), 'MOVING'] = np.nan
#add delta position
df.loc[1:,'dlat'] = df['lat'].diff()
df.loc[1:,'dlon'] = df['lon'].diff()
df['dpos'] = (df['dlat']**2 + df['dlon']**2) ** 0.5
df.columns = [col.lower() for col in df.columns]
df = df.rename(columns={'timestamp':'t'})
front_cols = [
't',
'lat',
'lon',
'dpos',
'velocity',
'heading',
'moving',
'still',
'unknown',
'in_vehicle',
'on_bicycle',
'on_foot',
'walking',
'running',
'dlat',
'dlon',
]
cols = df.columns.to_list()
for col in reversed(front_cols):
cols.remove(col)
cols.insert(0,col)
df = df.loc[:, cols]
df = df.set_index('t')
# df.index = df.index.tz_localize('UTC', ambiguous='infer').tz_convert('US/Pacific')
df.index = df.index - | pd.Timedelta('8H') | pandas.Timedelta |
import pandas as pd
import sys
if len(sys.argv) != 3:
print("Usage: python3 overhead.py raw.csv transform.csv")
raw = pd.read_csv(sys.argv[1])
tran = pd.read_csv(sys.argv[2])
half = len(tran) // 2
# raw = raw[half:]
# tran = tran[half:]
merged = pd.merge(raw, tran, on='Index')
merged["diff"] = (merged["DurationNs_y"] - merged["DurationNs_x"]) / 1000
# s = merged.groupby(['KernelName_x'])['diff'].sum()
s = merged.groupby(['KernelName_x']).agg({'diff': ['count', 'mean', 'sum']})
| pd.set_option('display.max_rows', None) | pandas.set_option |
"""
Plot the IQR of your janky light curves vs KC19 reported age.
"""
###########
# imports #
###########
import os, socket, requests
from glob import glob
import numpy as np, pandas as pd, matplotlib.pyplot as plt
from numpy import array as nparr
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.stats import iqr
from stringcheese import pipeline_utils as pu
host = socket.gethostname()
if 'brik' in host:
basedir = '/home/luke/local/stringcheese/'
else:
basedir = np.nan
###############
# main driver #
###############
def get_data():
source_df = pd.read_csv('../data/kounkel_table1_sourceinfo.csv')
sel = (
(source_df['Tmag_pred'] < 14)
&
(source_df['parallax'] > 5)
)
sdf = source_df[sel]
outpath = '../data/Tmag_lt_14_plx_gt_5_concatenated_lc_statistics.csv'
if os.path.exists(outpath):
return pd.read_csv(outpath), sdf
df2 = pd.read_csv('../data/string_table2.csv')
sdf2 = df2[df2['parallax'] > 5]
starinfos, varinfos = [], []
for ix, r in sdf.iterrows():
source_id = np.int64(r['source_id'])
ra, dec = float(r['ra_x']), float(r['dec_x'])
name = str(r['name'])
group_id = str(r['group_id'])
workingdir = os.path.join(basedir,
'fits_pkls_results_pngs',
'group{}_name{}'.format(group_id, name))
workingdir = os.path.join(workingdir, str(source_id))
resultfile = os.path.join(workingdir, 'GLS_rotation_period.results')
if os.path.exists(resultfile):
d = pu.load_status(resultfile)
starinfos.append(dict(d['starinfo']))
varinfos.append(dict(d['variability_info']))
else:
continue
stardf = pd.DataFrame(starinfos)
vardf = | pd.DataFrame(varinfos) | pandas.DataFrame |
import itertools
import numpy
import os
import random
import re
import scipy.spatial.distance as ssd
import scipy.stats
from scipy.cluster.hierarchy import dendrogram, linkage
import pandas
from matplotlib import colors
from matplotlib import pyplot as plt
import vectors
from libs import tsne
rubensteinGoodenoughData = None
def rubensteinGoodenough(wordIndexMap, embeddings):
global rubensteinGoodenoughData
if rubensteinGoodenoughData is None:
rubensteinGoodenoughData = []
rubensteinGoodenoughFilePath = 'res/RG/EN-RG-65.txt'
with open(rubensteinGoodenoughFilePath) as rgFile:
lines = rgFile.readlines()
for line in lines:
word0, word1, targetScore = tuple(line.strip().split('\t'))
targetScore = float(targetScore)
rubensteinGoodenoughData.append((word0, word1, targetScore))
scores = []
targetScores = []
for word0, word1, targetScore in rubensteinGoodenoughData:
if word0 in wordIndexMap and word1 in wordIndexMap:
targetScores.append(targetScore)
word0Index = wordIndexMap[word0]
word1Index = wordIndexMap[word1]
word0Embedding = embeddings[word0Index]
word1Embedding = embeddings[word1Index]
score = vectors.cosineSimilarity(word0Embedding, word1Embedding)
scores.append(score)
if len(scores) == 0:
return numpy.nan
pearson, pearsonDeviation = scipy.stats.pearsonr(scores, targetScores)
spearman, spearmanDeviation = scipy.stats.spearmanr(scores, targetScores)
rubensteinGoodenoughMetric = numpy.mean([pearson, spearman])
return rubensteinGoodenoughMetric
wordSimilarity353Data = None
def wordSimilarity353(wordIndexMap, embeddings):
global wordSimilarity353Data
if wordSimilarity353Data is None:
wordSimilarity353Data = []
wordSimilarity353FilePath = 'res/WordSimilarity-353/combined.csv'
data = pandas.read_csv(wordSimilarity353FilePath)
for word0, word1, score in zip(data['Word1'], data['Word2'], data['Score']):
wordSimilarity353Data.append((word0, word1, score))
scores = []
targetScores = []
for word0, word1, targetScore in wordSimilarity353Data:
if word0 in wordIndexMap and word1 in wordIndexMap:
targetScores.append(targetScore)
word0Index = wordIndexMap[word0]
word1Index = wordIndexMap[word1]
word0Embedding = embeddings[word0Index]
word1Embedding = embeddings[word1Index]
score = vectors.cosineSimilarity(word0Embedding, word1Embedding)
scores.append(score)
if len(scores) == 0:
return numpy.nan
pearson, pearsonDeviation = scipy.stats.pearsonr(scores, targetScores)
spearman, spearmanDeviation = scipy.stats.spearmanr(scores, targetScores)
metric = numpy.mean([pearson, spearman])
return metric
simLex999Data = None
def simLex999(wordIndexMap, embeddings):
global simLex999Data
if simLex999Data is None:
simLex999Data = []
simLex999FilePath = 'res/SimLex-999/SimLex-999.txt'
data = pandas.read_csv(simLex999FilePath, sep='\t')
for word0, word1, targetScore in zip(data['word1'], data['word2'], data['SimLex999']):
simLex999Data.append((word0, word1, targetScore))
targetScores = []
scores = []
for word0, word1, targetScore in simLex999Data:
if word0 in wordIndexMap and word1 in wordIndexMap:
targetScores.append(targetScore)
word0Index = wordIndexMap[word0]
word1Index = wordIndexMap[word1]
word0Embedding = embeddings[word0Index]
word1Embedding = embeddings[word1Index]
score = vectors.cosineSimilarity(word0Embedding, word1Embedding)
scores.append(score)
if len(scores) == 0:
return numpy.nan
pearson, pearsonDeviation = scipy.stats.pearsonr(scores, targetScores)
spearman, spearmanDeviation = scipy.stats.spearmanr(scores, targetScores)
simLex999Metric = numpy.mean([pearson, spearman])
return simLex999Metric
syntacticWordData = None
def syntacticWordRelations(wordIndexMap, embeddings, maxWords=10):
global syntacticWordData
if syntacticWordData is None:
syntacticWordData = []
syntWordRelFilePath = 'res/Syntactic-Word-Relations/questions-words.txt'
with open(syntWordRelFilePath, 'r') as swrFile:
lines = swrFile.readlines()
syntacticWordData = [tuple(line.lower().split(' ')) for line in lines if not line.startswith(':')]
syntacticWordData = [(word0.strip(), word1.strip(), word2.strip(), word3.strip()) for word0, word1, word2, word3 in syntacticWordData]
scores = []
for word0, word1, word2, word3 in syntacticWordData:
if word0 not in wordIndexMap or word1 not in wordIndexMap or word2 not in wordIndexMap or word3 not in wordIndexMap:
continue
word0Index = wordIndexMap[word0]
word1Index = wordIndexMap[word1]
word2Index = wordIndexMap[word2]
word3Index = wordIndexMap[word3]
word0Embedding = embeddings[word0Index]
word1Embedding = embeddings[word1Index]
word2Embedding = embeddings[word2Index]
word3Embedding = embeddings[word3Index]
similarity01 = vectors.cosineSimilarity(word0Embedding, word1Embedding)
similarity23 = vectors.cosineSimilarity(word2Embedding, word3Embedding)
score = 1
minSimilarityDelta = abs(similarity01 - similarity23)
for embedding in embeddings[:maxWords]:
similarity2N = vectors.cosineSimilarity(word2Embedding, embedding)
similarityDelta = abs(similarity01 - similarity2N)
score = not (similarityDelta < minSimilarityDelta)
if not score:
break
scores.append(score)
if len(scores) == 0:
return numpy.nan
syntacticWordRelationsMetric = float(sum(scores)) / len(scores)
return syntacticWordRelationsMetric
satQuestionsData = None
def satQuestions(wordIndexMap, embeddings):
global satQuestionsData
if satQuestionsData is None:
satQuestionsData = []
satQuestionsFilePath = 'res/SAT-Questions/SAT-package-V3.txt'
maxLineLength = 50
aCode = ord('a')
with open(satQuestionsFilePath) as satFile:
line = satFile.readline()
while line != '':
if len(line) < maxLineLength:
match = re.match('(?P<word0>[\w-]+)\s(?P<word1>[\w-]+)\s[nvar]:[nvar]', line)
if match:
stemWord0, stemWord1 = match.group('word0'), match.group('word1')
satQuestion = [stemWord0, stemWord1]
line = satFile.readline()
match = re.match('(?P<word0>[\w-]+)\s(?P<word1>[\w-]+)\s[nvar]:[nvar]', line)
while match:
choiceWord0, choiceWord1 = match.group('word0'), match.group('word1')
satQuestion.append(choiceWord0)
satQuestion.append(choiceWord1)
line = satFile.readline()
match = re.match('(?P<word0>[\w-]+)\s(?P<word1>[\w-]+)\s[nvar]:[nvar]', line)
correctChoiceIndex = ord(line.strip()) - aCode
satQuestion.append(correctChoiceIndex)
satQuestionsData.append(satQuestion)
line = satFile.readline()
scores = []
for satQuestion in satQuestionsData:
if any([word not in wordIndexMap for word in satQuestion[:-1]]):
continue
stemWord0, stemWord1 = satQuestion[:2]
stemWord0Index = wordIndexMap[stemWord0]
stemWord1Index = wordIndexMap[stemWord1]
stemWord0Embedding, stemWord1Embedding = embeddings[stemWord0Index], embeddings[stemWord1Index]
stemSimilarity = vectors.cosineSimilarity(stemWord0Embedding, stemWord1Embedding)
correctChoiceIndex = satQuestion[-1]
choiceSimilarityDeltas = []
choices = satQuestion[2:-1]
for i in xrange(0, len(choices), 2):
choiceWord0, choiceWord1 = choices[i], choices[i+1]
choiceWord0Index, choiceWord1Index = wordIndexMap[choiceWord0], wordIndexMap[choiceWord1]
choiceWord0Embedding, choiceWord1Embedding = embeddings[choiceWord0Index], embeddings[choiceWord1Index]
choiceSimilarity = vectors.cosineSimilarity(choiceWord0Embedding, choiceWord1Embedding)
choiceSimilarityDelta = abs(stemSimilarity - choiceSimilarity)
choiceSimilarityDeltas.append(choiceSimilarityDelta)
choiceIndex = numpy.argmin(choiceSimilarityDeltas)
scores.append(int(choiceIndex == correctChoiceIndex))
if len(scores) == 0:
return numpy.nan
metric = float(sum(scores)) / len(scores)
return metric
def validate(wordIndexMap, embeddings):
rg = rubensteinGoodenough(wordIndexMap, embeddings)
sim353 = wordSimilarity353(wordIndexMap, embeddings)
sl999 = simLex999(wordIndexMap, embeddings)
syntRel = syntacticWordRelations(wordIndexMap, embeddings)
sat = satQuestions(wordIndexMap, embeddings)
return rg, sim353, sl999, syntRel, sat
def dump(metricsPath, epoch, customMetrics):
metrics = {
'epoch': epoch
}
for name, value in customMetrics.items():
metrics[name] = value
metrics = [metrics]
if os.path.exists(metricsPath):
with open(metricsPath, 'a') as metricsFile:
metricsHistory = pandas.DataFrame.from_dict(metrics)
metricsHistory.to_csv(metricsFile, header=False)
else:
metricsHistory = pandas.DataFrame.from_dict(metrics)
metricsHistory.to_csv(metricsPath, header=True)
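# Hypothetical usage sketch of dump() above (the file name and metric names are
# invented for illustration): each call appends one row of metrics per epoch.
def _exampleDumpUsage(metricsPath='metrics_history.csv'):
    dump(metricsPath, 0, {'rg': 0.41, 'wordsim353': 0.38})
    dump(metricsPath, 1, {'rg': 0.44, 'wordsim353': 0.40})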
def compareMetrics(metricsHistoryPath, *metricNames):
metrics = pandas.DataFrame.from_csv(metricsHistoryPath)
iterations = range(0, len(metrics))
plt.grid()
metricScatters = []
colorNames = colors.cnames.keys()
for metricIndex, metricName in enumerate(metricNames):
metric = metrics[metricName]
random.shuffle(colorNames)
metricScatter = plt.scatter(iterations, metric, c=colorNames[metricIndex % len(colorNames)])
metricScatters.append(metricScatter)
metricsFileName = os.path.basename(metricsHistoryPath)
plt.title(metricsFileName)
plt.legend(metricScatters, metricNames, scatterpoints=1, loc='lower right', ncol=3, fontsize=8)
plt.show()
def compareHistories(metricName, *metricsHistoryPaths):
plt.grid()
metricScatters = []
metricsHistoryNames = []
colorNames = colors.cnames.keys()
for metricsHistoryIndex, metricsHistoryPath in enumerate(metricsHistoryPaths):
metrics = | pandas.DataFrame.from_csv(metricsHistoryPath) | pandas.DataFrame.from_csv |
import numpy as np
import pandas as pd
import xarray as xr
import copy
import warnings
try:
from plotly import graph_objs as go
plotly_installed = True
except:
plotly_installed = False
# warnings.warn("PLOTLY not installed so interactive plots are not available. This may result in unexpected funtionality")
global_3d_mapper = np.repeat(0, 256 * 4).reshape(256, -1)
global_3d_mapper[ord('T'), :] = np.array([0, 0, 0, 1])
global_3d_mapper[ord('C'), :] = np.array([0, 1, 0, 0])
global_3d_mapper[ord('A'), :] = np.array([1, 0, 0, 0])
global_3d_mapper[ord('G'), :] = np.array([0, 0, 1, 0])
def compare_sequence_matrices(seq_arr1, seq_arr2, flip=False, treat_as_match=[], ignore_characters=[], return_num_bases=False):
"""
This will "align" seq_arr1 to seq_arr2. It will calculate which positions in each sequence defined by seq_arr1 matches each position in each sequence defined by seq_arr2
seq_arr1 = NxP matrix where N = # of sequences represented in seq_arr1 and P represents each base pair position/the length of the string
    seq_arr2 = MxP matrix where M = # of sequences represented in seq_arr2 and P represents each base pair position/the length of the string
    This operation will return an NxPxM boolean matrix where each position records whether the base pair at position P in sequence N matches the base pair at position P in sequence M
    In other words, if bool_arr = compare_sequence_matrices(A, B, flip=True) then the total hamming distance between the second sequence in A and the third sequence in B can be found as
>>> bool_arr.sum(axis=1)[1][2]
Args:
        seq_arr1 (np.array): NxP matrix of sequences represented as an array of numbers
        seq_arr2 (np.array): MxP matrix of sequences represented as an array of numbers
        flip (bool): If False, True means that the letters are equal at the specified position; if True, return positions that are NOT equal to one another
        treat_as_match (list of chars): Treat any positions that have any of these letters in either matrix as True
        ignore_characters (list of chars): Ignore positions that have any of these letters in either matrix
        .. warning:: datatype
            When ignore_characters is defined, the array is passed back as a float dtype because it must accommodate np.nan
        return_num_bases (False): If True, also return a second value giving the number of non-NaN positions for each pairwise alignment
Returns: NxPxM array of boolean values
"""
assert seq_arr1.shape[1] == seq_arr2.shape[1], 'Matrices do not match!'
    # use np.uint8 because it ends up being faster
seq_arr1 = seq_arr1.view(np.uint8)
seq_arr2 = seq_arr2.view(np.uint8)
# this will return true of pos X in seqA and seqB are equal
diff_arr = (seq_arr1[..., np.newaxis].view(np.uint8) == seq_arr2.T[np.newaxis, ...])
# print(diff_arr.shape)
if treat_as_match:
        # treat any of these letters at any position as true regardless of whether they match in the respective pairwise sequences
if not isinstance(treat_as_match, list):
treat_as_match = [treat_as_match]
treat_as_match = [ord(let) for let in treat_as_match]
# now we have to ignore characters that are equal to specific values
# return True for any positions that is equal to "treat_as_true"
ignore_pos = ((seq_arr1 == treat_as_match[0])[..., np.newaxis]) | ((seq_arr2 == treat_as_match[0])[..., np.newaxis].T)
for chr_p in treat_as_match[1:]:
ignore_pos = ignore_pos | ((seq_arr1 == chr_p)[..., np.newaxis]) | ((seq_arr2 == chr_p)[..., np.newaxis].T)
# now adjust boolean results to ignore any positions == treat_as_true
diff_arr = (diff_arr | ignore_pos) # if flip is False else (diffs | ignore_pos)
if flip is False:
diff_arr = diff_arr # (~(~diffarr))
else:
diff_arr = ~diff_arr # (~diffarr)
# print(diff_arr.shape)
if ignore_characters:
# do not treat these characters as true OR false
if not isinstance(ignore_characters, list):
ignore_characters = [ignore_characters]
ignore_characters = [ord(let) for let in ignore_characters]
# now we have to ignore characters that are equal to specific values
ignore_pos = (seq_arr1 == ignore_characters[0])[..., np.newaxis] | ((seq_arr2 == ignore_characters[0])[..., np.newaxis].T)
for chr_p in ignore_characters[1:]:
ignore_pos = ignore_pos | ((seq_arr1 == chr_p)[..., np.newaxis]) | ((seq_arr2 == chr_p)[..., np.newaxis]).T
diff_arr = diff_arr.astype(np.float)
diff_arr[ignore_pos] = np.nan
diff_arr = diff_arr
if return_num_bases:
num_bases = np.apply_along_axis(
arr=diff_arr,
axis=1,
func1d=lambda x: len(x[~np.isnan(x)])
)
return diff_arr, num_bases
else:
return diff_arr
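# Illustrative sketch (toy sequences invented for demonstration): compare two small
# sequence matrices position by position, then obtain per-pair hamming distances by
# summing mismatches (flip=True) along the position axis.
def _example_compare_sequence_matrices():
    seqs1 = np.vstack([np.frombuffer(b'ACGT', dtype=np.uint8),
                       np.frombuffer(b'AGGT', dtype=np.uint8)])    # 2 x 4
    seqs2 = np.frombuffer(b'ACGT', dtype=np.uint8).reshape(1, -1)  # 1 x 4
    mismatches = compare_sequence_matrices(seqs1, seqs2, flip=True)  # 2 x 4 x 1 booleans
    return mismatches.sum(axis=1)  # hamming distance of each pair, here [[0], [1]]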
def numpy_value_counts_bin_count(arr, weights=None):
"""
Use the 'bin count' function in numpy to calculate the unique values in every column of a dataframe
clocked at about 3-4x faster than pandas_value_counts (df.apply(pd.value_counts))
Args:
arr (dataframe, or np array): Should represent rows as sequences and columns as positions. All values should be int
        weights (np array): optional weights, one per row, passed through to np.bincount when counting values
"""
if not isinstance(arr, np.ndarray):
raise Exception('The provided parameter for arr is not a dataframe or numpy array')
if len(arr.shape) == 1:
# its a ONE D array, lets make it two D
arr = arr.reshape(-1, 1)
arr = arr.view(np.uint8)
    # returns an array of length equal to the max value in array + 1. each element represents number of times an integer appeared in array.
bins = [
np.bincount(arr[:, x], weights=weights)
for x in range(arr.shape[1])
]
indices = [np.nonzero(x)[0] for x in bins] # only look at non zero bins
series = [pd.Series(y[x], index=x) for (x, y) in zip(indices, bins)]
return pd.concat(series, axis=1).fillna(0)
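# Illustrative sketch (toy byte matrix invented for demonstration): count how often
# each byte value appears in every column; the result's index holds the byte codes
# (e.g. ord('A') == 65) and each result column corresponds to one position.
def _example_numpy_value_counts_bin_count():
    arr = np.vstack([np.frombuffer(b'AAB', dtype=np.uint8),
                     np.frombuffer(b'ABB', dtype=np.uint8)])
    return numpy_value_counts_bin_count(arr)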
def get_quality_dist(
arr, col_names=None, bins='even', exclude_null_quality=True, sample=None,
percentiles=[10, 25, 50, 75, 90], stats=['mean', 'median', 'max', 'min'],
plotly_sampledata_size=20, use_multiindex=True,
):
"""
    Returns the distribution of quality across the given sequence, similar to the FastQC per-base quality report.
Args:
arr (np.array): a matrix of quality scores where rows represent a sequence and columns represent a position
col_names (list): column header for the numpy array (either from xarray or pandas)
bins(list of ints or tuples, or 'fastqc', or 'even'): bins defines how to group together the columns/sequence positions when aggregating the statistics.
.. note:: bins='fastqc' or 'even'
if bins is not a set of numbers and instead one of the two predefined strings ('fastqc' and 'even') then calculation of bins will be defined as follows:
1. fastqc: Identical to the bin ranges used by fastqc report
2. even: Creates 10 evenly sized bins based on sequence lengths
percentiles (list of floats, default=[10, 25, 50, 75, 90]): value passed into numpy quantiles function.
exclude_null_quality (boolean, default=True): do not include quality scores of 0 in the distribution
sample (int, default=None): If defined, then we will only calculate the distribution on a random subsampled population of sequences
plotly_sampledata_size (int, default=20): Number of values to store in a sample numpy array used for creating box plots in plotly
.. note:: min size
note the minimum value for a sampledata size is 10
Returns:
data (DataFrame): contains the distribution information at every bin (min value, max value, desired precentages and quartiles)
graphs (plotly object): contains plotly graph objects for generating plots of the data afterwards
Examples:
Show the median of the quality at the first ten positions in the sequence
>>> table = SeqTable(['AAAAAAAAAA', 'AAAAAAAAAC', 'CCCCCCCCCC'], qualitydata=['6AA9-C9--6C', '6AA!1C9BA6C', '6AA!!C9!-6C'])
>>> box_data, graphs = table.get_quality_dist(bins=range(10), percentiles=[50])
Now repeat the example from above, except group together all values from the first 5 bases and the next 5 bases
            i.e. All qualities between positions 0-4 will be grouped together before performing median, and all qualities between 5-9 will be grouped together). Also, return the bottom 10 and upper 90 percentiles in the statistics
>>> box_data, graphs = table.get_quality_dist(bins=[(0,4), (5,9)], percentiles=[10, 50, 90])
We can also plot the results as a series of boxplots using plotly
>>> from plotly.offline import init_notebook_mode, iplot, plot, iplot_mpl
# assuming ipython..
>>> init_notebook_mode()
>>> plotly.iplot(graphs)
# using outside of ipython
>>> plotly.plot(graphs)
"""
from collections import OrderedDict
current_stats = ['min', 'max', 'mean', 'median']
assert set(stats).issubset(set(current_stats)), "The stats provided are not currently supported. We only support {0}".format(','.join(current_stats))
# current base positions in dataframe
if col_names is None:
col_names = np.arange(1, arr.shape[1] + 1)
else:
assert len(col_names) == arr.shape[1], 'Column names does not match shape'
# print(bins)
    if bins == 'fastqc':
# use default bins as defined by fastqc report
bins = [
(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9),
(10, 14), (15, 19), (20, 24), (25, 29), (30, 34), (35, 39), (40, 44), (45, 49), (50, 54), (55, 59), (60, 64),
            (65, 69), (70, 74), (75, 79), (80, 84), (85, 89), (90, 94), (95, 99),
(100, 104), (105, 109), (110, 114), (115, 119), (120, 124), (125, 129), (130, 134), (135, 139), (140, 144), (145, 149), (150, 154), (155, 159), (160, 164), (165, 169), (170, 174), (175, 179), (180, 184), (185, 189), (190, 194), (195, 199),
(200, 204), (205, 209), (210, 214), (215, 219), (220, 224), (225, 229), (230, 234), (235, 239), (240, 244), (245, 249), (250, 254), (255, 259), (260, 264), (265, 269), (270, 274), (275, 279), (280, 284), (285, 289), (290, 294), (295, 299),
] + [(p, p + 9) for p in np.arange(300, arr.shape[1], 10)]
bins = [x if isinstance(x, int) else (x[0], x[1]) for x in bins]
    elif bins == 'even':
# create an equal set of 10 bins based on df shape
binsize = int(arr.shape[1] / 10)
bins = []
for x in range(0, arr.shape[1], binsize):
c1 = col_names[x]
c2 = col_names[min(x + binsize - 1, arr.shape[1] - 1)]
bins.append((c1, c2))
# print(bins)
else:
        # just in case it's a generator (i.e. range function)
# convert floats to ints, otherwise keep original
bins = [(int(x), int(x)) if isinstance(x, float) else x if isinstance(x, tuple) else (x, x) for x in bins]
binnames = OrderedDict()
for b in bins:
if b[0] < min(col_names) or b[0] > max(col_names):
continue
# create names for each bin
if isinstance(b, int):
binnames[str(b)] = (b, b)
elif len(b) == 2:
binnames[str(b[0]) + '-' + str(b[1])] = (b[0], b[1])
temp = xr.DataArray(
arr[np.random.choice(arr.shape[0], sample), :] if sample else arr,
dims=('read', 'position'),
coords={'position': col_names}
)
# define the quantile percentages we will return for each quality bin
percentiles = [round(p, 0) for p in percentiles]
per = copy.copy(percentiles)
# ensure that the following percentiles will ALWAYS be present
program_required = [0, 10, 25, 50, 75, 90, 100]
to_add_manually = set(program_required) - set(per)
    # update percentile list
per = sorted(per + list(to_add_manually))
# loop through each of the binnames/bin counts
binned_data = OrderedDict()
binned_data_stats = OrderedDict()
graphs = [] # for storing plotly graphs
plotlychosendata = pd.DataFrame(0, index=list(binnames.keys()), columns=['min', 'max', 'mean', 'median'])
for name, binned_cols in binnames.items():
userchosen_stats = {}
userchosen = {}
if isinstance(binned_cols, int):
# not binning together multiple positions in sequence
binned_cols = (binned_cols, binned_cols)
# create a list of all column/base positions listed within this bin
# set_cols = set(list(range(binned_cols[0], binned_cols[1] + 1)))
# identify columns in dataframe that intersect with columns listed above
# sel_cols = list(col_names_set & set_cols)
# select qualities within bin, unwind list into a single list
p = list(set(np.arange(binned_cols[0], binned_cols[1] + 1)) & set(temp.position.values)) # make sure positions are present in columns
bin_qual = temp.sel(position=p).values.ravel()
if exclude_null_quality:
quantile_res = np.percentile(bin_qual[bin_qual > 0], per)
mean_val = bin_qual[bin_qual > 0].mean()
plotlychosendata.loc[name, 'mean'] = mean_val
if 'mean' in stats:
userchosen_stats['mean'] = mean_val
else:
mean_val = bin_qual[bin_qual > 0].mean()
quantile_res = np.percentile(bin_qual, per)
plotlychosendata.loc[name, 'mean'] = mean_val
if 'mean' in stats:
userchosen_stats['mean'] = mean_val
storevals = []
for p, qnt in zip(per, quantile_res):
if p == 0:
plotlychosendata.loc[name, 'min'] = qnt
if 'min' in stats:
userchosen_stats['min'] = qnt
if p == 100:
plotlychosendata.loc[name, 'max'] = qnt
if 'max' in stats:
userchosen_stats['max'] = qnt
if p in program_required:
# store the values required by the program in storevals
storevals.append(qnt)
if p in percentiles:
# store original quantile values desired by user in variable percentiles
userchosen[str(int(p)) + '%'] = qnt
if p == 50:
# store median
median = qnt
if 'median' in stats:
userchosen_stats['median'] = qnt
plotlychosendata.loc[name, 'median'] = qnt
userchosen = pd.Series(userchosen)
if plotly_sampledata_size < 10:
warnings.warn('Warning, the desired plotly_sampledata_size is too low, value has been changed to 10')
plotly_sampledata_size = 10
        # next, build a fake set of data that we can pass into plotly for making boxplots; its descriptive statistics will match the current set
sample_data = np.zeros(plotly_sampledata_size)
        # these indices in subsets indicate the 5% index values for the provided sample_data_size
subsets = [int(x) for x in np.arange(0, 1.00, 0.05) * plotly_sampledata_size]
# we hardcoded the values in program_required, so we can add those values into fake subsets
        sample_data[0:subsets[1]] = storevals[1]  # fill the lowest indices with the 10th-percentile value (true min/max are shown separately as scatter points)
sample_data[subsets[1]:subsets[3]] = storevals[1] # store bottom 10% of data within 5-15% data range
sample_data[subsets[3]:subsets[7]] = storevals[2] # store 25% of data
sample_data[subsets[7]:subsets[13]] = storevals[3] # store median of data
sample_data[subsets[13]:subsets[17]] = storevals[4] # store 75% of data
        sample_data[subsets[17]:subsets[19]] = storevals[5]  # store 90th-percentile value
        sample_data[subsets[19]:] = storevals[5]  # store 90th-percentile value in the top indices
color = 'red' if median < 20 else 'blue' if median < 30 else 'green'
if plotly_installed is True:
# create a box plot using the fake sample_data, again this is better for memory resources since plotly stores all datapoints in javascript
plotdata = go.Box(
y=sample_data,
pointpos=0,
name=name,
boxpoints=False,
fillcolor=color,
showlegend=False,
line={
'color': 'black',
'width': 0.7
},
marker=dict(
color='rgb(107, 174, 214)',
size=3
)
)
else:
warnings.warn('PLOTLY not installed. No graph object data was returned')
plotdata = None
graphs.append(plotdata)
binned_data[name] = userchosen
binned_data_stats[name] = userchosen_stats
if plotly_installed is True:
# also include a scatter plot for the minimum value, maximum value, and mean in distribution
scatter_min = go.Scatter(x=list(plotlychosendata.index), y=plotlychosendata['min'], mode='markers', name='min', showlegend=False)
scatter_max = go.Scatter(x=list(plotlychosendata.index), y=plotlychosendata['max'], mode='markers', name='max')
scatter_mean = go.Scatter(
x=list(plotlychosendata.index),
y=plotlychosendata['mean'], line=dict(shape='spline'),
name='mean'
)
graphs.extend([scatter_min, scatter_max, scatter_mean])
if use_multiindex is True:
stats_df = pd.concat([pd.DataFrame(binned_data), pd.DataFrame(binned_data_stats)], keys=['percentile', 'stats'])
else:
stats_df = pd.concat([pd.DataFrame(binned_data), | pd.DataFrame(binned_data_stats) | pandas.DataFrame |
from __future__ import absolute_import
import collections
import gzip
import logging
import os
import sys
import multiprocessing
import threading
import numpy as np
import pandas as pd
from itertools import cycle, islice
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path = os.path.abspath(os.path.join(file_path, '..', '..', 'common'))
sys.path.append(lib_path)
from data_utils import get_file
logger = logging.getLogger(__name__)
SEED = 2017
np.set_printoptions(threshold=np.nan)
np.random.seed(SEED)
def get_p1_file(link):
fname = os.path.basename(link)
return get_file(fname, origin=link, cache_subdir='Pilot1')
def scale(df, scaling=None):
"""Scale data included in pandas dataframe.
Parameters
----------
df : pandas dataframe
dataframe to scale
    scaling : 'maxabs', 'minmax', 'std', or None, optional (default None)
type of scaling to apply
"""
if scaling is None or scaling.lower() == 'none':
return df
df = df.dropna(axis=1, how='any')
# Scaling data
if scaling == 'maxabs':
# Normalizing -1 to 1
scaler = MaxAbsScaler()
elif scaling == 'minmax':
# Scaling to [0,1]
scaler = MinMaxScaler()
else:
# Standard normalization
scaler = StandardScaler()
mat = df.as_matrix()
mat = scaler.fit_transform(mat)
df = pd.DataFrame(mat, columns=df.columns)
return df
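# Minimal sketch of the three scaling modes handled above (toy column invented for
# demonstration); note that scale() itself relies on the legacy DataFrame.as_matrix(),
# so it assumes an older pandas release.
def _example_scaling_modes():
    mat = np.array([[1.0], [2.0], [4.0]])
    return {
        'maxabs': MaxAbsScaler().fit_transform(mat),   # scaled into [-1, 1]
        'minmax': MinMaxScaler().fit_transform(mat),   # scaled into [0, 1]
        'std': StandardScaler().fit_transform(mat),    # zero mean, unit variance
    }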
def impute_and_scale(df, scaling='std'):
"""Impute missing values with mean and scale data included in pandas dataframe.
Parameters
----------
df : pandas dataframe
dataframe to impute and scale
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
"""
df = df.dropna(axis=1, how='all')
imputer = Imputer(strategy='mean', axis=0)
mat = imputer.fit_transform(df)
if scaling is None or scaling.lower() == 'none':
return pd.DataFrame(mat, columns=df.columns)
if scaling == 'maxabs':
scaler = MaxAbsScaler()
elif scaling == 'minmax':
scaler = MinMaxScaler()
else:
scaler = StandardScaler()
mat = scaler.fit_transform(mat)
df = pd.DataFrame(mat, columns=df.columns)
return df
def load_cellline_expressions(path, ncols=None, scaling='std'):
"""Load cell line expression data, sub-select columns of gene expression
    randomly if specified, scale the selected data and return a
pandas dataframe.
Parameters
----------
path: string
path to 'RNA_5_Platform_Gene_Transcript_Averaged_intensities.transposed.txt'
ncols : int or None
number of columns (gene expression) to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
"""
df = pd.read_csv(path, sep='\t', engine='c',
na_values=['na','-',''])
df1 = df['CellLine']
df1 = df1.map(lambda x: x.replace('.', ':'))
df1.name = 'CELLNAME'
df2 = df.drop('CellLine', 1)
total = df2.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df2 = df2.iloc[:, usecols]
df2 = impute_and_scale(df2, scaling)
df2 = df2.astype(np.float32)
df = pd.concat([df1, df2], axis=1)
return df
def load_cellline_mirna(path, ncols=None, scaling='std'):
"""Load cell line microRNA data, sub-select columns randomly if
    specified, scale the selected data and return a pandas
dataframe.
Parameters
----------
path: string
path to 'RNA__microRNA_OSU_V3_chip_log2.transposed.txt'
ncols : int or None
number of columns to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
"""
df = pd.read_csv(path, sep='\t', engine='c',
na_values=['na','-',''])
df1 = df['CellLine']
df1 = df1.map(lambda x: x.replace('.', ':'))
df1.name = 'CELLNAME'
df2 = df.drop('CellLine', 1)
total = df2.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df2 = df2.iloc[:, usecols]
df2 = impute_and_scale(df2, scaling)
df2 = df2.astype(np.float32)
df = pd.concat([df1, df2], axis=1)
return df
def load_cellline_proteome(path, kinome_path=None, ncols=None, scaling='std'):
    """Load cell line proteome data, sub-select columns randomly if
    specified, scale the selected data and return a pandas
dataframe.
Parameters
----------
path: string
path to 'nci60_proteome_log2.transposed.tsv'
kinome_path: string or None (default None)
path to 'nci60_kinome_log2.transposed.tsv'
ncols : int or None
number of columns to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
"""
df = pd.read_csv(path, sep='\t', engine='c')
df = df.set_index('CellLine')
if kinome_path:
df_k = pd.read_csv(kinome_path, sep='\t', engine='c')
df_k = df_k.set_index('CellLine')
df_k = df_k.add_suffix('.K')
df = df.merge(df_k, left_index=True, right_index=True)
index = df.index.map(lambda x: x.replace('.', ':'))
total = df.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df = df.iloc[:, usecols]
df = impute_and_scale(df, scaling)
df = df.astype(np.float32)
df.index = index
df.index.names = ['CELLNAME']
df = df.reset_index()
return df
def load_drug_descriptors(path, ncols=None, scaling='std'):
"""Load drug descriptor data, sub-select columns of drugs descriptors
    randomly if specified, impute and scale the selected data, and return a
pandas dataframe.
Parameters
----------
path: string
path to 'descriptors.2D-NSC.5dose.filtered.txt'
ncols : int or None
number of columns (drugs descriptors) to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
"""
df = pd.read_csv(path, sep='\t', engine='c',
na_values=['na','-',''],
dtype=np.float32)
df1 = pd.DataFrame(df.loc[:,'NAME'].astype(int).astype(str))
df1.rename(columns={'NAME': 'NSC'}, inplace=True)
df2 = df.drop('NAME', 1)
    # Filter columns if requested
total = df2.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df2 = df2.iloc[:,usecols]
df2 = impute_and_scale(df2, scaling)
df2 = df2.astype(np.float32)
df_dg = pd.concat([df1, df2], axis=1)
return df_dg
def load_drug_autoencoded(path, ncols=None, scaling='std'):
"""Load drug latent representation from autoencoder, sub-select
    columns of drugs randomly if specified, impute and scale the
selected data, and return a pandas dataframe.
Parameters
----------
path: string
path to 'Aspuru-Guzik_NSC_latent_representation_292D.csv'
ncols : int or None
number of columns (drug latent representations) to randomly subselect (default None : use all data)
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
"""
df = pd.read_csv(path, engine='c', dtype=np.float32)
df1 = pd.DataFrame(df.loc[:, 'NSC'].astype(int).astype(str))
df2 = df.drop('NSC', 1)
total = df2.shape[1]
if ncols and ncols < total:
usecols = np.random.choice(total, size=ncols, replace=False)
df2 = df2.iloc[:, usecols]
df2 = impute_and_scale(df2, scaling)
df2 = df2.astype(np.float32)
df = pd.concat([df1, df2], axis=1)
return df
def load_dose_response(path, min_logconc=-5., max_logconc=-5., subsample=None):
"""Load cell line response to different drug compounds, sub-select response for a specific
drug log concentration range and return a pandas dataframe.
Parameters
----------
path: string
path to 'NCI60_dose_response_with_missing_z5_avg.csv'
min_logconc : -3, -4, -5, -6, -7, optional (default -5)
min log concentration of drug to return cell line growth
max_logconc : -3, -4, -5, -6, -7, optional (default -5)
max log concentration of drug to return cell line growth
subsample: None, 'naive_balancing' (default None)
subsampling strategy to use to balance the data based on growth
"""
df = pd.read_csv(path, sep=',', engine='c',
na_values=['na','-',''],
dtype={'NSC':object, 'CELLNAME':str, 'LOG_CONCENTRATION':np.float32, 'GROWTH':np.float32})
df = df[(df['LOG_CONCENTRATION'] >= min_logconc) & (df['LOG_CONCENTRATION'] <= max_logconc)]
df = df[['NSC', 'CELLNAME', 'GROWTH', 'LOG_CONCENTRATION']]
if subsample and subsample == 'naive_balancing':
df1 = df[df['GROWTH'] <= 0]
df2 = df[(df['GROWTH'] > 0) & (df['GROWTH'] < 50)].sample(frac=0.7, random_state=SEED)
df3 = df[(df['GROWTH'] >= 50) & (df['GROWTH'] <= 100)].sample(frac=0.18, random_state=SEED)
df4 = df[df['GROWTH'] > 100].sample(frac=0.01, random_state=SEED)
df = pd.concat([df1, df2, df3, df4])
df = df.set_index(['NSC'])
return df
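# Illustrative sketch (toy response frame invented for demonstration): the
# min_logconc/max_logconc arguments above simply keep the rows whose
# LOG_CONCENTRATION falls inside the requested window.
def _example_logconc_filter(min_logconc=-5., max_logconc=-4.):
    toy = pd.DataFrame({'NSC': ['1', '1', '2'],
                        'CELLNAME': ['A', 'A', 'B'],
                        'GROWTH': [12., -3., 88.],
                        'LOG_CONCENTRATION': [-7., -5., -4.]})
    return toy[(toy['LOG_CONCENTRATION'] >= min_logconc) & (toy['LOG_CONCENTRATION'] <= max_logconc)]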
class DataLoader(object):
    """Load merged drug response, drug descriptors and cell line assay data
"""
def __init__(self, val_split=0.2, test_cell_split=None, shuffle=True,
cell_features=['expression'], drug_features=['descriptors'],
feature_subsample=None, scaling='std', scramble=False,
min_logconc=-5., max_logconc=-4., subsample='naive_balancing',
                 category_cutoffs=[0.]):
        """Initialize data merging drug response, drug descriptors and cell line assay.
Shuffle and split training and validation set
Parameters
----------
val_split : float, optional (default 0.2)
fraction of data to use in validation
test_cell_split : float or None, optional (default None)
fraction of cell lines to use in test; if None use predefined unseen cell lines instead of sampling cell lines used in training
shuffle : True or False, optional (default True)
if True shuffles the merged data before splitting training and validation sets
cell_features: list of strings from 'expression', 'mirna', 'proteome', 'all', 'categorical' (default ['expression'])
use one or more cell line feature sets: gene expression, microRNA, proteomics; or, use 'categorical' for one-hot encoded cell lines
drug_features: list of strings from 'descriptors', 'latent', 'all', 'noise' (default ['descriptors'])
use dragon7 descriptors, latent representations from Aspuru-Guzik's SMILES autoencoder trained on NSC drugs, or both; use random features if set to noise
feature_subsample: None or integer (default None)
number of feature columns to use from cellline expressions and drug descriptors
scaling: None, 'std', 'minmax' or 'maxabs' (default 'std')
            type of feature scaling: 'maxabs' to [-1, 1], 'minmax' to [0, 1], 'std' for standard normalization
scramble: True or False, optional (default False)
if True randomly shuffle dose response data as a control
min_logconc: float value between -3 and -7, optional (default -5.)
min log concentration of drug to return cell line growth
max_logconc: float value between -3 and -7, optional (default -4.)
max log concentration of drug to return cell line growth
subsample: 'naive_balancing' or None
if True balance dose response data with crude subsampling
category_cutoffs: list of floats (between -1 and +1) (default None)
            growth thresholds separating non-response and response categories
"""
server = 'http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/P1B3/'
cell_expr_path = get_p1_file(server+'P1B3_cellline_expressions.tsv')
cell_mrna_path = get_p1_file(server+'P1B3_cellline_mirna.tsv')
cell_prot_path = get_p1_file(server+'P1B3_cellline_proteome.tsv')
cell_kino_path = get_p1_file(server+'P1B3_cellline_kinome.tsv')
drug_desc_path = get_p1_file(server+'P1B3_drug_descriptors.tsv')
drug_auen_path = get_p1_file(server+'P1B3_drug_latent.csv')
dose_resp_path = get_p1_file(server+'P1B3_dose_response.csv')
test_cell_path = get_p1_file(server+'P1B3_test_celllines.txt')
test_drug_path = get_p1_file(server+'P1B3_test_drugs.txt')
df = load_dose_response(dose_resp_path, min_logconc=min_logconc, max_logconc=max_logconc, subsample=subsample)
logger.info('Loaded {} unique (D, CL) response sets.'.format(df.shape[0]))
# df[['GROWTH', 'LOG_CONCENTRATION']].to_csv('all.response.csv')
df = df.reset_index()
if 'all' in cell_features:
self.cell_features = ['expression', 'mirna', 'proteome']
else:
self.cell_features = cell_features
if 'all' in drug_features:
self.drug_features = ['descriptors', 'latent']
else:
self.drug_features = drug_features
self.input_shapes = collections.OrderedDict()
self.input_shapes['drug_concentration'] = (1,)
for fea in self.cell_features:
if fea == 'expression':
self.df_cell_expr = load_cellline_expressions(cell_expr_path, ncols=feature_subsample, scaling=scaling)
self.input_shapes['cell_expression'] = (self.df_cell_expr.shape[1] - 1,)
df = df.merge(self.df_cell_expr[['CELLNAME']], on='CELLNAME')
elif fea == 'mirna':
self.df_cell_mirna = load_cellline_mirna(cell_mrna_path, ncols=feature_subsample, scaling=scaling)
self.input_shapes['cell_microRNA'] = (self.df_cell_mirna.shape[1] - 1,)
df = df.merge(self.df_cell_mirna[['CELLNAME']], on='CELLNAME')
elif fea == 'proteome':
self.df_cell_prot = load_cellline_proteome(cell_prot_path, cell_kino_path, ncols=feature_subsample, scaling=scaling)
self.input_shapes['cell_proteome'] = (self.df_cell_prot.shape[1] - 1,)
df = df.merge(self.df_cell_prot[['CELLNAME']], on='CELLNAME')
elif fea == 'categorical':
df_cell_ids = df[['CELLNAME']].drop_duplicates()
cell_ids = df_cell_ids['CELLNAME'].map(lambda x: x.replace(':', '.'))
df_cell_cat = pd.get_dummies(cell_ids)
df_cell_cat.index = df_cell_ids['CELLNAME']
self.df_cell_cat = df_cell_cat.reset_index()
self.input_shapes['cell_categorical'] = (self.df_cell_cat.shape[1] - 1,)
for fea in self.drug_features:
if fea == 'descriptors':
self.df_drug_desc = load_drug_descriptors(drug_desc_path, ncols=feature_subsample, scaling=scaling)
self.input_shapes['drug_descriptors'] = (self.df_drug_desc.shape[1] - 1,)
df = df.merge(self.df_drug_desc[['NSC']], on='NSC')
elif fea == 'latent':
self.df_drug_auen = load_drug_autoencoded(drug_auen_path, ncols=feature_subsample, scaling=scaling)
self.input_shapes['drug_SMILES_latent'] = (self.df_drug_auen.shape[1] - 1,)
df = df.merge(self.df_drug_auen[['NSC']], on='NSC')
elif fea == 'noise':
df_drug_ids = df[['NSC']].drop_duplicates()
noise = np.random.normal(size=(df_drug_ids.shape[0], 500))
df_rand = pd.DataFrame(noise, index=df_drug_ids['NSC'],
columns=['RAND-{:03d}'.format(x) for x in range(500)])
self.df_drug_rand = df_rand.reset_index()
self.input_shapes['drug_random_vector'] = (self.df_drug_rand.shape[1] - 1,)
logger.debug('Filtered down to {} rows with matching information.'.format(df.shape[0]))
# df[['GROWTH', 'LOG_CONCENTRATION']].to_csv('filtered.response.csv')
df_test_cell = | pd.read_csv(test_cell_path) | pandas.read_csv |
from datetime import timedelta
import operator
from typing import Any, Callable, List, Optional, Sequence, Union
import numpy as np
from pandas._libs.tslibs import (
NaT,
NaTType,
frequencies as libfrequencies,
iNaT,
period as libperiod,
)
from pandas._libs.tslibs.fields import isleapyear_arr
from pandas._libs.tslibs.period import (
DIFFERENT_FREQ,
IncompatibleFrequency,
Period,
get_period_field_arr,
period_asfreq_arr,
)
from pandas._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
_TD_DTYPE,
ensure_object,
is_datetime64_dtype,
is_float_dtype,
is_period_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.generic import (
ABCIndexClass,
ABCPeriodArray,
ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import isna, notna
import pandas.core.algorithms as algos
from pandas.core.arrays import datetimelike as dtl
import pandas.core.common as com
from pandas.tseries import frequencies
from pandas.tseries.offsets import DateOffset, Tick, _delta_to_tick
def _field_accessor(name, alias, docstring=None):
def f(self):
base, mult = libfrequencies.get_freq_code(self.freq)
result = get_period_field_arr(alias, self.asi8, base)
return result
f.__name__ = name
f.__doc__ = docstring
return property(f)
class PeriodArray(dtl.DatetimeLikeArrayMixin, dtl.DatelikeOps):
"""
Pandas ExtensionArray for storing Period data.
Users should use :func:`period_array` to create new instances.
Parameters
----------
values : Union[PeriodArray, Series[period], ndarray[int], PeriodIndex]
The data to store. These should be arrays that can be directly
converted to ordinals without inference or copy (PeriodArray,
ndarray[int64]), or a box around such an array (Series[period],
PeriodIndex).
freq : str or DateOffset
The `freq` to use for the array. Mostly applicable when `values`
is an ndarray of integers, when `freq` is required. When `values`
is a PeriodArray (or box around), it's checked that ``values.freq``
matches `freq`.
dtype : PeriodDtype, optional
A PeriodDtype instance from which to extract a `freq`. If both
`freq` and `dtype` are specified, then the frequencies must match.
copy : bool, default False
Whether to copy the ordinals before storing.
Attributes
----------
None
Methods
-------
None
See Also
--------
period_array : Create a new PeriodArray.
PeriodIndex : Immutable Index for period data.
Notes
-----
There are two components to a PeriodArray
- ordinals : integer ndarray
- freq : pd.tseries.offsets.Offset
The values are physically stored as a 1-D ndarray of integers. These are
called "ordinals" and represent some kind of offset from a base.
The `freq` indicates the span covered by each element of the array.
All elements in the PeriodArray have the same `freq`.
"""
# array priority higher than numpy scalars
__array_priority__ = 1000
_typ = "periodarray" # ABCPeriodArray
_scalar_type = Period
_recognized_scalars = (Period,)
_is_recognized_dtype = is_period_dtype
# Names others delegate to us
_other_ops: List[str] = []
_bool_ops = ["is_leap_year"]
_object_ops = ["start_time", "end_time", "freq"]
_field_ops = [
"year",
"month",
"day",
"hour",
"minute",
"second",
"weekofyear",
"weekday",
"week",
"dayofweek",
"dayofyear",
"quarter",
"qyear",
"days_in_month",
"daysinmonth",
]
_datetimelike_ops = _field_ops + _object_ops + _bool_ops
_datetimelike_methods = ["strftime", "to_timestamp", "asfreq"]
# --------------------------------------------------------------------
# Constructors
def __init__(self, values, freq=None, dtype=None, copy=False):
freq = validate_dtype_freq(dtype, freq)
if freq is not None:
freq = Period._maybe_convert_freq(freq)
if isinstance(values, ABCSeries):
values = values._values
if not isinstance(values, type(self)):
raise TypeError("Incorrect dtype")
elif isinstance(values, ABCPeriodIndex):
values = values._values
if isinstance(values, type(self)):
if freq is not None and freq != values.freq:
raise raise_on_incompatible(values, freq)
values, freq = values._data, values.freq
values = np.array(values, dtype="int64", copy=copy)
self._data = values
if freq is None:
raise ValueError("freq is not specified and cannot be inferred")
self._dtype = PeriodDtype(freq)
@classmethod
def _simple_new(cls, values: np.ndarray, freq=None, **kwargs):
# alias for PeriodArray.__init__
assert isinstance(values, np.ndarray) and values.dtype == "i8"
return cls(values, freq=freq, **kwargs)
@classmethod
def _from_sequence(
cls,
scalars: Sequence[Optional[Period]],
dtype: Optional[PeriodDtype] = None,
copy: bool = False,
) -> ABCPeriodArray:
if dtype:
freq = dtype.freq
else:
freq = None
if isinstance(scalars, cls):
validate_dtype_freq(scalars.dtype, freq)
if copy:
scalars = scalars.copy()
return scalars
periods = np.asarray(scalars, dtype=object)
if copy:
periods = periods.copy()
freq = freq or libperiod.extract_freq(periods)
ordinals = libperiod.extract_ordinals(periods, freq)
return cls(ordinals, freq=freq)
@classmethod
def _from_sequence_of_strings(cls, strings, dtype=None, copy=False):
return cls._from_sequence(strings, dtype, copy)
@classmethod
def _from_datetime64(cls, data, freq, tz=None):
"""
Construct a PeriodArray from a datetime64 array
Parameters
----------
data : ndarray[datetime64[ns], datetime64[ns, tz]]
freq : str or Tick
tz : tzinfo, optional
Returns
-------
PeriodArray[freq]
"""
data, freq = dt64arr_to_periodarr(data, freq, tz)
return cls(data, freq=freq)
@classmethod
def _generate_range(cls, start, end, periods, freq, fields):
periods = dtl.validate_periods(periods)
if freq is not None:
freq = Period._maybe_convert_freq(freq)
field_count = len(fields)
if start is not None or end is not None:
if field_count > 0:
raise ValueError(
"Can either instantiate from fields or endpoints, but not both"
)
subarr, freq = _get_ordinal_range(start, end, periods, freq)
elif field_count > 0:
subarr, freq = _range_from_fields(freq=freq, **fields)
else:
raise ValueError("Not enough parameters to construct Period range")
return subarr, freq
# -----------------------------------------------------------------
# DatetimeLike Interface
def _unbox_scalar(self, value: Union[Period, NaTType]) -> int:
if value is NaT:
return value.value
elif isinstance(value, self._scalar_type):
if not isna(value):
self._check_compatible_with(value)
return value.ordinal
else:
raise ValueError(f"'value' should be a Period. Got '{value}' instead.")
def _scalar_from_string(self, value: str) -> Period:
return Period(value, freq=self.freq)
def _check_compatible_with(self, other, setitem: bool = False):
if other is NaT:
return
if self.freqstr != other.freqstr:
raise raise_on_incompatible(self, other)
# --------------------------------------------------------------------
# Data / Attributes
@cache_readonly
def dtype(self):
return self._dtype
# error: Read-only property cannot override read-write property [misc]
@property # type: ignore
def freq(self):
"""
Return the frequency object for this PeriodArray.
"""
return self.dtype.freq
def __array__(self, dtype=None) -> np.ndarray:
# overriding DatetimelikeArray
return np.array(list(self), dtype=object)
def __arrow_array__(self, type=None):
"""
Convert myself into a pyarrow Array.
"""
import pyarrow
from pandas.core.arrays._arrow_utils import ArrowPeriodType
if type is not None:
if pyarrow.types.is_integer(type):
return pyarrow.array(self._data, mask=self.isna(), type=type)
elif isinstance(type, ArrowPeriodType):
# ensure we have the same freq
if self.freqstr != type.freq:
raise TypeError(
"Not supported to convert PeriodArray to array with different "
f"'freq' ({self.freqstr} vs {type.freq})"
)
else:
raise TypeError(
f"Not supported to convert PeriodArray to '{type}' type"
)
period_type = ArrowPeriodType(self.freqstr)
storage_array = pyarrow.array(self._data, mask=self.isna(), type="int64")
return pyarrow.ExtensionArray.from_storage(period_type, storage_array)
# --------------------------------------------------------------------
# Vectorized analogues of Period properties
year = _field_accessor(
"year",
0,
"""
The year of the period.
""",
)
month = _field_accessor(
"month",
3,
"""
The month as January=1, December=12.
""",
)
day = _field_accessor(
"day",
4,
"""
The days of the period.
""",
)
hour = _field_accessor(
"hour",
5,
"""
The hour of the period.
""",
)
minute = _field_accessor(
"minute",
6,
"""
The minute of the period.
""",
)
second = _field_accessor(
"second",
7,
"""
The second of the period.
""",
)
weekofyear = _field_accessor(
"week",
8,
"""
The week ordinal of the year.
""",
)
week = weekofyear
dayofweek = _field_accessor(
"dayofweek",
10,
"""
The day of the week with Monday=0, Sunday=6.
""",
)
weekday = dayofweek
dayofyear = day_of_year = _field_accessor(
"dayofyear",
9,
"""
The ordinal day of the year.
""",
)
quarter = _field_accessor(
"quarter",
2,
"""
The quarter of the date.
""",
)
qyear = _field_accessor("qyear", 1)
days_in_month = _field_accessor(
"days_in_month",
11,
"""
The number of days in the month.
""",
)
daysinmonth = days_in_month
@property
def is_leap_year(self):
"""
Logical indicating if the date belongs to a leap year.
"""
return isleapyear_arr(np.asarray(self.year))
@property
def start_time(self):
return self.to_timestamp(how="start")
@property
def end_time(self):
return self.to_timestamp(how="end")
def to_timestamp(self, freq=None, how="start"):
"""
Cast to DatetimeArray/Index.
Parameters
----------
freq : str or DateOffset, optional
Target frequency. The default is 'D' for week or longer,
'S' otherwise.
how : {'s', 'e', 'start', 'end'}
Whether to use the start or end of the time period being converted.
Returns
-------
DatetimeArray/Index
"""
from pandas.core.arrays import DatetimeArray
how = libperiod._validate_end_alias(how)
end = how == "E"
if end:
if freq == "B":
# roll forward to ensure we land on B date
adjust = Timedelta(1, "D") - Timedelta(1, "ns")
return self.to_timestamp(how="start") + adjust
else:
adjust = Timedelta(1, "ns")
return (self + self.freq).to_timestamp(how="start") - adjust
if freq is None:
base, mult = libfrequencies.get_freq_code(self.freq)
freq = libfrequencies.get_to_timestamp_base(base)
else:
freq = Period._maybe_convert_freq(freq)
base, mult = libfrequencies.get_freq_code(freq)
new_data = self.asfreq(freq, how=how)
new_data = libperiod.periodarr_to_dt64arr(new_data.asi8, base)
return DatetimeArray._from_sequence(new_data, freq="infer")
# --------------------------------------------------------------------
# Array-like / EA-Interface Methods
def _values_for_argsort(self):
return self._data
# --------------------------------------------------------------------
def _time_shift(self, periods, freq=None):
"""
Shift each value by `periods`.
Note this is different from ExtensionArray.shift, which
shifts the *position* of each element, padding the end with
missing values.
Parameters
----------
periods : int
Number of periods to shift by.
freq : pandas.DateOffset, pandas.Timedelta, or str
Frequency increment to shift by.
"""
if freq is not None:
raise TypeError(
"`freq` argument is not supported for "
f"{type(self).__name__}._time_shift"
)
values = self.asi8 + periods * self.freq.n
if self._hasnans:
values[self._isnan] = iNaT
return type(self)(values, freq=self.freq)
@property
def _box_func(self):
return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq)
def asfreq(self, freq=None, how="E"):
"""
Convert the Period Array/Index to the specified frequency `freq`.
Parameters
----------
freq : str
A frequency.
how : str {'E', 'S'}
Whether the elements should be aligned to the end
            or start within a period.
* 'E', 'END', or 'FINISH' for end,
* 'S', 'START', or 'BEGIN' for start.
January 31st ('END') vs. January 1st ('START') for example.
Returns
-------
Period Array/Index
Constructed with the new frequency.
Examples
--------
>>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A')
>>> pidx
PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'],
dtype='period[A-DEC]', freq='A-DEC')
>>> pidx.asfreq('M')
PeriodIndex(['2010-12', '2011-12', '2012-12', '2013-12', '2014-12',
'2015-12'], dtype='period[M]', freq='M')
>>> pidx.asfreq('M', how='S')
PeriodIndex(['2010-01', '2011-01', '2012-01', '2013-01', '2014-01',
'2015-01'], dtype='period[M]', freq='M')
"""
how = libperiod._validate_end_alias(how)
freq = Period._maybe_convert_freq(freq)
base1, mult1 = libfrequencies.get_freq_code(self.freq)
base2, mult2 = libfrequencies.get_freq_code(freq)
asi8 = self.asi8
# mult1 can't be negative or 0
end = how == "E"
if end:
ordinal = asi8 + mult1 - 1
else:
ordinal = asi8
new_data = period_asfreq_arr(ordinal, base1, base2, end)
if self._hasnans:
new_data[self._isnan] = iNaT
return type(self)(new_data, freq=freq)
# ------------------------------------------------------------------
# Rendering Methods
def _formatter(self, boxed=False):
if boxed:
return str
return "'{}'".format
def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs):
"""
actually format my specific types
"""
values = self.astype(object)
if date_format:
formatter = lambda dt: dt.strftime(date_format)
else:
formatter = lambda dt: str(dt)
if self._hasnans:
mask = self._isnan
values[mask] = na_rep
imask = ~mask
values[imask] = np.array([formatter(dt) for dt in values[imask]])
else:
values = np.array([formatter(dt) for dt in values])
return values
# ------------------------------------------------------------------
def astype(self, dtype, copy=True):
# We handle Period[T] -> Period[U]
# Our parent handles everything else.
dtype = pandas_dtype(dtype)
if is_period_dtype(dtype):
return self.asfreq(dtype.freq)
return super().astype(dtype, copy=copy)
# ------------------------------------------------------------------
# Arithmetic Methods
def _sub_datelike(self, other):
assert other is not NaT
return NotImplemented
def _sub_period(self, other):
# If the operation is well-defined, we return an object-Index
# of DateOffsets. Null entries are filled with pd.NaT
self._check_compatible_with(other)
asi8 = self.asi8
new_data = asi8 - other.ordinal
new_data = np.array([self.freq * x for x in new_data])
if self._hasnans:
new_data[self._isnan] = NaT
return new_data
def _addsub_int_array(
self, other: np.ndarray, op: Callable[[Any, Any], Any],
) -> "PeriodArray":
"""
Add or subtract array of integers; equivalent to applying
`_time_shift` pointwise.
Parameters
----------
other : np.ndarray[integer-dtype]
op : {operator.add, operator.sub}
Returns
-------
result : PeriodArray
"""
assert op in [operator.add, operator.sub]
if op is operator.sub:
other = -other
res_values = algos.checked_add_with_arr(self.asi8, other, arr_mask=self._isnan)
res_values = res_values.view("i8")
res_values[self._isnan] = iNaT
return type(self)(res_values, freq=self.freq)
def _add_offset(self, other):
assert not isinstance(other, Tick)
base = libfrequencies.get_base_alias(other.rule_code)
if base != self.freq.rule_code:
raise raise_on_incompatible(self, other)
# Note: when calling parent class's _add_timedeltalike_scalar,
# it will call delta_to_nanoseconds(delta). Because delta here
# is an integer, delta_to_nanoseconds will return it unchanged.
result = super()._add_timedeltalike_scalar(other.n)
return type(self)(result, freq=self.freq)
def _add_timedeltalike_scalar(self, other):
"""
Parameters
----------
other : timedelta, Tick, np.timedelta64
Returns
-------
result : ndarray[int64]
"""
assert isinstance(self.freq, Tick) # checked by calling function
assert isinstance(other, (timedelta, np.timedelta64, Tick))
if notna(other):
# special handling for np.timedelta64("NaT"), avoid calling
# _check_timedeltalike_freq_compat as that would raise TypeError
other = self._check_timedeltalike_freq_compat(other)
# Note: when calling parent class's _add_timedeltalike_scalar,
# it will call delta_to_nanoseconds(delta). Because delta here
# is an integer, delta_to_nanoseconds will return it unchanged.
ordinals = super()._add_timedeltalike_scalar(other)
return ordinals
def _add_delta_tdi(self, other):
"""
Parameters
----------
other : TimedeltaArray or ndarray[timedelta64]
Returns
-------
result : ndarray[int64]
"""
assert isinstance(self.freq, Tick) # checked by calling function
if not np.all(isna(other)):
delta = self._check_timedeltalike_freq_compat(other)
else:
# all-NaT TimedeltaIndex is equivalent to a single scalar td64 NaT
return self + np.timedelta64("NaT")
return self._addsub_int_array(delta, operator.add).asi8
def _add_delta(self, other):
"""
Add a timedelta-like, Tick, or TimedeltaIndex-like object
to self, yielding a new PeriodArray
Parameters
----------
other : {timedelta, np.timedelta64, Tick,
TimedeltaIndex, ndarray[timedelta64]}
Returns
-------
result : PeriodArray
"""
if not isinstance(self.freq, Tick):
# We cannot add timedelta-like to non-tick PeriodArray
raise raise_on_incompatible(self, other)
new_ordinals = super()._add_delta(other)
return type(self)(new_ordinals, freq=self.freq)
def _check_timedeltalike_freq_compat(self, other):
"""
Arithmetic operations with timedelta-like scalars or array `other`
are only valid if `other` is an integer multiple of `self.freq`.
If the operation is valid, find that integer multiple. Otherwise,
raise because the operation is invalid.
Parameters
----------
other : timedelta, np.timedelta64, Tick,
ndarray[timedelta64], TimedeltaArray, TimedeltaIndex
Returns
-------
multiple : int or ndarray[int64]
Raises
------
IncompatibleFrequency
"""
assert isinstance(self.freq, Tick) # checked by calling function
own_offset = frequencies.to_offset(self.freq.rule_code)
base_nanos = delta_to_nanoseconds(own_offset)
if isinstance(other, (timedelta, np.timedelta64, Tick)):
nanos = delta_to_nanoseconds(other)
elif isinstance(other, np.ndarray):
# numpy timedelta64 array; all entries must be compatible
assert other.dtype.kind == "m"
if other.dtype != _TD_DTYPE:
# i.e. non-nano unit
# TODO: disallow unit-less timedelta64
other = other.astype(_TD_DTYPE)
nanos = other.view("i8")
else:
# TimedeltaArray/Index
nanos = other.asi8
if np.all(nanos % base_nanos == 0):
# nanos being added is an integer multiple of the
# base-frequency to self.freq
delta = nanos // base_nanos
# delta is the integer (or integer-array) number of periods
# by which will be added to self.
return delta
raise raise_on_incompatible(self, other)
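# Minimal sketch (illustrative only, not part of pandas): the compatibility check
# above accepts a timedelta-like only when it is a whole multiple of the tick
# frequency, e.g. a 3-hour Timedelta against an hourly frequency resolves to 3 periods.
def _example_tick_multiple():
    base_nanos = delta_to_nanoseconds(frequencies.to_offset("H"))
    nanos = delta_to_nanoseconds(Timedelta(hours=3))
    assert nanos % base_nanos == 0
    return nanos // base_nanos  # -> 3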
def raise_on_incompatible(left, right):
"""
Helper function to render a consistent error message when raising
IncompatibleFrequency.
Parameters
----------
left : PeriodArray
right : None, DateOffset, Period, ndarray, or timedelta-like
Returns
-------
IncompatibleFrequency
Exception to be raised by the caller.
"""
# GH#24283 error message format depends on whether right is scalar
if isinstance(right, np.ndarray) or right is None:
other_freq = None
elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period, DateOffset)):
other_freq = right.freqstr
else:
other_freq = _delta_to_tick(Timedelta(right)).freqstr
msg = DIFFERENT_FREQ.format(
cls=type(left).__name__, own_freq=left.freqstr, other_freq=other_freq
)
return IncompatibleFrequency(msg)
# -------------------------------------------------------------------
# Constructor Helpers
def period_array(
data: Sequence[Optional[Period]],
freq: Optional[Union[str, Tick]] = None,
copy: bool = False,
) -> PeriodArray:
"""
Construct a new PeriodArray from a sequence of Period scalars.
Parameters
----------
data : Sequence of Period objects
A sequence of Period objects. These are required to all have
the same ``freq.`` Missing values can be indicated by ``None``
or ``pandas.NaT``.
freq : str, Tick, or Offset
The frequency of every element of the array. This can be specified
to avoid inferring the `freq` from `data`.
copy : bool, default False
Whether to ensure a copy of the data is made.
Returns
-------
PeriodArray
See Also
--------
PeriodArray
pandas.PeriodIndex
Examples
--------
>>> period_array([pd.Period('2017', freq='A'),
... pd.Period('2018', freq='A')])
<PeriodArray>
['2017', '2018']
Length: 2, dtype: period[A-DEC]
>>> period_array([pd.Period('2017', freq='A'),
... pd.Period('2018', freq='A'),
... pd.NaT])
<PeriodArray>
['2017', '2018', 'NaT']
Length: 3, dtype: period[A-DEC]
Integers that look like years are handled
>>> period_array([2000, 2001, 2002], freq='D')
['2000-01-01', '2001-01-01', '2002-01-01']
Length: 3, dtype: period[D]
Datetime-like strings may also be passed
>>> period_array(['2000-Q1', '2000-Q2', '2000-Q3', '2000-Q4'], freq='Q')
<PeriodArray>
['2000Q1', '2000Q2', '2000Q3', '2000Q4']
Length: 4, dtype: period[Q-DEC]
"""
if is_datetime64_dtype(data):
return PeriodArray._from_datetime64(data, freq)
if isinstance(data, (ABCPeriodIndex, ABCSeries, PeriodArray)):
return PeriodArray(data, freq)
# other iterable of some kind
if not isinstance(data, (np.ndarray, list, tuple)):
data = list(data)
data = np.asarray(data)
dtype: Optional[PeriodDtype]
if freq:
dtype = PeriodDtype(freq)
else:
dtype = None
if is_float_dtype(data) and len(data) > 0:
raise TypeError("PeriodIndex does not allow floating point in construction")
data = ensure_object(data)
return PeriodArray._from_sequence(data, dtype=dtype)
def validate_dtype_freq(dtype, freq):
"""
If both a dtype and a freq are available, ensure they match. If only
dtype is available, extract the implied freq.
Parameters
----------
dtype : dtype
freq : DateOffset or None
Returns
-------
freq : DateOffset
Raises
------
ValueError : non-period dtype
IncompatibleFrequency : mismatch between dtype and freq
"""
if freq is not None:
freq = frequencies.to_offset(freq)
if dtype is not None:
dtype = pandas_dtype(dtype)
if not is_period_dtype(dtype):
raise ValueError("dtype must be PeriodDtype")
if freq is None:
freq = dtype.freq
elif freq != dtype.freq:
raise IncompatibleFrequency("specified freq and dtype are different")
return freq
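def _validate_dtype_freq_example():
    # Editorial usage sketch, not part of the pandas source above: when freq is
    # absent the dtype supplies it, and a mismatch raises IncompatibleFrequency.
    assert validate_dtype_freq("period[D]", None) == frequencies.to_offset("D")
    try:
        validate_dtype_freq("period[D]", "M")
    except IncompatibleFrequency:
        pass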
def dt64arr_to_periodarr(data, freq, tz=None):
"""
Convert a datetime-like array of values to Period ordinals.
Parameters
----------
data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64ns]]
freq : Optional[Union[str, Tick]]
Must match the `freq` on the `data` if `data` is a DatetimeIndex
or Series.
tz : Optional[tzinfo]
Returns
-------
ordinals : ndarray[int]
freq : Tick
The frequency extracted from the Series or DatetimeIndex if that's
used.
"""
if data.dtype != np.dtype("M8[ns]"):
raise ValueError(f"Wrong dtype: {data.dtype}")
if freq is None:
if isinstance(data, ABCIndexClass):
data, freq = data._values, data.freq
elif isinstance(data, ABCSeries):
data, freq = data._values, data.dt.freq
freq = | Period._maybe_convert_freq(freq) | pandas._libs.tslibs.period.Period._maybe_convert_freq |
r"""Submodule frequentist_statistics.py includes the following functions: <br>
- **normal_check():** compare the distribution of numeric variables to a normal distribution using the
Kolmogorov-Smirnov test <br>
- **correlation_analysis():** Run correlations for numerical features and return output in different formats <br>
- **correlations_as_sample_increases():** Run correlations for subparts of the data to check robustness <br>
- **multiple_univariate_OLSs():** run multiple univariate OLS regressions <br>
- **potential_for_change_index():** Calculate the potential for change index based on either variants of the r-squared
(from linear regression) or the r-value (pearson correlation) <br>
- **correct_pvalues():** function to correct for multiple testing <br>
- **partial_correlation():** function to calculate the partial correlations whilst correcting for other variables <br>
"""
from itertools import combinations
from itertools import product
from typing import Tuple
from typing import Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
from matplotlib.lines import Line2D
from scipy import stats
from sklearn.linear_model import LinearRegression
from statsmodels.stats.multitest import multipletests
from .utils import apply_scaling
def normal_check(data: pd.DataFrame) -> pd.DataFrame:
r"""Compare the distribution of numeric variables to a normal distribution using the Kolmogrov-Smirnov test
Wrapper for `scipy.stats.kstest`: the empircal data is compared to a normally distributed variable with the
same mean and standard deviation. A significant result (p < 0.05) in the goodness of fit test means that the
data is not normally distributed.
Parameters
----------
data: pandas.DataFrame
Dataframe including the columns of interest
Returns
----------
df_normality_check: pd.DataFrame
Dataframe with column names, p-values and an indication of normality
Examples
----------
>>> tips = sns.load_dataset("tips")
>>> df_normality_check = normal_check(tips)
"""
# Select numeric columns only
num_features = data.select_dtypes(include="number").columns.tolist()
# Compare distribution of each feature to a normal distribution with given mean and std
df_normality_check = data[num_features].apply(
lambda x: stats.kstest(
x.dropna(), stats.norm.cdf, args=(np.nanmean(x), np.nanstd(x)), N=len(x)
)[1],
axis=0,
)
# create a label that indicates whether a feature has a normal distribution or not
df_normality_check = pd.DataFrame(df_normality_check).reset_index()
df_normality_check.columns = ["feature", "p-value"]
df_normality_check["normality"] = df_normality_check["p-value"] >= 0.05
return df_normality_check
def permute_test(a, test_type, test, **kwargs):
r"""Helper function to run tests for permutations
Parameters
----------
a : np.array
test_type: str {'correlation', 'independent_t_test'}
Type of the test to be used
test:
e.g. `scipy.stats.pearsonr` or `statsmodels.stats.weightstats.ttest_ind`
**kwargs:
Additional keywords to be added to `test`
- `a2` for the second feature if test_type = 'correlation'
Returns
----------
float:
p value for permutation
"""
if test_type == "correlation":
a2 = kwargs["a2"]
_, p = test(a, a2)
else:
raise ValueError("Unknown test_type provided")
def correlation_analysis(
data: pd.DataFrame,
col_list=None,
row_list=None,
check_norm=False,
method: str = "pearson",
dropna: str = "pairwise",
permutation_test: bool = False,
n_permutations: int = 1000,
random_state=None,
):
r"""Run correlations for numerical features and return output in different formats
Different methods to compute correlations and to handle missing values are implemented.
Inspired by `researchpy.corr_case` and `researchpy.corr_pair`.
Parameters
----------
data : pandas.DataFrame
Dataframe with variables in columns, cases in rows
row_list: list or None (default: None)
List with names of columns in `data` that should be in the rows of the correlogram.
If None, all columns are used but only every unique combination.
col_list: list or None (default: None)
List with names of columns in `data` that should be in the columns of the correlogram.
If None, all columns are used and only every unique combination.
check_norm: bool (default: False)
If True, normality will be checked for columns in `data` using `normal_check`. This influences the used method
for correlations, i.e. Pearson or Spearman. Note: normality check ignores missing values.
method: {'pearson', 'kendall', 'spearman'}, default 'pearson'
Type of correlation, either Pearson's r, Spearman's rho, or Kendall's tau, implemented via respectively
`scipy.stats.pearsonr`, `scipy.stats.spearmanr`, and `scipy.stats.kendalltau`
Will be ignored if check_norm=True. Instead, Pearson's r is used for every combination of normally distributed
columns and Spearman's rho is used for all other combinations.
dropna : {'listwise', 'pairwise'}, default 'pairwise'
Should rows with missing values be dropped over the complete `data` ('listwise') or for every correlation
separately ('pairwise')
permutation_test: bool (default: False)
If True, a permutation test will be added
n_permutations: int (default: 1000)
Number of permutations in the permutation test
random_state: None or int (default: None)
Random state for permutation_test. If not None, random_state will be updated for every permutation
plot_permutation: bool (default: False)
Whether to plot the results of the permutation test
figsize: tuple (default: (11.7, 8.27))
Width and height of the figure in inches
Returns
----------
result_dict: dict
Dictionary containing the following keys:
info : pandas.DataFrame
Description of correlation method, missing values handling and number of observations
r-values : pandas.DataFrame
Dataframe with correlation coefficients. Indices and columns are column names from `data`. Only lower
triangle is filled.
p-values : pandas.DataFrame
Dataframe with p-values. Indices and columns are column names from `data`. Only lower triangle is filled.
N : pandas.DataFrame
Dataframe with numbers of observations. Indices and columns are column names from `data`. Only lower
triangle is filled. If dropna ='listwise', every correlation will have the same number of observations.
summary : pandas.DataFrame
Dataframe with columns ['analysis', 'feature1', 'feature2', 'r-value', 'p-value', 'N', 'stat-sign']
which indicate the type of test used for the correlation, the pair of columns, the correlation coefficient,
the p-value, the number of observations for each combination of columns in `data` and whether the r-value is
statistically significant.
plotted_permuations: Figure
Examples
----------
>>> from jmspack.frequentist_statistics import correlation_analysis
>>> import seaborn as sns
>>> iris = sns.load_dataset('iris')
>>> dict_results = correlation_analysis(iris, method='pearson', dropna='listwise', permutation_test=True,
>>> n_permutations=100, check_norm=True)
>>> dict_results['summary']
References
----------
<NAME> (2018). researchpy's documentation [Revision 9ae5ed63]. Retrieved from
https://researchpy.readthedocs.io/en/latest/
"""
# Settings test
if method == "pearson":
test, test_name = stats.pearsonr, "Pearson"
elif method == "spearman":
test, test_name = stats.spearmanr, "Spearman Rank"
elif method == "kendall":
test, test_name = stats.kendalltau, "Kendall's Tau-b"
else:
raise ValueError("method not in {'pearson', 'kendall', 'spearman'}")
# Copy numerical data from the original data
data = data.copy().select_dtypes("number")
# Get correct lists
if col_list and not row_list:
row_list = data.select_dtypes("number").drop(col_list, axis=1).columns.tolist()
elif row_list and not col_list:
col_list = data.select_dtypes("number").drop(row_list, axis=1).columns.tolist()
# Initializing dataframes to store results
info = pd.DataFrame()
summary = pd.DataFrame()
if not col_list and not row_list:
r_vals = pd.DataFrame(columns=data.columns, index=data.columns)
p_vals = pd.DataFrame(columns=data.columns, index=data.columns)
n_vals = pd.DataFrame(columns=data.columns, index=data.columns)
iterator = combinations(data.columns, 2)
else:
r_vals = pd.DataFrame(columns=col_list, index=row_list)
p_vals = pd.DataFrame(columns=col_list, index=row_list)
n_vals = | pd.DataFrame(columns=col_list, index=row_list) | pandas.DataFrame |
import os
import timeit
import pandas as pd
from numpy.random import uniform
import featherstore as fs
def time_it(func, number, *args, **kwargs):
MS = 1000
runtime = timeit.timeit('func(*args, **kwargs)',
globals={**globals(), **locals()},
number=number)
runtime = runtime * MS / number
return runtime
def generate_df(rows, cols):
index = list(range(rows))
data = {f'c{c}': uniform(-10000, 10000, size=rows) for c in range(cols)}
return | pd.DataFrame(data=data, index=index) | pandas.DataFrame |
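# Editorial usage sketch (assumed, not part of the original benchmark script):
# time_it reports mean milliseconds per call, here applied to the data generator
# itself; real benchmarks would time featherstore reads/writes instead.
if __name__ == '__main__':
    df = generate_df(rows=10_000, cols=10)
    print(f'generated {df.shape[0]} rows x {df.shape[1]} cols')
    print(f'generate_df: {time_it(generate_df, 5, 10_000, 10):.2f} ms per call')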
import pandas as pd
#import matplotlib.pyplot as plt
import numpy as np
import datetime
from datetime import datetime
import glob
import os.path as path
one_up = path.abspath(path.join(__file__ ,".."))
two_up = path.abspath(path.join(__file__ ,"../.."))
three_up = path.abspath(path.join(__file__ ,"../../.."))
df = | pd.read_csv(two_up + '/dataset/20210717182858/submissions.csv') | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# # Heart Disease Dataset
# ## **0. Before we begin**
# Please **comment** or **upvote** this kernel.
# ### Kernel goals:
#
# * Data exploration
# * Find important features for L1-regularized Logistic regression
# * Propose correct scoring metrics for this dataset
# * Fight off overly-optimistic score
# * Compare results of various classifiers
# In[ ]:
import warnings
warnings.filterwarnings('ignore')
# In[ ]:
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# ## **1. Data exploration**
# In[ ]:
sns.set(style='whitegrid')
# In[ ]:
data = pd.read_csv("../../../input/ronitf_heart-disease-uci/heart.csv")
print(F"Null values? {data.isnull().values.any()}")
# **Attribute Information:**
# > 1. **age** - age
# > 2. **sex** - (1 = male; 0 = female)
# > 3. **cp** - chest pain type (4 values)
# > 4. **trestbps** - resting blood pressure
# > 5. **chol** - serum cholestoral in mg/dl
# > 6. **fbs** - fasting blood sugar > 120 mg/dl
# > 7. **restecg** - resting electrocardiographic results (values 0,1,2)
# > 8. **thalach** - maximum heart rate achieved
# > 9. **exang** - exercise induced angina
# > 10. **oldpeak** - ST depression induced by exercise relative to rest
# > 11. **slope** - the slope of the peak exercise ST segment
# > 12. **ca** - number of major vessels (0-3) colored by flourosopy
# > 13. **thal** - 3 = normal; 6 = fixed defect; 7 = reversable defect
# ### Dataset sample
# In[ ]:
data.head()
# ### Number of examples per class
# In[ ]:
plt.figure(figsize=(7, 5))
count_per_class = [len(data[data['target'] == 0]),len(data[data['target'] == 1])]
labels = [0, 1]
colors = ['yellowgreen', 'lightblue']
explode = (0.05, 0.1)
plt.pie(count_per_class, explode=explode, labels=labels,colors=colors,autopct='%4.2f%%',shadow=True, startangle=45)
plt.title('Examples per class')
plt.axis('equal')
print()
# Classes are well balanced!
# ### Gender shares in dataset
# In[ ]:
plt.figure(figsize=(7, 5))
count_per_class = [len(data[data['sex'] == 0]),len(data[data['sex'] == 1])]
labels = ['Female', 'Male']
colors = ['lightgreen', 'gold']
explode = (0.05, 0.1)
plt.pie(count_per_class, explode=explode, labels=labels,colors=colors,autopct='%4.2f%%',shadow=True, startangle=70)
plt.title('Gender shares')
plt.axis('equal')
print()
# ### Age-sex distribution
# In[ ]:
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
sns.kdeplot(data['age'], data['sex'], shade=True)
plt.title('Age-sex density estimate')
plt.subplot(1, 2, 2)
sns.distplot(data['age'])
plt.title('Age distribution')
print()
# ### Serum cholestoral per class distribution
# In[ ]:
plt.figure(figsize=(8, 6))
sns.distplot(data[data.target == 0]['chol'], label='without heart disease')
sns.distplot(data[data.target == 1]['chol'], label='with heart disease')
plt.xlabel('serum cholestoral in mg/dl')
plt.title('serum cholestoral per class')
plt.legend()
print()
# ### Maximum heart rate achieved per class distribution
# In[ ]:
plt.figure(figsize=(8, 6))
sns.distplot(data[data.target == 0]['thalach'], label='without heart disease')
sns.distplot(data[data.target == 1]['thalach'], label='with heart disease')
plt.title('maximum heart rate achieved per class')
plt.xlabel('maximum heart rate achieved')
plt.legend()
print()
# ### Features heatmap
# In[ ]:
plt.figure(figsize=(12,8))
print()
# ### Resting blood pressure per class
# In[ ]:
data.groupby('target')['trestbps'].describe()
# In[ ]:
ax2 = sns.jointplot("target", "trestbps", data=data, kind="reg", color='r')
ax2.set_axis_labels('target','resting blood pressure')
print()
# In[ ]:
X = data.values[:, :13]
y = data.values[:, 13]
# ## **2. Feature importances (for L1-regularized Logistic Regression)**
# In[ ]:
import eli5
from sklearn.linear_model import LogisticRegression
from eli5.sklearn import PermutationImportance
logistic_regression = LogisticRegression(penalty='l1')
logistic_regression.fit(X, y)
perm_imp = PermutationImportance(logistic_regression, random_state=42).fit(X, y)
eli5.show_weights(perm_imp, feature_names = data.columns.tolist()[:13])
# **Model interpretation:** We can see that the number of major vessels colored by fluoroscopy and chest pain type are the most important features for correct classification.
# ## **3. Appropriate metric? Recall!**
# **Q:** *Why Recall?* <br/>
# **A:** Our classifier should be sensitive to false negatives. For this dataset, false negative is a person that has heart disease but our classifier decided that the person does not have any heart problems. In other words, classifier said that the ill person is healthy. On the other side, false positive is a person that does not have any heart diseases and our classifier decided that person is ill. In that case, the person will run more tests and conclude it does not have any heart problems.
# ## **4. Nested cross-validation (way to fight off overly-optimistic score)**
# Nested cross-validation is used to train a model in which hyperparameters also need to be optimized. I've used it to fight off overly-optimistic scores.<br/>
# More about nested cross-validation:<br/>
# https://www.elderresearch.com/blog/nested-cross-validation <br/>
# https://scikit-learn.org/stable/auto_examples/model_selection/plot_nested_cross_validation_iris.html
# In[ ]:
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.metrics import accuracy_score
def nested_kfold_cv(model, param_grid, X, y, outer_metric=accuracy_score,
scoring='accuracy' , k1=10, k2=3, verbose = 1, n_jobs=3, shuffle=True):
scores = []
estimators = []
kf = KFold(n_splits=k1, shuffle=shuffle)
for train_index, test_index in kf.split(X):
X_train = X[train_index]
X_test = X[test_index]
y_train = y[train_index]
y_test = y[test_index]
grid_search = GridSearchCV(estimator=model, param_grid=param_grid, cv=k2,verbose=verbose, n_jobs=n_jobs, scoring=scoring)
grid_search.fit(X=X_train, y=y_train)
estimator = grid_search.best_estimator_
estimators.append(estimator)
estimator.fit(X_train, y_train)
scores.append(outer_metric(estimator.predict(X_test), y_test))
return estimators, scores
# ## **5. Classification**
# ### **5.1. AdaBoost**
# In[ ]:
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import recall_score, confusion_matrix
# In[ ]:
tree_model = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=1),random_state=42)
# In[ ]:
tree_params = {'n_estimators': [25, 50, 75]}
estimators, tree_scores = nested_kfold_cv(tree_model, tree_params, X, y, outer_metric=recall_score,scoring='f1' , k1=10, k2=5, verbose = 0, n_jobs=4, shuffle=True)
# In[ ]:
print(f"Average recall: {np.mean(tree_scores)}")
# ### **5.2. SVM**
# In[ ]:
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.preprocessing import Normalizer, StandardScaler
from sklearn.svm import SVC
# In[ ]:
svm_model = Pipeline(steps=[('standard_scaler', StandardScaler()),('feature_selection', SelectKBest(f_classif)), ('svm', SVC(kernel='rbf', random_state=42)) ])
# In[ ]:
svm_grid = {'feature_selection__k': [10, 12, 13],'svm__C': [3, 5, 10, 15, 20, 25, 30, 35],'svm__gamma': [0.0005, 0.001, 0.005, 0.01, 0.05, 0.1],}
estimators, svm_scores = nested_kfold_cv(svm_model, svm_grid, X, y, outer_metric=recall_score,scoring='f1' , k1=10, k2=5, verbose = 0, n_jobs=4, shuffle=True)
# In[ ]:
print(f"Average recall: {np.mean(svm_scores)}")
# ### **5.3. Logistic Regression**
# In[ ]:
from sklearn.linear_model import LogisticRegression
# In[ ]:
log_model = Pipeline(steps=[('feature_selection', SelectKBest(f_classif)), ('log', LogisticRegression()) ])
# In[ ]:
log_grid = {'log__C': [0.01, 0.1, 0.5, 1, 3, 5],'feature_selection__k': [5, 9, 10, 12, 13],}
estimators, lr_scores = nested_kfold_cv(log_model, log_grid, X, y, outer_metric=recall_score,scoring='f1' , k1=10, k2=5, verbose = 0, n_jobs=4, shuffle=True)
# In[ ]:
print(f"Average recall: {np.mean(lr_scores)}")
# ### **5.4. KNN**
# In[ ]:
from sklearn.neighbors import KNeighborsClassifier
# In[ ]:
knn_model = Pipeline(steps=[('standard_scaler', StandardScaler()),('knn', KNeighborsClassifier(weights='distance')) ])
# In[ ]:
knn_grid = {'knn__n_neighbors': [3, 5, 7, 10, 12, 15, 17, 20],}
estimators, knn_scores = nested_kfold_cv(knn_model, knn_grid, X, y, outer_metric=recall_score,scoring='f1' , k1=10, k2=5, verbose = 0, n_jobs=4, shuffle=True)
# In[ ]:
print(f"Average recall: {np.mean(knn_scores)}")
# ### **5.5. Neural network**
# In[ ]:
from sklearn.neural_network import MLPClassifier
# In[ ]:
nn_model = Pipeline(steps=[('standard_scaler', StandardScaler()),('nn', MLPClassifier(max_iter=400)) ])
# In[ ]:
nn_grid = {'nn__solver': ['adam', 'lbfgs']}
estimators, nn_scores = nested_kfold_cv(nn_model, nn_grid, X, y, outer_metric=recall_score,scoring='f1' , k1=10, k2=5, verbose = 0, n_jobs=4, shuffle=True)
# In[ ]:
print(f"Average recall: {np.mean(nn_scores)}")
# ## **6. Classification results overview**
# In[ ]:
results = | pd.DataFrame({'KNN': knn_scores, 'Logistic regression': lr_scores, 'SVC': svm_scores, 'AdaBoost': tree_scores, 'Neural network': nn_scores}) | pandas.DataFrame |
import unittest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from yitian.datasource import *
from yitian.datasource import preprocess
class Test(unittest.TestCase):
# def test_standardize_date(self):
# data_pd = pd.DataFrame([
# ['01/01/2019', 11.11],
# ['01/04/2019', 44.44],
# ['01/03/2019', 33.33],
# ['01/02/2019', 22.22]
# ], columns=['Trade Date', 'price'])
#
# expect_pd = pd.DataFrame([
# ['01/01/2019', 11.11],
# ['01/04/2019', 44.44],
# ['01/03/2019', 33.33],
# ['01/02/2019', 22.22]
# ], columns=['date', 'price'])
#
# assert_frame_equal(expect_pd, preprocess.standardize_date(data_pd))
#
# def test_standardize_date_with_multi_date_column(self):
# data_pd = pd.DataFrame([
# ['2019-01-01 00:00:00', '2019-01-01 00:00:00', 11.11],
# ['2019-01-02 00:00:00', '2019-01-01 00:00:00', 22.22],
# ['2019-01-03 00:00:00', '2019-01-01 00:00:00', 33.33],
# ['2019-01-04 00:00:00', '2019-01-01 00:00:00', 44.44],
# ], columns=['DATE', 'date', 'price'])
#
# with self.assertRaises(ValueError) as context:
# preprocess.standardize_date(data_pd)
#
# assert str(context.exception) == \
# str("Original cols ({cols}) cannot be reconnciled with date options ({option})"\
# .format(cols=data_pd.columns.tolist(), option=RAW_DATE_OPTIONS))
def test_create_ts_pd(self):
data_pd = pd.DataFrame([
['01/01/2019', 11.11],
['01/04/2019', 44.44],
['01/03/2019', 33.33],
['01/02/2019', 22.22]
], columns=['date', 'price'])
expect_pd = pd.DataFrame([
[pd.Timestamp('2019-01-01'), 11.11],
[pd.Timestamp('2019-01-02'), 22.22],
[pd.Timestamp('2019-01-03'), 33.33],
[pd.Timestamp('2019-01-04'), 44.44]
], columns=['date', 'price']).set_index('date')
assert_frame_equal(expect_pd, preprocess.create_ts_pd(data_pd))
def test_create_ts_pd_datetime(self):
data_pd = pd.DataFrame([
['2019-01-01 11:11:11', 11.11],
['2019-01-04 04:44:44', 44.44],
['2019-01-03 03:33:33', 33.33],
['2019-01-02 22:22:22', 22.22]
], columns=['datetime', 'price'])
expect_pd = pd.DataFrame([
[pd.Timestamp('2019-01-01 11:11:11'), 11.11],
[pd.Timestamp('2019-01-02 22:22:22'), 22.22],
[pd.Timestamp('2019-01-03 03:33:33'), 33.33],
[pd.Timestamp('2019-01-04 04:44:44'), 44.44]
], columns=['datetime', 'price']).set_index('datetime')
assert_frame_equal(expect_pd, preprocess.create_ts_pd(data_pd, index_col=DATETIME))
def test_add_ymd(self):
data_pd = pd.DataFrame([
[pd.Timestamp('2019-01-01'), 11.11],
[pd.Timestamp('2019-02-02'), 22.22],
[pd.Timestamp('2019-03-03'), 33.33],
[pd.Timestamp('2019-04-04'), 44.44]
], columns=['date', 'price']).set_index('date')
expect_pd = pd.DataFrame([
[pd.Timestamp('2019-01-01'), 11.11, 2019, 1, 1],
[pd.Timestamp('2019-02-02'), 22.22, 2019, 2, 2],
[ | pd.Timestamp('2019-03-03') | pandas.Timestamp |
import numpy as np
import pytest
from pandas import (
Categorical,
CategoricalDtype,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
get_dummies,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
class TestGetitem:
def test_getitem_unused_level_raises(self):
# GH#20410
mi = MultiIndex(
levels=[["a_lot", "onlyone", "notevenone"], [1970, ""]],
codes=[[1, 0], [1, 0]],
)
df = DataFrame(-1, index=range(3), columns=mi)
with pytest.raises(KeyError, match="notevenone"):
df["notevenone"]
def test_getitem_periodindex(self):
rng = period_range("1/1/2000", periods=5)
df = DataFrame(np.random.randn(10, 5), columns=rng)
ts = df[rng[0]]
tm.assert_series_equal(ts, df.iloc[:, 0])
# GH#1211; smoketest unrelated to the rest of this test
repr(df)
ts = df["1/1/2000"]
tm.assert_series_equal(ts, df.iloc[:, 0])
def test_getitem_list_of_labels_categoricalindex_cols(self):
# GH#16115
cats = Categorical([Timestamp("12-31-1999"), Timestamp("12-31-2000")])
expected = DataFrame(
[[1, 0], [0, 1]], dtype="uint8", index=[0, 1], columns=cats
)
dummies = get_dummies(cats)
result = dummies[list(dummies.columns)]
tm.assert_frame_equal(result, expected)
def test_getitem_sparse_column_return_type_and_dtype(self):
# https://github.com/pandas-dev/pandas/issues/23559
data = SparseArray([0, 1])
df = DataFrame({"A": data})
expected = Series(data, name="A")
result = df["A"]
tm.assert_series_equal(result, expected)
# Also check iloc and loc while we're here
result = df.iloc[:, 0]
tm.assert_series_equal(result, expected)
result = df.loc[:, "A"]
tm.assert_series_equal(result, expected)
class TestGetitemListLike:
def test_getitem_list_missing_key(self):
# GH#13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({"x": [1.0], "y": [2.0], "z": [3.0]})
df.columns = ["x", "x", "z"]
# Check that we get the correct value in the KeyError
with pytest.raises(KeyError, match=r"\['y'\] not in index"):
df[["x", "y", "z"]]
class TestGetitemCallable:
def test_getitem_callable(self, float_frame):
# GH#12533
result = float_frame[lambda x: "A"]
expected = float_frame.loc[:, "A"]
tm.assert_series_equal(result, expected)
result = float_frame[lambda x: ["A", "B"]]
expected = float_frame.loc[:, ["A", "B"]]
tm.assert_frame_equal(result, float_frame.loc[:, ["A", "B"]])
df = float_frame[:3]
result = df[lambda x: [True, False, True]]
expected = float_frame.iloc[[0, 2], :]
tm.assert_frame_equal(result, expected)
def test_loc_multiindex_columns_one_level(self):
# GH#29749
df = DataFrame([[1, 2]], columns=[["a", "b"]])
expected = DataFrame([1], columns=[["a"]])
result = df["a"]
tm.assert_frame_equal(result, expected)
result = df.loc[:, "a"]
tm.assert_frame_equal(result, expected)
class TestGetitemBooleanMask:
def test_getitem_bool_mask_categorical_index(self):
df3 = DataFrame(
{
"A": np.arange(6, dtype="int64"),
},
index=CategoricalIndex(
[1, 1, 2, 1, 3, 2],
dtype=CategoricalDtype([3, 2, 1], ordered=True),
name="B",
),
)
df4 = DataFrame(
{
"A": np.arange(6, dtype="int64"),
},
index=CategoricalIndex(
[1, 1, 2, 1, 3, 2],
dtype=CategoricalDtype([3, 2, 1], ordered=False),
name="B",
),
)
result = df3[df3.index == "a"]
expected = df3.iloc[[]]
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
import pickle
from glob import glob
import pandas as pd
from imdb import Cinemagoer
import numpy as np
import os
def extract_from_list_col(dataframe, col, max_items=4, normalize=True):
return dataframe[col].apply(
lambda x: extract_from_list(x, max_items=max_items, normalize=normalize)
)
def extract_from_list(list_, max_items=4, normalize=True):
if isinstance(list_, list):
if normalize:
list_ = [i.lower().strip() for i in list_[:max_items]]
else:
list_ = list_[:max_items]
else:
return np.nan
return "|".join(list_)
COLUMNS_TO_DOWNLOAD = [
"director",
"countries",
"country codes",
"language codes",
"languages",
"box office",
"cover url",
"full-size cover url",
"production companies",
]
class IMDBApiDataLoader:
def __init__(
self,
input_dir="data/raw",
output_dir="data/processed",
columns_to_download=COLUMNS_TO_DOWNLOAD,
):
self.columns_to_download = columns_to_download
self.input_dir = input_dir
self.output_dir = output_dir
def download_from_api(self, ids_to_download, movies_per_file=100):
# Get Data From API
loaded_ids = []
cg = Cinemagoer()
# Continue where we left off
files_names = glob(os.path.join(self.input_dir, "imdb_api_chkp_*.pickle"))
#print(files_names)
if files_names:
for i in files_names:
with open(i, "rb") as handle:
b = pickle.load(handle)
loaded_ids.extend([j["imdb_id"] for j in b])
#print(len(loaded_ids),len(ids_to_download))
ids_to_download = list(set(ids_to_download) - set(loaded_ids))
num_movies = len(ids_to_download)
movies_per_file = min(num_movies,movies_per_file)
print(f"fetching data of {num_movies} movies")
c = 1
rows = []
for imdb_id in ids_to_download:
obj = cg.get_movie(imdb_id)
row = {"imdb_id": imdb_id}
for col in self.columns_to_download:
try:
row[col] = obj[col]
if col == "production companies":
row[col] = [i["name"] for i in row["production companies"]]
if col == "director":
row[col] = [i["name"] for i in row["director"]]
if col == "box office":
for k, v in row["box office"].items():
row[k] = v
row.pop("box office")
except KeyError:
row[col] = np.nan
rows.append(row)
if c % movies_per_file == 0:
print(round(c * 100 / num_movies, 2))
with open(
os.path.join(self.input_dir, f"imdb_api_chkp_{c}_{imdb_id}.pickle"),
"wb",
) as handle:
pickle.dump(rows, handle, protocol=pickle.HIGHEST_PROTOCOL)
rows = []
c += 1
def to_df(self):
# Generate dataframe from downloaded data
data = []
files_names = glob(os.path.join(self.input_dir, "imdb_api_chkp_*.pickle"))
if files_names:
for i in files_names:
with open(i, "rb") as handle:
b = pickle.load(handle)
data.extend(b)
data = pd.DataFrame(data)
return data
# Clean a bit
def clean(self, data):
data["Opening Weekend"] = data[
[i for i in data.columns if "Opening Weekend" in i]
].apply(lambda x: x.dropna().sum(), axis=1)
data = data[
[
"imdb_id",
"director",
"countries",
"country codes",
"language codes",
"languages",
"Budget",
"cover url",
"full-size cover url",
"production companies",
"Opening Weekend",
]
]
list_cols = [
"director",
"countries",
"country codes",
"language codes",
"languages",
"production companies",
]
for i in list_cols:
data.loc[:, i] = extract_from_list_col(data, i)
return data
def to_csv(self, data):
# Save
data.to_csv(os.path.join(self.output_dir, "imdb_api_data.csv"), index=False)
def run_all(self, ids_to_download, movies_per_file=100):
self.download_from_api(ids_to_download, movies_per_file)
data = self.to_df()
data = self.clean(data)
self.to_csv(data)
return data
if __name__ == "__main__":
ids_to_download = | pd.read_csv("data/processed/filtered_id_list.csv") | pandas.read_csv |
# *****************************************************************************
# Copyright (c) 2019-2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import itertools
import os
import platform
import string
import unittest
from copy import deepcopy
from itertools import product
import numpy as np
import pandas as pd
from numba.core.errors import TypingError
from sdc.hiframes.rolling import supported_rolling_funcs
from sdc.tests.test_base import TestCase
from sdc.tests.test_series import gen_frand_array
from sdc.tests.test_utils import (count_array_REPs, count_parfor_REPs,
skip_numba_jit, skip_sdc_jit,
test_global_input_data_float64)
LONG_TEST = (int(os.environ['SDC_LONG_ROLLING_TEST']) != 0
if 'SDC_LONG_ROLLING_TEST' in os.environ else False)
test_funcs = ('mean', 'max',)
if LONG_TEST:
# all functions except apply, cov, corr
test_funcs = supported_rolling_funcs[:-3]
def rolling_std_usecase(obj, window, min_periods, ddof):
return obj.rolling(window, min_periods).std(ddof)
def rolling_var_usecase(obj, window, min_periods, ddof):
return obj.rolling(window, min_periods).var(ddof)
class TestRolling(TestCase):
@skip_numba_jit
def test_series_rolling1(self):
def test_impl(S):
return S.rolling(3).sum()
hpat_func = self.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@skip_numba_jit
def test_fixed1(self):
# test sequentially with manually created dfs
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
for func_name in test_funcs:
func_text = "def test_impl(df, w, c):\n return df.rolling(w, center=c).{}()\n".format(func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for args in itertools.product(wins, centers):
df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
df = pd.DataFrame({'B': [0, 1, 2, -2, 4]})
pd.testing.assert_frame_equal(hpat_func(df, *args), test_impl(df, *args))
@skip_numba_jit
def test_fixed2(self):
# test sequentially with generated dfs
sizes = (121,)
wins = (3,)
if LONG_TEST:
sizes = (1, 2, 10, 11, 121, 1000)
wins = (2, 3, 5)
centers = (False, True)
for func_name in test_funcs:
func_text = "def test_impl(df, w, c):\n return df.rolling(w, center=c).{}()\n".format(func_name)
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars['test_impl']
hpat_func = self.jit(test_impl)
for n, w, c in itertools.product(sizes, wins, centers):
df = pd.DataFrame({'B': np.arange(n)})
pd.testing.assert_frame_equal(hpat_func(df, w, c), test_impl(df, w, c))
@skip_numba_jit
def test_fixed_apply1(self):
# test sequentially with manually created dfs
def test_impl(df, w, c):
return df.rolling(w, center=c).apply(lambda a: a.sum())
hpat_func = self.jit(test_impl)
wins = (3,)
if LONG_TEST:
wins = (2, 3, 5)
centers = (False, True)
for args in itertools.product(wins, centers):
df = | pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}) | pandas.DataFrame |
import os
import audiofile
import audiofile as af
import numpy as np
import pandas as pd
import pytest
import audinterface
import audformat
def signal_duration(signal, sampling_rate):
return signal.shape[1] / sampling_rate
def signal_max(signal, sampling_rate):
return np.max(signal)
SEGMENT = audinterface.Segment(
process_func=lambda x, sr:
audinterface.utils.signal_index(
pd.to_timedelta(0),
pd.to_timedelta(x.shape[1] / sr, unit='s') / 2,
)
)
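def _segment_example():
    # Editorial sketch (assumes audinterface.Segment.process_signal, as in its
    # public API): SEGMENT keeps only the first half of a signal, so 1 s at
    # 8000 Hz yields the single segment [0 s, 0.5 s).
    signal = np.zeros((1, 8000))
    return SEGMENT.process_signal(signal, 8000)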
def signal_modification(signal, sampling_rate, subtract=False):
if subtract:
signal -= 0.1 * signal
else:
signal += 0.1 * signal
return signal
@pytest.mark.parametrize(
'process_func, segment, signal, sampling_rate, start, end, keep_nat, '
'channels, mixdown, expected_output',
[
(
signal_max,
None,
np.ones((1, 3)),
8000,
None,
None,
False,
None,
False,
1,
),
(
signal_max,
SEGMENT,
np.ones((1, 8000)),
8000,
None,
None,
False,
None,
False,
1,
),
(
signal_max,
None,
np.ones(3),
8000,
None,
None,
False,
0,
False,
1,
),
(
signal_max,
None,
np.array([[0., 0., 0.], [1., 1., 1.]]),
8000,
None,
None,
False,
0,
False,
0,
),
(
signal_max,
None,
np.array([[0., 0., 0.], [1., 1., 1.]]),
8000,
None,
None,
False,
0,
False,
0,
),
(
signal_max,
None,
np.array([[0., 0., 0.], [1., 1., 1.]]),
8000,
None,
None,
False,
1,
False,
1,
),
(
signal_max,
None,
np.array([[0., 0., 0.], [1., 1., 1.]]),
8000,
None,
None,
False,
None,
True,
0.5,
),
(
signal_max,
None,
np.array([[-1., -1., -1.], [0., 0., 0.], [1., 1., 1.]]),
8000,
None,
None,
False,
[1, 2],
True,
0.5,
),
# invalid channel selection
pytest.param(
signal_max,
None,
np.ones((1, 3)),
8000,
None,
None,
False,
1,
False,
1,
marks=pytest.mark.xfail(raises=ValueError),
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
None,
None,
False,
None,
False,
3.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.NaT,
pd.NaT,
False,
None,
False,
3.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.NaT,
pd.NaT,
True,
None,
False,
3.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.to_timedelta('1s'),
None,
False,
None,
False,
2.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
| pd.to_timedelta('1s') | pandas.to_timedelta |
import collections
from functools import lru_cache
import logging
import pandas as pd
import time
import numpy as np
from tqdm import tqdm
from holoclean.dataset import AuxTables, CellStatus
from .estimators import *
from .correlations import compute_norm_cond_entropy_corr
from holoclean.utils import NULL_REPR
class DomainEngine:
def __init__(self, env, dataset, max_sample=5):
"""
:param env: (dict) contains global settings such as verbose
:param dataset: (Dataset) current dataset
:param max_sample: (int) maximum # of domain values from a random sample
"""
self.env = env
self.ds = dataset
self.domain_thresh_1 = env["domain_thresh_1"]
self.weak_label_thresh = env["weak_label_thresh"]
self.domain_thresh_2 = env["domain_thresh_2"]
self.max_domain = env["max_domain"]
self.cor_strength = env["cor_strength"]
self.estimator_type = env["estimator_type"]
self.setup_complete = False
self.domain = None
self.total = None
self.correlations = None
self.do_quantization = False
self._corr_attrs = {}
self.max_sample = max_sample
self.single_stats = {}
self.pair_stats = {}
def setup(self):
"""
setup initializes the in-memory and Postgres auxiliary tables (e.g.
'cell_domain', 'pos_values').
"""
tic = time.time()
if self.correlations is None:
self.compute_correlations()
self.setup_attributes()
self.domain_df = self.generate_domain()
self.store_domains(self.domain_df)
status = "DONE with domain preparation."
toc = time.time()
return status, toc - tic
# TODO(richardwu): move this to Dataset after loading data.
def compute_correlations(self):
"""
compute_correlations memoizes to self.correlations; a data structure
that contains pairwise correlations between attributes (values are treated as
discrete categories).
"""
logging.debug("Computing correlations...")
data_df = self.ds.get_quantized_data() if self.do_quantization \
else self.ds.get_raw_data()
self.correlations = compute_norm_cond_entropy_corr(data_df,
self.ds.get_attributes(),
self.ds.get_attributes())
corrs_df = pd.DataFrame.from_dict(self.correlations, orient='columns')
corrs_df.index.name = 'cond_attr'
corrs_df.columns.name = 'attr'
pd.set_option('display.max_columns', len(corrs_df.columns))
pd.set_option('display.max_rows', len(corrs_df.columns))
logging.debug("correlations:\n%s", corrs_df)
logging.debug("summary of correlations:\n%s", corrs_df.describe())
def store_domains(self, domain):
"""
store_domains stores the 'domain' DataFrame as the 'cell_domain'
auxiliary table as well as generates the 'pos_values' auxiliary table,
a long-format of the domain values, in Postgres.
pos_values schema:
_tid_: entity/tuple ID
_cid_: cell ID
_vid_: random variable ID (all cells with more than 1 domain value)
"""
if domain.empty:
raise Exception("ERROR: Generated domain is empty.")
self.ds.generate_aux_table(AuxTables.cell_domain, domain, store=True)
self.ds.aux_table[AuxTables.cell_domain].create_db_index(self.ds.engine, ['_vid_'])
self.ds.aux_table[AuxTables.cell_domain].create_db_index(self.ds.engine, ['_tid_'])
self.ds.aux_table[AuxTables.cell_domain].create_db_index(self.ds.engine, ['_cid_'])
query = "SELECT _vid_, _cid_, _tid_, attribute, a.rv_val, a.val_id from %s , unnest(string_to_array(regexp_replace(domain,\'[{\"\"}]\',\'\',\'gi\'),\'|||\')) WITH ORDINALITY a(rv_val,val_id)" % AuxTables.cell_domain.name
self.ds.generate_aux_table_sql(AuxTables.pos_values, query, index_attrs=['_tid_', 'attribute'])
def setup_attributes(self):
total, single_stats, pair_stats = self.ds.get_statistics()
self.total = total
self.single_stats = single_stats
logging.debug("preparing pruned co-occurring statistics...")
tic = time.perf_counter()
self.pair_stats = self._pruned_pair_stats(pair_stats)
logging.debug("DONE with pruned co-occurring statistics in %.2f secs", time.clock() - tic)
self.setup_complete = True
def _pruned_pair_stats(self, pair_stats):
"""
_pruned_pair_stats converts 'pair_stats' which is a dictionary mapping
{ attr1 -> { attr2 -> {val1 -> {val2 -> count } } } } where
<val1>: all possible values for attr1
<val2>: all values for attr2 that appeared at least once with <val1>
<count>: frequency (# of entities) where attr1: <val1> AND attr2: <val2>
to a flattened 4-level dictionary { attr1 -> { attr2 -> { val1 -> [pruned list of val2] } } }
i.e. maps to the co-occurring values for attr2 that exceed
the self.domain_thresh_1 co-occurrence probability for a given
attr1-val1 pair.
"""
out = {}
for attr1 in tqdm(pair_stats.keys()):
out[attr1] = {}
for attr2 in pair_stats[attr1].keys():
out[attr1][attr2] = {}
for val1 in pair_stats[attr1][attr2].keys():
denominator = self.single_stats[attr1][val1]
# tau becomes a threshold on co-occurrence frequency
# based on the co-occurrence probability threshold
# domain_thresh_1.
tau = float(self.domain_thresh_1*denominator)
top_cands = [(val2, count/denominator) for (val2, count) in pair_stats[attr1][attr2][val1].items() if count > tau]
out[attr1][attr2][val1] = top_cands
return out
@lru_cache(maxsize=None)
def get_corr_attributes(self, attr, thres):
"""
get_corr_attributes returns attributes from self.correlations
that are correlated with attr with magnitude at least self.cor_strength
(init parameter).
:param attr: (string) the original attribute to get the correlated attributes for.
:param thres: (float) correlation threshold (absolute) for returned attributes.
"""
if attr not in self.correlations:
return []
attr_correlations = self.correlations[attr]
return sorted([corr_attr
for corr_attr, corr_strength in attr_correlations.items()
if corr_attr != attr and corr_strength >= thres])
def generate_domain(self):
"""
Generates the domain for each cell in the active attributes as well
as assigns a random variable ID (_vid_) for cells that have
a domain of size >= 2.
See get_domain_cell for how the domain is generated from co-occurrence
and correlated attributes.
If no values can be found from correlated attributes, return a random
sample of domain values.
:return: DataFrame with columns
_tid_: entity/tuple ID
_cid_: cell ID (one for every cell in the raw data in active attributes)
_vid_: random variable ID (one for every cell with a domain of at least size 2)
attribute: attribute name
domain: ||| separated string of domain values
domain_size: length of domain
init_value: initial value for this cell
init_index: domain index of init_value
fixed: 1 if a random sample was taken since no correlated attributes/top K values
"""
if not self.setup_complete:
raise Exception(
"Call <setup_attributes> to setup active attributes. Error detection should be performed before setup.")
logging.debug('generating initial set of un-pruned domain values...')
tic = time.perf_counter()
# Iterate over dataset rows.
cells = []
vid = 0
raw_df = self.ds.get_quantized_data() if self.do_quantization else self.ds.get_raw_data()
records = raw_df.to_records()
dk_lookup = {(val[0], val[1]) for val in self.ds.aux_table[AuxTables.dk_cells].df[['_tid_', 'attribute']].values}
for row in tqdm(list(records)):
tid = row['_tid_']
for attr in self.ds.get_active_attributes():
init_value, init_value_idx, dom = self.get_domain_cell(attr, row)
# We will use an estimator model for additional weak labelling
# below, which requires an initial pruned domain first.
# Weak labels will be trained on the init values.
cid = self.ds.get_cell_id(tid, attr)
# Originally, all cells have a NOT_SET status to be considered
# in weak labelling.
cell_status = CellStatus.NOT_SET.value
if len(dom) <= 1:
# Initial value is NULL and we cannot come up with
# a domain (note that NULL is not included in the domain);
# Note if len(dom) == 1, then we generated a single correct
# value (since NULL is not included in the domain).
# This would be a "SINGLE_VALUE" example and we'd still
# like to generate a random domain for it.
if init_value == NULL_REPR and len(dom) == 0:
continue
# Not enough domain values, we need to get some random
# values (other than 'init_value') for training. However,
# this might still get us zero domain values.
rand_dom_values = self.get_random_domain(attr, dom)
# We still want to add cells with only 1 single value and no
# additional random domain # they are required in the output.
# Otherwise, just add the random domain values to the domain
# and set the cell status accordingly.
dom.extend(rand_dom_values)
# Set the cell status that this is a single value and was
# randomly assigned other values in the domain. These will
# not be modified by the estimator.
cell_status = CellStatus.SINGLE_VALUE.value
dom_vals = "|||".join(dom)
cells.append({"_tid_": tid,
"attribute": attr,
"_cid_": cid,
"_vid_": vid,
"domain": dom_vals,
"domain_size": len(dom),
"init_value": init_value,
"init_index": init_value_idx,
"weak_label": init_value,
"weak_label_idx": init_value_idx,
"fixed": cell_status,
"is_dk": (tid, attr) in dk_lookup,
})
vid += 1
logging.debug('length of cells: %s', len(cells))
domain_df = pd.DataFrame(data=cells).sort_values('_vid_')
logging.debug('domain size stats: %s', domain_df['domain_size'].describe())
logging.debug('domain count by attr: %s', domain_df['attribute'].value_counts())
logging.debug('DONE generating initial set of domain values in %.2fs', time.perf_counter() - tic)
return domain_df
def get_domain_cell(self, attr, row):
"""
get_domain_cell returns a list of all domain values for the given
entity (row) and attribute. the domain never has null as a possible value.
we define domain values as values in 'attr' that co-occur with values
in attributes ('cond_attr') that are correlated with 'attr' at least in
magnitude of self.cor_strength (init parameter).
for example:
cond_attr | attr
h b <-- current row
h c
i d
h e
this would produce [b,c,e] as domain values.
:return: (initial value of entity-attribute, domain values for entity-attribute).
"""
domain = collections.OrderedDict()
init_value = row[attr]
correlated_attributes = self.get_corr_attributes(attr, self.cor_strength)
# iterate through all correlated attributes and take the top k co-occurrence values
# for 'attr' with the current row's 'cond_attr' value.
for cond_attr in correlated_attributes:
# ignore correlations with index, tuple id or the same attribute.
if cond_attr == attr or cond_attr == '_tid_':
continue
if not self.pair_stats[cond_attr][attr]:
logging.warning("domain generation could not find pair_statistics between attributes: {}, {}".format(cond_attr, attr))
continue
cond_val = row[cond_attr]
# ignore co-occurrence with a null cond init value since we do not
# store them.
# also it does not make sense to retrieve the top co-occuring
# values with a null value.
# it is possible for cond_val to not be in pair stats if it only co-occurs
# with null values.
if cond_val == NULL_REPR or cond_val not in self.pair_stats[cond_attr][attr]:
continue
# update domain with top co-occuring values with the cond init value.
candidates = self.pair_stats[cond_attr][attr][cond_val]
for val, freq in candidates:
if val in domain and domain[val] > freq:
continue
domain[val] = freq
# We should not have any NULLs since we do not store co-occurring NULL
# values.
assert NULL_REPR not in domain
# Add the initial value to the domain if it is not NULL.
if init_value != NULL_REPR:
domain[init_value] = 1
domain = [val for (val, freq) in reversed(sorted(domain.items(), key=lambda t: t[1]))][:self.max_domain]
# Convert to ordered list to preserve order.
domain_lst = sorted(list(domain))
# Get the index of the initial value.
# NULL values are not in the domain so we set their index to -1.
init_value_idx = -1
if init_value != NULL_REPR:
init_value_idx = domain_lst.index(init_value)
return init_value, init_value_idx, domain_lst
def get_random_domain(self, attr, cur_dom):
"""
get_random_domain returns a random sample of at most size
'self.max_sample' of domain values for 'attr' that is NOT in 'cur_dom'.
"""
domain_pool = set(self.single_stats[attr].keys())
# We should not have any NULLs since we do not keep track of their
# counts.
assert NULL_REPR not in domain_pool
domain_pool = domain_pool.difference(cur_dom)
domain_pool = sorted(list(domain_pool))
size = len(domain_pool)
if size > 0:
k = min(self.max_sample, size)
additional_values = np.random.choice(domain_pool, size=k, replace=False)
else:
additional_values = []
# return sorted(additional_values)
## use the full domain
return sorted(domain_pool)
def generate_domain_embedding(self, domain_attrs):
"""
Simple version of generate_domain (for TupleEmbedding) (no random
sampling).
Generates domains for the attributes in domain_attrs.
:return: DataFrame with columns
_tid_: entity/tuple ID
attribute: attribute name
_cid_: cell ID (one for every cell in the raw data in active attributes)
_vid_: random variable ID (one for every cell with a domain of at least size 2)
domain: ||| separated string of domain values
domain_size: length of domain
init_value: initial value for this cell
init_index: domain index of init_value
"""
self.compute_correlations()
self.setup_attributes()
logging.debug('generating initial set of un-pruned domain values...')
records = self.ds.get_raw_data().to_records()
vid = 0
domain_df = None
cells = []
for row in tqdm(list(records)):
tid = row['_tid_']
for attr in domain_attrs:
init_value, init_value_idx, dom = self.get_domain_cell(attr, row)
cid = self.ds.get_cell_id(tid, attr)
cells.append({"_tid_": tid,
"attribute": attr,
"_cid_": cid,
"_vid_": vid,
"domain": "|||".join(dom),
"domain_size": len(dom),
"init_value": init_value,
"init_index": init_value_idx,
})
vid += 1
if domain_df is not None:
domain_df = pd.concat([domain_df, pd.DataFrame(data=cells)]).reset_index(drop=True)
else:
domain_df = | pd.DataFrame(data=cells) | pandas.DataFrame |
import os
import requests
import pandas as pd
from random import randint
from django.db.models import Q
from .models import Account
api_key = os.environ.get('IEX_API_KEYS')
TEST_OR_PROD = 'cloud'
def make_position_request(tickers):
data = []
for x in tickers:
response = requests.get("https://{}.iexapis.com/stable/stock/{}/quote?displayPercent=true&token={}".format(TEST_OR_PROD, x, api_key)).json()
data.append(response)
df = pd.DataFrame(data)
return df
def create_sector_information(tickers):
data = []
for x in tickers:
response = requests.get("https://{}.iexapis.com/stable/stock/{}/company?token={}".format(TEST_OR_PROD, x, api_key)).json()
data.append(response)
df = | pd.DataFrame(data) | pandas.DataFrame |
from typing import List, Tuple, Iterable
from cobra import Model, Reaction, Metabolite
import re
import pandas as pd
import numpy as np
from ncmw.utils import pad_dict_list
def transport_reactions(model: Model) -> List[str]:
This function returns a list of potential transport reactions; we define a
transport reaction as a reaction that contains metabolites from at least two different compartments.
Args:
model (Model): A cobra model
Returns:
list: List of names that potentially are transport reaction
"""
compartment_name = ["_" + id for id in model.compartments.keys()]
res = []
for rec in model.reactions:
for i, c1 in enumerate(compartment_name):
for c2 in compartment_name[i + 1 :]:
if c1 in rec.reaction and c2 in rec.reaction:
res.append(rec.id)
return res
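def _transport_reactions_example() -> List[str]:
    # Editorial sketch (not part of ncmw): a reaction linking metabolites from two
    # compartments ('c' and 'e') is reported as a potential transporter.
    toy = Model("toy")
    met_c = Metabolite("glc__D_c", compartment="c")
    met_e = Metabolite("glc__D_e", compartment="e")
    transporter = Reaction("GLCt")
    transporter.add_metabolites({met_e: -1.0, met_c: 1.0})
    toy.add_reactions([transporter])
    return transport_reactions(toy)  # -> ["GLCt"]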
def table_ex_transport(model: Model) -> pd.DataFrame:
This method checks whether every exchange reaction has an associated transporter
Args:
model (Model): Cobra model
Returns:
pd.DataFrame: Table of indicators (0 indicates absence, 1 indicates presence)
"""
compartments = [id for id in model.compartments.keys()]
metabolites_ex = [key[3:-2] for key in model.medium]
metabolites_comp = []
transport_reaction = transport_reactions(model)
for c in compartments:
metabolites_comp.append(
[met for met in model.metabolites if c in met.compartment]
)
df = dict(
zip(
metabolites_ex,
[[0 for _ in range(len(compartments))] for _ in range(len(metabolites_ex))],
)
)
for met in metabolites_ex:
met_id = re.compile(str(met) + "_.")
hits = []
for met_c in metabolites_comp:
hits.append(list(filter(lambda x: re.match(met_id, x.id), met_c)))
for i, hits_c in enumerate(hits):
for hit in hits_c:
for rec in [rec.id for rec in hit.reactions]:
if rec in transport_reaction:
df[met][i] = 1
df = pd.DataFrame(df).T
df.columns = compartments
return df
def sekretion_uptake_fba(model: Model) -> Tuple[List[str], List[str]]:
This gives the uptake and secretion reactions in an FBA solution
NOTE: This is not unique! Use the method base on FVA instead for unique solutions.
Args:
model (Model): A cobra model
Returns:
list: List of uptake reactions
list: List of sekretion reactions
"""
summary = model.summary()
uptake = [
id
for id in summary.uptake_flux.index
if summary.uptake_flux.loc[id]["flux"] > 0
]
sekretion = [
id
for id in summary.secretion_flux.index
if summary.secretion_flux.loc[id]["flux"] < 0
]
return uptake, sekretion
def sekretion_uptake_fva(fva) -> Tuple[List, List]:
This computes the uptake and secretion reactions using FVA; this is UNIQUE!
Args:
fva (DataFrame): Fva results
Returns:
list: List of uptake reactions
list: List of sekretion reactions
"""
ex_fva = fva.loc[fva.index.str.contains("EX_")]
uptake = ex_fva[ex_fva["minimum"] < 0].index.tolist()
sekretion = ex_fva[ex_fva["maximum"] > 0].index.tolist()
return uptake, sekretion
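def _sekretion_uptake_fva_example():
    # Editorial sketch (not part of ncmw): exchange reactions with a negative
    # minimum flux count as uptake, those with a positive maximum as secretion.
    fva = pd.DataFrame(
        {"minimum": [-10.0, 0.0], "maximum": [0.0, 5.0]},
        index=["EX_glc__D_e", "EX_ac_e"],
    )
    return sekretion_uptake_fva(fva)  # (['EX_glc__D_e'], ['EX_ac_e'])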
def compute_uptake_sekretion_table(
model_name1: str,
model_name2: str,
uptake1: List[str],
uptake2: List[str],
sekretion1: List[str],
sekretion2: List[str],
) -> pd.DataFrame:
Constructs a table of the uptake and secretion reactions and their overlap for a pair of models
Args:
model_name1: Name of the model 1
model_name2: Name of the model 2
uptake1: Uptake reactions of model 1
uptake2: Uptake reactions of model 2
sekretion1: Sekretion reactions of model 1
sekretion2: Sekretion reactions of model 2
Returns:
pd.DataFrame: Table of uptakes/secretions as well as secretion -> uptake relationships.
"""
# Common up/sekretion from SA to DP
sek2_up1 = []
for sek in sekretion2:
for up in uptake1:
if str(sek) == str(up):
sek2_up1.append(str(sek))
sek1_up2 = []
for sek in sekretion1:
for up in uptake2:
if str(sek) == str(up):
sek1_up2.append(str(sek))
df_dict = {
f"{model_name1} Uptake": uptake1,
f"{model_name1} Secretion": sekretion1,
f"{model_name1} -> {model_name2}": sek1_up2,
f"{model_name2} -> {model_name1}": sek2_up1,
f"{model_name2} Secretion": sekretion2,
f"{model_name2} Uptake": uptake2,
}
df_dict = pad_dict_list(df_dict, "na")
df = | pd.DataFrame(df_dict) | pandas.DataFrame |
from numpy.random import default_rng
import numpy as np
import emcee
import pandas as pd
from tqdm.auto import tqdm
from sklearn.preprocessing import StandardScaler
import copy
from scipy.stats import norm, ortho_group
import random
import math
import scipy.stats as ss
"""
A collection of synthetic data generators, including multivariate normal data, data generated with archimedean copulas,
data generated with arbitrary marginals and gaussian copula and data from already existing drift generators.
"""
rng = default_rng()
# three available archimedean copulas
def clayton(theta, n):
v = random.gammavariate(1/theta, 1)
uf = [random.expovariate(1)/v for _ in range(n)]
return [(k+1)**(-1.0/theta) for k in uf]
def amh(theta, n):
# NOTE: Use SciPy RNG for convenience here
v = ss.geom(1-theta).rvs()
uf = [random.expovariate(1)/v for _ in range(n)]
return [(1-theta)/(math.exp(k)-theta) for k in uf]
def frank(theta, n):
v = ss.logser(1-math.exp(-theta)).rvs()
uf = [random.expovariate(1)/v for _ in range(n)]
return [-math.log(1 - (1 - math.exp(-theta)) * math.exp(-k)) / theta for k in uf]
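def _copula_example(n_dim: int = 3):
    # Editorial sketch (assumed usage, not part of the original module): each
    # generator returns one n_dim-dimensional sample of uniform(0, 1) marginals
    # whose dependence strength is controlled by theta.
    return clayton(theta=2.0, n=n_dim), amh(theta=0.5, n=n_dim), frank(theta=3.0, n=n_dim)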
def new_distribution_cholesky(pre_mean, ch_mean, perturbation=0.1):
pre_mean[ch_mean] = pre_mean[ch_mean] + np.random.random(sum(ch_mean)) - perturbation
cond = 10000
var = None
while cond > 1000:
chol = ortho_group.rvs(len(pre_mean))
var = [email protected]
cond = np.linalg.cond(var)
return pre_mean, var
def new_similar_distribution_cholesky(pre_mean, pre_chol, ch_mean, perturbation=0.1):
"""Problematic, as the resulting cov matrix is almost diagonal!"""
pre_mean[ch_mean] = pre_mean[ch_mean] + np.random.random(sum(ch_mean)) - perturbation # not to change the mean too much
cond = 10000
var = None
while cond > 1000:
chol = pre_chol + np.random.uniform(0, perturbation, (len(pre_mean), len(pre_mean)))
chol = nearest_orthogonal_matrix(chol)
var = [email protected]
cond = np.linalg.cond(var)
return pre_mean, var
def new_distribution_svd(pre_mean, ch_mean, perturbation=0.1, conditioning=1000):
pre_mean[ch_mean] = pre_mean[ch_mean] + np.random.random(sum(ch_mean)) - perturbation
cond = conditioning*100*len(pre_mean)
var = None
while cond > conditioning*10*len(pre_mean) or cond < conditioning*len(pre_mean):
nums = np.random.uniform(0, 1, len(pre_mean)) # change eigenvalues distribution
corr = ss.random_correlation.rvs(nums/sum(nums)*len(pre_mean), random_state=rng)
S = np.diag(np.random.uniform(0, 1, len(pre_mean)))
var = S.T@corr@S
cond = np.linalg.cond(var)
return pre_mean, var
def new_similar_distribution_svd(pre_mean, pre_nums, pre_S, ch_mean, perturbation=0.02):
pre_mean[ch_mean] = pre_mean[ch_mean] + np.random.random(sum(ch_mean)) - perturbation
cond = 10000*len(pre_mean)
var = None
while cond > 1000*len(pre_mean) or cond < 10*len(pre_mean):
nums = pre_nums + np.random.uniform(0, perturbation, len(pre_mean))
corr = ss.random_correlation.rvs(nums/sum(nums)*len(pre_mean), random_state=rng)
S = pre_S + np.diag(np.random.uniform(0, perturbation/2, len(pre_mean)))
var = S.T@corr@S
cond = np.linalg.cond(var)
return pre_mean, var
def new_distribution(pre_mean, pre_cov, ch_mean, ch_cov, change_X=True, change_y=True):
# ch_mean and ch_cov are masks with where to change mean and cov (localised drift)
# important! A complete mask for cov has to be passed, but only the upper triangular part will be considered
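    # Added example (assumed shapes, for illustration only): with 3 features one
    # might drift the first mean component and the whole covariance, e.g.
    #   ch_mean = np.array([True, False, False])
    #   ch_cov = np.ones((3, 3), dtype=bool)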
if change_y and change_X:
pre_mean[ch_mean] = pre_mean[ch_mean] + np.random.random(sum(ch_mean)) - 0.5 # not to change the mean too much
pre_cov[ch_cov] = np.random.normal(size=sum(sum(ch_cov)))
pre_cov = np.tril(pre_cov.T) + np.triu(pre_cov, 1)
if not np.all(np.linalg.eigvals(pre_cov) > 0):
pre_cov = nearestPD(pre_cov)
elif change_X:
pre_mean_old = pre_mean
pre_mean[ch_mean] = pre_mean[ch_mean] + np.random.random(sum(ch_mean)) - 0.5 # not to change the mean too much
pre_mean[-1] = pre_mean_old[-1]
pre_cov_old = pre_cov
pre_cov[ch_cov] = np.random.normal(size=sum(sum(ch_cov)))
pre_cov = np.tril(pre_cov.T) + np.triu(pre_cov, 1)
pre_cov[-1][-1] = pre_cov_old[-1][-1]
while np.any(np.linalg.eigvals(pre_cov) <= 0):
pre_cov_ = nearestPD(pre_cov)
pre_cov_[-1][-1] = pre_cov_old[-1][-1]
pre_cov = pre_cov_
    else:  # for now I don't need the case where only y changes
n_dim = len(pre_cov)
ch_cov = np.array([[False] * int(n_dim)] * int(n_dim), dtype=bool)
ch_cov[:, -1] = [True] * (n_dim-1) + [False]
pre_cov[ch_cov] = np.random.normal(size=sum(sum(ch_cov)))
pre_cov = np.tril(pre_cov.T) + np.triu(pre_cov, 1)
pre_cov_old = pre_cov
while np.any(np.linalg.eigvals(pre_cov) <= 0):
pre_cov_ = nearestPD(pre_cov)
# i add a small perturbation to P(X) too, if not I cannot change P(Y|X) without singularity in the cov matrix
pre_cov_[np.invert(ch_cov)] = pre_cov_old[np.invert(ch_cov)]+np.random.normal(size=sum(sum(np.invert(ch_cov))))/20
pre_cov_ = np.tril(pre_cov_.T) + np.triu(pre_cov_, 1)
pre_cov = pre_cov_
return pre_mean, pre_cov
def new_similar_distribution(pre_mean, pre_cov, ch_mean, ch_cov, change_X=True, change_y=True):
# ch_mean and ch_cov are masks with where to change mean and cov (localised drift)
# important! A complete mask for cov has to be passed, but only the upper triangular part will be considered
# new similar distribution, as of now, only permits data drift + covariate drift, unlike abrupt where
# the two can be separated and simulated independently
if change_y:
pre_mean[ch_mean] = pre_mean[ch_mean] + rng.uniform(-0.1, 0.1, size=sum(ch_mean))
pre_cov[ch_cov] = np.reshape(pre_cov[ch_cov], -1) + rng.uniform(-0.1, 0.1, sum(sum(ch_cov)))
pre_cov = np.tril(pre_cov.T) + np.triu(pre_cov, 1)
if not np.all(np.linalg.eigvals(pre_cov) > 0):
pre_cov = nearestPD(pre_cov)
else:
pre_mean_old = pre_mean
pre_mean[ch_mean] = pre_mean[ch_mean] + rng.uniform(-0.1, 0.1, size=sum(ch_mean))
pre_mean[-1] = pre_mean_old[-1]
pre_cov_old = pre_cov
pre_cov[ch_cov] = np.reshape(pre_cov[ch_cov], -1) + rng.uniform(-0.1, 0.1, sum(sum(ch_cov)))
pre_cov = np.tril(pre_cov.T) + np.triu(pre_cov, 1)
pre_cov[-1][-1] = pre_cov_old[-1][-1]
while np.any(np.linalg.eigvals(pre_cov) <= 0):
pre_cov_ = nearestPD(pre_cov)
pre_cov_[-1][-1] = pre_cov_old[-1][-1]
pre_cov = pre_cov_
return pre_mean, pre_cov
def new_distribution_deprecated(pre_mean, pre_cov, ch_mean, ch_cov):
# ch_mean and ch_cov are masks with where to change mean and cov (localised drift)
pre_mean[ch_mean] = pre_mean[ch_mean] + np.random.random(sum(ch_mean)) - 0.5
pre_cov[ch_cov] = np.random.random((sum(ch_cov),len(pre_mean)))
pre_cov = nearestPD(pre_cov)
return pre_mean, pre_cov
def lnprob_trunc_norm(x, mean, n_dim, C):
    if sum(x) > 0 * n_dim:
        return -np.inf
    else:
        return -0.5 * (x - mean).dot(np.linalg.inv(C)).dot(x - mean)
def truncated_normal_sampling(pre_mean, pre_cov, size, n_dim):
if size <= 0:
return None
if size >= n_dim*2:
pos = emcee.utils.sample_ball(pre_mean, np.sqrt(np.diag(pre_cov)), size=size)
else:
pos = rng.multivariate_normal(pre_mean, pre_cov, size=size)
S = emcee.EnsembleSampler(size, n_dim, lnprob_trunc_norm, args=(pre_mean, n_dim, pre_cov))
pos, prob, state = S.run_mcmc(pos, 100)
# print(np.max(pos))
return pos
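# Added note: lnprob_trunc_norm returns -inf whenever sum(x) > 0, so the
# ensemble sampler above draws (approximately) from a multivariate normal
# restricted to the half-space sum(x) <= 0. Illustrative call:
#   pos = truncated_normal_sampling(np.zeros(3), np.eye(3), size=50, n_dim=3)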
def nearestPD(A):
"""Find the nearest positive-definite matrix to input
A Python/Numpy port of <NAME>'s `nearestSPD` MATLAB code [1], which
credits [2].
[1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
[2] <NAME>, "Computing a nearest symmetric positive semidefinite
matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6
"""
B = (A + A.T) / 2
_, s, V = np.linalg.svd(B)
H = np.dot(V.T, np.dot(np.diag(s), V))
A2 = (B + H) / 2
A3 = (A2 + A2.T) / 2
if isPD(A3):
return A3
spacing = np.spacing(np.linalg.norm(A))
I = np.eye(A.shape[0])
k = 1
while not isPD(A3):
mineig = np.min(np.real(np.linalg.eigvals(A3)))
A3 += I * (-mineig * k**2 + spacing)
k += 1
return A3
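# Added check (illustrative): nearestPD projects a symmetric indefinite matrix
# onto the positive-definite cone, e.g.
#   A = np.array([[1.0, 2.0], [2.0, 1.0]])  # eigenvalues 3 and -1
#   assert isPD(nearestPD(A))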
def isPD(B):
"""Returns true when input is positive-definite, via Cholesky"""
try:
_ = np.linalg.cholesky(B)
return True
except np.linalg.LinAlgError:
return False
def nearest_orthogonal_matrix(A):
'''
Find closest orthogonal matrix to *A* using iterative method.
Bases on the code from REMOVE_SOURCE_LEAKAGE function from OSL Matlab package.
Args:
A (numpy.array): array shaped k, n, where k is number of channels, n - data points
Returns:
L (numpy.array): orthogonalized matrix with amplitudes preserved
Reading:
<NAME>., A symmetric multivariate leakage correction for MEG connectomes.,
Neuroimage. 2015 Aug 15;117:439-48. doi: 10.1016/j.neuroimage.2015.03.071
'''
#
MAX_ITER = 2000
TOLERANCE = np.max((1, np.max(A.shape) * np.linalg.svd(A.T, False, False)[0])) * np.finfo(A.dtype).eps # TODO
reldiff = lambda a, b: 2 * abs(a - b) / (abs(a) + abs(b))
convergence = lambda rho, prev_rho: reldiff(rho, prev_rho) <= TOLERANCE
A_b = A.conj()
d = np.sqrt(np.sum(A * A_b, axis=1))
rhos = np.zeros(MAX_ITER)
for i in range(MAX_ITER):
scA = A.T * d
u, s, vh = np.linalg.svd(scA, False)
V = np.dot(u, vh)
        # TODO check if rank is full
d = np.sum(A_b * V.T, axis=1)
L = (V * d).T
E = A - L
rhos[i] = np.sqrt(np.sum(E * E.conj()))
if i > 0 and convergence(rhos[i], rhos[i - 1]):
break
return L
def generate_normal_drift_data(batch_size, train_size, length, pre_mean_, pre_cov_, ch_mean, ch_cov,
change, n_dim, scale=False, gradual_drift=False, oracle=False, change_X=True,
change_y=True, verbose=False):
"""Generates multivariate normal drifting data"""
if scale:
scaler = StandardScaler()
pre_mean = pre_mean_.copy()
pre_cov = pre_cov_.copy()
df = pd.DataFrame()
means = []
covs = []
if verbose:
disable = False
else:
disable = True
for i in tqdm(range(length), disable=disable):
if i % change == 0:
pre_mean, pre_cov = new_distribution(pre_mean, pre_cov, ch_mean, ch_cov,
change_X=change_X, change_y=change_y)
if gradual_drift:
pre_mean, pre_cov = new_similar_distribution(np.zeros(n_dim), pre_cov, [False] * n_dim, ch_cov,
change_X=change_X, change_y=change_y)
if i == 0:
data = rng.multivariate_normal(pre_mean, pre_cov, size=train_size)
else:
data = rng.multivariate_normal(pre_mean, pre_cov, size=batch_size)
prov = pd.DataFrame(data)
if i == 0 and scale:
scaled_features = scaler.fit_transform(prov.values)
prov = pd.DataFrame(scaled_features, index=prov.index, columns=prov.columns)
elif i != 0 and scale:
scaled_features = scaler.transform(prov.values)
prov = pd.DataFrame(scaled_features, index=prov.index, columns=prov.columns)
prov["batch"] = i
        df = pd.concat([df, prov], ignore_index=True)
means.append(list(pre_mean))
covs.append(copy.deepcopy(pre_cov))
df.rename(columns={n_dim - 1: 'label'}, inplace=True)
if oracle:
return df, means, covs
else:
return df
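# Usage sketch (added; all parameter values are assumptions): 20 batches of
# 4-dimensional data whose distribution is re-drawn every 5 batches; the last
# column is renamed 'label' by the generator.
#   n_dim = 4
#   ch_mean = np.array([True] * n_dim)
#   ch_cov = np.ones((n_dim, n_dim), dtype=bool)
#   df = generate_normal_drift_data(batch_size=100, train_size=500, length=20,
#                                   pre_mean_=np.zeros(n_dim), pre_cov_=np.eye(n_dim),
#                                   ch_mean=ch_mean, ch_cov=ch_cov, change=5, n_dim=n_dim)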
def generate_normal_drift_data_cholesky(batch_size, train_size, length, pre_mean_, pre_chol_, ch_mean,
change, n_dim, scale=False, gradual_drift=False, oracle=False, verbose=False):
"""Generates multivariate normal drifting data -> no correlation! Do not use!!!"""
if scale:
scaler = StandardScaler()
pre_mean = pre_mean_.copy()
pre_chol = pre_chol_.copy()
df = pd.DataFrame()
means = []
covs = []
if verbose:
disable = False
else:
disable = True
for i in tqdm(range(length), disable=disable):
if i % change == 0:
pre_mean, cov = new_distribution_cholesky(pre_mean, ch_mean)
if gradual_drift:
pre_mean, cov = new_similar_distribution_cholesky(pre_mean, pre_chol, ch_mean)
if i == 0:
data = rng.multivariate_normal(pre_mean, cov, size=train_size)
else:
data = rng.multivariate_normal(pre_mean, cov, size=batch_size)
prov = pd.DataFrame(data)
if i == 0 and scale:
scaled_features = scaler.fit_transform(prov.values)
prov = pd.DataFrame(scaled_features, index=prov.index, columns=prov.columns)
elif i != 0 and scale:
scaled_features = scaler.transform(prov.values)
prov = pd.DataFrame(scaled_features, index=prov.index, columns=prov.columns)
prov["batch"] = i
        df = pd.concat([df, prov], ignore_index=True)
means.append(list(pre_mean))
covs.append(copy.deepcopy(cov))
df.rename(columns={n_dim - 1: 'label'}, inplace=True)
if oracle:
return df, means, covs
else:
return df
def generate_normal_drift_data_svd(batch_size, train_size, length, pre_mean_, pre_eigs_, pre_S_, ch_mean,
change, n_dim, scale=False, gradual_drift=False, oracle=False, verbose=False):
"""Generates multivariate normal drifting data"""
if scale:
scaler = StandardScaler()
pre_mean = pre_mean_.copy()
pre_eigs = pre_eigs_.copy()
pre_S = pre_S_.copy()
df = pd.DataFrame()
means = []
covs = []
if verbose:
disable = False
else:
disable = True
for i in tqdm(range(length), disable=disable):
if i % change == 0:
pre_mean, cov = new_distribution_svd(pre_mean, ch_mean)
if gradual_drift:
pre_mean, cov = new_similar_distribution_svd(pre_mean, pre_eigs, pre_S, ch_mean)
if i == 0:
data = rng.multivariate_normal(pre_mean, cov, size=train_size)
else:
data = rng.multivariate_normal(pre_mean, cov, size=batch_size)
prov = pd.DataFrame(data)
if i == 0 and scale:
scaled_features = scaler.fit_transform(prov.values)
prov = pd.DataFrame(scaled_features, index=prov.index, columns=prov.columns)
elif i != 0 and scale:
scaled_features = scaler.transform(prov.values)
prov = pd.DataFrame(scaled_features, index=prov.index, columns=prov.columns)
prov["batch"] = i
        df = pd.concat([df, prov], ignore_index=True)
means.append(list(pre_mean))
covs.append(copy.deepcopy(cov))
df.rename(columns={n_dim - 1: 'label'}, inplace=True)
if oracle:
return df, means, covs
else:
return df
def generate_normal_localised_drift_data(batch_size, train_size, length, pre_mean, pre_cov, ch_mean, ch_cov,
change, n_dim, scale=False, oracle=False, verbose=False):
"""Generates multivariate normal drifting data with drift localised in space with truncated normal sampling
with shifting covariance in the desired part of the space"""
if scale:
scaler = StandardScaler()
df = pd.DataFrame()
means = []
covs = []
pre_mean_2 = pre_mean.copy()
pre_cov_2 = pre_cov.copy()
if verbose:
disable = False
else:
disable = True
for i in tqdm(range(length), disable=disable):
if i == 0:
data = np.random.multivariate_normal(pre_mean, pre_cov, size=train_size)
else:
data = np.random.multivariate_normal(pre_mean, pre_cov, size=batch_size)
        # if the point falls in one region of the plane -> change distribution
data = data[data.sum(axis=1) < 0]
if i == 0:
data2 = truncated_normal_sampling(pre_mean_2, pre_cov_2, train_size - len(data), n_dim)
else:
data2 = truncated_normal_sampling(pre_mean_2, pre_cov_2, batch_size - len(data), n_dim)
data2 = data2.clip(-4, 4) # there are some problems in the sampling from the truncated normal
data = np.concatenate((data, data2))
prov = pd.DataFrame(data)
if i == 0 and scale:
scaled_features = scaler.fit_transform(prov.values)
prov = pd.DataFrame(scaled_features, index=prov.index, columns=prov.columns)
elif i != 0 and scale:
scaled_features = scaler.transform(prov.values)
            prov = pd.DataFrame(scaled_features, index=prov.index, columns=prov.columns)
#!/usr/bin/env python3
"""Tools to export data from MS2Analyte as flat files for viewing in Tableau"""
import os
import pickle
import pandas as pd
import sys
import csv
from ms2analyte.file_handling import file_load
def full_export(input_file, input_data, input_structure, input_type, **kwargs):
"""Export data at analyte stage (with peaks, but no replicate comparisons). Input type = 'Samples' or 'Blanks'"""
subname = kwargs.get("subname", None)
mstype = kwargs.get("mstype", "")
root_filename = input_file[:-(len(input_structure.ms_data_file_suffix) + 1)]
if subname:
output_filename = os.path.join(input_structure.output_directory, input_type, root_filename + "_" + mstype +
"_" + subname + "_tableau_output.csv")
else:
output_filename = os.path.join(input_structure.output_directory, input_type, root_filename + "_" + mstype +
"_tableau_output.csv")
input_data.to_csv(output_filename, index=False, header=True)
def replicate_analyte_export(input_structure, input_type, sample_name, ms_type, **kwargs):
"""Export data at replicate comparison stage"""
subname = kwargs.get("subname", None)
output_data = file_load.sample_dataframe_concat(input_structure, input_type, sample_name, ms_type)
if subname:
output_filename = os.path.join(input_structure.output_directory, input_type, sample_name + "_" + subname
+ "_" + ms_type + "_replicated_tableau_output.csv")
else:
output_filename = os.path.join(input_structure.output_directory, input_type, sample_name
+ "_" + ms_type + "_replicated_tableau_output.csv")
output_data.to_csv(output_filename, index=False, header=True)
def experiment_analyte_export(input_structure, input_type, sample_name, **kwargs):
"""Export data at experiment comparison stage"""
subname = kwargs.get("subname", None)
with open((os.path.join(input_structure.output_directory, input_type, sample_name
+ "_all_replicates_dataframe.pickle")), "rb") as pickle_file:
output_data = pickle.load(pickle_file)
if subname:
output_filename = os.path.join(input_structure.output_directory, input_type, sample_name + "_" + subname
+ "_experiment_ids_tableau_output.csv")
else:
output_filename = os.path.join(input_structure.output_directory, input_type, sample_name
+ "_experiment_ids_tableau_output.csv")
output_data.to_csv(output_filename, index=False, header=True)
def experiment_blank_annotation_export(input_structure, input_type, sample_name, **kwargs):
"""Export data at experiment blank annotation stage"""
subname = kwargs.get("subname", None)
with open((os.path.join(input_structure.output_directory, input_type, sample_name
+ "_all_replicates_blanked_dataframe.pickle")), "rb") as pickle_file:
output_data = pickle.load(pickle_file)
if subname:
output_filename = os.path.join(input_structure.output_directory, input_type, sample_name + "_" + subname
+ "_experiment_ids_blanked_tableau_output.csv")
else:
output_filename = os.path.join(input_structure.output_directory, input_type, sample_name
+ "_experiment_ids_blanked_tableau_output.csv")
output_data.to_csv(output_filename, index=False, header=True)
def ms1_ms2_combined_export(input_structure, input_type, sample_name, **kwargs):
"""Combine data from ms1 experiment and ms2 data in to a single file"""
subname = kwargs.get("subname", None)
with open((os.path.join(input_structure.output_directory, input_type, sample_name
+ "_ms1_dataframe.pickle")), "rb") as pickle_file:
ms1_data = pickle.load(pickle_file)
with open((os.path.join(input_structure.output_directory, input_type, sample_name
+ "_ms2_dataframe.pickle")), "rb") as pickle_file:
ms2_data = pickle.load(pickle_file)
ms1_data["ms_level"] = "ms1"
ms2_data["ms_level"] = "ms2"
    output_data = pd.concat([ms1_data, ms2_data])
# coding: utf-8
# In[34]:
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
# In[35]:
import sklearn
# In[36]:
data = pd.read_csv('a.csv')
# In[37]:
data.head()
# In[38]:
cor = data.corr()
cor = abs(cor['mortality_rate'])
print(cor[cor > 0.3])
# In[39]:
data.drop([33, 47], inplace=True) # Get rid of Guam/Puerto Rico
y = data['mortality_rate'] # Labels
states = data['state'] # If we want to look a state up later
data.drop(columns=['mortality_rate', 'Locationdesc', 'country_region', 'last_update', 'lat', 'long', 'confirmed', 'deaths',
'recovered', 'active', 'people_tested', 'people_hospitalized', 'testing_rate', 'incident_rate', 'hospitalization_rate',
'state'], inplace=True)
data.fillna(data.mean(), inplace=True)
# In[40]:
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
scaled = StandardScaler().fit_transform(data)
X = pd.DataFrame(scaled, columns=data.columns)
#!/usr/bin/env python3
# Copyright (C) <NAME> 2019
# Licensed under the 2-clause BSD licence
# Plots a coloured matrix of Android versions over time, showing the types of exploit possible per month per version
import numpy as np
import pandas
import matplotlib.pyplot as plt
from matplotlib import colors
from graph_utils import *
from pyplot_utils import load_graph_colours, get_bounds
START_YEAR = 2009
versions = load_version_list('../../input/release_dates.json')
#versions.append('all')
dates = dates_to_today(START_YEAR)
grid = np.zeros((len(versions), len(dates)), dtype=int)
for vindex, version in enumerate(versions):
print('Analysing version {v}'.format(v=version))
for dindex, date in enumerate(dates):
path = '../../output/graphs/{:s}/graph-{:d}-{:02d}.gv'.format(version, date.year, date.month)
graph = import_graph(path)
if graph is None:
grid[vindex, dindex] = -1
continue
# Experimental: remove fixed edges
#graph = remove_patched(graph)
# Add "backwards" edges and remove duplicates
add_backwards_edges(graph)
sgraph = strictify(graph)
# Get level of possible exploit
grid[vindex, dindex] = get_score(sgraph)
# Export table as csv
data = pandas.DataFrame(grid, columns=dates, index=versions)
import jsonlines
import pandas as pd
def write_output_to_file(output, path):
with jsonlines.open(path, mode="w") as writer:
for obj in output:
writer.write(obj)
def create_dfs_from_file(path, include_articles):
with jsonlines.open(path) as reader:
articles = []
entities = []
for obj in reader:
if include_articles:
articles += [obj["article"]]
article_id = obj["article"]["id"]
for entity in obj["entities"]:
entities += [entity]
entities[-1]["article_id"] = article_id
    return pd.DataFrame(articles), pd.DataFrame(entities)
"""
Detection Recipe - 192.168.3.11
References:
(1) 'Asteroseismic detection predictions: TESS' by Chaplin (2015)
(2) 'On the use of empirical bolometric corrections for stars' by Torres (2010)
(3) 'The amplitude of solar oscillations using stellar techniques' by Kjeldson (2008)
(4) 'An absolutely calibrated Teff scale from the infrared flux method'
by Casagrande (2010) table 4
(5) 'Characterization of the power excess of solar-like oscillations in red giants with Kepler'
by Mosser (2011)
(6) 'Predicting the detectability of oscillations in solar-type stars observed by Kepler'
by Chaplin (2011)
(7) 'The connection between stellar granulation and oscillation as seen by the Kepler mission'
by Kallinger et al (2014)
(8) 'The Transiting Exoplanet Survey Satellite: Simulations of Planet Detections and
Astrophysical False Positives' by Sullivan et al. (2015)
(9) Astropysics module at https://pythonhosted.org/Astropysics/coremods/coords.html
(10) <NAME>'s calc_noise IDL procedure for TESS.
(11) <NAME>lin's soldet6 IDL procedure to calculate the probability of detecting
oscillations with Kepler.
(12) Coordinate conversion at https://ned.ipac.caltech.edu/forms/calculator.html
(13) Bedding 1996
(14) 'The Asteroseismic potential of TESS' by Campante et al. 2016
"""
import numpy as np
from itertools import groupby
from operator import itemgetter
import sys
import pandas as pd
from scipy import stats
import warnings
warnings.simplefilter("ignore")
def bv2teff(b_v):
# from Torres 2010 table 2. Applies to MS, SGB and giant stars
# B-V limits from Flower 1996 fig 5
a = 3.979145106714099
b = -0.654992268598245
c = 1.740690042385095
d = -4.608815154057166
e = 6.792599779944473
f = -5.396909891322525
g = 2.192970376522490
h = -0.359495739295671
lteff = a + b*b_v + c*(b_v**2) + d*(b_v**3) + e*(b_v**4) + f*(b_v**5) + g*(b_v**6) + h*(b_v**7)
teff = 10.0**lteff
return teff
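# Added sanity check: for a roughly solar colour of B-V = 0.65 this polynomial
# gives log10(Teff) of about 3.76, i.e. Teff of roughly 5700 K, close to the
# solar effective temperature:
#   bv2teff(0.65)  # ~5.7e3 K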
# from <NAME> 2003. BCv values from Flower 1996 polynomials presented in Torres 2010
# Av is a keword argument. If reddening values not available, ignore it's effect
def Teff2bc2lum(teff, parallax, parallax_err, vmag, Av=0):
lteff = np.log10(teff)
BCv = np.full(len(lteff), -100.5)
BCv[lteff<3.70] = (-0.190537291496456*10.0**5) + \
(0.155144866764412*10.0**5*lteff[lteff<3.70]) + \
(-0.421278819301717*10.0**4.0*lteff[lteff<3.70]**2.0) + \
(0.381476328422343*10.0**3*lteff[lteff<3.70]**3.0)
BCv[(3.70<lteff) & (lteff<3.90)] = (-0.370510203809015*10.0**5) + \
(0.385672629965804*10.0**5*lteff[(3.70<lteff) & (lteff<3.90)]) + \
(-0.150651486316025*10.0**5*lteff[(3.70<lteff) & (lteff<3.90)]**2.0) + \
(0.261724637119416*10.0**4*lteff[(3.70<lteff) & (lteff<3.90)]**3.0) + \
(-0.170623810323864*10.0**3*lteff[(3.70<lteff) & (lteff<3.90)]**4.0)
BCv[lteff>3.90] = (-0.118115450538963*10.0**6) + \
(0.137145973583929*10.0**6*lteff[lteff > 3.90]) + \
(-0.636233812100225*10.0**5*lteff[lteff > 3.90]**2.0) + \
(0.147412923562646*10.0**5*lteff[lteff > 3.90]**3.0) + \
(-0.170587278406872*10.0**4*lteff[lteff > 3.90]**4.0) + \
(0.788731721804990*10.0**2*lteff[lteff > 3.90]**5.0)
u = 4.0 + 0.4 * 4.73 - 2.0 * np.log10(parallax) - 0.4 * (vmag - Av + BCv)
lum = 10**u # in solar units
e_lum = (2.0 / parallax * 10**u)**2 * parallax_err**2
e_lum = np.sqrt(e_lum)
return lum, e_lum
# calculate seismic parameters
def seismicParameters(teff, lum):
# solar parameters
teff_solar = 5777.0 # Kelvin
teffred_solar = 8907.0 #in Kelvin
numax_solar = 3090.0 # in micro Hz
dnu_solar = 135.1 # in micro Hz
cadence = 120 # in s
vnyq = (1.0 / (2.0*cadence)) * 10**6 # in micro Hz
teffred = teffred_solar*(lum**-0.093) # from (6) eqn 8. red-edge temp
    rad = lum**0.5 * ((teff/teff_solar)**-2) # Stefan-Boltzmann law
numax = numax_solar*(rad**-1.85)*((teff/teff_solar)**0.92) # from (14)
return cadence, vnyq, rad, numax, teffred, teff_solar, teffred_solar, numax_solar, dnu_solar
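# Added consistency check: with solar inputs the scaling relations reduce to
# the solar reference values, e.g.
#   cadence, vnyq, rad, numax, teffred, *_ = seismicParameters(teff=5777.0, lum=1.0)
#   # rad ~ 1 Rsun, numax ~ 3090 microHz, vnyq ~ 4167 microHz for 120 s cadence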
# no coordinate conversion before calculating tess field observing time. Only
# works with ecliptic coordinates
def tess_field_only(e_lng, e_lat):
# create a list to append all of the total observing times 'T' in the TESS field to
T = [] # units of sectors (0-13)
# create a list to append all of the maximum contiguous observations to
max_T = [] # units of sectors (0-13)
for star in range(len(e_lng)):
# 'n' defines the distance between each equidistant viewing sector in the TESS field.
n = 360.0/13
# Define a variable to count the total number of sectors a star is observed in.
counter = 0
# Define a variable to count all of the observations for each star.
# Put each observation sector into sca separately in order to find the largest number
# of contiguous observations for each star.
sca = []
# 'ranges' stores all of the contiguous observations for each star.
ranges = []
# Defines the longitude range of the observing sectors at the inputted stellar latitude
lngrange = 24.0/abs(np.cos(np.radians(e_lat[star])))
if lngrange>=360.0:
lngrange=360.0
# if the star is in the northern hemisphere:
if e_lat[star] >= 0.0:
# For each viewing sector.
for i in range(1,14):
# Define an ra position for the centre of each sector in increasing longitude.
# if a hemisphere has an overshoot, replace 0.0 with the value.
a = 0.0+(n*(i-1))
# calculate the distances both ways around the
# circle between the star and the centre of the sector.
# The smallest distance is the one that should be used
# to see if the star lies in the observing sector.
d1 = abs(e_lng[star]-a)
d2 = (360.0 - abs(e_lng[star]-a))
if d1>d2:
d1 = d2
# if the star is in the 'overshoot' region for some sectors, calculate d3 and d4;
                # the distances both ways around the circle between the star and the centre of the
# 'overshooting past the pole' region of the sector.
# The smallest distance is the one that should be used
# to see if the star lies in the observing sector.
# the shortest distances between the centre of the sector and star, and the sector's
# overshoot and the star should add to 180.0 apart (i.e d1+d3=180.0)
d3 = abs(e_lng[star] - (a+180.0)%360.0)
d4 = 360.0 - abs(e_lng[star] - (a+180.0)%360.0)
if d3>d4:
d3 = d4
# check if a star lies in the field of that sector.
if (d1<=lngrange/2.0 and 6.0<=e_lat[star]) or (d3<=lngrange/2.0 and 78.0<=e_lat[star]):
counter += 1
sca = np.append(sca, i)
else:
pass
# if the star is in the southern hemisphere:
if e_lat[star] < 0.0:
# For each viewing sector.
for i in range(1,14):
# Define an ra position for the centre of each sector in increasing longitude.
# if a hemisphere has an overshoot, replace 0.0 with the value.
a = 0.0+(n*(i-1))
# calculate the distances both ways around the
# circle between the star and the centre of the sector.
# The smallest distance is the one that should be used
# to see if the star lies in the observing sector.
d1 = abs(e_lng[star]-a)
d2 = (360 - abs(e_lng[star]-a))
if d1>d2:
d1 = d2
# if the star is in the 'overshoot' region for some sectors, calculate d3 and d4;
# the distances both ways around the circle between the star and the centre of the
# 'overshooting past the pole' region of the sector.
# The smallest distance of the 2 is the one that should be used
# to see if the star lies in the observing sector.
d3 = abs(e_lng[star] - (a+180.0)%360.0)
d4 = (360 - abs(e_lng[star] - (a+180.0)%360.0))
if d3>d4:
d3 = d4
# check if a star lies in the field of that sector.
if (d1<=lngrange/2.0 and -6.0>=e_lat[star]) or (d3<=lngrange/2.0 and -78.0>=e_lat[star]):
counter += 1
sca = np.append(sca, i)
else:
pass
if len(sca) == 0:
ranges = [0]
else:
            for k, g in groupby(enumerate(sca), lambda i_x: i_x[0] - i_x[1]):
                group = list(map(itemgetter(1), g))
                if np.array(group).sum() != 0:
                    ranges.append([len(group)])
T=np.append(T, counter)
max_T = np.append(max_T, np.max(np.array(ranges)))
return T, max_T
def calc_noise(imag, exptime, teff, e_lng = 0, e_lat = 30, g_lng = 96, g_lat = -30, subexptime = 2.0, npix_aper = 10, \
frac_aper = 0.76, e_pix_ro = 10, geom_area = 60.0, pix_scale = 21.1, sys_limit = 0):
omega_pix = pix_scale**2.0
n_exposures = exptime/subexptime
# electrons from the star
megaph_s_cm2_0mag = 1.6301336 + 0.14733937*(teff-5000.0)/5000.0
e_star = 10.0**(-0.4*imag) * 10.0**6 * megaph_s_cm2_0mag * geom_area * exptime * frac_aper
e_star_sub = e_star*subexptime/exptime
# e/pix from zodi
dlat = (abs(e_lat)-90.0)/90.0
vmag_zodi = 23.345 - (1.148*dlat**2.0)
e_pix_zodi = 10.0**(-0.4*(vmag_zodi-22.8)) * (2.39*10.0**-3) * geom_area * omega_pix * exptime
# e/pix from background stars
dlat = abs(g_lat)/40.0*10.0**0
dlon = g_lng
q = np.where(dlon>180.0)
if len(q[0])>0:
dlon[q] = 360.0-dlon[q]
dlon = abs(dlon)/180.0*10.0**0
p = [18.97338*10.0**0, 8.833*10.0**0, 4.007*10.0**0, 0.805*10.0**0]
imag_bgstars = p[0] + p[1]*dlat + p[2]*dlon**(p[3])
e_pix_bgstars = 10.0**(-0.4*imag_bgstars) * 1.7*10.0**6 * geom_area * omega_pix * exptime
# compute noise sources
noise_star = np.sqrt(e_star) / e_star
noise_sky = np.sqrt(npix_aper*(e_pix_zodi + e_pix_bgstars)) / e_star
noise_ro = np.sqrt(npix_aper*n_exposures)*e_pix_ro / e_star
noise_sys = 0.0*noise_star + sys_limit/(1*10.0**6)/np.sqrt(exptime/3600.0)
noise1 = np.sqrt(noise_star**2.0 + noise_sky**2.0 + noise_ro**2.0)
noise2 = np.sqrt(noise_star**2.0 + noise_sky**2.0 + noise_ro**2.0 + noise_sys**2.0)
return noise2
# calculate the granulation at a set of frequencies from (7) eqn 2 model F
def granulation(nu0, dilution, a_nomass, b1, b2, vnyq):
# Divide by dilution squared as it affects stars in the time series.
# The units of dilution change from ppm to ppm^2 microHz^-1 when going from the
# time series to frequency. p6: c=4 and zeta = 2*sqrt(2)/pi
Pgran = (((2*np.sqrt(2))/np.pi) * (a_nomass**2/b1) / (1 + ((nu0/b1)**4)) \
+ ((2*np.sqrt(2))/np.pi) * (a_nomass**2/b2) / (1 + ((nu0/b2)**4))) / (dilution**2)
# From (9). the amplitude suppression factor. Normalised sinc with pi (area=1)
eta = np.sinc((nu0/(2*vnyq)))
# the granulation after attenuation
Pgran = Pgran * eta**2
return Pgran, eta
# the total number of pixels used by the highest ranked x number of targets in the tCTL
def pixel_cost(x):
N = np.ceil(10.0**-5.0 * 10.0**(0.4*(20.0-x)))
N_tot = 10*(N+10)
total = np.cumsum(N_tot)
# want to find: the number of ranked tCTL stars (from highest to lowest rank) that correspond to a pixel cost of 1.4Mpix at a given time
per_cam = 26*4 # to get from the total pixel cost to the cost per camera at a given time, divide by this
pix_limit = 1.4e6 # the pixel limit per camera at a given time
return total[-1], per_cam, pix_limit, N_tot
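# Added worked example: for a single I = 10 mag target,
# N = ceil(1e-5 * 10**(0.4*(20 - 10))) = ceil(0.1) = 1, so
# N_tot = 10 * (1 + 10) = 110 pixels are charged for that star.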
# detection recipe to find whether a star has an observed solar-like Gaussian mode power excess
def globalDetections(g_lng, g_lat, e_lng, e_lat, imag, \
lum, rad, teff, numax, max_T, teffred, teff_solar, \
teffred_solar, numax_solar, dnu_solar, sys_limit, dilution, vnyq, cadence, vary_beta=False):
dnu = dnu_solar*(rad**-1.42)*((teff/teff_solar)**0.71) # from (14) eqn 21
beta = 1.0-np.exp(-(teffred-teff)/1550.0) # beta correction for hot solar-like stars from (6) eqn 9.
if isinstance(teff, float): # for only 1 star
if (teff>=teffred):
beta = 0.0
else:
beta[teff>=teffred] = 0.0
# to remove the beta correction, set Beta=1
if vary_beta == False:
beta = 1.0
# modified from (6) eqn 11. Now consistent with dnu proportional to numax^0.77 in (14)
amp = 0.85*2.5*beta*(rad**1.85)*((teff/teff_solar)**0.57)
# From (5) table 2 values for delta nu_{env}. env_width is defined as +/- some value.
env_width = 0.66 * numax**0.88
env_width[numax>100.] = numax[numax>100.]/2. # from (6) p12
total, per_cam, pix_limit, npix_aper = pixel_cost(imag)
noise = calc_noise(imag=imag, teff=teff, exptime=cadence, e_lng=e_lng, e_lat=e_lat, \
g_lng=g_lng, g_lat=g_lat, sys_limit=sys_limit, npix_aper=npix_aper)
noise = noise*10.0**6 # total noise in units of ppm
a_nomass = 0.85 * 3382*numax**-0.609 # multiply by 0.85 to convert to redder TESS bandpass.
b1 = 0.317 * numax**0.970
b2 = 0.948 * numax**0.992
# call the function for the real and aliased components (above and below vnyq) of the granulation
    # the order of the stars is different for the aliases so run the function in a loop
Pgran, eta = granulation(numax, dilution, a_nomass, b1, b2, vnyq)
Pgranalias = np.zeros(len(Pgran))
etaalias = np.zeros(len(eta))
# if vnyq is 1 fixed value
if isinstance(vnyq, float):
for i in range(len(numax)):
if numax[i] > vnyq:
Pgranalias[i], etaalias[i] = granulation((vnyq - (numax[i] - vnyq)), \
dilution, a_nomass[i], b1[i], b2[i], vnyq)
elif numax[i] < vnyq:
Pgranalias[i], etaalias[i] = granulation((vnyq + (vnyq - numax[i])), \
dilution, a_nomass[i], b1[i], b2[i], vnyq)
# if vnyq varies for each star
else:
for i in range(len(numax)):
if numax[i] > vnyq[i]:
Pgranalias[i], etaalias[i] = granulation((vnyq[i] - (numax[i] - vnyq[i])), \
dilution, a_nomass[i], b1[i], b2[i], vnyq[i])
elif numax[i] < vnyq[i]:
Pgranalias[i], etaalias[i] = granulation((vnyq[i] + (vnyq[i] - numax[i])), \
dilution, a_nomass[i], b1[i], b2[i], vnyq[i])
Pgrantotal = Pgran + Pgranalias
ptot = (0.5*2.94*amp**2.*((2.*env_width)/dnu)*eta**2.) / (dilution**2.)
Binstr = 2.0 * (noise)**2. * cadence*10**-6.0 # from (6) eqn 18
bgtot = ((Binstr + Pgrantotal) * 2.*env_width) # units are ppm**2
snr = ptot/bgtot # global signal to noise ratio from (11)
fap = 0.05 # false alarm probability
pdet = 1.0 - fap
pfinal = np.full(rad.shape[0], -99)
idx = np.where(max_T != 0) # calculate the indexes where T is not 0
tlen=max_T[idx]*27.4*86400.0 # the length of the TESS observations in seconds
bw=1.0 * (10.0**6.0)/tlen
nbins=(2.*env_width[idx]/bw).astype(int) # from (11)
snrthresh = stats.chi2.ppf(pdet, 2.0*nbins) / (2.0*nbins) - 1.0
pfinal[idx] = stats.chi2.sf((snrthresh+1.0) / (snr[idx]+1.0)*2.0*nbins, 2.*nbins)
return pfinal, snr, dnu # snr is needed in TESS_telecon2.py
def BV2VI(bv, vmag, g_mag_abs):
    whole = pd.DataFrame(data={'B-V': bv, 'Vmag': vmag, 'g_mag_abs': g_mag_abs, 'Ai': 0})
import base64
import calendar
import json
import logging
import re
import sqlparse
import uuid
from collections import OrderedDict
from datetime import datetime
from io import BytesIO
from django.conf import settings
from django.db import models, DatabaseError, connection
from django.db.models import signals
from django.utils.datastructures import OrderedSet
import pandas as pd
from geoalchemy2 import Geometry
from PIL import Image, ImageOps
from sqlparse.tokens import Token
from tablo import wkt, LARGE_IMAGE_NAME, NO_PK, PANDAS_TYPE_CONVERSION, POSTGIS_ESRI_FIELD_MAPPING, IMPORT_SUFFIX
from tablo import TABLE_NAME_PREFIX, PRIMARY_KEY_NAME, GEOM_FIELD_NAME, SOURCE_DATASET_FIELD_NAME, WEB_MERCATOR_SRID
from tablo import ADJUSTED_GLOBAL_EXTENT
from tablo.csv_utils import prepare_row_set_for_import, convert_header_to_column_name
from tablo.exceptions import InvalidFieldsError, InvalidSQLError, RelatedFieldsError
from tablo.geom_utils import Extent, SpatialReference
from tablo.utils import get_jenks_breaks, get_sqlalchemy_engine, dictfetchall
from tablo.storage import default_public_storage as image_storage
TEMPORARY_FILE_LOCATION = getattr(settings, 'TABLO_TEMPORARY_FILE_LOCATION', 'temp')
FILE_STORE_DOMAIN_NAME = getattr(settings, 'FILESTORE_DOMAIN_NAME', 'domain')
logger = logging.getLogger(__name__)
class FeatureService(models.Model):
id = models.AutoField(auto_created=True, primary_key=True)
description = models.TextField(null=True)
copyright_text = models.TextField(null=True)
spatial_reference = models.CharField(max_length=255)
_initial_extent = models.TextField(db_column='initial_extent', null=True)
_full_extent = models.TextField(db_column='full_extent', null=True)
units = models.CharField(max_length=255, null=True)
allow_geometry_updates = models.BooleanField(default=False)
@property
def initial_extent(self):
if self._initial_extent is None and self.featureservicelayer_set.all():
self._initial_extent = json.dumps(determine_extent(self.featureservicelayer_set.all()[0].table))
self.save()
return self._initial_extent
@property
def full_extent(self):
if self._full_extent is None and self.featureservicelayer_set.all():
self._full_extent = json.dumps(determine_extent(self.featureservicelayer_set.all()[0].table))
self.save()
return self._full_extent
@property
def dataset_id(self):
if self.featureservicelayer_set.all():
dataset_id = self.featureservicelayer_set.all()[0].table
return dataset_id.replace(TABLE_NAME_PREFIX, '').replace(IMPORT_SUFFIX, '')
return 0
def finalize(self, dataset_id):
# Renames the table associated with the feature service to remove the IMPORT tag
fs_layer = self.featureservicelayer_set.first()
fs_layer.table = TABLE_NAME_PREFIX + dataset_id
fs_layer.save()
old_table_name = TABLE_NAME_PREFIX + dataset_id + IMPORT_SUFFIX
new_table_name = TABLE_NAME_PREFIX + dataset_id
with connection.cursor() as c:
c.execute('DROP TABLE IF EXISTS {new_table_name}'.format(
new_table_name=new_table_name
))
c.execute('ALTER TABLE {old_table_name} RENAME TO {new_table_name}'.format(
old_table_name=old_table_name,
new_table_name=new_table_name
))
class FeatureServiceLayer(models.Model):
id = models.AutoField(auto_created=True, primary_key=True)
service = models.ForeignKey(FeatureService)
layer_order = models.IntegerField()
table = models.CharField(max_length=255)
name = models.CharField(max_length=255, null=True)
description = models.TextField(null=True)
object_id_field = models.CharField(max_length=255, default=PRIMARY_KEY_NAME)
global_id_field = models.CharField(max_length=255, default=PRIMARY_KEY_NAME)
display_field = models.CharField(max_length=255, default=PRIMARY_KEY_NAME)
geometry_type = models.CharField(max_length=255)
_extent = models.TextField(db_column='extent', null=True)
supports_time = models.BooleanField(default=False)
start_time_field = models.CharField(max_length=255, null=True)
_time_extent = models.TextField(null=True, db_column='time_extent')
time_interval = models.TextField(null=True)
time_interval_units = models.CharField(max_length=255, null=True)
drawing_info = models.TextField()
_fields = None
_related_fields = None
_relations = None
_srid = None
@property
def extent(self):
if self._extent is None:
self._extent = json.dumps(determine_extent(self.table))
self.save()
return self._extent
@property
def srid(self):
if not self._srid:
self._srid = json.loads(self.service.spatial_reference)['wkid']
return self._srid
@property
def time_extent(self):
if not self.supports_time:
return '[]'
# TODO: Remove fields from database if we really don't want to continue using them
return json.dumps(self.get_raw_time_extent())
def get_raw_time_extent(self):
query = 'SELECT MIN({date_field}), MAX({date_field}) FROM {table_name}'.format(
date_field=self.start_time_field,
table_name=self.table
)
with connection.cursor() as c:
c.execute(query)
min_date, max_date = (calendar.timegm(x.timetuple()) * 1000 for x in c.fetchone())
return [min_date, max_date]
@property
def fields(self):
if self._fields is None:
fields = get_fields(self.table)
for field in fields:
if field['name'] == 'db_id':
field['type'] = 'esriFieldTypeOID'
elif field['name'] == GEOM_FIELD_NAME:
field['type'] = 'esriFieldTypeGeometry'
self._fields = fields
return self._fields
@property
def relations(self):
if self._relations is None:
self._relations = self.featureservicelayerrelations_set.all()
return self._relations
@property
def related_fields(self):
if self._related_fields is None:
self._related_fields = OrderedDict()
for field in (f for r in self.relations for f in r.fields):
field_key = field['qualified'] # Will be related_title.field
                self._related_fields[field_key] = field
return self._related_fields
@property
def time_info(self):
if self.start_time_field:
return {
'startTimeField': self.start_time_field,
'timeExtent': json.loads(self.time_extent),
'timeInterval': int(self.time_interval),
'timeIntervalUnits': self.time_interval_units
}
return None
def perform_query(self, limit=0, offset=0, **kwargs):
limit, offset = max(limit, 0), max(offset, 0)
count_only = bool(kwargs.pop('count_only', False))
ids_only = bool(kwargs.pop('ids_only', False))
return_fields = [f.strip() for f in list(kwargs.get('return_fields') or '*')]
return_geometry = bool(kwargs.get('return_geometry', True))
out_sr = kwargs.get('out_sr') or WEB_MERCATOR_SRID
additional_where_clause = kwargs.get('additional_where_clause')
additional_where_clause = additional_where_clause.replace('"', '') if additional_where_clause else None
# Break out fields and DESC / ASC modifiers
order_by_field_objs = []
for field in [f.strip() for f in kwargs.get('order_by_fields') or '']:
            m = re.match(r'(\S*)\s?(asc|desc)?', field, re.IGNORECASE)
field_obj = {'field_name': m.group(1)}
try:
field_obj['order_modifier'] = m.group(2)
except IndexError:
pass # No modifier
order_by_field_objs.append(field_obj)
order_by_field_names = [f['field_name'] for f in order_by_field_objs]
# These are the possible points of SQL injection. All other dynamically composed pieces of SQL are
# constructed using items within the database, or are escaped using the database engine.
if not ids_only:
include_related = bool(kwargs.get('object_ids'))
self._validate_fields(return_fields, include_related)
self._validate_fields(order_by_field_names, include_related)
self._validate_where_clause(additional_where_clause)
# Build SELECT, JOIN, WHERE and ORDER BY from inputs
if count_only:
return_fields = order_by_field_names = []
select_fields = 'COUNT(0)'
elif ids_only:
return_fields = order_by_field_names = [self.object_id_field]
select_fields = 'DISTINCT {0}'.format(self._alias_fields(return_fields))
else:
if self.object_id_field not in return_fields:
return_fields.insert(0, self.object_id_field)
select_fields = self._expand_fields(return_fields)
if return_geometry:
select_fields += ', ST_AsText(ST_Transform("source"."dbasin_geom", {0}))'.format(out_sr)
join, related_tables = self._build_join_clause(return_fields, additional_where_clause)
where, query_params = self._build_where_clause(additional_where_clause, count_only, **kwargs)
order_by = '' if count_only else self._build_order_by_clause(
field_objs=order_by_field_objs, related_tables=(None if ids_only else related_tables)
)
query_clause = 'SELECT {fields} FROM "{table}" AS "source" {join} {where} {order_by} {limit} {offset}'
query_clause = query_clause.format(
fields=select_fields, table=self.table, join=join.strip(), where=where.strip(), order_by=order_by,
limit='' if limit == 0 else 'LIMIT {limit}'.format(limit=limit + 1),
offset='' if offset == 0 else 'OFFSET {offset}'.format(offset=offset)
)
# Execute query with optional limit and offset, and prepare return data
with connection.cursor() as c:
c.execute(query_clause, query_params)
queried_data = dictfetchall(c)
limited_data = 0 < limit < len(queried_data)
queried_data = queried_data[:-1] if limited_data else queried_data
return {'data': queried_data, 'exceeded_limit': limited_data}
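    # Usage sketch (added; field names are illustrative assumptions, 'layer' is a
    # FeatureServiceLayer instance):
    #   result = layer.perform_query(
    #       limit=100, offset=0,
    #       return_fields=['height', 'species'],
    #       additional_where_clause='height > 10',
    #       order_by_fields=['height DESC'],
    #   )
    #   rows = result['data']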
def _alias_fields(self, fields):
""" Prepend table alias to fields, delimiting them in double quotes, but ignore '*' """
if not fields:
return self._expand_fields('*', True)
elif isinstance(fields, str):
fields = fields.split(',')
aliased_fields = [f if '.' in f else 'source.{0}'.format(f) for f in fields]
quoted_fields = '", "'.join('"."'.join(f.split('.')) for f in aliased_fields).join('""')
return quoted_fields.replace('"*"', '*')
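    # Added example: _alias_fields(['name', 'rel.col']) yields
    #   '"source"."name", "rel"."col"'
    # i.e. unqualified fields are attributed to the "source" alias, everything is
    # double-quote delimited, and '*' is left unquoted.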
def _expand_fields(self, fields, aliased_only=False):
""" Expand '*' in fields to those that will be queried, and optionally alias them to avoid clashes """
if isinstance(fields, str):
fields = fields.split(',')
if not any(f == '*' or '.' in f for f in fields):
return self._alias_fields(fields)
fields_to_expand = [self.object_id_field]
fields_to_expand.extend(r.source_column for r in self.relations)
for field in fields:
if field == '*':
fields_to_expand.extend(f['name'] for f in self.fields)
elif field.endswith('.*'):
related_prefix = field[:-1]
fields_to_expand.extend(f for f in self.related_fields if f.startswith(related_prefix))
else:
fields_to_expand.append(field)
field_format = '{0}' if aliased_only else '{0} AS "{1}"'
return ', '.join(field_format.format(self._alias_fields(f), f) for f in OrderedSet(fields_to_expand))
def _build_join_clause(self, fields, where):
if not fields and where is None:
return '', []
elif where is None:
query_fields = set(fields)
else:
query_fields = set(fields).union(self._parse_where_clause(where)[0])
join_tables = query_fields.intersection(self.related_fields.keys()) # Filter by available related fields
join_tables = join_tables.union(f for f in query_fields if '*' in f) # Ensure wildcard fields are included
join_tables = set(f[:f.index('.')] for f in join_tables if '.' in f) # Derive distinct table prefixes
join_clause = ''
relations = self.relations.filter(related_title__in=join_tables)
for relation in relations:
join_clause += ' LEFT OUTER JOIN "{table}" AS "{related_title}"'.format(
table=relation.table, related_title=relation.related_title
)
join_clause += ' ON "source"."{source}" = "{related_title}"."{target}"'.format(
source=relation.source_column, related_title=relation.related_title, target=relation.target_column
)
return join_clause, [r.related_title for r in relations]
def _build_where_clause(self, where, count_only, **kwargs):
""" :return: a Python format where clause with corresponding params for the SQL engine to escape """
start_time = kwargs.get('start_time')
end_time = kwargs.get('end_time')
if where is None:
where_clause = 'WHERE 1=1'
else:
where_clause = ''
for token in sqlparse.parse('WHERE {0}'.format(where.replace('%', '%%')))[0].flatten():
if token.ttype != Token.Name:
where_clause += token.value
elif token.value != token.parent.value:
# Token is aliased: just write the segments as they come
where_clause += token.value.strip('"').join('""')
else:
# Token is not aliased if parent doesn't include it: add source alias
where_clause += '"source"."{field}"'.format(field=token.value.strip('"'))
query_params = []
if self.start_time_field:
layer_time_field = '"source"."{time_field}"'.format(time_field=self.start_time_field.lower())
if start_time:
if start_time == end_time:
where_clause += ' AND {time_field} = %s::date'.format(time_field=layer_time_field)
query_params.append(start_time)
else:
where_clause += ' AND {time_field} BETWEEN %s::date AND %s::date'.format(time_field=layer_time_field)
query_params.append(start_time)
query_params.append(end_time)
elif where is None and not count_only:
# If layer has a time component, default is to show first time step
where_clause += ' AND {time_field} = (SELECT MIN({subquery_time_field}) FROM "{table}")'.format(
time_field=layer_time_field,
subquery_time_field=self.start_time_field,
table=self.table
)
if kwargs.get('object_ids'):
object_ids = kwargs['object_ids']
where_clause += ' AND "source"."{primary_key}" IN ({object_id_list})'.format(
primary_key=PRIMARY_KEY_NAME,
object_id_list=','.join(['%s' for obj_id in object_ids]),
)
for obj_id in object_ids:
query_params.append(obj_id)
if kwargs.get('extent'):
where_clause += ' AND ST_Intersects("source"."dbasin_geom", ST_GeomFromText(%s, 3857)) '
query_params.append(kwargs['extent'])
return where_clause, query_params
def _build_order_by_clause(self, field_objs, related_tables=None):
order_by_clause = 'ORDER BY {fields}'
def insert_field(field_list, field):
matching_field = [d for d in field_list if d['field_name'] == field['field_name']]
if not matching_field:
field_list.insert(0, field)
elif field_list[0]['field_name'] != field['field_name']:
field_list.remove(field)
field_list.insert(0, field)
order_by_field_objs = list(field_objs or '')
if related_tables is None:
if not order_by_field_objs:
# Ensure ordering by primary key if nothing else
insert_field(order_by_field_objs, {'field_name': PRIMARY_KEY_NAME})
else:
for relation in self.relations.filter(related_title__in=related_tables).order_by('-related_index'):
# Ensure ordering by source table keys
insert_field(order_by_field_objs, {'field_name': relation.source_column})
if not order_by_field_objs:
all_fields = self._expand_fields([])
else:
# Expand out the fields individually, and add their modifiers (ASC / DESC) if they exist
expanded_fields = []
for field_obj in order_by_field_objs:
expanded_field = self._expand_fields([field_obj['field_name']], aliased_only=True)
if field_obj.get('order_modifier'):
expanded_field = expanded_field + ' ' + field_obj['order_modifier']
expanded_fields.append(expanded_field)
all_fields = ', '.join(expanded_fields)
return order_by_clause.format(fields=all_fields)
def _parse_where_clause(self, where):
if where is None:
return None
parsed = sqlparse.parse('WHERE {where_clause}'.format(where_clause=where))
fields = set(t.parent.value.replace('"', '') for t in parsed[0].flatten() if t.ttype == Token.Name)
return fields, parsed[1:] # Additional statements may occur but are invalid
def _validate_where_clause(self, where):
parsed = self._parse_where_clause(where)
if parsed is None:
return
elif parsed[1]:
raise InvalidSQLError('Invalid where clause')
self._validate_fields(parsed[0])
def _validate_fields(self, fields, include_related=True):
if isinstance(fields, str):
fields = fields.split(',')
query_fields = {field.replace('"', '') for field in fields}
if not query_fields:
return
all_related = ('{0}.*'.format(r.related_title) for r in self.relations)
valid_fields = {
'source': set(f['name'] for f in self.fields).union('*'),
'target': set(f for f in self.related_fields).union(all_related)
}
invalid_fields = query_fields.difference(valid_fields['source'])
if invalid_fields:
if include_related:
invalid_fields = invalid_fields.difference(valid_fields['target'])
else:
related_fields = invalid_fields.intersection(valid_fields['target'])
if related_fields:
raise RelatedFieldsError(
'Related fields not allowed: {0}'.format(', '.join(related_fields)),
fields=related_fields
)
if invalid_fields:
raise InvalidFieldsError(
message='Invalid fields: {0}'.format(', '.join(invalid_fields)),
fields=invalid_fields
)
def get_distinct_geometries_across_time(self, *kwargs):
time_query = 'SELECT DISTINCT ST_AsText({geom_field}), COUNT(0) FROM {table} GROUP BY ST_AsText({geom_field})'
time_query = time_query.format(geom_field=GEOM_FIELD_NAME, table=self.table)
with connection.cursor() as c:
c.execute(time_query)
response = dictfetchall(c)
return response
def get_unique_values(self, field):
self._validate_fields(field)
unique_values = []
field_name = field
table = self.table
if '.' in field:
relationship_name, field_name = field.split('.')
table = self.relations.filter(related_title=relationship_name).first().table
with connection.cursor() as c:
c.execute('SELECT distinct {field_name} FROM {table} ORDER BY {field_name}'.format(
table=table, field_name=field_name
))
for row in c.fetchall():
unique_values.append(row[0])
return unique_values
def get_equal_breaks(self, field, break_count):
self._validate_fields(field)
breaks = []
with connection.cursor() as c:
c.execute('SELECT MIN({field_name}), MAX({field_name}) FROM {table}'.format(
table=self.table, field_name=field
))
min_value, max_value = c.fetchone()
step = (max_value - min_value) / break_count
low_value = min_value
for _ in range(break_count):
breaks.append(low_value)
low_value += step
breaks.append(max_value)
return breaks
def get_quantile_breaks(self, field, break_count):
self._validate_fields(field)
sql_statement = """
SELECT all_data.{field}
FROM
(
SELECT ROW_NUMBER() OVER (ORDER BY {field}) AS row_number, {field}
FROM {table}
WHERE {field} IS NOT NULL
ORDER BY {field}
) all_data,
(
SELECT ROUND((COUNT(0) / {break_count}), 0) AS how_many
FROM {table} serviceTable
WHERE serviceTable.{field} IS NOT NULL
) count_table
WHERE MOD(row_number, how_many) = 0
ORDER BY all_data.{field}
""".format(field=field, table=self.table, break_count=break_count)
values = []
with connection.cursor() as c:
c.execute('SELECT MIN({field_name}), MAX({field_name}) FROM {table}'.format(
table=self.table,
field_name=field
))
minimum, maximum = c.fetchone()
values.append(minimum)
c.execute(sql_statement)
for row in c.fetchall():
values.append(row[0])
if values[-1] != maximum:
values[-1] = maximum
return values
def get_natural_breaks(self, field, break_count):
self._validate_fields(field)
sql_statement = """
SELECT service_table.{field}
FROM {table} service_table,
(
SELECT ROW_NUMBER() OVER (ORDER BY {field}) AS row_number, {primary_key}
FROM {table}
WHERE {field} IS NOT NULL
ORDER BY {field}
) all_data,
(
SELECT
COUNT(0) AS total,
CASE
WHEN COUNT(0) < {num_samples} THEN 1
ELSE ROUND((COUNT(0) / {num_samples}), 0)
END AS how_many
FROM {table}
WHERE {field} IS NOT NULL
) count_table
WHERE service_table.{primary_key} = all_data.{primary_key} AND
(
MOD(row_number, how_many) = 0 OR
row_number IN (1, total)
)
ORDER BY service_table.{field}
""".format(field=field, primary_key=self.object_id_field, table=self.table, num_samples=1000)
with connection.cursor() as c:
c.execute(sql_statement)
values = [row[0] for row in c.fetchall()]
c.execute('SELECT MIN({field_name}), MAX({field_name}) FROM {table}'.format(
table=self.table, field_name=field
))
minimum, maximum = c.fetchone()
values[0] = minimum
values[-1] = maximum
return get_jenks_breaks(values, break_count)
def add_feature(self, feature):
system_cols = {PRIMARY_KEY_NAME, GEOM_FIELD_NAME}
with connection.cursor() as c:
c.execute('SELECT * from {dataset_table_name} LIMIT 0'.format(
dataset_table_name=self.table
))
colnames_in_table = [desc[0].lower() for desc in c.description if desc[0] not in system_cols]
columns_not_present = colnames_in_table[0:]
columns_in_request = feature['attributes'].copy()
for field in system_cols:
if field in columns_in_request:
columns_in_request.pop(field)
for key in columns_in_request:
if key not in colnames_in_table:
raise AttributeError('attributes do not match')
columns_not_present.remove(key)
if len(columns_not_present):
raise AttributeError('Missing attributes {0}'.format(','.join(columns_not_present)))
insert_command = (
'INSERT INTO {table_name} ({attribute_names}) VALUES ({placeholders}) RETURNING {pk}'.format(
table_name=self.table,
attribute_names=','.join(colnames_in_table),
placeholders=','.join(['%s'] * len(colnames_in_table)),
pk=PRIMARY_KEY_NAME
)
)
transform_op = 'ST_Transform(ST_GeomFromEWKT(\'{geom}\'), {table_srid})'.format(
geom=wkt.from_esri_feature(feature['geometry'], self.geometry_type),
table_srid=self.srid
)
set_geom_command = 'UPDATE {table_name} SET {geom_column} = {transform_op} WHERE {pk}=%s'.format(
table_name=self.table,
geom_column=GEOM_FIELD_NAME,
transform_op=transform_op,
pk=PRIMARY_KEY_NAME
)
date_fields = [field['name'] for field in self.fields if field['type'] == 'esriFieldTypeDate']
image_fields = [field['name'] for field in self.fields if field['type'] == 'esriFieldTypeBlob']
# Creating a dictionary where the key is the Amazon S3 path and the value is the Image for the field
images_large = {}
images_thumbs = {}
values = []
for attribute_name in colnames_in_table:
if attribute_name in date_fields and feature['attributes'][attribute_name]:
if isinstance(feature['attributes'][attribute_name], str):
values.append(feature['attributes'][attribute_name])
else:
values.append(datetime.fromtimestamp(feature['attributes'][attribute_name] / 1000))
elif attribute_name in image_fields and feature['attributes'][attribute_name]:
# Using NO_PK as a placeholder for the actual PK which is available after the insert
image_path = FeatureServiceLayer.create_image_path(self.service.id, NO_PK, attribute_name)
img_base64_data = FeatureServiceLayer.process_image_data(
feature['attributes'][attribute_name], image_path, images_large, images_thumbs)
values.append(img_base64_data)
else:
values.append(feature['attributes'][attribute_name])
with connection.cursor() as c:
c.execute(insert_command, values)
primary_key = c.fetchone()[0]
c.execute(set_geom_command, [primary_key])
for key, value in images_large.items():
image_path = key.replace(NO_PK, str(primary_key))
FeatureServiceLayer.save_image(value, image_path, LARGE_IMAGE_NAME)
return primary_key
def update_feature(self, feature):
with connection.cursor() as c:
c.execute('SELECT * from {dataset_table_name} LIMIT 0'.format(
dataset_table_name=self.table
))
colnames_in_table = [desc[0].lower() for desc in c.description]
if PRIMARY_KEY_NAME not in feature['attributes']:
raise AttributeError('Cannot update feature without a primary key')
primary_key = feature['attributes'][PRIMARY_KEY_NAME]
date_fields = [field['name'] for field in self.fields if field['type'] == 'esriFieldTypeDate']
image_fields = [field['name'] for field in self.fields if field['type'] == 'esriFieldTypeBlob']
# Creating a dictionary where the key is the Amazon S3 path and the value is the Image for the field
images_large = {}
images_thumbs = {}
argument_updates = []
argument_values = []
for key in feature['attributes']:
if key == PRIMARY_KEY_NAME:
continue
if key not in colnames_in_table:
raise AttributeError('attributes do not match')
if key != GEOM_FIELD_NAME:
argument_updates.append('{0} = %s'.format(key))
if key in date_fields and feature['attributes'][key]:
if isinstance(feature['attributes'][key], str):
argument_values.append(feature['attributes'][key])
else:
argument_values.append(datetime.fromtimestamp(feature['attributes'][key] / 1000))
elif key in image_fields and feature['attributes'][key]:
image_path = FeatureServiceLayer.create_image_path(self.service.id, primary_key, key)
img_base64_data = FeatureServiceLayer.process_image_data(
feature['attributes'][key], image_path, images_large, images_thumbs
)
argument_values.append(img_base64_data)
else:
argument_values.append(feature['attributes'][key])
argument_values.append(feature['attributes'][PRIMARY_KEY_NAME])
update_command = 'UPDATE {table_name} SET {set_portion} WHERE {pk}=%s'.format(
table_name=self.table,
set_portion=','.join(argument_updates),
pk=PRIMARY_KEY_NAME
)
with connection.cursor() as c:
c.execute(update_command, argument_values)
if feature.get('geometry'):
transform_op = 'ST_Transform(ST_GeomFromEWKT(\'{geom}\'), {table_srid})'.format(
geom=wkt.from_esri_feature(feature['geometry'], self.geometry_type),
table_srid=self.srid
)
set_geom_command = 'UPDATE {table_name} SET {geom_column} = {transform_op} WHERE {pk}=%s'.format(
table_name=self.table,
geom_column=GEOM_FIELD_NAME,
transform_op=transform_op,
pk=PRIMARY_KEY_NAME
)
c.execute(set_geom_command, [primary_key])
# Save out large images
for key, value in images_large.items():
FeatureServiceLayer.save_image(value, key, LARGE_IMAGE_NAME)  # the dict key already holds the image path built with the real primary key
return primary_key
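# Worked example (hypothetical values, not part of the original code): for a feature whose
# attributes are {<PRIMARY_KEY_NAME>: 7, 'height': 12.5, 'notes': 'checked'}, the loop above
# builds argument_updates == ['height = %s', 'notes = %s'] and
# argument_values == [12.5, 'checked', 7], so update_command renders as
# 'UPDATE <self.table> SET height = %s,notes = %s WHERE <PRIMARY_KEY_NAME>=%s'
# before the database driver substitutes the parameters.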
def delete_feature(self, primary_key):
delete_command = 'DELETE FROM {table_name} WHERE {pk}=%s'.format(
table_name=self.table,
pk=PRIMARY_KEY_NAME
)
with connection.cursor() as c:
c.execute(delete_command, [primary_key])
image_fields = [field['name'] for field in self.fields if field['type'] == 'esriFieldTypeBlob']
# Delete image from S3 storage
for col_name in image_fields:
try:
s3_path = '{0}/{1}'.format(
FeatureServiceLayer.create_image_path(self.service.id, primary_key, col_name),
LARGE_IMAGE_NAME
)
if image_storage.exists(s3_path):
image_storage.delete(s3_path)
except Exception as e:
logger.exception(e)
return primary_key
@staticmethod
def create_image_path(service_id, row_id, field_name):
file_path = '{0}/{1}/{2}/{3}'.format(
FILE_STORE_DOMAIN_NAME,
service_id,
row_id,
field_name
)
return file_path
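# For example (hypothetical ids): create_image_path(12, 345, 'photo') returns
# '<FILE_STORE_DOMAIN_NAME>/12/345/photo'; save_image later appends the file name,
# giving '<FILE_STORE_DOMAIN_NAME>/12/345/photo/<LARGE_IMAGE_NAME>' as the storage key.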
@staticmethod
def process_image_data(data, image_path, images_large, images_thumbs):
# Create large image from base64 string in attributes
# Strip the 'data:image/jpeg;base64,' prefix so the remaining payload can be decoded into an image
list_lines = data.split(',', 1)
image_data = list_lines[1]
im = Image.open(BytesIO(base64.b64decode(image_data)))
# May want to resize image here in the future...
# im = ImageOps.fit(image, (800, 600), Image.ANTIALIAS)
# Save large image to temporary location
images_large[image_path] = im
# Create and save thumbnail image
thumb = ImageOps.fit(im, (64, 64), Image.ANTIALIAS)
images_thumbs[image_path] = thumb
# Convert thumbnail to base64 string and save in database field
buffer = BytesIO()
thumb.save(buffer, format="JPEG")
img_str = 'data:image/jpeg;base64,' + base64.b64encode(buffer.getvalue()).decode('utf-8')
return img_str
@staticmethod
def save_image(img, file_path, file_name):
try:
s3_path = '{0}/{1}'.format(file_path, file_name)
with image_storage.open(s3_path, 'wb') as fh:
buffer = BytesIO()
img.save(buffer, format="JPEG")
fh.write(buffer.getvalue())
img.close()
except Exception as e:
logger.exception(e)
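# Illustrative sketch (not part of the original module): the base64 round trip performed by
# process_image_data, wrapped in a throwaway helper so nothing runs at import time. Only
# Pillow and the standard library are assumed; the service, S3 storage, and database are not touched.
def _example_image_round_trip():
    import base64
    from io import BytesIO
    from PIL import Image, ImageOps

    # Build a synthetic JPEG and wrap it in a data URL, like a client would send
    source = Image.new('RGB', (320, 240), color=(200, 30, 30))
    buffer = BytesIO()
    source.save(buffer, format='JPEG')
    data_url = 'data:image/jpeg;base64,' + base64.b64encode(buffer.getvalue()).decode('utf-8')

    # Mirror process_image_data: strip the prefix, decode, thumbnail, re-encode
    payload = data_url.split(',', 1)[1]
    image = Image.open(BytesIO(base64.b64decode(payload)))
    thumb = ImageOps.fit(image, (64, 64), Image.LANCZOS)  # LANCZOS is the modern name for the ANTIALIAS alias
    out = BytesIO()
    thumb.save(out, format='JPEG')
    return 'data:image/jpeg;base64,' + base64.b64encode(out.getvalue()).decode('utf-8')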
class FeatureServiceLayerRelations(models.Model):
id = models.AutoField(auto_created=True, primary_key=True)
layer = models.ForeignKey(FeatureServiceLayer)
related_index = models.PositiveIntegerField(default=0)
related_title = models.CharField(max_length=255)
source_column = models.CharField(max_length=255)
target_column = models.CharField(max_length=255)
_fields = None
@property
def fields(self):
if self._fields is None:
fields = get_fields(self.table)
for field in fields:
field['qualified'] = '{0}.{1}'.format(self.related_title, field['name'])
if field['name'] == self.source_column:
field['relatesTo'] = self.target_column
self._fields = fields
return self._fields
@property
def table(self):
return '{table}_{index}'.format(table=self.layer.table, index=self.related_index)
class TemporaryFile(models.Model):
""" A temporary file upload """
uuid = models.CharField(max_length=36, default=uuid.uuid4)
date = models.DateTimeField(auto_now_add=True)
filename = models.CharField(max_length=100)
filesize = models.BigIntegerField()
file = models.FileField(upload_to=TEMPORARY_FILE_LOCATION, max_length=1024)
@property
def extension(self):
if '.' not in self.filename:
return ''
return self.filename[self.filename.rfind('.') + 1:]
def delete_data_table(sender, instance, **kwargs):
with connection.cursor() as c:
c.execute('DROP TABLE IF EXISTS {table_name}'.format(table_name=instance.table))
signals.pre_delete.connect(delete_data_table, sender=FeatureServiceLayer)
def determine_extent(table):
try:
query = 'SELECT ST_Expand(CAST(ST_Extent({field_name}) AS box2d), 1000) AS box2d FROM {table_name}'.format(
field_name=GEOM_FIELD_NAME,
table_name=table
)
with connection.cursor() as c:
c.execute(query)
extent_box = c.fetchone()[0]
if extent_box:
extent = Extent.from_sql_box(extent_box, SpatialReference({'wkid': WEB_MERCATOR_SRID}))
else:
extent = ADJUSTED_GLOBAL_EXTENT
except DatabaseError:
logger.exception('Error generating extent for table {0}, returning adjusted global extent'.format(table))
# Default to adjusted global extent if there is an error, similar to the one we present on the map page
extent = ADJUSTED_GLOBAL_EXTENT
return extent.as_dict()
def copy_data_table_for_import(dataset_id):
import_table_name = '{}{}{}'.format(TABLE_NAME_PREFIX, dataset_id, IMPORT_SUFFIX)
counter = 0
# Find the first unused sequence name for this import table
sequence_name = '{0}_{1}_seq'.format(import_table_name, counter)
while sequence_exists(sequence_name):
counter += 1
sequence_name = '{0}_{1}_seq'.format(import_table_name, counter)
drop_table_command = 'DROP TABLE IF EXISTS {table_name}'.format(
table_name=import_table_name
)
copy_table_command = 'CREATE TABLE {import_table} AS TABLE {data_table}'.format(
import_table=import_table_name,
data_table=TABLE_NAME_PREFIX + dataset_id
)
# Creating a table based on another table does not pull over primary key information or indexes, nor does
# it create a sequence for the primary key. Need to do that by hand.
create_sequence_command = 'CREATE SEQUENCE {sequence_name}'.format(sequence_name=sequence_name)
alter_table_command = (
'ALTER TABLE {import_table} ADD PRIMARY KEY ({pk}), '
'ALTER COLUMN {pk} SET DEFAULT nextval(\'{sequence_name}\')'
).format(
import_table=import_table_name,
pk=PRIMARY_KEY_NAME,
sequence_name=sequence_name
)
alter_sequence_command = 'ALTER SEQUENCE {sequence_name} OWNED BY {import_table}.{pk}'.format(
import_table=import_table_name,
pk=PRIMARY_KEY_NAME,
sequence_name=sequence_name
)
alter_sequence_start_command = (
'SELECT setval(\'{sequence_name}\', (SELECT max({pk})+1 FROM {import_table}), false)'
).format(
import_table=import_table_name,
pk=PRIMARY_KEY_NAME,
sequence_name=sequence_name
)
with connection.cursor() as c:
c.execute(drop_table_command)
c.execute(copy_table_command)
c.execute(create_sequence_command)
c.execute(alter_table_command)
c.execute(alter_sequence_command)
c.execute(alter_sequence_start_command)
return import_table_name
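# Worked example (hypothetical names): for dataset_id 'abc' with no existing import sequence,
# the statements above expand to roughly:
#   DROP TABLE IF EXISTS <prefix>abc<suffix>
#   CREATE TABLE <prefix>abc<suffix> AS TABLE <prefix>abc
#   CREATE SEQUENCE <prefix>abc<suffix>_0_seq
#   ALTER TABLE <prefix>abc<suffix> ADD PRIMARY KEY (<pk>),
#       ALTER COLUMN <pk> SET DEFAULT nextval('<prefix>abc<suffix>_0_seq')
#   ALTER SEQUENCE <prefix>abc<suffix>_0_seq OWNED BY <prefix>abc<suffix>.<pk>
#   SELECT setval('<prefix>abc<suffix>_0_seq', (SELECT max(<pk>)+1 FROM <prefix>abc<suffix>), false)
# where <prefix>, <suffix> and <pk> stand for TABLE_NAME_PREFIX, IMPORT_SUFFIX and PRIMARY_KEY_NAME.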
def sequence_exists(sequence_name):
with connection.cursor() as c:
c.execute('SELECT 1 FROM pg_class WHERE relname=%s', (sequence_name,))
return bool(c.fetchone())
class Column(object):
""" Helper for table creation """
def __init__(self, **entries):
self.__dict__.update(entries)
def create_aggregate_database_table(row, dataset_id):
columns = [convert_header_to_column_name(SOURCE_DATASET_FIELD_NAME)]
data_types = ['String']
optional_fields = []
type_conversion = {
'decimal': 'Decimal',
'date': 'Date',
'integer': 'Integer',
'string': 'String',
'xLocation': 'Decimal',
'yLocation': 'Decimal',
'dropdownedit': 'String',
'dropdown': 'String',
}
for col in row:
col_name = convert_header_to_column_name(col.column)
columns.append(col_name)
data_types.append(type_conversion[col.type])
if hasattr(col, 'required') and not col.required:
optional_fields.append(col_name)
df = | pd.DataFrame(columns=columns) | pandas.DataFrame |
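# Worked example (hypothetical row, not part of the original function): for columns described as
#   [Column(column='Tree Height', type='decimal', required=True),
#    Column(column='Notes', type='string', required=False)]
# and assuming convert_header_to_column_name slugifies headers to 'tree_height' / 'notes',
# the loop yields columns == [<source dataset field>, 'tree_height', 'notes'],
# data_types == ['String', 'Decimal', 'String'], optional_fields == ['notes'],
# and df = pd.DataFrame(columns=columns) is the empty frame used to stage the aggregate table.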
import logging
from typing import List
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import SelectKBest, chi2
from classes.Dataset import Dataset
class FeaturesSelector:
def __init__(self, dataset: Dataset):
self.__features = dataset.get_features()
self.__labels = dataset.get_labels()
self.__best_features_ids = []
def get_features(self) -> pd.DataFrame:
return self.__features
def get_best_features_ids(self) -> List[str]:
return self.__best_features_ids
def univariate_selection(self, num_features: int):
"""
Apply sklearn's SelectKBest to extract the num_features highest-scoring features
:param num_features: the number of top features to be extracted
"""
logging.info("Performing univariate selection...")
# Perform univariate selection
best_features = SelectKBest(score_func=chi2, k=num_features)
fit = best_features.fit(self.__features, self.__labels)
scores = pd.DataFrame(fit.scores_)
columns = pd.DataFrame(self.__features.columns)
# Concat two dataframes for better visualization
feature_scores = | pd.concat([columns, scores], axis=1) | pandas.concat |
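# Hedged usage sketch (standalone, not part of the class above): SelectKBest with chi2 on a
# tiny synthetic frame, mirroring the univariate_selection step. chi2 requires non-negative
# feature values; the data below is invented for illustration.
import pandas as pd
from sklearn.feature_selection import SelectKBest, chi2

X = pd.DataFrame({'f1': [0, 1, 2, 3], 'f2': [5, 5, 5, 5], 'f3': [0, 0, 3, 4]})
y = [0, 0, 1, 1]
selector = SelectKBest(score_func=chi2, k=2).fit(X, y)
scores = pd.DataFrame(selector.scores_)
columns = pd.DataFrame(X.columns)
feature_scores = pd.concat([columns, scores], axis=1)  # same concat as in the class above
print(feature_scores)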
# -*- coding: utf-8 -*-
"""Human Activity Recognition dataset example.
http://groupware.les.inf.puc-rio.br/har
<NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>. Wearable Computing: Accelerometers' Data Classification of Body Postures and Movements.
Proceedings of 21st Brazilian Symposium on Artificial Intelligence. Advances in Artificial Intelligence - SBIA 2012. In: Lecture Notes in Computer Science. ,
pp. 52-61. Curitiba, PR: Springer Berlin / Heidelberg, 2012. ISBN 978-3-642-34458-9. DOI: 10.1007/978-3-642-34459-6_6.
Read more: http://groupware.les.inf.puc-rio.br/har#ixzz6bPytcguP
Neural network model definition
Example:
>>> ./dq0 project create --name demo # doctest: +SKIP
>>> cd demo # doctest: +SKIP
>>> copy user_model.py to demo/model/ # doctest: +SKIP
>>> ../dq0 data list # doctest: +SKIP
>>> ../dq0 model attach --id <dataset id> # doctest: +SKIP
>>> ../dq0 project deploy # doctest: +SKIP
>>> ../dq0 model train # doctest: +SKIP
>>> ../dq0 model state # doctest: +SKIP
>>> ../dq0 model predict --input-path </path/to/numpy.npy> # doctest: +SKIP
>>> ../dq0 model state # doctest: +SKIP
Copyright 2020, Gradient Zero
All rights reserved
"""
import logging
from dq0.sdk.errors.errors import fatal_error
from dq0.sdk.models.tf import NeuralNetworkClassification
logger = logging.getLogger()
class UserModel(NeuralNetworkClassification):
"""Derived from dq0.sdk.models.tf.NeuralNetwork class
Model classes provide a setup method for data and model
definitions.
"""
def __init__(self):
super().__init__()
def setup_data(self, **kwargs):
"""Setup data function
This function can be used to prepare data or perform
other tasks for the training run.
At runtime the selected dataset is attached to this model. It
is available as the `data_source` attribute.
For local testing call `model.attach_data_source(some_data_source)`
manually before calling `setup_data()`.
Use `self.data_source.read()` to read the attached data.
"""
from sklearn.model_selection import train_test_split
# read and preprocess the data
dataset_df = self.preprocess()
# do the train test split
X_train_df, X_test_df, y_train_ts, y_test_ts =\
train_test_split(dataset_df.iloc[:, :-1],
dataset_df.iloc[:, -1],
test_size=500,
train_size=500,
stratify=dataset_df.iloc[:, -1])
# set data attributes
self.X_train = X_train_df
self.X_test = X_test_df
self.y_train = y_train_ts
self.y_test = y_test_ts
self.input_dim = self.X_train.shape[1]
self.output_dim = len(self.y_train.unique())
def preprocess(self):
"""Preprocess the data
Preprocess the data set. The input data is read from the attached source.
At runtime the selected dataset is attached to this model. It
is available as the `data_source` attribute.
For local testing call `model.attach_data_source(some_data_source)`
manually before calling `setup_data()`.
Use `self.data_source.read()` to read the attached data.
Returns:
preprocessed data
"""
import sklearn.preprocessing
import pandas as pd
# get the input dataset
if self.data_source is None:
fatal_error('No data source found', logger=logger)
# read the data via the attached input data source
dataset = self.data_source.read(
sep=';',
decimal=',',
)
X = dataset.drop('class', axis=1).copy()
y = dataset['class']
dtypes = X.dtypes
cat_cols = dtypes.loc[dtypes == 'O'].index  # object-typed (categorical) columns
_ = dtypes.loc[dtypes != 'O'].index  # numeric columns (unused; standardization is commented out below)
# Convert cat to dummies
for col in cat_cols:
X = X.join(pd.get_dummies(X[col], drop_first=True))
X.drop(col, axis=1, inplace=True)
# Standardize numeric
# X[num_cols] = sklearn.preprocessing.StandardScaler().fit_transform(X[num_cols])
# LE class
y = sklearn.preprocessing.LabelEncoder().fit_transform(y)
dataset = pd.concat([X, | pd.Series(y) | pandas.Series |
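# Hedged sketch (standalone, not part of UserModel): the categorical-to-dummies and
# label-encoding steps from preprocess() above, applied to a tiny synthetic frame so the
# transformation is visible without the attached data source.
import pandas as pd
import sklearn.preprocessing

X = pd.DataFrame({'age': [23, 35, 41], 'gender': ['Man', 'Woman', 'Man']})
y = pd.Series(['sitting', 'standing', 'sitting'], name='class')

dtypes = X.dtypes
cat_cols = dtypes.loc[dtypes == 'O'].index
for col in cat_cols:
    X = X.join(pd.get_dummies(X[col], drop_first=True))  # one dummy column per extra category
    X.drop(col, axis=1, inplace=True)

y_encoded = sklearn.preprocessing.LabelEncoder().fit_transform(y)
dataset = pd.concat([X, pd.Series(y_encoded)], axis=1)  # same concat as in preprocess()
print(dataset)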