| prompt | completion | api |
| --- | --- | --- |
| string, lengths 19 to 1.03M | string, lengths 4 to 2.12k | string, lengths 8 to 90 |
# Copyright (c) 2018, deepakn94. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import pandas as pd
RatingData = namedtuple('RatingData',
['items', 'users', 'ratings', 'min_date', 'max_date'])
def describe_ratings(ratings):
info = RatingData(items=len(ratings['item_id'].unique()),
users=len(ratings['user_id'].unique()),
ratings=len(ratings),
min_date=ratings['timestamp'].min(),
max_date=ratings['timestamp'].max())
print("{ratings} ratings on {items} items from {users} users"
" from {min_date} to {max_date}"
.format(**(info._asdict())))
return info
def process_movielens(ratings, sort=True):
ratings['timestamp'] = | pd.to_datetime(ratings['timestamp'], unit='s') | pandas.to_datetime |
import numpy as np
import pandas as pd
import datetime
import chinese_calendar
from sklearn.preprocessing import OrdinalEncoder
class offsets_pool:
neighbor = [-1, 1]
second = [-1, 1, -60 * 4, -60 * 3, -60 * 2, -60 * 1, 60 * 1, 60 * 2, 60 * 3, 60 * 4]
minute = [-1, 1, -60 * 4, -60 * 3, -60 * 2, -60 * 1, 60 * 1, 60 * 2, 60 * 3, 60 * 4]
hour = [-1, 1, -24 * 4, -24 * 3, -24 * 2, -24 * 1, 24 * 1, 24 * 2, 24 * 3, 24 * 4,
-168 * 4, -168 * 3, -168 * 2, -168 * 1, 168 * 1, 168 * 2, 168 * 3, 168 * 4]
day = [-1, 1, -30 * 4, -30 * 3, -30 * 2, -30 * 1, 30 * 1, 30 * 2, 30 * 3, 30 * 4]
month = [-1, 1, -12 * 4, -12 * 3, -12 * 2, -12 * 1, 12 * 1, 12 * 2, 12 * 3, 12 * 4]
year = [-1, 1]
def reduce_memory_usage(df: pd.DataFrame, verbose=True):
'''Reduce RAM usage by downcasting numeric columns to the smallest dtype that fits.
'''
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage().sum() / 1024 ** 2
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024 ** 2
if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (
start_mem - end_mem) / start_mem))
return df
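# Illustrative usage sketch (added for clarity; the toy frame below is assumed, not part of the pipeline).
def _example_reduce_memory_usage():
    demo = pd.DataFrame({'a': np.arange(100, dtype=np.int64),
                         'b': np.random.rand(100)})
    demo = reduce_memory_usage(demo, verbose=False)
    return demo.dtypes  # 'a' is downcast to int8 and 'b' to float16 under the rules above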
def infer_ts_freq(df: pd.DataFrame, ts_name: str = 'TimeStamp'):
dateindex = pd.DatetimeIndex(pd.to_datetime(df[ts_name]))
for i in range(len(df) - 2):
    freq = pd.infer_freq(dateindex[i:i + 3])
    if freq is not None:
        return freq
def _impute(values, offsets):
indices0, indices1 = np.where(np.isnan(values))
padding = []
for offset in offsets:
offset_indices0 = indices0 + offset
start_bound_limit = np.where(indices0 + offset < 0)
end_bound_limit = np.where(indices0 + offset > len(values) - 1)
offset_indices0[start_bound_limit] = indices0[start_bound_limit]
offset_indices0[end_bound_limit] = indices0[end_bound_limit]
padding.append(values[(offset_indices0, indices1)])
values[(indices0, indices1)] = np.nanmean(padding, axis=0)
missing_rate = np.sum(np.isnan(values)) / values.size
return values, missing_rate
def multi_period_loop_imputer(df: pd.DataFrame, offsets: list, freq: str, max_loops: int = 10):
"""Multiple Period Loop Impute NAN.
Args:
offsets: list
freq: str
'S' - second
'T' - minute
'H' - hour
'D' - day
'M' - month
'Y', 'A', 'A-DEC' - year
"""
if offsets is None and freq == 'S':
    offsets = offsets_pool.second
elif offsets is None and freq == 'T':
    offsets = offsets_pool.minute
elif offsets is None and freq == 'H':
    offsets = offsets_pool.hour
elif offsets is None and freq == 'D':
    offsets = offsets_pool.day
elif offsets is None and freq == 'M':
    offsets = offsets_pool.month
elif offsets is None and freq == 'Y':
    offsets = offsets_pool.year
elif offsets is None:
    offsets = offsets_pool.neighbor
values = df.values.copy()
loop, missing_rate = 0, 1
while loop < max_loops and missing_rate > 0:
values, missing_rate = _impute(values, offsets)
loop += 1
values[np.where(np.isnan(values))] = np.nanmean(values)
fill_df = pd.DataFrame(values, columns=df.columns)
return fill_df
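# Illustrative usage sketch (assumed toy series; passing offsets=None lets freq select a pool above).
def _example_multi_period_loop_imputer():
    rng = pd.date_range('2021-01-01', periods=24 * 7, freq='H')
    demo = pd.DataFrame({'y': np.sin(np.arange(len(rng)))}, index=rng)
    demo.iloc[5:8] = np.nan  # knock out a few hourly readings
    filled = multi_period_loop_imputer(demo, offsets=None, freq='H')
    return filled['y'].isna().sum()  # 0: gaps filled from neighbouring hours/days/weeks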
def forward_period_imputer(df: pd.DataFrame, offset: int):
fill_df = df.fillna(df.rolling(window=offset, min_periods=1).agg(lambda x: x.iloc[0]))
return fill_df
def simple_numerical_imputer(df: pd.DataFrame, mode='mean'):
"""Fill NaN with mean, mode, 0."""
if mode == 'mean':
df = df.fillna(df.mean().fillna(0).to_dict())
elif mode == 'mode':
df = df.fillna(df.mode().iloc[0].fillna(0).to_dict())
else:
df = df.fillna(0)
return df
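# Illustrative usage sketch (assumed toy frame): fill NaN with the per-column mean, mode, or zero.
def _example_simple_numerical_imputer():
    demo = pd.DataFrame({'a': [1.0, np.nan, 3.0], 'b': [np.nan, 2.0, 2.0]})
    return simple_numerical_imputer(demo, mode='mean')  # NaN in 'a' -> 2.0, NaN in 'b' -> 2.0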
def columns_ordinal_encoder(df: pd.DataFrame):
enc = OrdinalEncoder(dtype=np.int64)
encoder_df = enc.fit_transform(df)
return encoder_df
def drop_duplicated_ts_rows(df: pd.DataFrame, ts_name: str = 'TimeStamp', keep_data: str = 'last'):
"""Returns without duplicate time series, the last be keeped by default.
Example:
TimeStamp y
2021-03-01 3.4
2021-03-02 5.2
2021-03-03 9.3
2021-03-03 9.5
2021-03-04 6.7
2021-03-05 2.3
>>
TimeStamp y
2021-03-01 3.4
2021-03-02 5.2
2021-03-03 9.5
2021-03-04 6.7
2021-03-05 2.3
"""
assert isinstance(df, pd.DataFrame)
drop_df = df.drop_duplicates(subset=[ts_name], keep=keep_data)
return drop_df
def smooth_missed_ts_rows(df: pd.DataFrame, freq: str = None, ts_name: str = 'TimeStamp'):
"""Returns full time series.
Example:
TimeStamp y
2021-03-01 3.4
2021-03-02 5.2
2021-03-04 6.7
2021-03-05 2.3
>>
TimeStamp y
2021-03-01 3.4
2021-03-02 5.2
2021-03-03 NaN
2021-03-04 6.7
2021-03-05 2.3
"""
assert isinstance(df, pd.DataFrame)
if freq is None:
freq = infer_ts_freq(df, ts_name)
if df[ts_name].dtypes == object:
df[ts_name] = pd.to_datetime(df[ts_name])
df = df.sort_values(by=ts_name)
start, end = df[ts_name].iloc[0], df[ts_name].iloc[-1]
full_ts = pd.DataFrame(pd.date_range(start=start, end=end, freq=freq), columns=[ts_name])
smooth_df = full_ts.join(df.set_index(ts_name), on=ts_name)
return smooth_df
def clip_to_outliers(df: pd.DataFrame, std_threshold: int = 3):
"""Replace outliers above threshold with that threshold.
Args:
df (pandas.DataFrame): DataFrame containing numeric data
std_threshold (float): The number of standard deviations away from mean to count as outlier.
"""
assert isinstance(df, pd.DataFrame)
df_std = df.std(axis=0, skipna=True)
df_mean = df.mean(axis=0, skipna=True)
lower = df_mean - (df_std * std_threshold)
upper = df_mean + (df_std * std_threshold)
df_outlier = df.clip(lower=lower, upper=upper, axis=1)
return df_outlier
def nan_to_outliers(df: pd.DataFrame, std_threshold: int = 3):
"""Replace outliers above threshold with that threshold.
Args:
df (pandas.DataFrame): DataFrame containing numeric data
std_threshold (float): The number of standard deviations away from mean to count as outlier.
"""
assert isinstance(df, pd.DataFrame)
df_outlier = df.copy()
df_std = df.std(axis=0, skipna=True)
df_mean = df.mean(axis=0, skipna=True)
outlier_indices = np.abs(df - df_mean) > df_std * std_threshold
df_outlier = df_outlier.mask(outlier_indices, other=np.nan)
return df_outlier
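# Illustrative end-to-end sketch (column names assumed): chain the helpers above in a typical cleanup order.
def _example_ts_cleanup(raw_df: pd.DataFrame, ts_name: str = 'TimeStamp'):
    df = drop_duplicated_ts_rows(raw_df, ts_name=ts_name)    # one row per timestamp
    df = smooth_missed_ts_rows(df, ts_name=ts_name)          # re-insert missing timestamps as NaN rows
    freq = infer_ts_freq(df, ts_name)                        # e.g. 'H' or 'D'
    values = nan_to_outliers(df.drop(columns=[ts_name]))     # turn extreme points into NaN
    filled = multi_period_loop_imputer(values, offsets=None, freq=freq)
    filled[ts_name] = df[ts_name].values
    return filled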
def get_holidays(year=None, include_weekends=True):
"""
:param year: which year
:param include_weekends: False for excluding Saturdays and Sundays
:return: list
"""
if not year:
year = datetime.datetime.now().year
else:
year = year
start = datetime.date(year, 1, 1)
end = datetime.date(year, 12, 31)
holidays = chinese_calendar.get_holidays(start, end, include_weekends)
holidays = pd.DataFrame(holidays, columns=['Date'])
holidays['Date'] = holidays['Date'].apply(lambda x: x.strftime('%Y-%m-%d'))
return holidays
def generate_ts_covariables(start_date, periods, freq='H'):
dstime = pd.date_range(start_date, periods=periods, freq=freq)
fds = pd.DataFrame(dstime, columns=['TimeStamp'])
fds['Hour'] = fds['TimeStamp'].dt.hour
fds['WeekDay'] = fds['TimeStamp'].dt.weekday
period_dict = {
23: 0, 0: 0, 1: 0,
2: 1, 3: 1, 4: 1,
5: 2, 6: 2, 7: 2,
8: 3, 9: 3, 10: 3, 11: 3,
12: 4, 13: 4,
14: 5, 15: 5, 16: 5, 17: 5,
18: 6,
19: 7, 20: 7, 21: 7, 22: 7,
}
fds['TimeSegment'] = fds['Hour'].map(period_dict)
fds['MonthStart'] = fds['TimeStamp'].apply(lambda x: x.is_month_start * 1)
fds['MonthEnd'] = fds['TimeStamp'].apply(lambda x: x.is_month_end * 1)
fds['SeasonStart'] = fds['TimeStamp'].apply(lambda x: x.is_quarter_start * 1)
fds['SeasonEnd'] = fds['TimeStamp'].apply(lambda x: x.is_quarter_end * 1)
fds['Weekend'] = fds['TimeStamp'].apply(lambda x: 1 if x.dayofweek in [5, 6] else 0)
public_holiday_list = get_holidays(year=int(start_date[:4]))
public_holiday_list = public_holiday_list['Date'].to_list()
fds['Date'] = fds['TimeStamp'].apply(lambda x: x.strftime('%Y-%m-%d'))
fds['Holiday'] = fds['Date'].apply(lambda x: 1 if x in public_holiday_list else 0)
fds.drop(['Date'], axis=1, inplace=True)
return fds
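# Illustrative usage sketch (assumed horizon): one week of hourly calendar covariates starting 2021-01-01.
def _example_generate_ts_covariables():
    fds = generate_ts_covariables('2021-01-01', periods=24 * 7, freq='H')
    return fds[['TimeStamp', 'Hour', 'WeekDay', 'Weekend', 'Holiday']].head()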
def infer_forecast_interval(train, forecast, n: int = 5, prediction_interval: float = 0.9):
"""A corruption of Bayes theorem.
It will be sensitive to the transformations of the data."""
prior_mu = train.mean()
prior_sigma = train.std()
from scipy.stats import norm
p_int = 1 - ((1 - prediction_interval) / 2)
adj = norm.ppf(p_int)
upper_forecast, lower_forecast = pd.DataFrame(), pd.DataFrame()
for index, row in forecast.iterrows():
data_mu = row
post_mu = ((prior_mu / prior_sigma ** 2) + ((n * data_mu) / prior_sigma ** 2)
) / ((1 / prior_sigma ** 2) + (n / prior_sigma ** 2))
lower = pd.DataFrame(post_mu - adj * prior_sigma).transpose()
lower = lower.where(lower <= data_mu, data_mu, axis=1)
upper = pd.DataFrame(post_mu + adj * prior_sigma).transpose()
upper = upper.where(upper >= data_mu, data_mu, axis=1)
lower_forecast = | pd.concat([lower_forecast, lower], axis=0) | pandas.concat |
##
import pandas, os
##
path = {
'train':{
'csv':{
'label':"../DATA/BMSMT/TRAIN/CSV/LABEL.csv"
}
},
'test':{
'csv':{
'label':"../DATA/BMSMT/TEST/CSV/LABEL.csv"
}
}
}
##
table = {
'train':{
"label": | pandas.read_csv(path['train']['csv']['label']) | pandas.read_csv |
import os
import time
import psycopg2
import base64
import random
import pandas as pd
from sqlalchemy import create_engine
from google.cloud import secretmanager
from google.cloud import pubsub_v1
from google.cloud import storage
from google.cloud import tasks_v2
import json
from datetime import datetime
import gcsfs
PROJECT_ID = '<PROJECT_ID>'
HEADERS = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Credentials': True,
"Access-Control-Allow-Methods": "GET, POST, PUT, DELETE, PATCH, OPTIONS",
"Access-Control-Allow-Headers": "X-Requested-With, content-type, Authorization"
}
fs = gcsfs.GCSFileSystem()
fs.invalidate_cache()
def generate(random_chars: int = 10, alphabet: str = "0123456789abcdef") -> str:
r = random.SystemRandom()
return ''.join([r.choice(alphabet) for _ in range(random_chars)])
def cors_handler(request):
if request.method == 'OPTIONS':
headers = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': '*',
'Access-Control-Allow-Headers': 'X-Requested-With, content-type, Authorization',
'Access-Control-Max-Age': '3600',
'Access-Control-Allow-Credentials': 'true'
}
return ('', 204, headers)
headers = {
'Access-Control-Allow-Origin': '*'
}
return ('Hello World!', 200, headers)
def get_secret(project_id: str, secret_id: str):
client = secretmanager.SecretManagerServiceClient()
name = client.secret_version_path(project_id, secret_id, 'latest')
response = client.access_secret_version(request={"name": name})
payload = response.payload.data.decode("UTF-8")
return payload
def get_conn(secret):
conn = psycopg2.connect(
dbname='<DB_NAME>',
user='<DB_USER>',
password=secret,
port=5432,
host='<DB_IP>'
)
conn.autocommit = True
return conn
secret = get_secret(PROJECT_ID, 'db-password')
def test_connection(request):
conn = get_conn(secret)
headers = {**HEADERS, **{
"Content-Type": "text/html"
}}
if conn:
return ("<html><body><p>Connection Successful</p></body></html>", 200, headers)
else:
return ("<html><body><p>Could not establish connection</p></body></html>", 500, headers)
def handle_error(error):
print(error)
headers = {**HEADERS, **{
"Content-Type": "application/json"
}}
res = {"response": str(error)}
return (res, 500, headers)
def hello(request):
# html endpoint
try:
time = datetime.now()
time_str = str(time)
headers = {**HEADERS, **{
"Content-Type": "text/html"
}}
return (
"<html><body><p>Hello World! It is now {0}.</p></br><p>Request:{1}</p></body></html>".format(time_str, request),
200, headers)
except Exception as e:
return handle_error(e)
def get_all_table_data(request):
conn = get_conn(secret)
# request should be an object and include a `table_name` field
request_json = request.get_json(silent=True)
request_args = request.args
table_name = None
if request_json and 'table_name' in request_json:
table_name = request_json['table_name']
elif request_args and 'table_name' in request_args:
table_name = request_args['table_name']
cur = conn.cursor()
query = f'SELECT * FROM {table_name}'
try:
cur.execute(query)
colnames = [desc[0] for desc in cur.description]
results = cur.fetchall()
df = pd.DataFrame(results, columns=colnames)
json_data = df.to_json(orient='records')
headers = {**HEADERS, **{
"Content-Type": "application/json"
}
}
return (f"{json_data}", 200, headers)
except Exception as e:
return handle_error(e)
def generic_select_query(request):
conn = get_conn(secret)
# request should be an object and include a `table_name` field and a `columns` field which should be a list of column names
request_json = request.get_json(silent=True)
request_args = request.args
table_name, columns = None, None
if request_json and 'table_name' in request_json and 'columns' in request_json:
table_name, columns = request_json['table_name'], request_json['columns']
elif request_args and 'table_name' in request_args and 'columns' in request_args:
table_name, columns = request_args['table_name'], request_args['columns']
columns = ', '.join([str(_) for _ in columns])
query = f'SELECT {columns} FROM {table_name};'
cur = conn.cursor()
try:
cur.execute(query)
results = cur.fetchall()
colnames = [desc[0] for desc in cur.description]
df = pd.DataFrame(results, columns=colnames)
json_data = df.to_json(orient='records')
headers = {**HEADERS, **{
"Content-Type": "application/json"
}
}
return (f"{json_data}", 200, headers)
except Exception as e:
return handle_error(e)
def generic_select_where_query(request):
conn = get_conn(secret)
# request should be an object and include a `table_name` field and a `project_id` field
request_json = request.get_json(silent=True)
request_args = request.args
table_name, project_id, columns = None, None, None
if request_json and 'table_name' in request_json and 'project_id' in request_json:
    table_name, project_id = request_json['table_name'], request_json['project_id']
    columns = request_json.get('columns')
elif request_args and 'table_name' in request_args and 'project_id' in request_args:
    table_name, project_id = request_args['table_name'], request_args['project_id']
    columns = request_args.get('columns')
if columns:
columns = ', '.join([str(_) for _ in columns])
query = f"SELECT {columns} FROM {table_name} WHERE project_id='{project_id}';"
else:
query = f"SELECT * FROM {table_name} WHERE project_id='{project_id}';"
cur = conn.cursor()
try:
cur.execute(query)
results = cur.fetchall()
colnames = [desc[0] for desc in cur.description]
df = pd.DataFrame(results, columns=colnames)
json_data = df.to_json(orient='records')
headers = {**HEADERS, **{
"Content-Type": "application/json"
}
}
return (f"{json_data}", 200, headers)
except Exception as e:
return handle_error(e)
def query(request):
# request should include a `query_string` field which is a string of a valid SQL query
request_json = request.get_json(silent=True)
request_args = request.args
if request_json and 'query_string' in request_json:
query_string = request_json['query_string']
elif request_args and 'query_string' in request_args:
query_string = request_args['query_string']
conn = get_conn(secret)
cur = conn.cursor()
try:
cur.execute(query_string)
results = cur.fetchall()
colnames = [desc[0] for desc in cur.description]
df = pd.DataFrame(results, columns=colnames)
json_data = df.to_json(orient='records')
headers = {**HEADERS, **{
"Content-Type": "application/json"
}
}
return (f"{json_data}", 200, headers)
except Exception as e:
return handle_error(e)
def list_tables(request):
conn = get_conn(secret)
cur = conn.cursor()
try:
cur.execute(
"SELECT * FROM pg_catalog.pg_tables WHERE schemaname != 'pg_catalog' AND schemaname != 'information_schema';")
results = cur.fetchall()
colnames = [desc[0] for desc in cur.description]
drop_cols = list(set(colnames) - set(['tablename']))
df = pd.DataFrame(results, columns=colnames)
df.drop(drop_cols, axis=1, inplace=True)
res = df.values.tolist()
res = [_[0] for _ in res]
json_data = {"tables": res}
json_data = json.dumps(json_data)
headers = {**HEADERS, **{
"Content-Type": "application/json"
}
}
return (f"{json_data}", 200, headers)
except Exception as e:
return handle_error(e)
def get_table_schema(request):
request_json = request.get_json(silent=True)
request_args = request.args
table_name = None
if request_json and 'table_name' in request_json:
table_name = request_json['table_name']
elif request_args and 'table_name' in request_args:
table_name = request_args['table_name']
try:
conn = get_conn(secret)
cur = conn.cursor()
cur.execute(f"SELECT column_name FROM information_schema.columns WHERE TABLE_NAME = '{table_name}';")
results = cur.fetchall()
results = [_[0] for _ in results]
json_data = {"columns": results}
json_data = json.dumps(json_data)
headers = {**HEADERS, **{
"Content-Type": "application/json"
}
}
return (f"{json_data}", 200, headers)
except Exception as e:
return handle_error(e)
def get_projects(request):
conn = get_conn(secret)
query = f"SELECT * FROM projects;"
cur = conn.cursor()
try:
cur.execute(query)
results = cur.fetchall()
colnames = [desc[0] for desc in cur.description]
df = pd.DataFrame(results, columns=colnames)
json_data = df.to_json(orient='records')
headers = {**HEADERS, **{
"Content-Type": "application/json"
}
}
return (f"{json_data}", 200, headers)
except Exception as e:
return handle_error(e)
def list_gcloud_files(request) -> list:
request_json = request.get_json(silent=True)
request_args = request.args
bucket_id = None
if request_json and 'bucket_id' in request_json:
bucket_id = request_json['bucket_id']
elif request_args and 'bucket_id' in request_args:
bucket_id = request_args['bucket_id']
client = storage.Client()
bucket = client.get_bucket(bucket_id)
all_blobs = list(client.list_blobs(bucket))
return all_blobs
def download_gcloud_file(bucket_id: str, directory: str, filename: str):
client = storage.Client()
bucket = client.get_bucket(bucket_id)
blob = storage.Blob(directory + filename, bucket)
with open(filename, 'wb') as f:
blob.download_to_file(f)
def read_gcloud_data(bucket_id: str, file_name: str) -> pd.DataFrame:
gcloud_path = f"gs://{bucket_id}/{file_name}"
if '.csv' in file_name:
df = | pd.read_csv(gcloud_path) | pandas.read_csv |
import fact.io
import os
import pytest
from irf import gadf
import astropy.units as u
import pandas as pd
import numpy as np
FIXTURE_DIR = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'test_files',
)
@pytest.fixture
def events():
return fact.io.read_data(
os.path.join(FIXTURE_DIR, 'crab_dl3_small.hdf5'), key='events')
@pytest.fixture
def predictions():
return fact.io.read_data(
os.path.join(FIXTURE_DIR, 'gamma_predictions_dl2.hdf5'), key='events')
@pytest.fixture
def showers():
return fact.io.read_data(
os.path.join(FIXTURE_DIR, 'showers.hdf5'), key='showers')
def test_timestamp_conversion_from_events(events):
timestamp = gadf.time.timestamp_to_mjdref(events.timestamp)
assert timestamp.unit == u.s
def test_timestamp_conversion_from_pandas(events):
ts = | pd.Series(['01-01-2013', '01-02-2013'], name='foo') | pandas.Series |
"""Locator functions to interact with geographic data"""
import numpy as np
import pandas as pd
import flood_tool.geo as geo
__all__ = ['Tool']
def clean_postcodes(postcodes):
"""
Takes list or array of postcodes, and returns it in a cleaned numpy array
"""
postcode_df = pd.DataFrame({'Postcode':postcodes})
postcode_df['Postcode'] = postcode_df['Postcode'].str.upper()
# If length is not 7 get rid of spaces. This fixes e.g. "SW19 2AZ" -> "SW192AZ"
postcode_df['Postcode'] = postcode_df['Postcode'].where(
postcode_df['Postcode'].str.len() == 7, postcode_df['Postcode'].str.replace(" ", ""))
# If length is 5 (e.g. "W67HZ") add two spaces in the middle (-> "W6 7HZ")
postcode_df['Postcode'] = postcode_df['Postcode'].where(
postcode_df['Postcode'].str.len() != 5,
postcode_df['Postcode'].str[:2]+ " " + postcode_df['Postcode'].str[2:])
# If length is 6 (e.g. "SW72AZ") add a space in the middle and end(-> "SW7 2AZ")
postcode_df['Postcode'] = postcode_df['Postcode'].where(
postcode_df['Postcode'].str.len() != 6,
postcode_df['Postcode'].str[:3]+ " " + postcode_df['Postcode'].str[3:])
return postcode_df['Postcode'].to_numpy()
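# Illustrative sketch of the normalization above (inputs assumed): each value maps to the canonical form.
def _example_clean_postcodes():
    return clean_postcodes(['SW19 2AZ', 'W67HZ', 'SW72AZ', 'sw7 2az'])
    # -> array(['SW192AZ', 'W6 7HZ', 'SW7 2AZ', 'SW7 2AZ'], dtype=object)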
class Tool(object):
"""Class to interact with a postcode database file."""
def __init__(self, postcode_file=None, risk_file=None, values_file=None):
"""
Reads postcode and flood risk files and provides a postcode locator service.
Parameters
---------
postcode_file : str, optional
Filename of a .csv file containing geographic location data for postcodes.
risk_file : str, optional
Filename of a .csv file containing flood risk data.
values_file : str, optional
Filename of a .csv file containing property value data for postcodes.
"""
self.postcode_file = postcode_file
self.risk_file = risk_file
self.values_file = values_file
self.postcode_df = pd.read_csv(self.postcode_file)
# Make data frame of values & clean the postcodes in them.
self.values_df = pd.read_csv(self.values_file)
postcode_arr = self.values_df['Postcode'].to_numpy()
postcode_arr = clean_postcodes(postcode_arr)
self.values_df['Postcode'] = postcode_arr
# Make data frame of risks, add columns to be used in get...flood_probability
self.risk_df = pd.read_csv(self.risk_file)
# Northing_max (:= northing+radius), northing_min, easting_max, easting_min for each row
self.risk_df["X_max"] = self.risk_df["X"] + self.risk_df["radius"]
self.risk_df["X_min"] = self.risk_df["X"] - self.risk_df["radius"]
self.risk_df["Y_max"] = self.risk_df["Y"] + self.risk_df["radius"]
self.risk_df["Y_min"] = self.risk_df["Y"] - self.risk_df["radius"]
# Also add column of radius squared r2
self.risk_df["radius_squared"] = np.square(self.risk_df["radius"])
def get_lat_long(self, postcodes):
"""Get an array of WGS84 (latitude, longitude) pairs from a list of postcodes.
Parameters
----------
postcodes: sequence of strs
Ordered sequence of N postcode strings
Returns
-------
ndarray
Array of Nx2 (latitude, longitude) pairs for the input postcodes.
Invalid postcodes return [`numpy.nan`, `numpy.nan`].
"""
# Fix evil postcodes
postcodes = clean_postcodes(postcodes)
postcode_df = self.postcode_df
postcode_df = postcode_df.fillna(np.nan)
postcode_df = postcode_df.set_index('Postcode')
index_data = postcode_df.loc[postcodes]
lat = np.array(index_data['Latitude']).T
lng = np.array(index_data['Longitude']).T
return np.vstack((lat, lng)).transpose()
def get_easting_northing_flood_probability(self, easting, northing):
"""Get an array of flood risk probabilities from arrays of eastings and northings.
Flood risk data is extracted from the Tool flood risk file. Locations
not in a risk band circle return `Zero`, otherwise returns the name of the
highest band it sits in.
Parameters
----------
easting: numpy.ndarray of floats
OS Eastings of locations of interest
northing: numpy.ndarray of floats
OS Northings of locations of interest
Returns
-------
numpy.ndarray of strs
numpy array of flood probability bands corresponding to input locations.
"""
# Read in risk files as pandas dataframe
risks = self.risk_df
prob_bands = np.full(np.size(easting), "Zero", dtype='<U8')
# For each point we get:
for point, point_east in enumerate(easting):
point_north = northing[point]
# Pick the zones where easting_min < easting < easting_max
zones = risks.loc[(risks.X_max >= point_east) & (risks.X_min <= point_east)]
# Further reduce these to where northing_min < northing < northing_max
zones_pot = zones.loc[(zones.Y_max >= point_north) & (zones.Y_min <= point_north)]
# For each potential zone:
for i in range(len(zones_pot.index)):
# Don't bother with further zones if we already know the risk is High
if prob_bands[point] == "High":
break
row = zones_pot.iloc[i]
# Squared distance from point to zone (we use squares to avoid square-rooting)
dist2 = (row.X-point_east)*(row.X-point_east) + (row.Y-point_north)*(row.Y-point_north)
if dist2 <= row.radius_squared:
risk = row.prob_4band
current_band = prob_bands[point]
if risk == "High":
prob_bands[point] = risk
elif risk == "Medium" and current_band != "High":
prob_bands[point] = risk
elif risk == "Low" and (current_band != "High" and current_band != "Medium"):
prob_bands[point] = risk
elif risk == "Very Low" and current_band == "Zero":
prob_bands[point] = "Very Low"
return prob_bands
def get_sorted_flood_probability(self, postcodes):
"""Get an array of flood risk probabilities from a sequence of postcodes.
Probability is ordered High>Medium>Low>Very low>Zero.
Flood risk data is extracted from the `Tool` flood risk file.
Parameters
----------
postcodes: sequence of strs
Ordered sequence of postcodes
Returns
-------
pandas.DataFrame
Dataframe of flood probabilities indexed by postcode and ordered from `High` to `Zero`,
then by lexicographic (dictionary) order on postcode. The index is named `Postcode`, the
data column is named `Probability Band`. Invalid postcodes and duplicates
are removed.
"""
# Fix evil postcodes
postcodes = clean_postcodes(postcodes)
# Get latitude and longitude
output = self.get_lat_long(postcodes) # Returns latitude,longitude pairs in an array
lat_long = pd.DataFrame(
{'Postcode':postcodes, 'latitude':output[:, 0], 'longitude':output[:, 1]})
# Delete the wrong format of postcode
lat_long = lat_long.dropna(how='any')
latitude = np.array(lat_long.latitude)
longitude = np.array(lat_long.longitude)
# Returns Eastings and Northings in an array
output_2 = geo.get_easting_northing_from_lat_long(latitude, longitude)
# Returns array of flood risk probabilities
output_3 = self.get_easting_northing_flood_probability(output_2[0], output_2[1])
# New column in dataframe containing the probabilities
lat_long['Probability Band'] = output_3
# Removing invalid postcodes
lat_long = lat_long.dropna(how='any')
# Removing duplicates
lat_long = lat_long.drop_duplicates(subset='Postcode')
# Sort by Probability Bands
# make Probability Band an ordered categorical so it sorts correctly (added by Xun Xie)
lat_long['Probability Band'] = pd.Categorical(
lat_long['Probability Band'],
categories=["High", "Medium", "Low", "Very Low", "Zero"], ordered=True)
# sort first by Probability Band, then by Postcode
lat_long = lat_long.sort_values(by=['Probability Band', 'Postcode'], ascending=[True, True])
lat_long = lat_long.set_index('Postcode')
return lat_long # Make Postcode the Index
def get_flood_cost(self, postcodes):
"""Get an array of estimated cost of a flood event from a sequence of postcodes.
Parameters
----------
postcodes: sequence of strs
Ordered collection of postcodes
Returns
-------
numpy.ndarray of floats
array of floats for the pound sterling cost for the input postcodes.
Invalid postcodes return `numpy.nan`.
"""
# Fix evil postcodes
postcodes = clean_postcodes(postcodes)
values_df = self.values_df[['Postcode', 'Total Value']]
values_df = values_df.loc[values_df.Postcode.isin(postcodes)]
values_df = values_df.set_index('Postcode').reindex(postcodes)
values_df = values_df.fillna(0)
return np.array(values_df['Total Value'])
def get_annual_flood_risk(self, postcodes, probability_bands):
"""Get an array of estimated annual flood risk in pounds sterling per year of a flood
event from a sequence of postcodes and flood probabilities.
Parameters
----------
postcodes: sequence of strs
Ordered collection of postcodes
probability_bands: sequence of strs
Ordered collection of flood probabilities
Returns
-------
numpy.ndarray
array of floats for the annual flood risk in pounds sterling for the input postcodes.
Invalid postcodes return `numpy.nan`.
"""
#get cost_value
cost_value = self.get_flood_cost(postcodes)
# create DataFrame mapping each probability band to its numeric value
risk_df = pd.DataFrame({'Probability Band': probability_bands})
total_df = risk_df.replace(
{'High':0.1, 'Medium': 0.02, 'Low': 0.01, 'Very Low': 0.001, 'Zero': 0})
pro_ser = np.array(total_df['Probability Band'])
#compute result
annual = pro_ser * cost_value * 0.05
return annual
def get_sorted_annual_flood_risk(self, postcodes):
"""Get a sorted pandas DataFrame of flood risks.
Parameters
----------
postcodes: sequence of strs
Ordered sequence of postcodes
Returns
-------
pandas.DataFrame
Dataframe of flood risks indexed by (normalized) postcode and ordered by risk,
then by lexicographic (dictionary) order on the postcode. The index is named
`Postcode` and the data column `Flood Risk`.
Invalid postcodes and duplicates are removed.
"""
# Fix evil postcodes
postcodes = clean_postcodes(postcodes)
# Get lat, long of postcodes
arr = self.get_lat_long(postcodes)
lat = arr[:, 0] # Latitude
lng = arr[:, 1] # Longitude
# Convert lat, long -> easting, northing
tem = geo.get_easting_northing_from_lat_long(lat, lng, radians=False)
eos = tem[0] # Easting
nos = tem[1] # Northing
# Get our data frame of postcodes and risks
prob_band = self.get_easting_northing_flood_probability(eos, nos)
flood_risk = self.get_annual_flood_risk(postcodes, prob_band)
risk_df = | pd.DataFrame({'Postcode':postcodes, 'Flood Risk':flood_risk}) | pandas.DataFrame |
import sys
import numpy as np
import pytest
from pandas.compat import (
IS64,
PYPY,
)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_dtype_equal,
is_object_dtype,
)
import pandas as pd
from pandas import (
Index,
Series,
)
import pandas._testing as tm
def test_isnull_notnull_docstrings():
# GH#41855 make sure its clear these are aliases
doc = pd.DataFrame.notnull.__doc__
assert doc.startswith("\nDataFrame.notnull is an alias for DataFrame.notna.\n")
doc = pd.DataFrame.isnull.__doc__
assert doc.startswith("\nDataFrame.isnull is an alias for DataFrame.isna.\n")
doc = Series.notnull.__doc__
assert doc.startswith("\nSeries.notnull is an alias for Series.notna.\n")
doc = Series.isnull.__doc__
assert doc.startswith("\nSeries.isnull is an alias for Series.isna.\n")
@pytest.mark.parametrize(
"op_name, op",
[
("add", "+"),
("sub", "-"),
("mul", "*"),
("mod", "%"),
("pow", "**"),
("truediv", "/"),
("floordiv", "//"),
],
)
def test_binary_ops_docstring(frame_or_series, op_name, op):
# not using the all_arithmetic_functions fixture with _get_opstr
# as _get_opstr is used internally in the dynamic implementation of the docstring
klass = frame_or_series
operand1 = klass.__name__.lower()
operand2 = "other"
expected_str = " ".join([operand1, op, operand2])
assert expected_str in getattr(klass, op_name).__doc__
# reverse version of the binary ops
expected_str = " ".join([operand2, op, operand1])
assert expected_str in getattr(klass, "r" + op_name).__doc__
def test_ndarray_compat_properties(index_or_series_obj):
obj = index_or_series_obj
# Check that we work.
for p in ["shape", "dtype", "T", "nbytes"]:
assert getattr(obj, p, None) is not None
# deprecated properties
for p in ["strides", "itemsize", "base", "data"]:
assert not hasattr(obj, p)
msg = "can only convert an array of size 1 to a Python scalar"
with pytest.raises(ValueError, match=msg):
obj.item() # len > 1
assert obj.ndim == 1
assert obj.size == len(obj)
assert Index([1]).item() == 1
assert Series([1]).item() == 1
def test_array_wrap_compat():
# Note: at time of dask 2022.01.0, this is still used by eg dask
# (https://github.com/dask/dask/issues/8580).
# This test is a small dummy ensuring coverage
orig = | Series([1, 2, 3], dtype="int64", index=["a", "b", "c"]) | pandas.Series |
"""
Author: <NAME>
GitHub: phideltaee
Description: Custom training model for Detectron2 using a modified version of the TACO dataset and the ARC Litter Dataset.
------------------------------------------------------
------------------------------------------------------
NOTES on Implementation:
# Training on TACO dataset.
# Step 1: Remap output to the desired number of classes. Choose a map (dictionary) or create
# your own and place it in the folder 'maps'.
# - - Run the remapping - -
# python remap_classes --class_map <path_to_map/file.csv> --ann_dir <path_to_annotations/file.json>
# Step 2: Split dataset into train-test splits, k-times for k-fold cross validation.
# - - Split the dataset - -
# python split_dataset.py --nr_trials <K_folds> --out_name <name of file> --dataset_dir <path_to_data>
# To train the model:
# Template: python arcnet_main.py --class_num <number of classes> --data_dir <path_to_dataset/> train
# EXAMPLE: python arcnet_main.py --class_num 1 --data_dir data train --ann_train ann_0_map1train.json --ann_val ann_0_map1val.json
# To test the model:
# Template: python arcnet_main.py test --weights <path_to/weigts.pth>
# EXAMPLE: python arcnet_main.py test --weights output/taco_3000.pth
# To run the model for inference on an image
# TEMPLATE: python arcnet_main.py inference --image_path <path/to/test_image.jpg> --weights <path_to/weights.pth>
# EXAMPLE: python arcnet_main.py inference --image_path img_test/trash_01.jpg --weights output/taco_3000.pth
# Inferring on an image and its mask: predicts on a random image from the test dataset and shows its mask
# python arcnet_main.py infer_mask --weights output/taco_3000.pth --ann_train ann_0_map1train.json --ann_val ann_0_map1val.json
# Check TensorBoard for training and validation metrics.
tensorboard --logdir ./output/
"""
# Importing general libraries
import json
import random
import cv2
import os
import argparse
import time
from datetime import datetime
import shutil
# Importing custom functions
from utils import *
# Importing Detectron2 libraries
from detectron2.utils.visualizer import Visualizer
from detectron2.data.datasets import register_coco_instances
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.engine import DefaultTrainer
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2 import model_zoo
from detectron2.utils.visualizer import ColorMode
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader
import pandas as pd
# Parsing global arguments
parser = argparse.ArgumentParser(description='Custom implementation of Detectron2 using the TACO dataset.')
parser.add_argument('--class_num', required=True, type=int, metavar="Number of classes", help='Number of target classes')
parser.add_argument('--image_path', required=False, default='./img_test/test_img1.jpg', metavar="/path/file.jpg", help='Test image path')
parser.add_argument('--data_dir', required=False, default='./data', metavar="/path_to_data/", help='Dataset directory')
parser.add_argument("command", metavar="<command>",help="Opt: 'train', 'test', 'inference")
parser.add_argument('--weights', required=False, default='./output/taco_500_arc.pth', metavar="/trained_weights.pth", help='weights used for inference')
parser.add_argument('--ann_train', required=False, metavar="file.json", help='Train Data Annotations')
parser.add_argument('--ann_test', required=False, metavar="file.json", help='Test Data Annotations')
parser.add_argument('--ann_val', required=False, metavar="file.json", help='Validation Data Annotations')
args = parser.parse_args()
# TODO Include 5-Fold cross validation when using data split.
# Registering the custom dataset using Detectron2 libraries
# TODO: load train, test, and val data directly from the run commands (not hardcoded)
# gets the annotation directly from the train set.
# Registering "class_num" many classes and their respective datasets. Train/Val on Taco, Test on ARC.
register_coco_instances("taco_train",{},args.data_dir+"/"+args.ann_train, args.data_dir)
register_coco_instances("taco_val",{},args.data_dir+"/"+args.ann_val, args.data_dir)
# Adding custom test file for ARC Litter dataset. NOTE: ...coco2.json was modified to match taco format for label ids.
# Test file is static. Load the corresponding number of classes.
register_coco_instances("arc_test",{},"./segments/festay_arc_litter/arc_litter-v2.1_coco"+ str(args.class_num)+".json", "./segments/festay_arc_litter/v2.1")
# # Alternative Configurations
# Registering with 2 classes - map_to_2 annotations (Hardcoded)
#register_coco_instances("taco_train",{},"./data/annotations_0_map_2_train.json","./data")
#register_coco_instances("taco_test",{},"./data/annotations_0_map_2_test.json","./data")
#register_coco_instances("taco_val",{},"./data/annotations_0_map_2_val.json","./data")
# Registering with standard TACO annotations - 60 classes. (Hardcoded)
#register_coco_instances("taco_train",{},"./data/annotations_0__train.json","./data")
#register_coco_instances("taco_test",{},"./data/annotations_0__test.json","./data")
#register_coco_instances("taco_val",{},"./data/annotations_0__val.json","./data")
# Obtaining the dataset catalog for each, train, val and test.
dataset_dicts_train = DatasetCatalog.get("taco_train")
#dataset_dicts_test = DatasetCatalog.get("taco_test")
dataset_dicts_val = DatasetCatalog.get("taco_val")
dataset_dicts_test = DatasetCatalog.get("arc_test")
# Registering Metadatas
arc_metadata = MetadataCatalog.get("arc_test")
taco_metadata = MetadataCatalog.get("taco_train")
print("datasets registered successfully")
# verify the custom dataset was imported successfully by loading some images
for d in random.sample(dataset_dicts_train, 1):
print(d["file_name"])
assert os.path.isfile(d["file_name"]), "Image not loaded correctly!"
img = cv2.imread(d["file_name"])
visualizer = Visualizer(img[:, :, ::-1], metadata=taco_metadata, scale=0.5)
out = visualizer.draw_dataset_dict(d)
# image too large to display - resize down to fit in the screen
img_new = out.get_image()[:, :, ::-1]
img_resized = ResizeWithAspectRatio(img_new, width=800)
cv2.imshow("train image", img_resized)# out.get_image()[:, :, ::-1])
cv2.waitKey(0)
cv2.destroyAllWindows()
# verify the custom test dataset was imported successfully by loading some images
for d in random.sample(dataset_dicts_test, 1):
print(d["file_name"])
assert os.path.isfile(d["file_name"]), "Image not loaded correctly!"
img = cv2.imread(d["file_name"])
visualizer = Visualizer(img[:, :, ::-1], metadata=arc_metadata, scale=0.5)
out = visualizer.draw_dataset_dict(d)
# image too large to display - resize down to fit in the screen
img_new = out.get_image()[:, :, ::-1]
img_resized = ResizeWithAspectRatio(img_new, width=800)
cv2.imshow("test image", img_resized)# out.get_image()[:, :, ::-1])
cv2.waitKey(0)
cv2.destroyAllWindows()
# verify the custom test dataset was imported successfully by loading some images
for d in random.sample(dataset_dicts_val, 1):
print(d["file_name"])
assert os.path.isfile(d["file_name"]), "Image not loaded correctly!"
img = cv2.imread(d["file_name"])
visualizer = Visualizer(img[:, :, ::-1], metadata=taco_metadata, scale=0.5)
out = visualizer.draw_dataset_dict(d)
# image too large to display - resize down to fit in the screen
img_new = out.get_image()[:, :, ::-1]
img_resized = ResizeWithAspectRatio(img_new, width=800)
cv2.imshow("val image", img_resized)# out.get_image()[:, :, ::-1])
cv2.waitKey(0)
cv2.destroyAllWindows()
# Custom Validation # 1
from detectron2.engine import HookBase
from detectron2.data import build_detection_train_loader
import detectron2.utils.comm as comm
import torch
class ValidationLoss(HookBase):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg.clone()
self.cfg.DATASETS.TRAIN = cfg.DATASETS.VAL
self._loader = iter(build_detection_train_loader(self.cfg))
def after_step(self):
data = next(self._loader)
with torch.no_grad():
loss_dict = self.trainer.model(data)
losses = sum(loss_dict.values())
assert torch.isfinite(losses).all(), loss_dict
loss_dict_reduced = {"val_" + k: v.item() for k, v in
comm.reduce_dict(loss_dict).items()}
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
if comm.is_main_process():
self.trainer.storage.put_scalars(total_val_loss=losses_reduced,
**loss_dict_reduced)
# Training custom dataset
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.DATASETS.TRAIN = ("taco_train",)
cfg.DATASETS.VAL = ("taco_val",)
cfg.DATASETS.TEST = ("arc_test",)
cfg.TEST.EVAL_PERIOD = 50 # change this value to compute the validation metrics at different iterations.
cfg.DATALOADER.NUM_WORKERS = 2
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml") # Let training initialize from model zoo
cfg.SOLVER.IMS_PER_BATCH = 4
cfg.SOLVER.BASE_LR = 0.005 # Starting lr scheduling.
cfg.SOLVER.MAX_ITER = 3000 # each iteration corresponds to one full batch of images going through the network (batch size 16 by default).
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512 # (default: 512)
cfg.MODEL.ROI_HEADS.NUM_CLASSES = args.class_num # (see https://detectron2.readthedocs.io/tutorials/datasets.html#update-the-config-for-new-datasets)
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
#cfg.MODEL.DEVICE = 'cuda:0'
# There are 5 stages in ResNet. The first is a convolution, and the following stages are each group of residual blocks.
cfg.MODEL.BACKBONE.FREEZE_AT = 2 # Best obtained at 2.
if args.command == "train":
# default trainer, does not include test or val loss. Custom coco trainer created to tackle this.
#trainer = DefaultTrainer(cfg)
# Training with custom validation loss trainer CocoTrainer.py, which evaluates the COCO AP values
from CocoTrainer import CocoTrainer
trainer = CocoTrainer(cfg)
#trainer = Trainer(cfg) # From https://github.com/facebookresearch/detectron2/issues/810
val_loss = ValidationLoss(cfg)
trainer.register_hooks([val_loss])
# swap the order of PeriodicWriter and ValidationLoss
trainer._hooks = trainer._hooks[:-2] + trainer._hooks[-2:][::-1]
trainer.resume_or_load(resume=False)
trainer.train()
elif args.command == "train_crossval":
import pandas as pd
import numpy as np
import json
def load_json_arr(json_path):
lines = []
with open(json_path, 'r') as f:
for line in f:
lines.append(json.loads(line))
return lines
experiment_folder = './output'
freeze_stages = [1, 2, 3, 4, 5]
LRs = [0.01, 0.005, 0.0025, 0.001]
kfold_num = 5
metric_keys = ['bbox/AP', 'bbox/AP50', 'bbox/AP75', 'bbox/APl', 'bbox/APm', 'bbox/APs', 'segm/AP',
'segm/AP50', 'segm/AP75', 'segm/APl', 'segm/APm', 'segm/APs', 'mask_rcnn/accuracy',
'mask_rcnn/false_negative', 'mask_rcnn/false_positive']
results_df = pd.DataFrame(columns=['Freeze', 'LR', 'KFold', 'bbox/AP', 'bbox/AP50', 'bbox/AP75', 'bbox/APl', 'bbox/APm', 'bbox/APs',
'segm/AP',
'segm/AP50', 'segm/AP75', 'segm/APl', 'segm/APm', 'segm/APs', 'mask_rcnn/accuracy',
'mask_rcnn/false_negative', 'mask_rcnn/false_positive'])
interim_results_df = results_df
kfold_results_df = results_df
k_fold_num = 5
for freeze in freeze_stages:
for lr in LRs:
cfg.SOLVER.BASE_LR = lr
cfg.MODEL.BACKBONE.FREEZE_AT = freeze
for kfold in range(kfold_num):
print(f"Starting {kfold + 1} of {k_fold_num} - Freeze={freeze} ,validation at LR={lr}")
# Registering and using each training dataset.
ann_train = "ann_"+str(kfold)+"_map"+str(args.class_num)+"train.json"
registry_name = "taco_dataset"+str(kfold)+"_"+str(lr)+"_"+str(freeze)
register_coco_instances(registry_name, {}, args.data_dir + "/" + ann_train, args.data_dir)
dataset_kfold_train = DatasetCatalog.get(registry_name)
# Getting configurations for this setup:
cfg.DATASETS.TRAIN = (registry_name,)
# Training with custom validation loss trainer CocoTrainer.py, which evaluates the COCO AP values
from CocoTrainer import CocoTrainer
trainer = CocoTrainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()
# load the information from the metrics.json and save the mAP, AP50, AP75, class_accy, mask_rcnn_accy
exp_metrics = load_json_arr(experiment_folder + '/metrics.json')
# loading all metrics in the metrics.json folder to dataframe
x = []
for metric in metric_keys:
# Take the last logged value for each metric from metrics.json (averaging the last few entries would mix values from different evaluation points)
x.append(([x[metric] for x in exp_metrics if metric in x][-1]))
row = pd.DataFrame(x, metric_keys).T
cross_val_df = | pd.concat([results_df, row], 0) | pandas.concat |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import nose
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, Timestamp, isnull, notnull,
bdate_range, date_range, _np_version_under1p7)
import pandas.core.common as com
from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long
from pandas import compat, to_timedelta, tslib
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_almost_equal,
ensure_clean)
import pandas.util.testing as tm
def _skip_if_numpy_not_friendly():
# not friendly for < 1.7
if _np_version_under1p7:
raise nose.SkipTest("numpy < 1.7")
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_numeric_conversions(self):
_skip_if_numpy_not_friendly()
self.assertEqual(ct(0), np.timedelta64(0,'ns'))
self.assertEqual(ct(10), np.timedelta64(10,'ns'))
self.assertEqual(ct(10,unit='ns'), np.timedelta64(10,'ns').astype('m8[ns]'))
self.assertEqual(ct(10,unit='us'), np.timedelta64(10,'us').astype('m8[ns]'))
self.assertEqual(ct(10,unit='ms'), np.timedelta64(10,'ms').astype('m8[ns]'))
self.assertEqual(ct(10,unit='s'), np.timedelta64(10,'s').astype('m8[ns]'))
self.assertEqual(ct(10,unit='d'), np.timedelta64(10,'D').astype('m8[ns]'))
def test_timedelta_conversions(self):
_skip_if_numpy_not_friendly()
self.assertEqual(ct(timedelta(seconds=1)), np.timedelta64(1,'s').astype('m8[ns]'))
self.assertEqual(ct(timedelta(microseconds=1)), np.timedelta64(1,'us').astype('m8[ns]'))
self.assertEqual(ct(timedelta(days=1)), np.timedelta64(1,'D').astype('m8[ns]'))
def test_short_format_converters(self):
_skip_if_numpy_not_friendly()
def conv(v):
return v.astype('m8[ns]')
self.assertEqual(ct('10'), np.timedelta64(10,'ns'))
self.assertEqual(ct('10ns'), np.timedelta64(10,'ns'))
self.assertEqual(ct('100'), np.timedelta64(100,'ns'))
self.assertEqual(ct('100ns'), np.timedelta64(100,'ns'))
self.assertEqual(ct('1000'), np.timedelta64(1000,'ns'))
self.assertEqual(ct('1000ns'), np.timedelta64(1000,'ns'))
self.assertEqual(ct('1000NS'), np.timedelta64(1000,'ns'))
self.assertEqual(ct('10us'), np.timedelta64(10000,'ns'))
self.assertEqual(ct('100us'), np.timedelta64(100000,'ns'))
self.assertEqual(ct('1000us'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('1000Us'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('1000uS'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('1ms'), np.timedelta64(1000000,'ns'))
self.assertEqual(ct('10ms'), np.timedelta64(10000000,'ns'))
self.assertEqual( | ct('100ms') | pandas.tseries.timedeltas._coerce_scalar_to_timedelta_type |
import pandas as pd
import numpy as np
#census_data = pd.read_csv('processed.csv')
# read the csv file from the data store: flatten-form-data.csv
flatten_data = | pd.read_csv('flatten-form-data.csv') | pandas.read_csv |
import pandas as pd
import os
import glob
import re
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.decomposition import PCA
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.metrics import mean_squared_error
from helper import *
def readfilerun(run):
"reads in files from each run"
#path = os.path.join(os.getcwd())
testdir = os.path.join(os.getcwd(), "test", "run" + str(run))
dir_files = os.listdir(testdir)
#print(dir_files) # listing all files in directory
df_lst = []
# loop over the list of csv files
for f in dir_files:
#print(f)
filedir = os.path.join(os.getcwd(), "test", "run" + str(run), f)
#dftest = open(filedir)
df = | pd.read_csv(filedir) | pandas.read_csv |
# pylint: disable=E1101
from datetime import datetime
import datetime as dt
import os
import warnings
import nose
import struct
import sys
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.compat import iterkeys
from pandas.core.frame import DataFrame, Series
from pandas.core.common import is_categorical_dtype
from pandas.io.parsers import read_csv
from pandas.io.stata import (read_stata, StataReader, InvalidColumnName,
PossiblePrecisionLoss, StataMissingValue)
import pandas.util.testing as tm
from pandas.tslib import NaT
from pandas import compat
class TestStata(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta')
self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta')
self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta')
self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta')
self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta')
self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta')
self.dta3_113 = os.path.join(self.dirpath, 'stata3_113.dta')
self.dta3_114 = os.path.join(self.dirpath, 'stata3_114.dta')
self.dta3_115 = os.path.join(self.dirpath, 'stata3_115.dta')
self.dta3_117 = os.path.join(self.dirpath, 'stata3_117.dta')
self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
self.dta4_113 = os.path.join(self.dirpath, 'stata4_113.dta')
self.dta4_114 = os.path.join(self.dirpath, 'stata4_114.dta')
self.dta4_115 = os.path.join(self.dirpath, 'stata4_115.dta')
self.dta4_117 = os.path.join(self.dirpath, 'stata4_117.dta')
self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta')
self.csv14 = os.path.join(self.dirpath, 'stata5.csv')
self.dta14_113 = os.path.join(self.dirpath, 'stata5_113.dta')
self.dta14_114 = os.path.join(self.dirpath, 'stata5_114.dta')
self.dta14_115 = os.path.join(self.dirpath, 'stata5_115.dta')
self.dta14_117 = os.path.join(self.dirpath, 'stata5_117.dta')
self.csv15 = os.path.join(self.dirpath, 'stata6.csv')
self.dta15_113 = os.path.join(self.dirpath, 'stata6_113.dta')
self.dta15_114 = os.path.join(self.dirpath, 'stata6_114.dta')
self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta')
self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta')
self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta')
self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta')
self.dta17_113 = os.path.join(self.dirpath, 'stata8_113.dta')
self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta')
self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta')
self.dta18_115 = os.path.join(self.dirpath, 'stata9_115.dta')
self.dta18_117 = os.path.join(self.dirpath, 'stata9_117.dta')
self.dta19_115 = os.path.join(self.dirpath, 'stata10_115.dta')
self.dta19_117 = os.path.join(self.dirpath, 'stata10_117.dta')
self.dta20_115 = os.path.join(self.dirpath, 'stata11_115.dta')
self.dta20_117 = os.path.join(self.dirpath, 'stata11_117.dta')
self.dta21_117 = os.path.join(self.dirpath, 'stata12_117.dta')
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
def test_read_empty_dta(self):
empty_ds = DataFrame(columns=['unit'])
# GH 7369, make sure can read a 0-obs dta file
with tm.ensure_clean() as path:
empty_ds.to_stata(path,write_index=False)
empty_ds2 = read_stata(path)
tm.assert_frame_equal(empty_ds, empty_ds2)
def test_data_method(self):
# Minimal testing of legacy data method
reader_114 = StataReader(self.dta1_114)
with warnings.catch_warnings(record=True) as w:
parsed_114_data = reader_114.data()
reader_114 = StataReader(self.dta1_114)
parsed_114_read = reader_114.read()
tm.assert_frame_equal(parsed_114_data, parsed_114_read)
def test_read_dta1(self):
reader_114 = StataReader(self.dta1_114)
parsed_114 = reader_114.read()
reader_117 = StataReader(self.dta1_117)
parsed_117 = reader_117.read()
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
# this is an oddity as really the nan should be float64, but
# the casting doesn't fail so need to match stata here
expected['float_miss'] = expected['float_miss'].astype(np.float32)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta2(self):
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('datetime interp under 2.6 is faulty')
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1)
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1)
),
(
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
)
],
columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date',
'monthly_date', 'quarterly_date', 'half_yearly_date',
'yearly_date']
)
expected['yearly_date'] = expected['yearly_date'].astype('O')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed_114 = self.read_dta(self.dta2_114)
parsed_115 = self.read_dta(self.dta2_115)
parsed_117 = self.read_dta(self.dta2_117)
# 113 is buggy due to limits of date format support in Stata
# parsed_113 = self.read_dta(self.dta2_113)
# Remove resource warnings
w = [x for x in w if x.category is UserWarning]
# should get warning for each call to read_dta
tm.assert_equal(len(w), 3)
# buggy test because of the NaT comparison on certain platforms
# Format 113 test fails since it does not support tc and tC formats
# tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta3(self):
parsed_113 = self.read_dta(self.dta3_113)
parsed_114 = self.read_dta(self.dta3_114)
parsed_115 = self.read_dta(self.dta3_115)
parsed_117 = self.read_dta(self.dta3_117)
# match stata here
expected = self.read_csv(self.csv3)
expected = expected.astype(np.float32)
expected['year'] = expected['year'].astype(np.int16)
expected['quarter'] = expected['quarter'].astype(np.int8)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta4(self):
parsed_113 = self.read_dta(self.dta4_113)
parsed_114 = self.read_dta(self.dta4_114)
parsed_115 = self.read_dta(self.dta4_115)
parsed_117 = self.read_dta(self.dta4_117)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled'])
# these are all categoricals
expected = pd.concat([expected[col].astype('category') for col in expected], axis=1)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
# File containing strls
def test_read_dta12(self):
parsed_117 = self.read_dta(self.dta21_117)
expected = DataFrame.from_records(
[
[1, "abc", "abcdefghi"],
[3, "cba", "qwertywertyqwerty"],
[93, "", "strl"],
],
columns=['x', 'y', 'z'])
tm.assert_frame_equal(parsed_117, expected, check_dtype=False)
def test_read_write_dta5(self):
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_write_dta6(self):
original = self.read_csv(self.csv3)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['year'] = original['year'].astype(np.int32)
original['quarter'] = original['quarter'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_read_write_dta10(self):
original = DataFrame(data=[["string", "object", 1, 1.1,
np.datetime64('2003-12-25')]],
columns=['string', 'object', 'integer', 'floating',
'datetime'])
original["object"] = Series(original["object"], dtype=object)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['integer'] = original['integer'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, {'datetime': 'tc'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_stata_doc_examples(self):
with tm.ensure_clean() as path:
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.to_stata(path)
def test_write_preserves_original(self):
# 9795
np.random.seed(423)
df = pd.DataFrame(np.random.randn(5,4), columns=list('abcd'))
df.ix[2, 'a':'c'] = np.nan
df_copy = df.copy()
df.to_stata('test.dta', write_index=False)
tm.assert_frame_equal(df, df_copy)
def test_encoding(self):
# GH 4626, proper encoding handling
raw = read_stata(self.dta_encoding)
encoded = read_stata(self.dta_encoding, encoding="latin-1")
result = encoded.kreis1849[0]
if compat.PY3:
expected = raw.kreis1849[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, compat.string_types)
else:
expected = raw.kreis1849.str.decode("latin-1")[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, unicode)
with tm.ensure_clean() as path:
encoded.to_stata(path,encoding='latin-1', write_index=False)
reread_encoded = read_stata(path, encoding='latin-1')
tm.assert_frame_equal(encoded, reread_encoded)
def test_read_write_dta11(self):
original = DataFrame([(1, 2, 3, 4)],
columns=['good', compat.u('b\u00E4d'), '8number', 'astringwithmorethan32characters______'])
formatted = DataFrame([(1, 2, 3, 4)],
columns=['good', 'b_d', '_8number', 'astringwithmorethan32characters_'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
# should get a warning for that format.
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta12(self):
original = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_1',
'astringwithmorethan32characters_2',
'+',
'-',
'short',
'delete'])
formatted = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_',
'_0astringwithmorethan32character',
'_',
'_1_',
'_short',
'_delete'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
tm.assert_equal(len(w), 1) # should get a warning for that format.
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta13(self):
s1 = Series(2**9, dtype=np.int16)
s2 = Series(2**17, dtype=np.int32)
s3 = Series(2**33, dtype=np.int64)
original = DataFrame({'int16': s1, 'int32': s2, 'int64': s3})
original.index.name = 'index'
formatted = original
formatted['int64'] = formatted['int64'].astype(np.float64)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
formatted)
def test_read_write_reread_dta14(self):
expected = self.read_csv(self.csv14)
cols = ['byte_', 'int_', 'long_', 'float_', 'double_']
for col in cols:
expected[col] = expected[col].convert_objects(convert_numeric=True)
expected['float_'] = expected['float_'].astype(np.float32)
expected['date_td'] = | pd.to_datetime(expected['date_td'], coerce=True) | pandas.to_datetime |
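# The line above relies on the legacy ``coerce=True`` keyword of
# ``pd.to_datetime``; later pandas releases spell the same behaviour
# ``errors='coerce'``. A minimal sketch of the modern call, assuming a
# reasonably recent pandas install:
import pandas as pd

raw = pd.Series(['2010-01-08', 'not a date', None])
parsed = pd.to_datetime(raw, errors='coerce')  # unparseable entries become NaT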
# -*- coding: utf-8 -*-
# @Time : 2018/11/8 15:08
# @Author : MengnanChen
# @FileName: audio_process.py
# @Software: PyCharm
import os
import subprocess
from six.moves import cPickle as pickle
import numpy as np
import pandas as pd
import librosa
class AudioProcess(object):
def __init__(self):
self.open_smile_root_dir = r'D:\ProgramFile\Program\openSMILE-2.1.0'
self.open_smile_path = os.path.join(self.open_smile_root_dir, 'bin\Win32')
self.config_path = os.path.join(self.open_smile_root_dir, 'config\emo_large.conf')
self.output_arff_dir = '../data/arff'
self.output_csv_path = '../data/iemocap_output_emo_large.csv'
self.raw_output_dir = '../data/raw_audio_data'
def arff2csv(self, arff_paths, csv_path):
# extra features: name, class
frame_dict = {}
for arff_path in arff_paths:
print('process arff2csv, arff path:{}'.format(arff_path))
with open(arff_path, 'rb') as fin:
lines = fin.readlines()
lines = [x.decode('utf-8').strip('\r\n ') for x in lines]
# scan backwards for the last non-empty line, which holds the comma-separated feature values
data = ''
index = 1
while data == '':
data = lines[-index].strip('\r\n ')
index += 1
data = data.split(',')
index = 0
for line in lines:
line_t = line.split(' ')
if line_t[0] == '@attribute':
if line_t[2] == 'numeric':
try:
if line_t[1] not in frame_dict.keys():
frame_dict[line_t[1]] = []
frame_dict[line_t[1]].append(float(data[index]))
except:
# print('error:', line)
frame_dict[line_t[1]].append(data[index])
else:
if line_t[1] not in frame_dict.keys():
frame_dict[line_t[1]] = []
frame_dict[line_t[1]].append(data[index])
index += 1
dataframe = | pd.DataFrame(data=frame_dict) | pandas.DataFrame |
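# A minimal usage sketch for the ``AudioProcess.arff2csv`` helper above:
# gather the openSMILE ARFF outputs and merge them into a single CSV.
# The ``*.arff`` glob pattern and driving it from ``__main__`` are
# assumptions for illustration, not part of the original script.
import glob
import os

if __name__ == '__main__':
    processor = AudioProcess()
    arff_paths = sorted(glob.glob(os.path.join(processor.output_arff_dir, '*.arff')))
    processor.arff2csv(arff_paths, processor.output_csv_path)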
# Requirements
# pandas==1.1.5
# To download the dataset: wget https://www.cse.msu.edu/computervision/SVW.zip && unzip SVW.zip
# To create the data directories: mkdir -p /mydata/CSQ/Hadamard-Matrix-for-hashing/video/dataset/SVW/raw/data && cd /mydata/Videos && mv * /mydata/CSQ/Hadamard-Matrix-for-hashing/video/dataset/SVW/raw/data
# Remove spaces from data files: cd /mydata/CSQ/Hadamard-Matrix-for-hashing/video/dataset/SVW/scripts && bash rename.sh
# Remove spaces from test and train files: sed -i 's/ - Copy/_-_Copy/g' ../raw/list_cvt/train.txt && sed -i 's/ - Copy/_-_Copy/g' ../raw/list_cvt/test.txt
import pandas as pd
ANN_FILE = "/mydata/SVW.csv"
DATA_DIR = "/mydata/SVW"
if __name__ == "__main__":
df = | pd.read_csv(ANN_FILE) | pandas.read_csv |
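# The setup notes above strip spaces from the extracted files with a shell
# script and ``sed``. A pure-Python sketch of an equivalent rename pass is
# shown below; mapping spaces to underscores mirrors the
# ``' - Copy' -> '_-_Copy'`` substitution, but the exact rule applied by
# ``rename.sh`` is an assumption since that script is not included here.
import os

def replace_spaces_in_filenames(root):
    """Rename every file under ``root`` so spaces become underscores."""
    for dirpath, _, filenames in os.walk(root):
        for name in filenames:
            if ' ' in name:
                os.rename(os.path.join(dirpath, name),
                          os.path.join(dirpath, name.replace(' ', '_')))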
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter; so make sure the last element of the iterator was 'l' in
# this case, since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over, so the previously defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
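# Note: the ``as_indexer`` argument exercised above was later removed from
# ``Series.str.match``; in current pandas ``match`` always returns booleans,
# and ``Series.str.fullmatch`` (added in pandas 1.1) anchors the pattern at
# both ends. A small sketch, assuming a recent pandas install:
import pandas as pd
s = pd.Series(['fooBAD__barBAD', None, 'foo'])
s.str.match('.*BAD[_]+.*BAD')   # boolean Series; None propagates as NaN
s.str.fullmatch('foo')          # True only where the entire string matches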
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single series name is preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL> some text <EMAIL>',
'<EMAIL> some text c@d.<EMAIL> and <EMAIL>',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# index should return the same result as the default index without name
# thus index.name doesn't affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: 3 Em 3
values = ['A', '3', u'¼', u'★', u'፸', u'3', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'3', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ['A', np.nan, u'¼', u'★', np.nan, u'3', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None,
1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar='X')
expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
        # If fillchar is not a character, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.center(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.ljust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.rjust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.center(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.ljust(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.rjust(5, fillchar=1)
def test_zfill(self):
values = Series(['1', '22', 'aaa', '333', '45678'])
result = values.str.zfill(5)
expected = Series(['00001', '00022', '00aaa', '00333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(5) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.zfill(3)
expected = Series(['001', '022', 'aaa', '333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(3) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
values = Series(['1', np.nan, 'aaa', np.nan, '45678'])
result = values.str.zfill(5)
expected = Series(['00001', np.nan, '00aaa', np.nan, '45678'])
tm.assert_series_equal(result, expected)
def test_split(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.split('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.split('__')
tm.assert_series_equal(result, exp)
result = values.str.split('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.split('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.split('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.split('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.split('[,_]')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
def test_rsplit(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.rsplit('__')
tm.assert_series_equal(result, exp)
result = values.str.rsplit('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.rsplit('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.rsplit('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.rsplit('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.rsplit('[,_]')
exp = Series([[u('a,b_c')], [u('c_d,e')], NA, [u('f,g,h')]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_', n=1)
exp = Series([['a_b', 'c'], ['c_d', 'e'], NA, ['f_g', 'h']])
tm.assert_series_equal(result, exp)
def test_split_noargs(self):
# #1859
        s = Series(['Wes McKinney', 'Travis Oliphant'])
result = s.str.split()
expected = ['Travis', 'Oliphant']
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
def test_split_maxsplit(self):
# re.split 0, str.split -1
s = Series(['bd asdf jfg', 'kjasdflqw asdfnfk'])
result = s.str.split(n=-1)
xp = s.str.split()
tm.assert_series_equal(result, xp)
result = s.str.split(n=0)
tm.assert_series_equal(result, xp)
xp = s.str.split('asdf')
result = s.str.split('asdf', n=0)
tm.assert_series_equal(result, xp)
result = s.str.split('asdf', n=-1)
tm.assert_series_equal(result, xp)
def test_split_no_pat_with_nonzero_n(self):
s = Series(['split once', 'split once too!'])
result = s.str.split(n=1)
expected = Series({0: ['split', 'once'], 1: ['split', 'once too!']})
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_unequal_splits', 'one_of_these_things_is_not'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'one'],
1: ['unequal', 'of'],
2: ['splits', 'these'],
3: [NA, 'things'],
4: [NA, 'is'],
5: [NA, 'not']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
with tm.assert_raises_regex(ValueError, "expand must be"):
s.str.split('_', expand="not_a_boolean")
def test_split_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.split('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_unequal_splits', 'one_of_these_things_is_not'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'unequal', 'splits', NA, NA, NA
), ('one', 'of', 'these', 'things',
'is', 'not')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with tm.assert_raises_regex(ValueError, "expand must be"):
idx.str.split('_', expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=2)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=1)
exp = DataFrame({0: ['some_equal', 'with_no'], 1: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.rsplit('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True, n=1)
exp = MultiIndex.from_tuples([('some_equal', 'splits'),
('with_no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_nan_expand(self):
# gh-18450
s = Series(["foo,bar,baz", NA])
result = s.str.split(",", expand=True)
exp = DataFrame([["foo", "bar", "baz"], [NA, NA, NA]])
tm.assert_frame_equal(result, exp)
# check that these are actually np.nan and not None
# TODO see GH 18463
# tm.assert_frame_equal does not differentiate
assert all(np.isnan(x) for x in result.iloc[1])
def test_split_with_name(self):
# GH 12617
# should preserve name
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.split(',')
exp = Series([['a', 'b'], ['c', 'd']], name='xxx')
tm.assert_series_equal(res, exp)
res = s.str.split(',', expand=True)
exp = DataFrame([['a', 'b'], ['c', 'd']])
tm.assert_frame_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.split(',')
exp = Index([['a', 'b'], ['c', 'd']], name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(',', expand=True)
exp = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')])
assert res.nlevels == 2
tm.assert_index_equal(res, exp)
def test_partition_series(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([('a', '_', 'b_c'), ('c', '_', 'd_e'), NA,
('f', '_', 'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('a_b', '_', 'c'), ('c_d', '_', 'e'), NA,
('f_g', '_', 'h')])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.partition('__', expand=False)
exp = Series([('a', '__', 'b__c'), ('c', '__', 'd__e'), NA,
('f', '__', 'g__h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('__', expand=False)
exp = Series([('a__b', '__', 'c'), ('c__d', '__', 'e'), NA,
('f__g', '__', 'h')])
tm.assert_series_equal(result, exp)
# None
values = Series(['a b c', 'c d e', NA, 'f g h'])
result = values.str.partition(expand=False)
exp = Series([('a', ' ', 'b c'), ('c', ' ', 'd e'), NA,
('f', ' ', 'g h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition(expand=False)
exp = Series([('a b', ' ', 'c'), ('c d', ' ', 'e'), NA,
('f g', ' ', 'h')])
tm.assert_series_equal(result, exp)
        # Not split
values = Series(['abc', 'cde', NA, 'fgh'])
result = values.str.partition('_', expand=False)
exp = Series([('abc', '', ''), ('cde', '', ''), NA, ('fgh', '', '')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('', '', 'abc'), ('', '', 'cde'), NA, ('', '', 'fgh')])
tm.assert_series_equal(result, exp)
# unicode
values = Series([u'a_b_c', u'c_d_e', NA, u'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([(u'a', u'_', u'b_c'), (u'c', u'_', u'd_e'),
NA, (u'f', u'_', u'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([(u'a_b', u'_', u'c'), (u'c_d', u'_', u'e'),
NA, (u'f_g', u'_', u'h')])
tm.assert_series_equal(result, exp)
# compare to standard lib
values = Series(['A_B_C', 'B_C_D', 'E_F_G', 'EFGHEF'])
result = values.str.partition('_', expand=False).tolist()
assert result == [v.partition('_') for v in values]
result = values.str.rpartition('_', expand=False).tolist()
assert result == [v.rpartition('_') for v in values]
def test_partition_index(self):
values = Index(['a_b_c', 'c_d_e', 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Index(np.array([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_',
'g_h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.rpartition('_', expand=False)
exp = Index(np.array([('a_b', '_', 'c'), ('c_d', '_', 'e'), (
'f_g', '_', 'h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.partition('_')
exp = | Index([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_', 'g_h')]) | pandas.Index |
import re
import numpy as np
import pandas as pd
from nltk.util import ngrams
from blingfire import text_to_words
from unidecode import unidecode
STOPWORDS = {
'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves',
'you', "you're", "you've", "you'll", "you'd", 'your', 'yours', 'yourself',
'yourselves', 'he', 'him', 'his', 'himself', 'she', "she's", 'her', 'hers',
'herself', 'it', "it's", 'its', 'itself', 'they', 'them', 'their', 'theirs',
'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', "that'll", 'these',
'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has',
'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but',
'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with',
'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after',
'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over',
'under', 'again', 'further', 'then', 'once', 'here', 'there', 'when', 'where',
'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other',
'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than',
'too', 'very', 's', 't', 'can', 'will', 'just', 'don', "don't", 'should',
"should've", 'now', 'd', 'll', 'm', 'o', 're', 've', 'y', 'ain', 'aren',
"aren't", 'couldn', "couldn't", 'didn', "didn't", 'doesn', "doesn't", 'hadn',
"hadn't", 'hasn', "hasn't", 'haven', "haven't", 'isn', "isn't", 'ma', 'mightn',
"mightn't", 'mustn', "mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn',
"shouldn't", 'wasn', "wasn't", 'weren', "weren't", 'won', "won't", 'wouldn', "wouldn't"
}
REGEX_TRANSLATION_TABLE = str.maketrans('', '', "^$.\+*?{}[]()|")
def extract_from_between_quotations(text):
"""Get everything that's in double quotes
"""
results = re.findall('"([^"]*)"', text)
return [i.strip() for i in results]
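# Example (illustrative):
#   extract_from_between_quotations('He said "foo" and then "bar baz"') -> ['foo', 'bar baz']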
def remove_single_non_alphanumerics(text):
"""Removes any single characters that are not
alphanumerics and not important punctuation.
"""
text = re.sub(r"\B[^\w\"\s]\B", "", text)
return standardize_whitespace_length(text)
def replace_special_whitespace_chars(text: str) -> str:
"""It's annoying to deal with nonbreaking whitespace chars like u'xa0'
or other whitespace chars. Let's replace all of them with the standard
char before doing any other processing."""
text = re.sub(r"\s", " ", text)
return text
def standardize_whitespace_length(text: str) -> str:
"""Tokenization is problematic when there are extra-long whitespaces.
Make them all a single character in length.
Also remove any whitespaces at beginning/end of a string"""
return re.sub(r" +", " ", text).strip()
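# Examples (illustrative):
#   replace_special_whitespace_chars('a\xa0b')  -> 'a b'   (nonbreaking space becomes a plain space)
#   standardize_whitespace_length('  a   b  ')  -> 'a b'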
def fix_text(s):
"""General purpose text fixing using nlpre package
and then tokenizing with blingfire
"""
if | pd.isnull(s) | pandas.isnull |
import math
import numpy as np
import pandas as pd
import sys
import importlib
from sklearn.decomposition import PCA
import networkx as nx
import pickle
from src.analyze import analyze_utils as au
import argparse
import os
from time import time
def get_parser():
parser = argparse.ArgumentParser(description='Compute the n_bottleneck and pca_elongation features.')
parser.add_argument('--net-dir', dest='net_dir',
help='The directory with the network model definitions (default: ./graphs)',
default='./graphs', type=str)
parser.add_argument('--output',
help='The directory used to save features (default: ./reports/newdata<nr-nodes>.pkl)',
default='./reports', type=str)
return parser
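# Example invocation (illustrative; the script name is an assumption):
#   python compute_features.py --net-dir ./graphs --output ./reports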
def main():
parser = get_parser()
args = parser.parse_args()
y30, y30t, y60, y60t = load_data()
new_train_30 = get_df_for_graphs(y30.index, args)
new_test_30 = get_df_for_graphs(y30t.index, args)
new_data_30 = {'X_train': new_train_30, 'X_test': new_test_30}
print()
new_train_60 = get_df_for_graphs(y60.index, args)
new_test_60 = get_df_for_graphs(y60t.index, args)
new_data_60 = {'X_train': new_train_60, 'X_test': new_test_60}
with open(os.path.join(args.output,"newdata30.pkl"), "wb") as file:
pickle.dump(new_data_30, file)
with open(os.path.join(args.output, "newdata60.pkl"), "wb") as file:
pickle.dump(new_data_60, file)
def load_data():
Xall30 = pd.read_pickle('./reports/data30.pkl')
y30 = Xall30['y_train']
y30t = Xall30['y_test']
Xall60 = pd.read_pickle('./reports/data60.pkl')
y60 = Xall60['y_train']
y60t = Xall60['y_test']
return y30, y30t, y60, y60t
def select_best(H, pos):
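    """
    Compute two features for graph H with 2-D node positions pos:
    - 'n_bottlenecks': the number of positions in the node ordering where exactly one
      edge crosses the cut between earlier and later nodes (widths == 1).
    - 'pca_elongation': the explained-variance ratio of the first PCA component of the
      node positions, rescaled from [0.5, 1] to [0, 1].
    Returns a dict with both values.
    """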
adj = nx.to_numpy_array(H)
num_nodes = H.number_of_nodes()
adjcum = np.zeros_like(adj)
res = dict()
for i in range(num_nodes-1):
row = adj[i]
adjcum[i, i+1:] = (np.cumsum(row[::-1])[::-1])[i+1:]
widths = np.sum(adjcum, axis=0)
n_bottlenecks = np.sum(widths == 1)
res['n_bottlenecks'] = n_bottlenecks
positions = np.zeros((num_nodes,2))
for i in range(num_nodes):
positions[i, :] = pos[i]
pca = PCA()
pca.fit(positions)
pca_length = pca.explained_variance_ratio_[0]
res['pca_elongation'] = 2 * (pca_length - 0.5)
return res
def get_df_for_graphs(graphs, args):
ngraphs = len(graphs)
df = None
t0 = time()
for i, g in enumerate(graphs):
print('{}/{} {}'.format(i + 1, ngraphs, g))
info = au.graphinfo(g, args.net_dir)
pos = info['pos']
H = info['G']
features = select_best(H, pos)
if df is None:
columns = features.keys()
df = | pd.DataFrame(index=graphs, columns=columns) | pandas.DataFrame |
import sys
import pandas as pd
from sqlalchemy import create_engine
import sqlite3
def load_data(messages_filepath, categories_filepath):
"""
Load messages and categories datasets
Merge these datasets into a dataframe
Args:
        messages_filepath : filepath of the messages csv file
        categories_filepath: filepath of the categories csv file
Returns:
dataframe df
"""
# Load raw datasets
messages = pd.read_csv(messages_filepath)
categories = | pd.read_csv(categories_filepath) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Tue May 3 10:49:58 2016
Auger peak finding and quantitative routines ... batch processing
@author: tkc
First get it working for single file.
"""
#%%
import pandas as pd
import numpy as np
import os, sys, shutil, glob, re
if 'C:\\Users\\tkc\\Documents\\Python_Scripts' not in sys.path:
sys.path.append('C:\\Users\\tkc\\Documents\\Python_Scripts')
import Auger_smdifquant_functions as AESsmquant
import Auger_integquant_functions as AESintquant
import Auger_utility_functions as AESutils
import Auger_plot_functions as AESplot
''' AESsmquant contains functions related to peak finding in smooth-differentiated spectra
whereas AESquant contains background fitting and integration over peaks directly from counts '''
# import Auger_integquant_functions as AESquant
#%% REFIT of problematic peaks
# Manual refitting of failed fits on single peaks (usually Ca)
# filter with SPE list above
AugerParamLog=pd.read_csv('Augerparamlog.csv', encoding='cp437')
Smdifpeakslog=pd.read_csv('Smdifpeakslog.csv', encoding='cp437')
Integquantlog=pd.read_csv('Integquantlog.csv', encoding='cp437')
Backfitlog=pd.read_csv('Backfitlog.csv', encoding='cp437')
AESquantparams=pd.read_csv('C:\\Users\\tkc\\Documents\\Python_Scripts\\AESquantparams.csv', encoding='utf-8') # global version
# ALTERNATE QUANT and FIT PARAMS (which are sometimes used if problems arise)
AESquantparams=pd.read_csv('AESquantparams.csv', encoding='utf-8') # load local version instead
AESquantparams=pd.read_csv('C:\\Users\\tkc\\Documents\\Python_Scripts\\AESquantparams_Ca_refit.csv', encoding='utf-8')
# Change type of Ca fit to 'Carefit'... linear
# Pick out the spectra for processing
spelist=AugerParamLog[(AugerParamLog['Areas']>=1)] # selects only spe files
excludemask=spelist['Comments'].str.contains('exclude', case=False, na=False)
spelist=spelist.loc[~excludemask]
Elements=['Ca']
# Create smoothed column for all csvs who lack them (now added during import process)
AESutils.addsmoothloop(spelist) # autosaves smcounts column for each area (7pt adjacent averaging)
# Comparing Ti and Ti2 peak magnitudes
Tidata=Integquantlog[Integquantlog['Element'].str.contains('Ti')]
Tidata=Integquantlog[Integquantlog['Element']=='Ti']
Ti2data=Integquantlog[Integquantlog['Element']=='Ti2']
Tidata=Tidata.dropna(subset=['Erradjcnts'])
Ti2data=Ti2data.dropna(subset=['Erradjcnts'])
Ticomp= | pd.merge(Tidata, Ti2data, how='inner',on=['Filenumber','Area'], suffixes=('','_2')) | pandas.merge |
import nose
import unittest
import os
import sys
import warnings
from datetime import datetime
import numpy as np
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, Index)
from pandas.io.pytables import HDFStore, get_store, Term, IncompatibilityWarning
import pandas.util.testing as tm
from pandas.tests.test_series import assert_series_equal
from pandas.tests.test_frame import assert_frame_equal
from pandas import concat, Timestamp
try:
import tables
except ImportError:
raise nose.SkipTest('no pytables')
from distutils.version import LooseVersion
_default_compressor = LooseVersion(tables.__version__) >= '2.2' \
and 'blosc' or 'zlib'
_multiprocess_can_split_ = False
class TestHDFStore(unittest.TestCase):
path = '__test__.h5'
scratchpath = '__scratch__.h5'
def setUp(self):
self.store = HDFStore(self.path)
def tearDown(self):
self.store.close()
os.remove(self.path)
def test_factory_fun(self):
try:
with get_store(self.scratchpath) as tbl:
raise ValueError('blah')
except ValueError:
pass
with get_store(self.scratchpath) as tbl:
tbl['a'] = tm.makeDataFrame()
with get_store(self.scratchpath) as tbl:
self.assertEquals(len(tbl), 1)
self.assertEquals(type(tbl['a']), DataFrame)
os.remove(self.scratchpath)
def test_keys(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeStringSeries()
self.store['c'] = tm.makeDataFrame()
self.store['d'] = tm.makePanel()
self.store['foo/bar'] = tm.makePanel()
self.assertEquals(len(self.store), 5)
self.assert_(set(self.store.keys()) == set(['/a', '/b', '/c', '/d', '/foo/bar']))
def test_repr(self):
repr(self.store)
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeStringSeries()
self.store['c'] = tm.makeDataFrame()
self.store['d'] = tm.makePanel()
self.store['foo/bar'] = tm.makePanel()
self.store.append('e', tm.makePanel())
repr(self.store)
str(self.store)
def test_contains(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeDataFrame()
self.store['foo/bar'] = tm.makeDataFrame()
self.assert_('a' in self.store)
self.assert_('b' in self.store)
self.assert_('c' not in self.store)
self.assert_('foo/bar' in self.store)
self.assert_('/foo/bar' in self.store)
self.assert_('/foo/b' not in self.store)
self.assert_('bar' not in self.store)
def test_versioning(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
self.assert_(self.store.root.a._v_attrs.pandas_version == '0.10')
self.assert_(self.store.root.b._v_attrs.pandas_version == '0.10')
self.assert_(self.store.root.df1._v_attrs.pandas_version == '0.10')
# write a file and wipe its versioning
self.store.remove('df2')
self.store.append('df2', df)
self.store.get_node('df2')._v_attrs.pandas_version = None
self.store.select('df2')
self.store.select('df2', [ Term('index','>',df.index[2]) ])
def test_meta(self):
raise nose.SkipTest('no meta')
meta = { 'foo' : [ 'I love pandas ' ] }
s = tm.makeTimeSeries()
s.meta = meta
self.store['a'] = s
self.assert_(self.store['a'].meta == meta)
df = tm.makeDataFrame()
df.meta = meta
self.store['b'] = df
self.assert_(self.store['b'].meta == meta)
        # this should work, but because slicing doesn't propagate meta it doesn't
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
results = self.store['df1']
#self.assert_(getattr(results,'meta',None) == meta)
# no meta
df = tm.makeDataFrame()
self.store['b'] = df
self.assert_(hasattr(self.store['b'],'meta') == False)
def test_reopen_handle(self):
self.store['a'] = tm.makeTimeSeries()
self.store.open('w', warn=False)
self.assert_(self.store.handle.isopen)
self.assertEquals(len(self.store), 0)
def test_flush(self):
self.store['a'] = tm.makeTimeSeries()
self.store.flush()
def test_get(self):
self.store['a'] = tm.makeTimeSeries()
left = self.store.get('a')
right = self.store['a']
tm.assert_series_equal(left, right)
left = self.store.get('/a')
right = self.store['/a']
tm.assert_series_equal(left, right)
self.assertRaises(KeyError, self.store.get, 'b')
def test_put(self):
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
self.store['a'] = ts
self.store['b'] = df[:10]
self.store['foo/bar/bah'] = df[:10]
self.store['foo'] = df[:10]
self.store['/foo'] = df[:10]
self.store.put('c', df[:10], table=True)
# not OK, not a table
self.assertRaises(ValueError, self.store.put, 'b', df[10:], append=True)
# node does not currently exist, test _is_table_type returns False in
# this case
self.assertRaises(ValueError, self.store.put, 'f', df[10:], append=True)
# OK
self.store.put('c', df[10:], append=True)
# overwrite table
self.store.put('c', df[:10], table=True, append=False)
tm.assert_frame_equal(df[:10], self.store['c'])
def test_put_string_index(self):
index = Index([ "I am a very long string index: %s" % i for i in range(20) ])
s = Series(np.arange(20), index = index)
df = DataFrame({ 'A' : s, 'B' : s })
self.store['a'] = s
tm.assert_series_equal(self.store['a'], s)
self.store['b'] = df
tm.assert_frame_equal(self.store['b'], df)
# mixed length
index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] + [ "I am a very long string index: %s" % i for i in range(20) ])
s = Series(np.arange(21), index = index)
df = DataFrame({ 'A' : s, 'B' : s })
self.store['a'] = s
tm.assert_series_equal(self.store['a'], s)
self.store['b'] = df
tm.assert_frame_equal(self.store['b'], df)
def test_put_compression(self):
df = tm.makeTimeDataFrame()
self.store.put('c', df, table=True, compression='zlib')
tm.assert_frame_equal(self.store['c'], df)
# can't compress if table=False
self.assertRaises(ValueError, self.store.put, 'b', df,
table=False, compression='zlib')
def test_put_compression_blosc(self):
tm.skip_if_no_package('tables', '2.2', app='blosc support')
df = tm.makeTimeDataFrame()
# can't compress if table=False
self.assertRaises(ValueError, self.store.put, 'b', df,
table=False, compression='blosc')
self.store.put('c', df, table=True, compression='blosc')
tm.assert_frame_equal(self.store['c'], df)
def test_put_integer(self):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal)
def test_append(self):
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
tm.assert_frame_equal(self.store['df1'], df)
self.store.remove('df2')
self.store.put('df2', df[:10], table=True)
self.store.append('df2', df[10:])
tm.assert_frame_equal(self.store['df2'], df)
self.store.remove('df3')
self.store.append('/df3', df[:10])
self.store.append('/df3', df[10:])
tm.assert_frame_equal(self.store['df3'], df)
        # this is allowed but you almost always don't want to do it
warnings.filterwarnings('ignore', category=tables.NaturalNameWarning)
self.store.remove('/df3 foo')
self.store.append('/df3 foo', df[:10])
self.store.append('/df3 foo', df[10:])
tm.assert_frame_equal(self.store['df3 foo'], df)
warnings.filterwarnings('always', category=tables.NaturalNameWarning)
# panel
wp = tm.makePanel()
self.store.remove('wp1')
self.store.append('wp1', wp.ix[:,:10,:])
self.store.append('wp1', wp.ix[:,10:,:])
tm.assert_panel_equal(self.store['wp1'], wp)
# ndim
p4d = tm.makePanel4D()
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:])
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
# test using axis labels
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=['items','major_axis','minor_axis'])
self.store.append('p4d', p4d.ix[:,:,10:,:], axes=['items','major_axis','minor_axis'])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
        # test using different number of items on each axis
p4d2 = p4d.copy()
p4d2['l4'] = p4d['l1']
p4d2['l5'] = p4d['l1']
self.store.remove('p4d2')
self.store.append('p4d2', p4d2, axes=['items','major_axis','minor_axis'])
tm.assert_panel4d_equal(self.store['p4d2'], p4d2)
        # test using different order of items on the non-index axes
self.store.remove('wp1')
wp_append1 = wp.ix[:,:10,:]
self.store.append('wp1', wp_append1)
wp_append2 = wp.ix[:,10:,:].reindex(items = wp.items[::-1])
self.store.append('wp1', wp_append2)
tm.assert_panel_equal(self.store['wp1'], wp)
def test_append_frame_column_oriented(self):
# column oriented
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df.ix[:,:2], axes = ['columns'])
self.store.append('df1', df.ix[:,2:])
tm.assert_frame_equal(self.store['df1'], df)
result = self.store.select('df1', 'columns=A')
expected = df.reindex(columns=['A'])
tm.assert_frame_equal(expected, result)
# this isn't supported
self.assertRaises(Exception, self.store.select, 'df1', ('columns=A', Term('index','>',df.index[4])))
# selection on the non-indexable
result = self.store.select('df1', ('columns=A', Term('index','=',df.index[0:4])))
expected = df.reindex(columns=['A'],index=df.index[0:4])
tm.assert_frame_equal(expected, result)
def test_ndim_indexables(self):
""" test using ndim tables in new ways"""
p4d = tm.makePanel4D()
def check_indexers(key, indexers):
for i,idx in enumerate(indexers):
self.assert_(getattr(getattr(self.store.root,key).table.description,idx)._v_pos == i)
# append then change (will take existing schema)
indexers = ['items','major_axis','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store.select('p4d'),p4d)
check_indexers('p4d',indexers)
        # same as above, but try to append with different axes
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:], axes=['labels','items','major_axis'])
tm.assert_panel4d_equal(self.store.select('p4d'),p4d)
check_indexers('p4d',indexers)
# pass incorrect number of axes
self.store.remove('p4d')
self.assertRaises(Exception, self.store.append, 'p4d', p4d.ix[:,:,:10,:], axes=['major_axis','minor_axis'])
# different than default indexables #1
indexers = ['labels','major_axis','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
check_indexers('p4d',indexers)
# different than default indexables #2
indexers = ['major_axis','labels','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
check_indexers('p4d',indexers)
# partial selection
result = self.store.select('p4d',['labels=l1'])
expected = p4d.reindex(labels = ['l1'])
tm.assert_panel4d_equal(result, expected)
# partial selection2
result = self.store.select('p4d',[Term('labels=l1'), Term('items=ItemA'), Term('minor_axis=B')])
expected = p4d.reindex(labels = ['l1'], items = ['ItemA'], minor_axis = ['B'])
tm.assert_panel4d_equal(result, expected)
        # non-existent partial selection
result = self.store.select('p4d',[Term('labels=l1'), Term('items=Item1'), Term('minor_axis=B')])
expected = p4d.reindex(labels = ['l1'], items = [], minor_axis = ['B'])
tm.assert_panel4d_equal(result, expected)
def test_append_with_strings(self):
wp = tm.makePanel()
wp2 = wp.rename_axis(dict([ (x,"%s_extra" % x) for x in wp.minor_axis ]), axis = 2)
self.store.append('s1', wp, min_itemsize = 20)
self.store.append('s1', wp2)
expected = concat([ wp, wp2], axis = 2)
expected = expected.reindex(minor_axis = sorted(expected.minor_axis))
tm.assert_panel_equal(self.store['s1'], expected)
# test dict format
self.store.append('s2', wp, min_itemsize = { 'minor_axis' : 20 })
self.store.append('s2', wp2)
expected = concat([ wp, wp2], axis = 2)
expected = expected.reindex(minor_axis = sorted(expected.minor_axis))
tm.assert_panel_equal(self.store['s2'], expected)
# apply the wrong field (similar to #1)
self.store.append('s3', wp, min_itemsize = { 'major_axis' : 20 })
self.assertRaises(Exception, self.store.append, 's3')
# test truncation of bigger strings
self.store.append('s4', wp)
self.assertRaises(Exception, self.store.append, 's4', wp2)
# avoid truncation on elements
df = DataFrame([[123,'asdqwerty'], [345,'dggnhebbsdfbdfb']])
self.store.append('df_big',df, min_itemsize = { 'values' : 1024 })
tm.assert_frame_equal(self.store.select('df_big'), df)
# appending smaller string ok
df2 = DataFrame([[124,'asdqy'], [346,'dggnhefbdfb']])
self.store.append('df_big',df2)
expected = concat([ df, df2 ])
tm.assert_frame_equal(self.store.select('df_big'), expected)
# avoid truncation on elements
        df = DataFrame([[123,'asdqwerty'], [345,'dggnhebbsdfbdfb']])
self.store.append('df_big2',df, min_itemsize = { 'values' : 10 })
tm.assert_frame_equal(self.store.select('df_big2'), df)
# bigger string on next append
self.store.append('df_new',df, min_itemsize = { 'values' : 16 })
df_new = DataFrame([[124,'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])
self.assertRaises(Exception, self.store.append, 'df_new',df_new)
def test_create_table_index(self):
wp = tm.makePanel()
self.store.append('p5', wp)
self.store.create_table_index('p5')
assert(self.store.handle.root.p5.table.cols.major_axis.is_indexed == True)
assert(self.store.handle.root.p5.table.cols.minor_axis.is_indexed == False)
# default optlevels
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 6)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'medium')
# let's change the indexing scheme
self.store.create_table_index('p5')
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 6)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'medium')
self.store.create_table_index('p5', optlevel=9)
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 9)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'medium')
self.store.create_table_index('p5', kind='full')
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 9)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'full')
self.store.create_table_index('p5', optlevel=1, kind='light')
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 1)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'light')
df = tm.makeTimeDataFrame()
self.store.append('f', df[:10])
self.store.append('f', df[10:])
self.store.create_table_index('f')
# try to index a non-table
self.store.put('f2', df)
self.assertRaises(Exception, self.store.create_table_index, 'f2')
# try to change the version supports flag
from pandas.io import pytables
pytables._table_supports_index = False
self.assertRaises(Exception, self.store.create_table_index, 'f')
# test out some versions
original = tables.__version__
for v in ['2.2','2.2b']:
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = v
self.assertRaises(Exception, self.store.create_table_index, 'f')
for v in ['2.3.1','2.3.1b','2.4dev','2.4',original]:
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = v
self.store.create_table_index('f')
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = original
def test_big_table(self):
raise nose.SkipTest('no big table')
# create and write a big table
wp = Panel(np.random.randn(20, 1000, 1000), items= [ 'Item%s' % i for i in xrange(20) ],
major_axis=date_range('1/1/2000', periods=1000), minor_axis = [ 'E%s' % i for i in xrange(1000) ])
wp.ix[:,100:200,300:400] = np.nan
try:
store = HDFStore(self.scratchpath)
store._debug_memory = True
store.append('wp',wp)
recons = store.select('wp')
finally:
store.close()
os.remove(self.scratchpath)
def test_append_diff_item_order(self):
raise nose.SkipTest('append diff item order')
wp = tm.makePanel()
wp1 = wp.ix[:, :10, :]
wp2 = wp.ix[['ItemC', 'ItemB', 'ItemA'], 10:, :]
self.store.put('panel', wp1, table=True)
self.assertRaises(Exception, self.store.put, 'panel', wp2,
append=True)
def test_table_index_incompatible_dtypes(self):
df1 = DataFrame({'a': [1, 2, 3]})
df2 = DataFrame({'a': [4, 5, 6]},
index=date_range('1/1/2000', periods=3))
self.store.put('frame', df1, table=True)
self.assertRaises(Exception, self.store.put, 'frame', df2,
table=True, append=True)
def test_table_values_dtypes_roundtrip(self):
df1 = DataFrame({'a': [1, 2, 3]}, dtype = 'f8')
self.store.append('df1', df1)
assert df1.dtypes == self.store['df1'].dtypes
df2 = DataFrame({'a': [1, 2, 3]}, dtype = 'i8')
self.store.append('df2', df2)
assert df2.dtypes == self.store['df2'].dtypes
# incompatible dtype
self.assertRaises(Exception, self.store.append, 'df2', df1)
def test_table_mixed_dtypes(self):
# frame
def _make_one_df():
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
return df.consolidate()
df1 = _make_one_df()
self.store.append('df1_mixed', df1)
tm.assert_frame_equal(self.store.select('df1_mixed'), df1)
# panel
def _make_one_panel():
wp = tm.makePanel()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['ItemA'] > 0
wp['bool2'] = wp['ItemB'] > 0
wp['int1'] = 1
wp['int2'] = 2
return wp.consolidate()
p1 = _make_one_panel()
self.store.append('p1_mixed', p1)
tm.assert_panel_equal(self.store.select('p1_mixed'), p1)
# ndim
def _make_one_p4d():
wp = tm.makePanel4D()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['l1'] > 0
wp['bool2'] = wp['l2'] > 0
wp['int1'] = 1
wp['int2'] = 2
return wp.consolidate()
p4d = _make_one_p4d()
self.store.append('p4d_mixed', p4d)
tm.assert_panel4d_equal(self.store.select('p4d_mixed'), p4d)
def test_remove(self):
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
self.store['a'] = ts
self.store['b'] = df
self.store.remove('a')
self.assertEquals(len(self.store), 1)
tm.assert_frame_equal(df, self.store['b'])
self.store.remove('b')
self.assertEquals(len(self.store), 0)
# pathing
self.store['a'] = ts
self.store['b/foo'] = df
self.store.remove('foo')
self.store.remove('b/foo')
self.assertEquals(len(self.store), 1)
self.store['a'] = ts
self.store['b/foo'] = df
self.store.remove('b')
self.assertEquals(len(self.store), 1)
# __delitem__
self.store['a'] = ts
self.store['b'] = df
del self.store['a']
del self.store['b']
self.assertEquals(len(self.store), 0)
def test_remove_where(self):
        # non-existence
crit1 = Term('index','>','foo')
self.store.remove('a', where=[crit1])
# try to remove non-table (with crit)
# non-table ok (where = None)
wp = tm.makePanel()
self.store.put('wp', wp, table=True)
self.store.remove('wp', [('minor_axis', ['A', 'D'])])
rs = self.store.select('wp')
expected = wp.reindex(minor_axis = ['B','C'])
tm.assert_panel_equal(rs,expected)
# empty where
self.store.remove('wp')
self.store.put('wp', wp, table=True)
# deleted number (entire table)
n = self.store.remove('wp', [])
assert(n == 120)
# non - empty where
self.store.remove('wp')
self.store.put('wp', wp, table=True)
self.assertRaises(Exception, self.store.remove,
'wp', ['foo'])
        # selecting non-table with a where
#self.store.put('wp2', wp, table=False)
#self.assertRaises(Exception, self.store.remove,
# 'wp2', [('column', ['A', 'D'])])
def test_remove_crit(self):
wp = tm.makePanel()
# group row removal
date4 = wp.major_axis.take([ 0,1,2,4,5,6,8,9,10 ])
crit4 = Term('major_axis',date4)
self.store.put('wp3', wp, table=True)
n = self.store.remove('wp3', where=[crit4])
assert(n == 36)
result = self.store.select('wp3')
expected = wp.reindex(major_axis = wp.major_axis-date4)
tm.assert_panel_equal(result, expected)
# upper half
self.store.put('wp', wp, table=True)
date = wp.major_axis[len(wp.major_axis) // 2]
crit1 = Term('major_axis','>',date)
crit2 = Term('minor_axis',['A', 'D'])
n = self.store.remove('wp', where=[crit1])
assert(n == 56)
n = self.store.remove('wp', where=[crit2])
assert(n == 32)
result = self.store['wp']
expected = wp.truncate(after=date).reindex(minor=['B', 'C'])
tm.assert_panel_equal(result, expected)
# individual row elements
self.store.put('wp2', wp, table=True)
date1 = wp.major_axis[1:3]
crit1 = Term('major_axis',date1)
self.store.remove('wp2', where=[crit1])
result = self.store.select('wp2')
expected = wp.reindex(major_axis=wp.major_axis-date1)
tm.assert_panel_equal(result, expected)
date2 = wp.major_axis[5]
crit2 = Term('major_axis',date2)
self.store.remove('wp2', where=[crit2])
result = self.store['wp2']
expected = wp.reindex(major_axis=wp.major_axis-date1-Index([date2]))
tm.assert_panel_equal(result, expected)
date3 = [wp.major_axis[7],wp.major_axis[9]]
crit3 = Term('major_axis',date3)
self.store.remove('wp2', where=[crit3])
result = self.store['wp2']
expected = wp.reindex(major_axis=wp.major_axis-date1-Index([date2])-Index(date3))
tm.assert_panel_equal(result, expected)
# corners
self.store.put('wp4', wp, table=True)
n = self.store.remove('wp4', where=[Term('major_axis','>',wp.major_axis[-1])])
result = self.store.select('wp4')
tm.assert_panel_equal(result, wp)
def test_terms(self):
wp = tm.makePanel()
p4d = tm.makePanel4D()
self.store.put('wp', wp, table=True)
self.store.put('p4d', p4d, table=True)
# some invalid terms
terms = [
[ 'minor', ['A','B'] ],
[ 'index', ['20121114'] ],
[ 'index', ['20121114', '20121114'] ],
]
for t in terms:
self.assertRaises(Exception, self.store.select, 'wp', t)
self.assertRaises(Exception, Term.__init__)
self.assertRaises(Exception, Term.__init__, 'blah')
self.assertRaises(Exception, Term.__init__, 'index')
self.assertRaises(Exception, Term.__init__, 'index', '==')
self.assertRaises(Exception, Term.__init__, 'index', '>', 5)
# panel
result = self.store.select('wp',[ Term('major_axis<20000108'), Term('minor_axis', '=', ['A','B']) ])
expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])
tm.assert_panel_equal(result, expected)
# p4d
result = self.store.select('p4d',[ Term('major_axis<20000108'), Term('minor_axis', '=', ['A','B']), Term('items', '=', ['ItemA','ItemB']) ])
expected = p4d.truncate(after='20000108').reindex(minor=['A', 'B'],items=['ItemA','ItemB'])
tm.assert_panel4d_equal(result, expected)
# valid terms
terms = [
dict(field = 'major_axis', op = '>', value = '20121114'),
('major_axis', '20121114'),
('major_axis', '>', '20121114'),
(('major_axis', ['20121114','20121114']),),
('major_axis', datetime(2012,11,14)),
'major_axis>20121114',
'major_axis>20121114',
'major_axis>20121114',
(('minor_axis', ['A','B']),),
(('minor_axis', ['A','B']),),
((('minor_axis', ['A','B']),),),
(('items', ['ItemA','ItemB']),),
('items=ItemA'),
]
for t in terms:
self.store.select('wp', t)
self.store.select('p4d', t)
# valid for p4d only
terms = [
(('labels', '=', ['l1','l2']),),
Term('labels', '=', ['l1','l2']),
]
for t in terms:
self.store.select('p4d', t)
def test_series(self):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object),
dtype=object))
self._check_roundtrip(ts3, tm.assert_series_equal)
def test_sparse_series(self):
s = tm.makeStringSeries()
s[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal,
check_series_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_series_equal,
check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.ix[3:5, 1:3] = np.nan
s.ix[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_double_roundtrip(ss, tm.assert_frame_equal,
check_frame_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_double_roundtrip(ss2, tm.assert_frame_equal,
check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_double_roundtrip(ss3, tm.assert_frame_equal,
check_frame_type=True)
def test_sparse_panel(self):
items = ['x', 'y', 'z']
p = Panel(dict((i, tm.makeDataFrame().ix[:2, :2]) for i in items))
sp = p.to_sparse()
self._check_double_roundtrip(sp, tm.assert_panel_equal,
check_panel_type=True)
sp2 = p.to_sparse(kind='integer')
self._check_double_roundtrip(sp2, tm.assert_panel_equal,
check_panel_type=True)
sp3 = p.to_sparse(fill_value=0)
self._check_double_roundtrip(sp3, tm.assert_panel_equal,
check_panel_type=True)
def test_float_index(self):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal)
def test_tuple_index(self):
# GH #492
col = np.arange(10)
idx = [(0.,1.), (2., 3.), (4., 5.)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
self._check_roundtrip(DF, tm.assert_frame_equal)
def test_index_types(self):
values = np.random.randn(2)
func = lambda l, r : tm.assert_series_equal(l, r, True, True, True)
ser = Series(values, [0, 'y'])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.today(), 0])
self._check_roundtrip(ser, func)
ser = Series(values, ['y', 0])
self._check_roundtrip(ser, func)
from datetime import date
ser = Series(values, [date.today(), 'a'])
self._check_roundtrip(ser, func)
ser = Series(values, [1.23, 'b'])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime(2012, 1, 1), datetime(2012, 1, 2)])
self._check_roundtrip(ser, func)
def test_timeseries_preepoch(self):
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
raise nose.SkipTest
dr = bdate_range('1/1/1940', '1/1/1960')
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal)
except OverflowError:
            raise nose.SkipTest('known failure on some windows platforms')
def test_frame(self):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(df, tm.assert_frame_equal)
self._check_roundtrip(df, tm.assert_frame_equal)
self._check_roundtrip_table(df, tm.assert_frame_equal,
compression=True)
self._check_roundtrip(df, tm.assert_frame_equal,
compression=True)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(tdf, tm.assert_frame_equal)
self._check_roundtrip(tdf, tm.assert_frame_equal,
compression=True)
# not consolidated
df['foo'] = np.random.randn(len(df))
self.store['df'] = df
recons = self.store['df']
self.assert_(recons._data.is_consolidated())
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal)
def test_empty_series_frame(self):
s0 = | Series() | pandas.Series |
"""
This module provides Preparer classes to
prepare the raw files and make them ready
to be stored in the database.
"""
import numpy as np
import pandas as pd
def _types_to_native(values):
"""
Converts numpy types to native types.
"""
    # np.generic scalars expose .item() (not .items()) for conversion to native Python types
    native_values = values.apply(
        lambda x: x.item() if isinstance(x, np.generic) else x, axis=1)
native_values = native_values.where(native_values.notnull(), None)
return native_values
def _dataframe_to_dict(dataframe):
"""
Converts pandas dataframe to dictionary.
"""
return dataframe.to_dict("records")
class Preparer:
"""
A generic preparer class.
"""
def prepare(self, values):
"""
Applies table specific preparation.
"""
raise NotImplementedError
def format(self, values):
"""
Applies any post formatting, if necessary.
"""
pass
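    # Subclasses override prepare() with table-specific logic; per its docstring, transform()
    # then converts the prepared values to native types and to dictionaries (presumably via
    # the module-level helpers above).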
def transform(self, values):
"""
Prepares the file, converts to native data types and converts to
dictionary.
"""
if isinstance(values, pd.Series):
values = | pd.DataFrame(values) | pandas.DataFrame |
import math
import pandas as pd
from copy import copy
from pbu import JSON
from datetime import datetime, timedelta
DEFAULT_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
class TimeSeries:
"""
Helper class to manage time series with multiple data points. It offers the ability to align dates of different time
series, force a fixed resolution and interpolate missing values, add another time series to an existing one or
simply fill columns with defaults or remove columns.
"""
# different types (auto-detect) for this time series
TYPE_DICT_OF_LISTS = "dict_list"
TYPE_LIST_OF_DICTS = "list_dict"
# initial resolution (auto-detect)
resolution = None
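    # Example usage (illustrative; keys and values are assumptions):
    #   ts = TimeSeries([{"date_time": datetime(2020, 1, 1, 0, 0), "load": 1.2},
    #                    {"date_time": datetime(2020, 1, 1, 0, 5), "load": 1.4}],
    #                   date_time_key="date_time")
    #   ts.set_resolution(timedelta(minutes=5))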
def __init__(self, input_data, date_time_key="date_time", date_format=None,
time_zone=None):
"""
Creates a new time series instance.
:param input_data: the input data, which should be either a list of dictionaries, containing at least a date
time key with values OR a map of lists, where the map contains at least a date time key with a list of values
underneath.
:param date_time_key: the key under which to find the date time information
:param date_format: optional date format to parse dates if they're provided as string.
:param time_zone: optional time zone for the date time column. Will default to the current time zone of the
machine the code is running on.
"""
# fallback for timezone internally, because Python older than 3.6 causes issues when importing pbu
self.time_zone = time_zone
if self.time_zone is None:
self.time_zone = datetime.utcnow().astimezone().tzinfo
# store fields
self.data = input_data
self.date_time_key = date_time_key
self.date_format = date_format
# analyse input data
self.type, self.keys = self._check_input_data()
# parse dates if necessary
self._parse_dates()
# extract resolution if possible as well
if self.type is not None:
self.set_resolution()
def set_resolution(self, custom_resolution=None):
"""
Sets the resolution for the time series. If no resolution is provided, it will be detected automatically. The
method doesn't return anything, but rather updates the resolution attribute of this instance.
:param custom_resolution: a timedelta instance specifying the gap between 2 date time values.
"""
if custom_resolution is None:
self.resolution = self.get_resolution()
else:
self.resolution = custom_resolution
def get_resolution(self):
"""
Auto-detects the resolution of the current date time column series. The method will iterate through all values
of the date time column, measure the difference between consecutive timestamps, build a statistic for each
difference and return the most prominent result in the time series.
:return: the most common timedelta between date time column values in the data.
"""
last_dt = datetime.now().astimezone(self.time_zone)
# collect stats
stats = {}
# determine datetime series from input data
dt_series = self.get_dates()
for dt in dt_series:
if isinstance(dt, str):
# string dates, need to be parsed
if self.date_format is None:
raise AttributeError("Attempted to determine resolution, but didn't provide a date format for "
"the date time column values, which are strings like: {}".format(dt))
dt = datetime.strptime(dt, self.date_format).astimezone(self.time_zone)
# check the diff from last datetime and collect diff in stats
diff = dt - last_dt
if diff not in stats:
stats[diff] = 0
stats[diff] += 1
last_dt = dt
# evaluate collected stats and find most common resolution
res_max = 0
current_max = None
for resolution in stats:
if stats[resolution] > res_max:
res_max = stats[resolution]
current_max = resolution
if current_max is None:
raise ValueError("Resolution could not be determined. This is most likely due to missing data.")
return current_max
def add_values(self, new_key, new_values):
"""
Adds a new series to the existing one. We assume that the new series is already aligned to the date time column
of this time series. The new values will simply be added to the existing data.
:param new_key: the key under which the new data series will be available
:param new_values: a list of all the values for this key over the whole data series.
"""
# don't add duplicate keys
if new_key in self.keys:
raise AttributeError("Can't add key '{}' to given time series, because it already contains that "
"key".format(new_key))
# check length of new data
existing_data_length = len(self.data)
if self.type == TimeSeries.TYPE_DICT_OF_LISTS:
existing_data_length = len(self.data[self.date_time_key])
# make sure the value series align in length, if not fill new series with values (max +2)
new_values = TimeSeries._align_value_series_length(new_values, existing_data_length)
if len(new_values) != existing_data_length:
# difference in size is more than 2
raise ValueError(
"Failed to add new data series with length {} to existing data series with length {}".format(
len(new_values), existing_data_length))
# add data
if self.type == TimeSeries.TYPE_DICT_OF_LISTS:
self.data[new_key] = new_values
elif self.type == TimeSeries.TYPE_LIST_OF_DICTS:
# translate to dictionary, add values and then translate back
output = self.translate_to_dict_of_lists()
output[new_key] = new_values
self.data = TimeSeries(output, self.date_time_key, time_zone=self.time_zone).translate_to_list_of_dicts()
self.keys.append(new_key)
def fill_values(self, new_key, constant_value):
"""
Adds a new data series to the existing data by filling it up completely with a constant value. The method does
not return any results, but instead updates the instance's data directly.
:param new_key: the key under which to store the new series
:param constant_value: the constant value to add for each entry in the new series
"""
# check for duplicates
if new_key in self.keys:
raise AttributeError(
"Can't add key '{}' to given time series as constant, because it already exists.".format(new_key))
# add data
if self.type == TimeSeries.TYPE_DICT_OF_LISTS:
self.data[new_key] = [constant_value] * len(self.data[self.date_time_key])
else:
output = self.translate_to_dict_of_lists()
output[new_key] = [constant_value] * len(self.data)
self.data = TimeSeries(output, self.date_time_key, time_zone=self.time_zone).translate_to_list_of_dicts()
def get_values(self, selected_key):
"""
Extracts the values of a given column as list from the current time series.
:param selected_key: the key to retrieve
:return: a list of values representing this series
"""
if self.type == TimeSeries.TYPE_DICT_OF_LISTS:
# check if the key is available
if selected_key not in self.data:
raise ValueError("Requested key {} could not be found in input data".format(selected_key))
# return the series
dt_series = self.data[selected_key]
return dt_series
elif self.type == TimeSeries.TYPE_LIST_OF_DICTS:
# check if the key exists in the first item
if selected_key not in self.data[0]:
raise ValueError("Requested key {} could not be found in first item of input data".format(selected_key))
# return the extracted column
dt_series = list(map(lambda x: x[selected_key], self.data))
return dt_series
raise AttributeError("Data series doesn't have a valid type. Extraction of value series not possible.")
def get_dates(self):
"""
Extracts the values of the date time column as list from the current time series.
:return: a list of datetime objects
"""
if self.type == TimeSeries.TYPE_DICT_OF_LISTS:
return self.data[self.date_time_key]
elif self.type == TimeSeries.TYPE_LIST_OF_DICTS:
return list(map(lambda x: x[self.date_time_key], self.data))
raise AttributeError("Data series doesn't have a valid type. Extraction of date time series not possible.")
def get_start_date(self):
"""
Extracts the first date in the date time series.
:return: a datetime object representing the first entry in the time series.
"""
return self._get_date_value(0)
def get_end_date(self):
"""
Extracts the last date in the date time series.
:return: a datetime object representing the last entry in the time series
"""
return self._get_date_value(-1)
def add_series(self, time_series, keys_to_add=None):
"""
Adds a new time series to this existing one. The first step is to align the new time series with this instance's
date time. Then the columns of the argument will be added one by one to this instance. The method doesn't return
anything, but directly updates the existing data.
:param time_series: a time series instance to add
:param keys_to_add: optionally, a list of keys to add that exist in the new series. If omitted, the method will add
all columns it finds (except the date time column).
"""
# check type
if not isinstance(time_series, TimeSeries):
raise ValueError("Provided time series is not of type TimeSeries, but {}".format(type(time_series)))
# prepare keys
if keys_to_add is None:
# fallback to all keys of the series
keys_to_add = time_series.keys
# check for duplicate keys
for key in keys_to_add:
if key in self.keys:
raise ValueError("Attempting to add key {} to existing data series, which already has "
"such a key".format(key))
# first align current data set (ensure it is aligned)
resolution = self.get_resolution() # resolution of existing series
self.align_to_resolution(resolution)
# also align the new time series to the given resolution and time frame
time_series.align_to_resolution(resolution=resolution, start_date=self.get_start_date(),
end_date=self.get_end_date())
for key in time_series.keys:
self.add_values(key, time_series.get_values(key))
_, self.keys = self._check_input_data()
def remove_series(self, keys_to_remove):
"""
Removes a column from the time series. The method will not return anything, but will directly modify the data of this
instance.
:param keys_to_remove: the key to remove
"""
if isinstance(keys_to_remove, str):
keys_to_remove = [keys_to_remove]
if self.date_time_key in keys_to_remove:
raise AttributeError("Can't remove date time key {}".format(self.date_time_key))
if self.type == TimeSeries.TYPE_DICT_OF_LISTS:
for key in keys_to_remove:
del self.data[key]
else:
# translate
output = self.translate_to_dict_of_lists()
for key in keys_to_remove:
del output[key]
self.data = TimeSeries(output, self.date_time_key, time_zone=self.time_zone).translate_to_list_of_dicts()
_, self.keys = self._check_input_data()
def align_to_resolution(self, resolution=None, start_date=None, end_date=None):
"""
Aligns the current input data to the most dominant resolution. This will keep the first date of the datetime
column and then interpolate values in between and use existing values to fill the data series with a fixed
interval between each data point. This will update the original data of this time series. The method doesn't
return anything, but directly modifies the instance's data.
:param resolution: optional, if provided will force a specific resolution and interpolate/skip values to match
that resolution
:param start_date: an optional start date, in case you want to prepend or cut the existing data series to a
specific time frame. If not provided the first date from the time series will be used.
:param end_date: an optional end date, in case you want to append or cut the existing data series to a specific
time frame. If not provided, the last date from the time series will be used.
"""
# fetch resolution and tolerance
if resolution is None:
resolution = self.get_resolution()
tolerance = resolution / 2
# start date (use start date minus resolution to match the first entry perfectly)
current_date = self.get_start_date()
if start_date is not None:
current_date = start_date
if end_date is None:
end_date = self.get_end_date()
# standardise initial data
main_data = self.data
if self.type == TimeSeries.TYPE_DICT_OF_LISTS:
main_data = self.translate_to_list_of_dicts()
# runtime variables for the alignment operation below
prev_value = None
current_original_data_index = 0
result = []
# as long as we haven't reached the end date, keep adding values
while current_date <= end_date:
is_last = False
if current_original_data_index >= len(main_data) - 1:
is_last = True
# ensure we don't run into index errors (repeat last element until end-date in worst-case scenario)
if current_original_data_index >= len(main_data):
current_original_data_index = len(main_data) - 1 # might cause endless loop in skip branch
# extract date information and time delta
original_date = main_data[current_original_data_index][self.date_time_key]
diff_seconds = (current_date - original_date).total_seconds()
# close enough
if current_date == original_date or abs(diff_seconds) < tolerance.total_seconds():
# fetch the value from the original data
prev_value = main_data[current_original_data_index]
# update date of this entry to current date
prev_value[self.date_time_key] = current_date
# add to result
result.append(prev_value)
# remember to increment the key for next item and add resolution to current date
current_original_data_index += 1
current_date += resolution
# implies the data point is nowhere close, skip this entry
elif current_date > original_date:
prev_value = main_data[current_original_data_index]
if is_last:
# we've already reached the end of the data series, fill with last value
if current_date < end_date:
missing_entries = math.floor((end_date - current_date) / resolution) + 1
for index in range(0, missing_entries):
# copy last item
new_item = copy(prev_value)
# update timestamp
new_item[self.date_time_key] = current_date + resolution
# add to result
result.append(new_item)
# remember for next loop
prev_value = new_item
# increment timestamp
current_date += resolution
if current_date == end_date:
# breaks the while loop
current_date += resolution
else:
current_original_data_index += 1
# implies that the next data point is in the future and we need to interpolate missing values
elif current_date < original_date:
# compute gaps
gaps = {}
next_value = main_data[current_original_data_index]
if prev_value is None:
prev_value = next_value
# special handling for initial result to prefill with first value
if len(result) == 0:
prev_value[self.date_time_key] = current_date
for key in self.keys:
gaps[key] = next_value[key] - prev_value[key]
missing_entries = math.floor((original_date - current_date) / resolution)
# interpolate entries starting from prev_value
for index in range(0, missing_entries):
new_item = {
self.date_time_key: prev_value[self.date_time_key] + resolution
}
for key in self.keys:
new_item[key] = prev_value[key] + (gaps[key] / missing_entries)
prev_value = new_item
result.append(new_item)
# update current date with latest added value
current_date = new_item[self.date_time_key]
# increment resolution and index
current_date += resolution
current_original_data_index += 1
# store aligned data
if self.type == TimeSeries.TYPE_DICT_OF_LISTS:
# convert if necessary
self.data = TimeSeries(input_data=result,
date_time_key=self.date_time_key,
time_zone=self.time_zone).translate_to_dict_of_lists()
else:
# already in correct format
self.data = result
def translate_to_list_of_dicts(self, date_format=None):
"""
Returns the data of this instance as a list containing dictionary items with all keys and the datetime key
being contained in each item of the list.
:param date_format: optional date format to render date_time columns as strings rather than native datetime
:return: a list of dictionaries
"""
if self.type == TimeSeries.TYPE_LIST_OF_DICTS:
# already in correct format
result = copy(self.data)
# format date_time column if necessary
if date_format is not None:
result = list(map(lambda x: TimeSeries._format_date_list_of_dict(x, self.date_time_key, date_format),
result))
return result
else:
# need translation
result = []
# iterate through all values
for index in range(0, len(self.data[self.date_time_key]), 1):
# iterate through all keys for current index and add to result
item = {}
for key in self.data:
value = self.data[key][index]
if key == self.date_time_key and date_format is not None:
# handle datetime rendering
value = value.strftime(date_format)
# add key to current item
item[key] = value
# append finished item to list of dicts
result.append(item)
# return list of dicts
return result
def translate_to_dict_of_lists(self, date_format=None):
"""
Returns the data of this instance as a dictionary with keys for each data series, including the date time key.
Underneath each key, all values (sorted by date time) are stored in a simple list containing the values as
primitive types.
:param date_format: optional date format to render date_time columns as strings rather than native datetime
objects
:return: a dictionary containing lists
"""
if self.type == TimeSeries.TYPE_DICT_OF_LISTS:
# already in correct format
result = copy(self.data)
# format date_time column if necessary
if date_format is not None:
result[self.date_time_key] = list(map(lambda x: x.strftime(date_format), result[self.date_time_key]))
return result
else:
# need translation
result = {}
# init keys
for key in self.data[0]:
result[key] = []
# map to lists
for item in self.data:
for key in item:
value = item[key]
if key == self.date_time_key and date_format is not None:
# handle datetime rendering
value = value.strftime(date_format)
result[key].append(value)
# return dict of lists
return result
def to_pd_data_frame(self):
"""
Converts the time series into a pandas DataFrame with a given time index
:return: a pandas DataFrame with the date_time column set as the index
"""
df = pd.DataFrame(self.translate_to_dict_of_lists(date_format=None)).set_index(self.date_time_key)
for col in df:
if col != self.date_time_key:
df[col] = | pd.to_numeric(df[col], errors='coerce') | pandas.to_numeric |
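# --- Illustrative usage sketch (not part of the original module) --------------
# Assumes the private helpers _check_input_data()/_parse_dates() and the rest of
# to_pd_data_frame() behave as their names and docstrings suggest; the data below
# is hypothetical. The sketch shows resolution detection, gap filling via
# align_to_resolution(), and export to pandas.
if __name__ == "__main__":
    raw = [
        {"date_time": datetime(2021, 1, 1, 0), "load": 1.0},
        {"date_time": datetime(2021, 1, 1, 1), "load": 2.0},
        {"date_time": datetime(2021, 1, 1, 2), "load": 3.0},
        # 03:00 is missing and gets filled in when aligning
        {"date_time": datetime(2021, 1, 1, 4), "load": 5.0},
        {"date_time": datetime(2021, 1, 1, 5), "load": 6.0},
    ]
    ts = TimeSeries(raw, date_time_key="date_time")
    print(ts.get_resolution())    # most common gap -> timedelta(hours=1)
    ts.align_to_resolution()      # inserts an interpolated row for 03:00
    print(ts.to_pd_data_frame())  # DataFrame indexed by date_time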
from datetime import datetime
from dateutil.tz import tzlocal, tzutc
import pandas as pd
import numpy as np
from hdmf.backends.hdf5 import HDF5IO
from hdmf.common import DynamicTable
from pynwb import NWBFile, TimeSeries, NWBHDF5IO, get_manager
from pynwb.file import Subject
from pynwb.epoch import TimeIntervals
from pynwb.ecephys import ElectricalSeries
from pynwb.testing import NWBH5IOMixin, TestCase, remove_test_file
class TestNWBFileHDF5IO(TestCase):
""" Test reading/writing an NWBFile using HDF5IO """
def setUp(self):
""" Set up an NWBFile object with an acquisition TimeSeries, analysis TimeSeries, and a processing module """
self.start_time = datetime(1970, 1, 1, 12, tzinfo=tzutc())
self.ref_time = datetime(1979, 1, 1, 0, tzinfo=tzutc())
self.create_date = datetime(2017, 4, 15, 12, tzinfo=tzlocal())
self.manager = get_manager()
self.filename = 'test_nwbfileio.h5'
self.nwbfile = NWBFile(session_description='a test NWB File',
identifier='TEST123',
session_start_time=self.start_time,
timestamps_reference_time=self.ref_time,
file_create_date=self.create_date,
experimenter='test experimenter',
stimulus_notes='test stimulus notes',
data_collection='test data collection notes',
experiment_description='test experiment description',
institution='nomad',
lab='nolab',
notes='nonotes',
pharmacology='nopharmacology',
protocol='noprotocol',
related_publications='nopubs',
session_id='007',
slices='noslices',
source_script='nosources',
surgery='nosurgery',
virus='novirus',
source_script_file_name='nofilename')
self.ts = TimeSeries(name='test_timeseries', data=list(range(100, 200, 10)),
unit='SIunit', timestamps=np.arange(10.), resolution=0.1)
self.nwbfile.add_acquisition(self.ts)
self.ts2 = TimeSeries(name='test_timeseries2', data=list(range(200, 300, 10)),
unit='SIunit', timestamps=np.arange(10.), resolution=0.1)
self.nwbfile.add_analysis(self.ts2)
self.mod = self.nwbfile.create_processing_module('test_module', 'a test module')
self.ts3 = TimeSeries(name='test_timeseries2', data=list(range(100, 200, 10)),
unit='SIunit', timestamps=np.arange(10.), resolution=0.1)
self.mod.add(self.ts3)
def tearDown(self):
""" Delete the created test file """
remove_test_file(self.filename)
def test_children(self):
""" Test that the TimeSeries and processing module are children of their respective parents """
self.assertIn(self.ts, self.nwbfile.children)
self.assertIn(self.ts2, self.nwbfile.children)
self.assertIn(self.mod, self.nwbfile.children)
self.assertIn(self.ts3, self.mod.children)
def test_write(self):
""" Test writing the NWBFile using HDF5IO """
hdf5io = HDF5IO(self.filename, manager=self.manager, mode='a')
hdf5io.write(self.nwbfile)
hdf5io.close()
# TODO add some asserts
def test_read(self):
""" Test reading the NWBFile using HDF5IO """
hdf5io = HDF5IO(self.filename, manager=self.manager, mode='w')
hdf5io.write(self.nwbfile)
hdf5io.close()
hdf5io = HDF5IO(self.filename, manager=self.manager, mode='r')
container = hdf5io.read()
self.assertIsInstance(container, NWBFile)
self.assertEqual(len(container.acquisition), 1)
self.assertEqual(len(container.analysis), 1)
for v in container.acquisition.values():
self.assertIsInstance(v, TimeSeries)
self.assertContainerEqual(container, self.nwbfile)
hdf5io.close()
class TestNWBFileIO(NWBH5IOMixin, TestCase):
""" Test writing an NWBFile to disk and reading back the file """
# this uses methods tearDown, test_roundtrip, and validate from NWBH5IOMixin. the rest are overridden
def setUp(self):
super().setUp()
self.start_time = datetime(1970, 1, 1, 12, tzinfo=tzutc())
self.ref_time = datetime(1979, 1, 1, 0, tzinfo=tzutc())
self.create_dates = [datetime(2017, 5, 1, 12, tzinfo=tzlocal()),
datetime(2017, 5, 2, 13, 0, 0, 1, tzinfo=tzutc()),
datetime(2017, 5, 2, 14, tzinfo=tzutc())]
def setUpContainer(self):
""" Return a placeholder NWBFile """
return NWBFile('placeholder', 'placeholder', datetime(1970, 1, 1, 12, tzinfo=tzutc()))
def build_nwbfile(self):
""" Create an NWB file """
self.container = NWBFile(session_description='a test session description for a test NWBFile',
identifier='FILE123',
session_start_time=self.start_time,
file_create_date=self.create_dates,
timestamps_reference_time=self.ref_time,
experimenter='A test experimenter',
lab='a test lab',
institution='a test institution',
experiment_description='a test experiment description',
session_id='test1',
notes='my notes',
pharmacology='drugs',
protocol='protocol',
related_publications='my pubs',
slices='my slices',
surgery='surgery',
virus='a virus',
source_script='noscript',
source_script_file_name='nofilename',
stimulus_notes='test stimulus notes',
data_collection='test data collection notes',
keywords=('these', 'are', 'keywords'))
def roundtripContainer(self, cache_spec=False):
""" Build and write an NWBFile to disk, read the file, and return the NWBFile """
self.build_nwbfile()
self.writer = NWBHDF5IO(self.filename, mode='w')
self.writer.write(self.container, cache_spec=cache_spec)
self.writer.close()
self.reader = NWBHDF5IO(self.filename, mode='r')
self.read_nwbfile = self.reader.read()
return self.read_nwbfile
def addContainer(self, nwbfile):
""" No-op. roundtripContainer is overridden and no longer uses addContainer """
pass
def getContainer(self, nwbfile):
""" Get the NWBFile object from the given NWBFile """
return nwbfile
class TestExperimentersConstructorRoundtrip(TestNWBFileIO):
""" Test that a list of multiple experimenters in a constructor is written to and read from file """
def build_nwbfile(self):
description = 'test nwbfile experimenter'
identifier = 'TEST_experimenter'
self.nwbfile = NWBFile(session_description=description,
identifier=identifier,
session_start_time=self.start_time,
experimenter=('experimenter1', 'experimenter2'))
class TestExperimentersSetterRoundtrip(TestNWBFileIO):
""" Test that a list of multiple experimenters in a setter is written to and read from file """
def build_nwbfile(self):
description = 'test nwbfile experimenter'
identifier = 'TEST_experimenter'
self.nwbfile = NWBFile(session_description=description,
identifier=identifier,
session_start_time=self.start_time)
self.nwbfile.experimenter = ('experimenter1', 'experimenter2')
class TestPublicationsConstructorRoundtrip(TestNWBFileIO):
""" Test that a list of multiple publications in a constructor is written to and read from file """
def build_nwbfile(self):
description = 'test nwbfile publications'
identifier = 'TEST_publications'
self.nwbfile = NWBFile(session_description=description,
identifier=identifier,
session_start_time=self.start_time,
related_publications=('pub1', 'pub2'))
class TestPublicationsSetterRoundtrip(TestNWBFileIO):
""" Test that a list of multiple publications in a setter is written to and read from file """
def build_nwbfile(self):
description = 'test nwbfile publications'
identifier = 'TEST_publications'
self.nwbfile = NWBFile(session_description=description,
identifier=identifier,
session_start_time=self.start_time)
self.nwbfile.related_publications = ('pub1', 'pub2')
class TestSubjectIO(NWBH5IOMixin, TestCase):
def setUpContainer(self):
""" Return the test Subject """
return Subject(age='P90D',
description='An unfortunate rat',
genotype='WT',
sex='M',
species='Rattus norvegicus',
subject_id='RAT123',
weight='2 kg',
date_of_birth=datetime(1970, 1, 1, 12, tzinfo=tzutc()),
strain='my_strain')
def addContainer(self, nwbfile):
""" Add the test Subject to the given NWBFile """
nwbfile.subject = self.container
def getContainer(self, nwbfile):
""" Return the test Subject from the given NWBFile """
return nwbfile.subject
class TestEmptySubjectIO(TestSubjectIO):
def setUpContainer(self):
return Subject()
class TestEpochsIO(NWBH5IOMixin, TestCase):
def setUpContainer(self):
""" Return placeholder epochs object. Tested epochs are added directly to the NWBFile in addContainer """
return TimeIntervals('epochs')
def addContainer(self, nwbfile):
""" Add the test epochs to the given NWBFile """
nwbfile.add_epoch_column(
name='temperature',
description='average temperture (c) during epoch'
)
nwbfile.add_epoch(
start_time=5.3,
stop_time=6.1,
timeseries=[],
tags='ambient',
temperature=26.4,
)
# reset the thing
self.container = nwbfile.epochs
def getContainer(self, nwbfile):
""" Return the test epochs from the given NWBFile """
return nwbfile.epochs
class TestEpochsIODf(TestEpochsIO):
def addContainer(self, nwbfile):
""" Add the test epochs with TimeSeries objects to the given NWBFile """
tsa, tsb = [
TimeSeries(name='a', data=np.arange(11), unit='flubs', timestamps=np.linspace(0, 1, 11)),
TimeSeries(name='b', data=np.arange(13), unit='flubs', timestamps=np.linspace(0.1, 5, 13)),
]
nwbfile.add_acquisition(tsa)
nwbfile.add_acquisition(tsb)
nwbfile.epochs = TimeIntervals.from_dataframe(
pd.DataFrame({
'foo': [1, 2, 3, 4],
'bar': ['fish', 'fowl', 'dog', 'cat'],
'start_time': [0.2, 0.25, 0.30, 0.35],
'stop_time': [0.25, 0.30, 0.40, 0.45],
'timeseries': [[(2, 1, tsa)],
[(3, 1, tsa)],
[(3, 1, tsa)],
[(4, 1, tsa)]],
'tags': [[''], [''], ['fizz', 'buzz'], ['qaz']]
}),
'epochs',
columns=[
{'name': 'foo', 'description': 'a column of integers'},
{'name': 'bar', 'description': 'a column of strings'},
]
)
# reset the thing
self.container = nwbfile.epochs
def test_df_comparison(self):
"""
Test that the epochs read from file converted to a data frame are the same as the data frame converted
from the original epochs and the timeseries columns within them are the same
"""
self.read_container = self.roundtripContainer()
df_obt = self.read_container.to_dataframe()
tsa = self.read_nwbfile.get_acquisition('a')
df_exp = pd.DataFrame({
'foo': [1, 2, 3, 4],
'bar': ['fish', 'fowl', 'dog', 'cat'],
'start_time': [0.2, 0.25, 0.30, 0.35],
'stop_time': [0.25, 0.30, 0.40, 0.45],
'timeseries': [[(2, 1, tsa)],
[(3, 1, tsa)],
[(3, 1, tsa)],
[(4, 1, tsa)]],
'tags': [[''], [''], ['fizz', 'buzz'], ['qaz']]
},
index=pd.Index(np.arange(4), name='id')
)
# pop the timeseries column out because ts_obt has rows of lists of tuples and ts_exp has rows of lists of lists
ts_obt = df_obt.pop('timeseries')
ts_exp = df_exp.pop('timeseries')
pd.testing.assert_frame_equal(df_exp, df_obt, check_like=True, check_dtype=False)
# check the timeseries columns match
for ex, obt in zip(ts_exp, ts_obt):
self.assertEqual(ex[0][0], obt[0][0])
self.assertEqual(ex[0][1], obt[0][1])
self.assertContainerEqual(ex[0][2], obt[0][2])
def test_df_comparison_no_ts(self):
"""
Test that the epochs read from file converted to a data frame are the same as the data frame converted
from the original epochs without a timeseries column
"""
self.read_container = self.roundtripContainer()
df_exp = pd.DataFrame({
'foo': [1, 2, 3, 4],
'bar': ['fish', 'fowl', 'dog', 'cat'],
'start_time': [0.2, 0.25, 0.30, 0.35],
'stop_time': [0.25, 0.30, 0.40, 0.45],
'tags': [[''], [''], ['fizz', 'buzz'], ['qaz']]
},
index=pd.Index(np.arange(4), name='id')
)
df_obt = self.read_container.to_dataframe(exclude=set(['timeseries', 'timeseries_index']))
| pd.testing.assert_frame_equal(df_exp, df_obt, check_like=True, check_dtype=False) | pandas.testing.assert_frame_equal |
import pandas as pd
sample1 = pd.read_table('MUT-1_2.annotate.csv', sep='\t', index_col=0)["score"]
sample2 = pd.read_table('MUT-2_2.annotate.csv', sep='\t', index_col=0)["score"]
sample3 = pd.read_table('MUT-4_2.annotate.csv', sep='\t', index_col=0)["score"]
sample4 = pd.read_table('MUT-5_2.annotate.csv', sep='\t', index_col=0)["score"]
sample5 = pd.read_table('MUT-6_2.annotate.csv', sep='\t', index_col=0)["score"]
sample6 = pd.read_table('WT-1_2.annotate.csv', sep='\t', index_col=0)["score"]
sample7 = pd.read_table('WT-2_2.annotate.csv', sep='\t', index_col=0)["score"]
sample8 = pd.read_table('WT-3_2.annotate.csv', sep='\t', index_col=0)["score"]
sample9 = pd.read_table('WT-4_2.annotate.csv', sep='\t', index_col=0)["score"]
sample10 = pd.read_table('WT-5_2.annotate.csv', sep='\t', index_col=0)["score"]
#
meta1 = pd.read_table('MUT-1_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta2 = pd.read_table('MUT-2_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta3 = pd.read_table('MUT-4_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta4 = pd.read_table('MUT-5_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta5 = pd.read_table('MUT-6_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta6 = pd.read_table('WT-1_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta7= pd.read_table('WT-2_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta8 = pd.read_table('WT-3_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta9 = pd.read_table('WT-4_2.annotate.csv', sep='\t', index_col=0).loc[:,['splice_site','intron_size', 'anchor','genes', 'exons_skipped','transcripts']]
meta10 = | pd.read_table('WT-5_2.annotate.csv', sep='\t', index_col=0) | pandas.read_table |
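# --- Illustrative sketch (assumption, not part of the original script) --------
# A natural next step is to combine the per-sample "score" Series read above into
# one junction-by-sample table; the column labels below are taken from the file
# names and are otherwise hypothetical:
#
# scores = pd.concat(
#     {"MUT-1": sample1, "MUT-2": sample2, "MUT-4": sample3, "MUT-5": sample4,
#      "MUT-6": sample5, "WT-1": sample6, "WT-2": sample7, "WT-3": sample8,
#      "WT-4": sample9, "WT-5": sample10},
#     axis=1,
# )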
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted
from src.processing.errors import InvalidModelInputError
#.- HELPERS
def _define_variables(variables):
# Check that variable names are passed in a list.
# Can take None as value
if not variables or isinstance(variables, list):
variables = variables
else:
variables = [variables]
return variables
def _find_numerical_variables(X, variables=None):
# Find numerical variables in a data set or check that
# the variables entered by the user are numerical.
if not variables:
variables = list(X.select_dtypes(include='number').columns)
else:
if len(X[variables].select_dtypes(exclude='number').columns) != 0:
raise TypeError("Some of the variables are not numerical. Please cast them as numerical "
"before calling this transformer")
return variables
def _is_dataframe(X):
# checks if the input is a dataframe. Also creates a copy,
# important not to transform the original dataset.
if not isinstance(X, pd.DataFrame):
raise TypeError("The data set should be a pandas dataframe")
return X.copy()
def _check_input_matches_training_df(X, reference):
# check that dataframe to transform has the same number of columns
# as the dataframe used during the fit method
if X.shape[1] != reference:
raise ValueError('The number of columns in this data set is different from that of the train set used during'
' the fit method')
return None
def _check_contains_na(X, variables):
if X[variables].isnull().values.any():
raise ValueError('Some of the variables to transform contain missing values. Check and remove those '
'before using this transformer.')
class BaseNumericalTransformer(BaseEstimator, TransformerMixin):
# shared set-up procedures across numerical transformers, i.e.,
# variable transformers, discretisers, outlier handlers
def fit(self, X, y=None):
# check input dataframe
X = _is_dataframe(X)
# find or check for numerical variables
self.variables = _find_numerical_variables(X, self.variables)
# check if dataset contains na
_check_contains_na(X, self.variables)
return X
def transform(self, X):
# Check method fit has been called
check_is_fitted(self)
# check that input is a dataframe
X = _is_dataframe(X)
# check if dataset contains na
_check_contains_na(X, self.variables)
# Check that the dataframe contains the same number of columns
# as the dataframe used to fit the imputer.
_check_input_matches_training_df(X, self.input_shape_[1])
return X
class Winsorizer(BaseNumericalTransformer):
"""
The Winsorizer() caps maximum and / or minimum values of a variable.
The Winsorizer() works only with numerical variables. A list of variables can
be indicated. Alternatively, the Winsorizer() will select all numerical
variables in the train set.
The Winsorizer() first calculates the capping values at the end of the
distribution. The values are determined using 1) a Gaussian approximation,
2) the inter-quantile range proximity rule or 3) percentiles.
Gaussian limits:
right tail: mean + 3* std
left tail: mean - 3* std
IQR limits:
right tail: 75th quantile + 3* IQR
left tail: 25th quantile - 3* IQR
where IQR is the inter-quartile range: 75th quantile - 25th quantile.
percentiles or quantiles:
right tail: 95th percentile
left tail: 5th percentile
You can select how far out to cap the maximum or minimum values with the
parameter 'fold'.
If distribution='gaussian' fold gives the value to multiply the std.
If distribution='skewed' fold is the value to multiply the IQR.
If distribution='quantile', fold is the percentile on each tail that should
be censored. For example, if fold=0.05, the limits will be the 5th and 95th
percentiles. If fold=0.1, the limits will be the 10th and 90th percentiles.
The transformer first finds the values at one or both tails of the distributions
(fit).
The transformer then caps the variables (transform).
Parameters
----------
distribution : str, default=gaussian
Desired distribution. Can take 'gaussian', 'skewed' or 'quantiles'.
gaussian: the transformer will find the maximum and / or minimum values to
cap the variables using the Gaussian approximation.
skewed: the transformer will find the boundaries using the IQR proximity rule.
quantiles: the limits are given by the percentiles.
tail : str, default=right
Whether to cap outliers on the right, left or both tails of the distribution.
Can take 'left', 'right' or 'both'.
fold: int or float, default=3
How far out to place the capping values. The number that will multiply
the std or IQR to calculate the capping values. Recommended values, 2
or 3 for the gaussian approximation, or 1.5 or 3 for the IQR proximity
rule.
If distribution='quantile', then 'fold' indicates the percentile. So if
fold=0.05, the limits will be the 95th and 5th percentiles.
variables : list, default=None
The list of variables for which the outliers will be capped. If None,
the transformer will find and select all numerical variables.
"""
def __init__(self, distribution='gaussian', tail='right', fold=3, variables=None):
if distribution not in ['gaussian', 'skewed', 'quantiles']:
raise ValueError("distribution takes only values 'gaussian', 'skewed' or 'quantiles'")
if tail not in ['right', 'left', 'both']:
raise ValueError("tail takes only values 'right', 'left' or 'both'")
if fold <= 0:
raise ValueError("fold takes only positive numbers")
self.distribution = distribution
self.tail = tail
self.fold = fold
self.variables = _define_variables(variables)
def fit(self, X, y=None):
"""
Learns the values that should be used to replace outliers.
Parameters
----------
X : pandas dataframe of shape = [n_samples, n_features]
The training input samples.
y : None
y is not needed in this transformer. You can pass y or None.
Attributes
----------
right_tail_caps_: dictionary
The dictionary containing the maximum values at which variables
will be capped.
left_tail_caps_ : dictionary
The dictionary containing the minimum values at which variables
will be capped.
"""
# check input dataframe
X = super().fit(X, y)
self.right_tail_caps_ = {}
self.left_tail_caps_ = {}
# estimate the end values
if self.tail in ['right', 'both']:
if self.distribution == 'gaussian':
self.right_tail_caps_ = (X[self.variables].mean() + self.fold * X[self.variables].std()).to_dict()
elif self.distribution == 'skewed':
IQR = X[self.variables].quantile(0.75) - X[self.variables].quantile(0.25)
self.right_tail_caps_ = (X[self.variables].quantile(0.75) + (IQR * self.fold)).to_dict()
elif self.distribution == 'quantiles':
self.right_tail_caps_ = X[self.variables].quantile(1-self.fold).to_dict()
if self.tail in ['left', 'both']:
if self.distribution == 'gaussian':
self.left_tail_caps_ = (X[self.variables].mean() - self.fold * X[self.variables].std()).to_dict()
elif self.distribution == 'skewed':
IQR = X[self.variables].quantile(0.75) - X[self.variables].quantile(0.25)
self.left_tail_caps_ = (X[self.variables].quantile(0.25) - (IQR * self.fold)).to_dict()
elif self.distribution == 'quantiles':
self.left_tail_caps_ = X[self.variables].quantile(self.fold).to_dict()
self.input_shape_ = X.shape
return self
def transform(self, X):
"""
Caps the variable values, that is, censors outliers.
Parameters
----------
X : pandas dataframe of shape = [n_samples, n_features]
The data to be transformed.
Returns
-------
X_transformed : pandas dataframe of shape = [n_samples, n_features]
The dataframe with the capped variables.
"""
# check input dataframe and if class was fitted
X = super().transform(X)
for feature in self.right_tail_caps_.keys():
X[feature] = np.where(X[feature] > self.right_tail_caps_[feature], self.right_tail_caps_[feature],
X[feature])
for feature in self.left_tail_caps_.keys():
X[feature] = np.where(X[feature] < self.left_tail_caps_[feature], self.left_tail_caps_[feature], X[feature])
return X
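# --- Illustrative usage sketch for the Winsorizer above (hypothetical data) ---
# With distribution='gaussian', tail='right' and fold=3 the learned cap is
# mean + 3 * std of the training column, so the injected outlier is pulled back
# to that value while ordinary rows are left untouched.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    demo = pd.DataFrame({"income": rng.normal(loc=100.0, scale=10.0, size=1000)})
    demo.loc[0, "income"] = 10_000.0  # inject an extreme outlier
    capper = Winsorizer(distribution="gaussian", tail="right", fold=3, variables=["income"])
    capped = capper.fit_transform(demo)
    print(capper.right_tail_caps_)   # {'income': mean + 3 * std of the fitted data}
    print(capped["income"].max())    # equals the learned cap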
class CategoricalImputer(BaseEstimator, TransformerMixin):
"""Categorical data missing value imputer."""
def __init__(self, variables=None) -> None:
if not isinstance(variables, list):
self.variables = [variables]
else:
self.variables = variables
def fit(self, X: pd.DataFrame, y: pd.Series = None
) -> 'CategoricalImputer':
"""Fit statement to accomodate the sklearn pipeline."""
return self
def transform(self, X: pd.DataFrame) -> pd.DataFrame:
"""Apply the transforms to the dataframe."""
X = X.copy()
for feature in self.variables:
X[feature] = X[feature].fillna('Missing')
return X
class NumericalImputer(BaseEstimator, TransformerMixin):
"""Numerical missing value imputer."""
def __init__(self, variables=None):
if not isinstance(variables, list):
self.variables = [variables]
else:
self.variables = variables
def fit(self, X, y=None):
# persist mode in a dictionary
self.imputer_dict_ = {}
for feature in self.variables:
self.imputer_dict_[feature] = X[feature].mode()[0]
return self
def transform(self, X):
X = X.copy()
for feature in self.variables:
X[feature].fillna(self.imputer_dict_[feature], inplace=True)
return X
class RareLabelCategoricalEncoder(BaseEstimator, TransformerMixin):
"""Rare label categorical encoder"""
def __init__(self, tol=0.05, variables=None):
self.tol = tol
if not isinstance(variables, list):
self.variables = [variables]
else:
self.variables = variables
def fit(self, X, y=None):
# persist frequent labels in dictionary
self.encoder_dict_ = {}
for var in self.variables:
# the encoder will learn the most frequent categories
t = pd.Series(X[var].value_counts() / float(len(X)))
# frequent labels:
self.encoder_dict_[var] = list(t[t >= self.tol].index)
return self
def transform(self, X):
X = X.copy()
for feature in self.variables:
X[feature] = np.where(X[feature].isin(
self.encoder_dict_[feature]), X[feature], 'Rare')
return X
class CategoricalEncoder(BaseEstimator, TransformerMixin):
"""String to numbers categorical encoder."""
def __init__(self, variables=None):
if not isinstance(variables, list):
self.variables = [variables]
else:
self.variables = variables
def fit(self, X, y):
temp = | pd.concat([X, y], axis=1) | pandas.concat |
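# --- Illustrative sketch (hypothetical data, not part of the original module) --
# The imputers/encoders above are designed to be chained in a scikit-learn
# Pipeline before a model step: CategoricalImputer fills NaN with 'Missing',
# then RareLabelCategoricalEncoder groups categories rarer than tol under 'Rare'.
if __name__ == "__main__":
    from sklearn.pipeline import Pipeline

    frame = pd.DataFrame(
        {"city": ["NY", "NY", "SF", None, "LA", "NY", "NY", "NY", "NY", "NY"]}
    )
    pipe = Pipeline([
        ("impute", CategoricalImputer(variables=["city"])),
        ("rare", RareLabelCategoricalEncoder(tol=0.2, variables=["city"])),
    ])
    print(pipe.fit_transform(frame)["city"].tolist())
    # 'NY' (frequency 0.7) is kept; 'SF', 'LA' and the imputed 'Missing'
    # (frequency 0.1 each, below tol=0.2) are replaced by 'Rare'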
from text_analysis import Analysis
import pandas as pd
# initialise lists
positive_scores = []
negative_scores = []
polarity_scores = []
subjective_scores = []
average_sentence_lengths = []
complex_words_percentages = []
fog_indexes = []
average_words_per_sentences = []
complex_words_counts = []
words_counts = []
syllable_counts = []
personal_pronouns_counts = []
average_word_lengths = []
def read_input(file):
# read input data
data = pd.read_excel(file)
return data
def get_scores(file):
# loop through the urls and append the result to the lists
for url in read_input(file)['URL']:
analysis = Analysis(url, 'files/master_dictionary.xlsx')
positive_scores.append(analysis.positive_score)
negative_scores.append(analysis.negative_score)
polarity_scores.append(analysis.polarity_score)
subjective_scores.append(analysis.subjective_score)
average_sentence_lengths.append(analysis.average_sentence_length)
complex_words_percentages.append(analysis.complex_words_percentage)
fog_indexes.append(analysis.fog_index)
average_words_per_sentences.append(analysis.average_words_per_sentence)
complex_words_counts.append(analysis.complex_words_count)
words_counts.append(analysis.words_count)
syllable_counts.append(analysis.syllable_count)
personal_pronouns_counts.append(analysis.personal_pronouns_count)
average_word_lengths.append(analysis.average_word_length)
print(len(positive_scores))
def write_data(output_file="output.xlsx"):
get_scores()
# read the output file
output_file = | pd.read_excel('files/output.xlsx') | pandas.read_excel |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 4 2018
File to pull fluids out of raw mimic data cut for use in downstream RL modeling.
Takes data from the raw mimic csv's in raw-data:
path on odyssey: /n/dtak/mimic-iii-v1-4/raw-data
Most of what we need is in INPUTEVENTS_CV & INPUTEVENTS_MV
@author: josephfutoma
"""
import sys
import numpy as np
import pickle
import pandas as pd
np.set_printoptions(threshold=1000)
| pd.set_option("display.max_columns",101) | pandas.set_option |
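# --- Illustrative sketch (assumption, not part of the original script) --------
# The extraction described in the docstring typically starts by streaming the
# large INPUTEVENTS_CV.csv in chunks and keeping only fluid-related item ids
# (FLUID_ITEMIDS below is a hypothetical set):
#
# inputevents_cv = pd.concat(
#     chunk[chunk["ITEMID"].isin(FLUID_ITEMIDS)]
#     for chunk in pd.read_csv("raw-data/INPUTEVENTS_CV.csv", chunksize=10**6)
# )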
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 10 12:34:45 2020
@author: gweiss01
"""
import sys
import numpy as np
import pandas as pd
import cv2
import os
import pdb
import tkinter
from tkinter.filedialog import askdirectory,askopenfilename
from tkinter.simpledialog import askstring
#tkinter.Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
global prevRightROI
global prevLeftROI
def getVideo(vidPath):
# show an "Open" dialog box and return the path to the selected file
global prevRightROI
global prevLeftROI
vid_name = os.path.splitext(os.path.basename(vidPath))[0]
print(vidPath)
cap = cv2.VideoCapture(vidPath)
cap.set(cv2.CAP_PROP_POS_FRAMES,1800)
ret, frame = cap.read()
leftFromFile = rightFromFile =""
try:
leftFromFile,rightFromFile = os.path.basename(vidPath).split("_")
rightFromFile=rightFromFile[:-4]
except: pass
cv2.rectangle(frame,(int(prevLeftROI[0]),int(prevLeftROI[1])),(int(prevLeftROI[0]+prevLeftROI[2]),int(prevLeftROI[1]+prevLeftROI[3])),color=(255,200,200),thickness=2)
leftROI = list(cv2.selectROI("Select Left Mouse, then Press ENTER"+vid_name,frame))
# leftROI[3]=leftROI[2] make the roi square
if leftROI==[0,0,0,0]: leftROI = prevLeftROI
else: prevLeftROI = leftROI
cv2.rectangle(frame,(int(prevLeftROI[0]),int(prevLeftROI[1])),(int(prevLeftROI[0]+prevLeftROI[2]),int(prevLeftROI[1]+prevLeftROI[3])),color=(255,200,200),thickness=2)
cv2.imshow("Currently Selected Left ROI",frame)
leftMouse = askstring("Enter Mouse ID's (Cancel for Prev Vid)","Left Mouse",initialvalue=leftFromFile)
if leftMouse == None:
cv2.destroyWindow("Select Left Mouse, then Press ENTER"+vid_name)
return pd.Series([])
cv2.rectangle(frame,(int(prevRightROI[0]),int(prevRightROI[1])),(int(prevRightROI[0]+prevRightROI[2]),int(prevRightROI[1]+prevRightROI[3])),color=(255,200,200),thickness=2)
rightROI = list(cv2.selectROI("Select Right Mouse, then Press ENTER"+vid_name,frame))
# rightROI[3]=rightROI[2] makes the roi square
if rightROI==[0,0,0,0]: rightROI = prevRightROI
else: prevRightROI = rightROI
cv2.rectangle(frame,(int(prevRightROI[0]),int(prevRightROI[1])),(int(prevRightROI[0]+prevRightROI[2]),int(prevRightROI[1]+prevRightROI[3])),color=(255,200,200),thickness=2)
cv2.imshow("Currently Selected Right ROI",frame)
rightMouse = askstring("Enter Mouse ID's (Cancel for Prev Vid)","Right Mouse",initialvalue=rightFromFile)
if rightMouse == None:
cv2.destroyWindow("Select Right Mouse, then Press ENTER"+vid_name)
return | pd.Series([]) | pandas.Series |
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
from torchvision import datasets
import argparse
def _get_target_indexes(dataset, target_label):
target_indexes = []
for index, (_, label) in enumerate(dataset):
if label == target_label:
target_indexes.append(index)
return np.array(target_indexes)
def create_folds(cifar10_root, output_root, n_folds):
folds_dir = os.path.join(output_root, 'folds', 'cifar10')
validation_classes_root = os.path.join(output_root, 'validation_classes')
validation_classes_path = os.path.join(validation_classes_root, 'cifar10.csv')
os.makedirs(validation_classes_root, exist_ok=True)
os.makedirs(folds_dir, exist_ok=True)
dataset = datasets.CIFAR10(root=cifar10_root, train=True, download=True)
classes = dataset.classes
n_classes = len(classes)
"====================== GENERATE CLASSES FOR VALIDATION ======================"
if not os.path.exists(validation_classes_path):
df = | pd.DataFrame(columns=['class', 'class_name', 'valid_class', 'valid_class_name']) | pandas.DataFrame |
"""
Module contains tools for processing Stata files into DataFrames
The StataReader below was originally written by <NAME> as part of PyDTA.
It has been extended and improved by <NAME> from the Statsmodels
project who also developed the StataWriter and was finally added to pandas in
a once again improved version.
You can find more information on http://presbrey.mit.edu/PyDTA and
https://www.statsmodels.org/devel/
"""
from __future__ import annotations
from collections import abc
import datetime
from io import BytesIO
import os
import struct
import sys
from typing import (
Any,
AnyStr,
Hashable,
Sequence,
cast,
)
import warnings
from dateutil.relativedelta import relativedelta
import numpy as np
from pandas._libs.lib import infer_dtype
from pandas._libs.writers import max_len_string_array
from pandas._typing import (
Buffer,
CompressionOptions,
FilePathOrBuffer,
StorageOptions,
)
from pandas.util._decorators import (
Appender,
doc,
)
from pandas.core.dtypes.common import (
ensure_object,
is_categorical_dtype,
is_datetime64_dtype,
)
from pandas import (
Categorical,
DatetimeIndex,
NaT,
Timestamp,
concat,
isna,
to_datetime,
to_timedelta,
)
from pandas.core import generic
from pandas.core.frame import DataFrame
from pandas.core.indexes.base import Index
from pandas.core.series import Series
from pandas.io.common import get_handle
_version_error = (
"Version of given Stata file is {version}. pandas supports importing "
"versions 105, 108, 111 (Stata 7SE), 113 (Stata 8/9), "
"114 (Stata 10/11), 115 (Stata 12), 117 (Stata 13), 118 (Stata 14/15/16),"
"and 119 (Stata 15/16, over 32,767 variables)."
)
_statafile_processing_params1 = """\
convert_dates : bool, default True
Convert date variables to DataFrame time values.
convert_categoricals : bool, default True
Read value labels and convert columns to Categorical/Factor variables."""
_statafile_processing_params2 = """\
index_col : str, optional
Column to set as index.
convert_missing : bool, default False
Flag indicating whether to convert missing values to their Stata
representations. If False, missing values are replaced with nan.
If True, columns containing missing values are returned with
object data types and missing values are represented by
StataMissingValue objects.
preserve_dtypes : bool, default True
Preserve Stata datatypes. If False, numeric data are upcast to pandas
default types for foreign data (float64 or int64).
columns : list or None
Columns to retain. Columns will be returned in the given order. None
returns all columns.
order_categoricals : bool, default True
Flag indicating whether converted categorical data are ordered."""
_chunksize_params = """\
chunksize : int, default None
Return StataReader object for iterations, returns chunks with
given number of lines."""
_compression_params = f"""\
compression : str or dict, default None
If string, specifies compression mode. If dict, value at key 'method'
specifies compression mode. Compression mode must be one of {{'infer',
'gzip', 'bz2', 'zip', 'xz', None}}. If compression mode is 'infer'
and `filepath_or_buffer` is path-like, then detect compression from
the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise
no compression). If dict and compression mode is one of
{{'zip', 'gzip', 'bz2'}}, or inferred as one of the above,
other entries passed as additional compression options.
{generic._shared_docs["storage_options"]}"""
_iterator_params = """\
iterator : bool, default False
Return StataReader object."""
_reader_notes = """\
Notes
-----
Categorical variables read through an iterator may not have the same
categories and dtype. This occurs when a variable stored in a DTA
file is associated to an incomplete set of value labels that only
label a strict subset of the values."""
_read_stata_doc = f"""
Read Stata file into DataFrame.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be: ``file://localhost/path/to/table.dta``.
If you want to pass in a path object, pandas accepts any ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handle (e.g. via builtin ``open`` function)
or ``StringIO``.
{_statafile_processing_params1}
{_statafile_processing_params2}
{_chunksize_params}
{_iterator_params}
{_compression_params}
Returns
-------
DataFrame or StataReader
See Also
--------
io.stata.StataReader : Low-level reader for Stata data files.
DataFrame.to_stata: Export Stata data files.
{_reader_notes}
Examples
--------
Creating a dummy stata for this example
>>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]}})
>>> df.to_stata('animals.dta')
Read a Stata dta file:
>>> df = pd.read_stata('animals.dta')
Read a Stata dta file in 10,000 line chunks:
>>> values = np.random.randint(0, 10, size=(20_000, 1), dtype="uint8")
>>> df = pd.DataFrame(values, columns=["i"])
>>> df.to_stata('filename.dta')
>>> itr = pd.read_stata('filename.dta', chunksize=10000)
>>> for chunk in itr:
... # Operate on a single chunk, e.g., chunk.mean()
... pass
>>> import os
>>> os.remove("./filename.dta")
>>> os.remove("./animals.dta")
"""
_read_method_doc = f"""\
Reads observations from Stata file, converting them into a dataframe
Parameters
----------
nrows : int
Number of lines to read from data file, if None read whole file.
{_statafile_processing_params1}
{_statafile_processing_params2}
Returns
-------
DataFrame
"""
_stata_reader_doc = f"""\
Class for reading Stata dta files.
Parameters
----------
path_or_buf : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or object
implementing a binary read() functions.
{_statafile_processing_params1}
{_statafile_processing_params2}
{_chunksize_params}
{_compression_params}
{_reader_notes}
"""
_date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]
stata_epoch = datetime.datetime(1960, 1, 1)
# TODO: Add typing. As of January 2020 it is not possible to type this function since
# mypy doesn't understand that a Series and an int can be combined using mathematical
# operations. (+, -).
def _stata_elapsed_date_to_datetime_vec(dates, fmt) -> Series:
"""
Convert from SIF to datetime. https://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
Returns
-------
converted : Series
The converted dates
Examples
--------
>>> dates = pd.Series([52])
>>> _stata_elapsed_date_to_datetime_vec(dates , "%tw")
0 1961-01-01
dtype: datetime64[ns]
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calendar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
half-yearly date - th
half-years since 1960h1
yearly date - ty
years since 0000
"""
MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year
MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days
MIN_DAY_DELTA = (Timestamp.min - datetime.datetime(1960, 1, 1)).days
MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000
MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000
def convert_year_month_safe(year, month) -> Series:
"""
Convert year and month to datetimes, using pandas vectorized versions
when the date range falls within the range supported by pandas.
Otherwise it falls back to a slower but more robust method
using datetime.
"""
if year.max() < MAX_YEAR and year.min() > MIN_YEAR:
return to_datetime(100 * year + month, format="%Y%m")
else:
index = getattr(year, "index", None)
return Series(
[datetime.datetime(y, m, 1) for y, m in zip(year, month)], index=index
)
def convert_year_days_safe(year, days) -> Series:
"""
Converts year (e.g. 1999) and days since the start of the year to a
datetime or datetime64 Series
"""
if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR:
return to_datetime(year, format="%Y") + to_timedelta(days, unit="d")
else:
index = getattr(year, "index", None)
value = [
datetime.datetime(y, 1, 1) + relativedelta(days=int(d))
for y, d in zip(year, days)
]
return Series(value, index=index)
def convert_delta_safe(base, deltas, unit) -> Series:
"""
Convert base dates and deltas to datetimes, using pandas vectorized
versions if the deltas satisfy restrictions required to be expressed
as dates in pandas.
"""
index = getattr(deltas, "index", None)
if unit == "d":
if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA:
values = [base + relativedelta(days=int(d)) for d in deltas]
return Series(values, index=index)
elif unit == "ms":
if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA:
values = [
base + relativedelta(microseconds=(int(d) * 1000)) for d in deltas
]
return Series(values, index=index)
else:
raise ValueError("format not understood")
base = to_datetime(base)
deltas = to_timedelta(deltas, unit=unit)
return base + deltas
# TODO: If/when pandas supports more than datetime64[ns], this should be
# improved to use correct range, e.g. datetime[Y] for yearly
bad_locs = np.isnan(dates)
has_bad_values = False
if bad_locs.any():
has_bad_values = True
data_col = Series(dates)
data_col[bad_locs] = 1.0 # Replace with NaT
dates = dates.astype(np.int64)
if fmt.startswith(("%tc", "tc")): # Delta ms relative to base
base = stata_epoch
ms = dates
conv_dates = convert_delta_safe(base, ms, "ms")
elif fmt.startswith(("%tC", "tC")):
warnings.warn("Encountered %tC format. Leaving in Stata Internal Format.")
conv_dates = Series(dates, dtype=object)
if has_bad_values:
conv_dates[bad_locs] = NaT
return conv_dates
# Delta days relative to base
elif fmt.startswith(("%td", "td", "%d", "d")):
base = stata_epoch
days = dates
conv_dates = convert_delta_safe(base, days, "d")
# does not count leap days - 7 days is a week.
# 52nd week may have more than 7 days
elif fmt.startswith(("%tw", "tw")):
year = stata_epoch.year + dates // 52
days = (dates % 52) * 7
conv_dates = convert_year_days_safe(year, days)
elif fmt.startswith(("%tm", "tm")): # Delta months relative to base
year = stata_epoch.year + dates // 12
month = (dates % 12) + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt.startswith(("%tq", "tq")): # Delta quarters relative to base
year = stata_epoch.year + dates // 4
quarter_month = (dates % 4) * 3 + 1
conv_dates = convert_year_month_safe(year, quarter_month)
elif fmt.startswith(("%th", "th")): # Delta half-years relative to base
year = stata_epoch.year + dates // 2
month = (dates % 2) * 6 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt.startswith(("%ty", "ty")): # Years -- not delta
year = dates
first_month = np.ones_like(dates)
conv_dates = convert_year_month_safe(year, first_month)
else:
raise ValueError(f"Date fmt {fmt} not understood")
if has_bad_values: # Restore NaT for bad values
conv_dates[bad_locs] = NaT
return conv_dates
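# Worked example of the SIF arithmetic above (illustrative note, not part of the
# original pandas source): with fmt "%tm" an elapsed value of 725 months maps to
#   year  = 1960 + 725 // 12 = 2020
#   month = 725 % 12 + 1     = 6
# so convert_year_month_safe() yields 2020-06-01:
#
#     >>> _stata_elapsed_date_to_datetime_vec(Series([725]), "%tm")
#     0   2020-06-01
#     dtype: datetime64[ns]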
def _datetime_to_stata_elapsed_vec(dates: Series, fmt: str) -> Series:
"""
Convert from datetime to SIF. https://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
Series or array containing datetime.datetime or datetime64[ns] to
convert to the Stata Internal Format given by fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
"""
index = dates.index
NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000
US_PER_DAY = NS_PER_DAY / 1000
def parse_dates_safe(dates, delta=False, year=False, days=False):
d = {}
if is_datetime64_dtype(dates.dtype):
if delta:
time_delta = dates - stata_epoch
d["delta"] = time_delta._values.view(np.int64) // 1000 # microseconds
if days or year:
date_index = DatetimeIndex(dates)
d["year"] = date_index._data.year
d["month"] = date_index._data.month
if days:
days_in_ns = dates.view(np.int64) - to_datetime(
d["year"], format="%Y"
).view(np.int64)
d["days"] = days_in_ns // NS_PER_DAY
elif infer_dtype(dates, skipna=False) == "datetime":
if delta:
delta = dates._values - stata_epoch
def f(x: datetime.timedelta) -> float:
return US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds
v = np.vectorize(f)
d["delta"] = v(delta)
if year:
year_month = dates.apply(lambda x: 100 * x.year + x.month)
d["year"] = year_month._values // 100
d["month"] = year_month._values - d["year"] * 100
if days:
def g(x: datetime.datetime) -> int:
return (x - datetime.datetime(x.year, 1, 1)).days
v = np.vectorize(g)
d["days"] = v(dates)
else:
raise ValueError(
"Columns containing dates must contain either "
"datetime64, datetime.datetime or null values."
)
return DataFrame(d, index=index)
bad_loc = isna(dates)
index = dates.index
if bad_loc.any():
dates = Series(dates)
if is_datetime64_dtype(dates):
dates[bad_loc] = to_datetime(stata_epoch)
else:
dates[bad_loc] = stata_epoch
if fmt in ["%tc", "tc"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta / 1000
elif fmt in ["%tC", "tC"]:
warnings.warn("Stata Internal Format tC not supported.")
conv_dates = dates
elif fmt in ["%td", "td"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta // US_PER_DAY
elif fmt in ["%tw", "tw"]:
d = parse_dates_safe(dates, year=True, days=True)
conv_dates = 52 * (d.year - stata_epoch.year) + d.days // 7
elif fmt in ["%tm", "tm"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 12 * (d.year - stata_epoch.year) + d.month - 1
elif fmt in ["%tq", "tq"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3
elif fmt in ["%th", "th"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 2 * (d.year - stata_epoch.year) + (d.month > 6).astype(int)
elif fmt in ["%ty", "ty"]:
d = parse_dates_safe(dates, year=True)
conv_dates = d.year
else:
raise ValueError(f"Format {fmt} is not a known Stata date format")
conv_dates = Series(conv_dates, dtype=np.float64)
missing_value = struct.unpack("<d", b"\x00\x00\x00\x00\x00\x00\xe0\x7f")[0]
conv_dates[bad_loc] = missing_value
return Series(conv_dates, index=index)
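# Illustrative sketch (hypothetical dates): a round trip through the two
# vectorized helpers. Midnight timestamps encoded as "%td" and decoded again
# should reproduce the original dates, since %td is a whole-day count from
# 1960-01-01.
def _example_datetime_round_trip() -> Series:
    """Encode datetimes as %td elapsed days and decode them back."""
    dates = Series(to_datetime(["1960-01-01", "2001-09-11", "2020-02-29"]))
    elapsed = _datetime_to_stata_elapsed_vec(dates, "%td")
    return _stata_elapsed_date_to_datetime_vec(elapsed, "%td")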
excessive_string_length_error = """
Fixed width strings in Stata .dta files are limited to 244 (or fewer)
characters. Column '{0}' does not satisfy this restriction. Use the
'version=117' parameter to write the newer (Stata 13 and later) format.
"""
class PossiblePrecisionLoss(Warning):
pass
precision_loss_doc = """
Column converted from {0} to {1}, and some data are outside of the lossless
conversion range. This may result in a loss of precision in the saved data.
"""
class ValueLabelTypeMismatch(Warning):
pass
value_label_mismatch_doc = """
Stata value labels (pandas categories) must be strings. Column {0} contains
non-string labels which will be converted to strings. Please check that the
Stata data file created has not lost information due to duplicate labels.
"""
class InvalidColumnName(Warning):
pass
invalid_name_doc = """
Not all pandas column names were valid Stata variable names.
The following replacements have been made:
{0}
If this is not what you expect, please make sure you have Stata-compliant
column names in your DataFrame (strings only, max 32 characters, only
alphanumerics and underscores, no Stata reserved words)
"""
class CategoricalConversionWarning(Warning):
pass
categorical_conversion_warning = """
One or more series with value labels are not fully labeled. Reading this
dataset with an iterator results in a categorical variable with different
categories. This occurs because it is not possible to know all possible values
until the entire dataset has been read. To avoid this warning, you can either
read the dataset without an iterator, or convert the categorical data manually
by setting ``convert_categoricals`` to False and then looking up the mappings
through the value_labels method of the reader.
"""
def _cast_to_stata_types(data: DataFrame) -> DataFrame:
"""
Checks the dtypes of the columns of a pandas DataFrame for
compatibility with the data types and ranges supported by Stata, and
converts if necessary.
Parameters
----------
data : DataFrame
The DataFrame to check and convert
Notes
-----
Numeric columns in Stata must be one of int8, int16, int32, float32 or
float64, with some additional value restrictions. int8 and int16 columns
are checked for violations of the value restrictions and upcast if needed.
    int64 data is not usable in Stata, and so it is downcast to int32 whenever
    the values are in the int32 range, and cast to float64 when larger than
    this range. If the int64 values are outside of the range of those
    perfectly representable as float64 values, a warning is raised.
    bool columns are cast to int8. uint columns are converted to int of the
    same size if there is no loss in precision, otherwise they are upcast to a
    larger type. uint64 is currently not supported since it is converted to
    object in a DataFrame.
"""
ws = ""
    # (original dtype, dtype if the values fit the smaller type, dtype otherwise)
conversion_data = (
(np.bool_, np.int8, np.int8),
(np.uint8, np.int8, np.int16),
(np.uint16, np.int16, np.int32),
(np.uint32, np.int32, np.int64),
)
float32_max = struct.unpack("<f", b"\xff\xff\xff\x7e")[0]
float64_max = struct.unpack("<d", b"\xff\xff\xff\xff\xff\xff\xdf\x7f")[0]
for col in data:
dtype = data[col].dtype
# Cast from unsupported types to supported types
for c_data in conversion_data:
if dtype == c_data[0]:
if data[col].max() <= np.iinfo(c_data[1]).max:
dtype = c_data[1]
else:
dtype = c_data[2]
if c_data[2] == np.int64: # Warn if necessary
if data[col].max() >= 2 ** 53:
ws = precision_loss_doc.format("uint64", "float64")
data[col] = data[col].astype(dtype)
# Check values and upcast if necessary
if dtype == np.int8:
if data[col].max() > 100 or data[col].min() < -127:
data[col] = data[col].astype(np.int16)
elif dtype == np.int16:
if data[col].max() > 32740 or data[col].min() < -32767:
data[col] = data[col].astype(np.int32)
elif dtype == np.int64:
if data[col].max() <= 2147483620 and data[col].min() >= -2147483647:
data[col] = data[col].astype(np.int32)
else:
data[col] = data[col].astype(np.float64)
if data[col].max() >= 2 ** 53 or data[col].min() <= -(2 ** 53):
ws = precision_loss_doc.format("int64", "float64")
elif dtype in (np.float32, np.float64):
value = data[col].max()
if np.isinf(value):
raise ValueError(
f"Column {col} has a maximum value of infinity which is outside "
"the range supported by Stata."
)
if dtype == np.float32 and value > float32_max:
data[col] = data[col].astype(np.float64)
elif dtype == np.float64:
if value > float64_max:
raise ValueError(
f"Column {col} has a maximum value ({value}) outside the range "
f"supported by Stata ({float64_max})"
)
if ws:
warnings.warn(ws, PossiblePrecisionLoss)
return data
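# Illustrative sketch (hypothetical column names): the casting rules documented
# above in action -- bool becomes int8, a uint8 column whose values exceed the
# int8 maximum is upcast to int16, and int64 values inside the int32 range are
# downcast to int32.
def _example_cast_to_stata_types() -> DataFrame:
    df = DataFrame(
        {
            "flag": np.array([True, False], dtype=np.bool_),  # -> int8
            "counts": np.array([0, 200], dtype=np.uint8),  # 200 > 127 -> int16
            "ids": np.array([1, 2_000_000], dtype=np.int64),  # fits int32
        }
    )
    return _cast_to_stata_types(df)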
class StataValueLabel:
"""
Parse a categorical column and prepare formatted output
Parameters
----------
catarray : Series
Categorical Series to encode
encoding : {"latin-1", "utf-8"}
Encoding to use for value labels.
"""
def __init__(self, catarray: Series, encoding: str = "latin-1"):
if encoding not in ("latin-1", "utf-8"):
raise ValueError("Only latin-1 and utf-8 are supported.")
self.labname = catarray.name
self._encoding = encoding
categories = catarray.cat.categories
self.value_labels = list(zip(np.arange(len(categories)), categories))
self.value_labels.sort(key=lambda x: x[0])
self.text_len = 0
self.txt: list[bytes] = []
self.n = 0
# Compute lengths and setup lists of offsets and labels
offsets: list[int] = []
values: list[int] = []
for vl in self.value_labels:
category = vl[1]
if not isinstance(category, str):
category = str(category)
warnings.warn(
value_label_mismatch_doc.format(catarray.name),
ValueLabelTypeMismatch,
)
category = category.encode(encoding)
offsets.append(self.text_len)
            self.text_len += len(category) + 1  # +1 for the terminating null byte
values.append(vl[0])
self.txt.append(category)
self.n += 1
if self.text_len > 32000:
raise ValueError(
"Stata value labels for a single variable must "
"have a combined length less than 32,000 characters."
)
# Ensure int32
self.off = np.array(offsets, dtype=np.int32)
self.val = np.array(values, dtype=np.int32)
# Total length
self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len
def generate_value_label(self, byteorder: str) -> bytes:
"""
Generate the binary representation of the value labels.
Parameters
----------
byteorder : str
Byte order of the output
Returns
-------
value_label : bytes
Bytes containing the formatted value label
"""
encoding = self._encoding
bio = BytesIO()
null_byte = b"\x00"
# len
bio.write(struct.pack(byteorder + "i", self.len))
# labname
labname = str(self.labname)[:32].encode(encoding)
lab_len = 32 if encoding not in ("utf-8", "utf8") else 128
labname = _pad_bytes(labname, lab_len + 1)
bio.write(labname)
# padding - 3 bytes
for i in range(3):
bio.write(struct.pack("c", null_byte))
# value_label_table
# n - int32
bio.write(struct.pack(byteorder + "i", self.n))
# textlen - int32
bio.write(struct.pack(byteorder + "i", self.text_len))
# off - int32 array (n elements)
for offset in self.off:
bio.write(struct.pack(byteorder + "i", offset))
# val - int32 array (n elements)
for value in self.val:
bio.write(struct.pack(byteorder + "i", value))
# txt - Text labels, null terminated
for text in self.txt:
bio.write(text + null_byte)
return bio.getvalue()
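# Illustrative sketch (hypothetical data): building the binary value-label
# record for a categorical column. The little-endian byte order "<" is an
# assumption made only for this example.
def _example_value_label_bytes() -> bytes:
    grades = Series(["low", "high", "low"], dtype="category", name="grade")
    return StataValueLabel(grades, encoding="latin-1").generate_value_label("<")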
class StataMissingValue:
"""
An observation's missing value.
Parameters
----------
value : {int, float}
The Stata missing value code
Notes
-----
More information: <https://www.stata.com/help.cgi?missing>
    Integer missing values map the codes '.', '.a', ..., '.z' to the ranges
    101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ...
    2147483647 (for int32). Missing values for floating point data types are
    more complex, but the pattern is simple to discern from the following table.
np.float32 missing values (float in Stata)
0000007f .
0008007f .a
0010007f .b
...
00c0007f .x
00c8007f .y
00d0007f .z
np.float64 missing values (double in Stata)
000000000000e07f .
000000000001e07f .a
000000000002e07f .b
...
000000000018e07f .x
000000000019e07f .y
00000000001ae07f .z
"""
# Construct a dictionary of missing values
MISSING_VALUES: dict[float, str] = {}
bases = (101, 32741, 2147483621)
for b in bases:
# Conversion to long to avoid hash issues on 32 bit platforms #8968
MISSING_VALUES[b] = "."
for i in range(1, 27):
MISSING_VALUES[i + b] = "." + chr(96 + i)
float32_base = b"\x00\x00\x00\x7f"
increment = struct.unpack("<i", b"\x00\x08\x00\x00")[0]
for i in range(27):
key = struct.unpack("<f", float32_base)[0]
MISSING_VALUES[key] = "."
if i > 0:
MISSING_VALUES[key] += chr(96 + i)
int_value = struct.unpack("<i", struct.pack("<f", key))[0] + increment
float32_base = struct.pack("<i", int_value)
float64_base = b"\x00\x00\x00\x00\x00\x00\xe0\x7f"
increment = struct.unpack("q", b"\x00\x00\x00\x00\x00\x01\x00\x00")[0]
for i in range(27):
key = struct.unpack("<d", float64_base)[0]
MISSING_VALUES[key] = "."
if i > 0:
MISSING_VALUES[key] += chr(96 + i)
int_value = struct.unpack("q", struct.pack("<d", key))[0] + increment
float64_base = struct.pack("q", int_value)
BASE_MISSING_VALUES = {
"int8": 101,
"int16": 32741,
"int32": 2147483621,
"float32": struct.unpack("<f", float32_base)[0],
"float64": struct.unpack("<d", float64_base)[0],
}
def __init__(self, value: int | float):
self._value = value
# Conversion to int to avoid hash issues on 32 bit platforms #8968
value = int(value) if value < 2147483648 else float(value)
self._str = self.MISSING_VALUES[value]
@property
def string(self) -> str:
"""
The Stata representation of the missing value: '.', '.a'..'.z'
Returns
-------
str
The representation of the missing value.
"""
return self._str
@property
def value(self) -> int | float:
"""
The binary representation of the missing value.
Returns
-------
{int, float}
The binary representation of the missing value.
"""
return self._value
def __str__(self) -> str:
return self.string
def __repr__(self) -> str:
return f"{type(self)}({self})"
def __eq__(self, other: Any) -> bool:
return (
isinstance(other, type(self))
and self.string == other.string
and self.value == other.value
)
@classmethod
def get_base_missing_value(cls, dtype: np.dtype) -> int | float:
# error: Non-overlapping equality check (left operand type: "dtype[Any]", right
# operand type: "Type[signedinteger[Any]]")
if dtype == np.int8: # type: ignore[comparison-overlap]
value = cls.BASE_MISSING_VALUES["int8"]
# error: Non-overlapping equality check (left operand type: "dtype[Any]", right
# operand type: "Type[signedinteger[Any]]")
elif dtype == np.int16: # type: ignore[comparison-overlap]
value = cls.BASE_MISSING_VALUES["int16"]
# error: Non-overlapping equality check (left operand type: "dtype[Any]", right
# operand type: "Type[signedinteger[Any]]")
elif dtype == np.int32: # type: ignore[comparison-overlap]
value = cls.BASE_MISSING_VALUES["int32"]
# error: Non-overlapping equality check (left operand type: "dtype[Any]", right
# operand type: "Type[floating[Any]]")
elif dtype == np.float32: # type: ignore[comparison-overlap]
value = cls.BASE_MISSING_VALUES["float32"]
# error: Non-overlapping equality check (left operand type: "dtype[Any]", right
# operand type: "Type[floating[Any]]")
elif dtype == np.float64: # type: ignore[comparison-overlap]
value = cls.BASE_MISSING_VALUES["float64"]
else:
raise ValueError("Unsupported dtype")
return value
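# Illustrative sketch: per the table in the class docstring, the int8 code 101
# is the generic '.' missing value and 102 is '.a'.
def _example_missing_value_codes() -> tuple[str, str]:
    return StataMissingValue(101).string, StataMissingValue(102).string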
class StataParser:
def __init__(self):
# type code.
# --------------------
# str1 1 = 0x01
# str2 2 = 0x02
# ...
# str244 244 = 0xf4
# byte 251 = 0xfb (sic)
# int 252 = 0xfc
# long 253 = 0xfd
# float 254 = 0xfe
# double 255 = 0xff
# --------------------
        # NOTE: the byte type seems to be reserved for categorical variables
        # with a label, but the underlying variable is -127 to 100, so
        # we're going to drop the label and cast to int
self.DTYPE_MAP = dict(
list(zip(range(1, 245), [np.dtype("a" + str(i)) for i in range(1, 245)]))
+ [
(251, np.dtype(np.int8)),
(252, np.dtype(np.int16)),
(253, np.dtype(np.int32)),
(254, np.dtype(np.float32)),
(255, np.dtype(np.float64)),
]
)
self.DTYPE_MAP_XML = {
32768: np.dtype(np.uint8), # Keys to GSO
65526: np.dtype(np.float64),
65527: np.dtype(np.float32),
65528: np.dtype(np.int32),
65529: np.dtype(np.int16),
65530: np.dtype(np.int8),
}
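        # For example, the old-format type code 251 maps to np.int8 and codes
        # 1-244 map to fixed-width byte strings "a1".."a244" (DTYPE_MAP above);
        # the 117+ codes 65526-65530 cover the same numeric types, with 32768
        # reserved for strL (GSO) keys.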
# error: Argument 1 to "list" has incompatible type "str";
# expected "Iterable[int]" [arg-type]
self.TYPE_MAP = list(range(251)) + list("bhlfd") # type: ignore[arg-type]
self.TYPE_MAP_XML = {
# Not really a Q, unclear how to handle byteswap
32768: "Q",
65526: "d",
65527: "f",
65528: "l",
65529: "h",
65530: "b",
}
# NOTE: technically, some of these are wrong. there are more numbers
# that can be represented. it's the 27 ABOVE and BELOW the max listed
# numeric data type in [U] 12.2.2 of the 11.2 manual
float32_min = b"\xff\xff\xff\xfe"
float32_max = b"\xff\xff\xff\x7e"
float64_min = b"\xff\xff\xff\xff\xff\xff\xef\xff"
float64_max = b"\xff\xff\xff\xff\xff\xff\xdf\x7f"
self.VALID_RANGE = {
"b": (-127, 100),
"h": (-32767, 32740),
"l": (-2147483647, 2147483620),
"f": (
np.float32(struct.unpack("<f", float32_min)[0]),
np.float32(struct.unpack("<f", float32_max)[0]),
),
"d": (
np.float64(struct.unpack("<d", float64_min)[0]),
np.float64(struct.unpack("<d", float64_max)[0]),
),
}
self.OLD_TYPE_MAPPING = {
98: 251, # byte
105: 252, # int
108: 253, # long
102: 254, # float
100: 255, # double
}
# These missing values are the generic '.' in Stata, and are used
# to replace nans
self.MISSING_VALUES = {
"b": 101,
"h": 32741,
"l": 2147483621,
"f": np.float32(struct.unpack("<f", b"\x00\x00\x00\x7f")[0]),
"d": np.float64(
struct.unpack("<d", b"\x00\x00\x00\x00\x00\x00\xe0\x7f")[0]
),
}
self.NUMPY_TYPE_MAP = {
"b": "i1",
"h": "i2",
"l": "i4",
"f": "f4",
"d": "f8",
"Q": "u8",
}
# Reserved words cannot be used as variable names
self.RESERVED_WORDS = (
"aggregate",
"array",
"boolean",
"break",
"byte",
"case",
"catch",
"class",
"colvector",
"complex",
"const",
"continue",
"default",
"delegate",
"delete",
"do",
"double",
"else",
"eltypedef",
"end",
"enum",
"explicit",
"export",
"external",
"float",
"for",
"friend",
"function",
"global",
"goto",
"if",
"inline",
"int",
"local",
"long",
"NULL",
"pragma",
"protected",
"quad",
"rowvector",
"short",
"typedef",
"typename",
"virtual",
"_all",
"_N",
"_skip",
"_b",
"_pi",
"str#",
"in",
"_pred",
"strL",
"_coef",
"_rc",
"using",
"_cons",
"_se",
"with",
"_n",
)
class StataReader(StataParser, abc.Iterator):
__doc__ = _stata_reader_doc
def __init__(
self,
path_or_buf: FilePathOrBuffer,
convert_dates: bool = True,
convert_categoricals: bool = True,
index_col: str | None = None,
convert_missing: bool = False,
preserve_dtypes: bool = True,
columns: Sequence[str] | None = None,
order_categoricals: bool = True,
chunksize: int | None = None,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
):
super().__init__()
self.col_sizes: list[int] = []
# Arguments to the reader (can be temporarily overridden in
# calls to read).
self._convert_dates = convert_dates
self._convert_categoricals = convert_categoricals
self._index_col = index_col
self._convert_missing = convert_missing
self._preserve_dtypes = preserve_dtypes
self._columns = columns
self._order_categoricals = order_categoricals
self._encoding = ""
self._chunksize = chunksize
self._using_iterator = False
if self._chunksize is None:
self._chunksize = 1
elif not isinstance(chunksize, int) or chunksize <= 0:
raise ValueError("chunksize must be a positive integer when set.")
# State variables for the file
self._has_string_data = False
self._missing_values = False
self._can_read_value_labels = False
self._column_selector_set = False
self._value_labels_read = False
self._data_read = False
self._dtype: np.dtype | None = None
self._lines_read = 0
self._native_byteorder = _set_endianness(sys.byteorder)
with get_handle(
path_or_buf,
"rb",
storage_options=storage_options,
is_text=False,
compression=compression,
) as handles:
# Copy to BytesIO, and ensure no encoding
# Argument 1 to "BytesIO" has incompatible type "Union[Any, bytes, None,
# str]"; expected "bytes"
self.path_or_buf = BytesIO(handles.handle.read()) # type: ignore[arg-type]
self._read_header()
self._setup_dtype()
def __enter__(self) -> StataReader:
"""enter context manager"""
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
"""exit context manager"""
self.close()
def close(self) -> None:
"""close the handle if its open"""
self.path_or_buf.close()
def _set_encoding(self) -> None:
"""
Set string encoding which depends on file version
"""
if self.format_version < 118:
self._encoding = "latin-1"
else:
self._encoding = "utf-8"
def _read_header(self) -> None:
first_char = self.path_or_buf.read(1)
if struct.unpack("c", first_char)[0] == b"<":
self._read_new_header()
else:
self._read_old_header(first_char)
self.has_string_data = len([x for x in self.typlist if type(x) is int]) > 0
# calculate size of a data record
self.col_sizes = [self._calcsize(typ) for typ in self.typlist]
def _read_new_header(self) -> None:
# The first part of the header is common to 117 - 119.
self.path_or_buf.read(27) # stata_dta><header><release>
self.format_version = int(self.path_or_buf.read(3))
if self.format_version not in [117, 118, 119]:
raise ValueError(_version_error.format(version=self.format_version))
self._set_encoding()
self.path_or_buf.read(21) # </release><byteorder>
self.byteorder = self.path_or_buf.read(3) == b"MSF" and ">" or "<"
self.path_or_buf.read(15) # </byteorder><K>
nvar_type = "H" if self.format_version <= 118 else "I"
nvar_size = 2 if self.format_version <= 118 else 4
self.nvar = struct.unpack(
self.byteorder + nvar_type, self.path_or_buf.read(nvar_size)
)[0]
self.path_or_buf.read(7) # </K><N>
self.nobs = self._get_nobs()
self.path_or_buf.read(11) # </N><label>
self._data_label = self._get_data_label()
self.path_or_buf.read(19) # </label><timestamp>
self.time_stamp = self._get_time_stamp()
self.path_or_buf.read(26) # </timestamp></header><map>
self.path_or_buf.read(8) # 0x0000000000000000
self.path_or_buf.read(8) # position of <map>
self._seek_vartypes = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 16
)
self._seek_varnames = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 10
)
self._seek_sortlist = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 10
)
self._seek_formats = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 9
)
self._seek_value_label_names = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 19
)
# Requires version-specific treatment
self._seek_variable_labels = self._get_seek_variable_labels()
self.path_or_buf.read(8) # <characteristics>
self.data_location = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 6
)
self.seek_strls = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 7
)
self.seek_value_labels = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 14
)
self.typlist, self.dtyplist = self._get_dtypes(self._seek_vartypes)
self.path_or_buf.seek(self._seek_varnames)
self.varlist = self._get_varlist()
self.path_or_buf.seek(self._seek_sortlist)
self.srtlist = struct.unpack(
self.byteorder + ("h" * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1)),
)[:-1]
self.path_or_buf.seek(self._seek_formats)
self.fmtlist = self._get_fmtlist()
self.path_or_buf.seek(self._seek_value_label_names)
self.lbllist = self._get_lbllist()
self.path_or_buf.seek(self._seek_variable_labels)
self._variable_labels = self._get_variable_labels()
# Get data type information, works for versions 117-119.
def _get_dtypes(
self, seek_vartypes: int
) -> tuple[list[int | str], list[str | np.dtype]]:
self.path_or_buf.seek(seek_vartypes)
raw_typlist = [
struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
for _ in range(self.nvar)
]
def f(typ: int) -> int | str:
if typ <= 2045:
return typ
try:
return self.TYPE_MAP_XML[typ]
except KeyError as err:
raise ValueError(f"cannot convert stata types [{typ}]") from err
typlist = [f(x) for x in raw_typlist]
def g(typ: int) -> str | np.dtype:
if typ <= 2045:
return str(typ)
try:
# error: Incompatible return value type (got "Type[number]", expected
# "Union[str, dtype]")
return self.DTYPE_MAP_XML[typ] # type: ignore[return-value]
except KeyError as err:
raise ValueError(f"cannot convert stata dtype [{typ}]") from err
dtyplist = [g(x) for x in raw_typlist]
return typlist, dtyplist
def _get_varlist(self) -> list[str]:
        # 33 in older formats, 129 in formats 118 and 119
b = 33 if self.format_version < 118 else 129
return [self._decode(self.path_or_buf.read(b)) for _ in range(self.nvar)]
# Returns the format list
def _get_fmtlist(self) -> list[str]:
if self.format_version >= 118:
b = 57
elif self.format_version > 113:
b = 49
elif self.format_version > 104:
b = 12
else:
b = 7
return [self._decode(self.path_or_buf.read(b)) for _ in range(self.nvar)]
# Returns the label list
def _get_lbllist(self) -> list[str]:
if self.format_version >= 118:
b = 129
elif self.format_version > 108:
b = 33
else:
b = 9
return [self._decode(self.path_or_buf.read(b)) for _ in range(self.nvar)]
def _get_variable_labels(self) -> list[str]:
if self.format_version >= 118:
vlblist = [
self._decode(self.path_or_buf.read(321)) for _ in range(self.nvar)
]
elif self.format_version > 105:
vlblist = [
self._decode(self.path_or_buf.read(81)) for _ in range(self.nvar)
]
else:
vlblist = [
self._decode(self.path_or_buf.read(32)) for _ in range(self.nvar)
]
return vlblist
def _get_nobs(self) -> int:
if self.format_version >= 118:
return struct.unpack(self.byteorder + "Q", self.path_or_buf.read(8))[0]
else:
return struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
def _get_data_label(self) -> str:
if self.format_version >= 118:
strlen = struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version == 117:
strlen = struct.unpack("b", self.path_or_buf.read(1))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version > 105:
return self._decode(self.path_or_buf.read(81))
else:
return self._decode(self.path_or_buf.read(32))
def _get_time_stamp(self) -> str:
if self.format_version >= 118:
strlen = struct.unpack("b", self.path_or_buf.read(1))[0]
return self.path_or_buf.read(strlen).decode("utf-8")
elif self.format_version == 117:
strlen = struct.unpack("b", self.path_or_buf.read(1))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version > 104:
return self._decode(self.path_or_buf.read(18))
else:
raise ValueError()
def _get_seek_variable_labels(self) -> int:
if self.format_version == 117:
self.path_or_buf.read(8) # <variable_labels>, throw away
# Stata 117 data files do not follow the described format. This is
# a work around that uses the previous label, 33 bytes for each
# variable, 20 for the closing tag and 17 for the opening tag
return self._seek_value_label_names + (33 * self.nvar) + 20 + 17
elif self.format_version >= 118:
return struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 17
else:
raise ValueError()
def _read_old_header(self, first_char: bytes) -> None:
self.format_version = struct.unpack("b", first_char)[0]
if self.format_version not in [104, 105, 108, 111, 113, 114, 115]:
raise ValueError(_version_error.format(version=self.format_version))
self._set_encoding()
self.byteorder = (
struct.unpack("b", self.path_or_buf.read(1))[0] == 0x1 and ">" or "<"
)
self.filetype = struct.unpack("b", self.path_or_buf.read(1))[0]
self.path_or_buf.read(1) # unused
self.nvar = struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
self.nobs = self._get_nobs()
self._data_label = self._get_data_label()
self.time_stamp = self._get_time_stamp()
# descriptors
if self.format_version > 108:
typlist = [ord(self.path_or_buf.read(1)) for _ in range(self.nvar)]
else:
buf = self.path_or_buf.read(self.nvar)
typlistb = np.frombuffer(buf, dtype=np.uint8)
typlist = []
for tp in typlistb:
if tp in self.OLD_TYPE_MAPPING:
typlist.append(self.OLD_TYPE_MAPPING[tp])
else:
typlist.append(tp - 127) # bytes
try:
self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
except ValueError as err:
invalid_types = ",".join([str(x) for x in typlist])
raise ValueError(f"cannot convert stata types [{invalid_types}]") from err
try:
self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
except ValueError as err:
invalid_dtypes = ",".join([str(x) for x in typlist])
raise ValueError(f"cannot convert stata dtypes [{invalid_dtypes}]") from err
if self.format_version > 108:
self.varlist = [
self._decode(self.path_or_buf.read(33)) for _ in range(self.nvar)
]
else:
self.varlist = [
self._decode(self.path_or_buf.read(9)) for _ in range(self.nvar)
]
self.srtlist = struct.unpack(
self.byteorder + ("h" * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1)),
)[:-1]
self.fmtlist = self._get_fmtlist()
self.lbllist = self._get_lbllist()
self._variable_labels = self._get_variable_labels()
# ignore expansion fields (Format 105 and later)
# When reading, read five bytes; the last four bytes now tell you
# the size of the next read, which you discard. You then continue
# like this until you read 5 bytes of zeros.
if self.format_version > 104:
while True:
data_type = struct.unpack(
self.byteorder + "b", self.path_or_buf.read(1)
)[0]
if self.format_version > 108:
data_len = struct.unpack(
self.byteorder + "i", self.path_or_buf.read(4)
)[0]
else:
data_len = struct.unpack(
self.byteorder + "h", self.path_or_buf.read(2)
)[0]
if data_type == 0:
break
self.path_or_buf.read(data_len)
# necessary data to continue parsing
self.data_location = self.path_or_buf.tell()
def _setup_dtype(self) -> np.dtype:
"""Map between numpy and state dtypes"""
if self._dtype is not None:
return self._dtype
dtypes = [] # Convert struct data types to numpy data type
for i, typ in enumerate(self.typlist):
if typ in self.NUMPY_TYPE_MAP:
typ = cast(str, typ) # only strs in NUMPY_TYPE_MAP
dtypes.append(("s" + str(i), self.byteorder + self.NUMPY_TYPE_MAP[typ]))
else:
dtypes.append(("s" + str(i), "S" + str(typ)))
self._dtype = np.dtype(dtypes)
return self._dtype
def _calcsize(self, fmt: int | str) -> int:
if isinstance(fmt, int):
return fmt
return struct.calcsize(self.byteorder + fmt)
def _decode(self, s: bytes) -> str:
# have bytes not strings, so must decode
s = s.partition(b"\0")[0]
try:
return s.decode(self._encoding)
except UnicodeDecodeError:
# GH 25960, fallback to handle incorrect format produced when 117
# files are converted to 118 files in Stata
encoding = self._encoding
msg = f"""
One or more strings in the dta file could not be decoded using {encoding}, and
so the fallback encoding of latin-1 is being used. This can happen when a file
has been incorrectly encoded by Stata or some other software. You should verify
the string values returned are correct."""
warnings.warn(msg, UnicodeWarning)
return s.decode("latin-1")
def _read_value_labels(self) -> None:
if self._value_labels_read:
# Don't read twice
return
if self.format_version <= 108:
# Value labels are not supported in version 108 and earlier.
self._value_labels_read = True
self.value_label_dict: dict[str, dict[float | int, str]] = {}
return
if self.format_version >= 117:
self.path_or_buf.seek(self.seek_value_labels)
else:
assert self._dtype is not None
offset = self.nobs * self._dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
self._value_labels_read = True
self.value_label_dict = {}
while True:
if self.format_version >= 117:
if self.path_or_buf.read(5) == b"</val": # <lbl>
break # end of value label table
slength = self.path_or_buf.read(4)
if not slength:
break # end of value label table (format < 117)
if self.format_version <= 117:
labname = self._decode(self.path_or_buf.read(33))
else:
labname = self._decode(self.path_or_buf.read(129))
self.path_or_buf.read(3) # padding
n = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
txtlen = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
off = np.frombuffer(
self.path_or_buf.read(4 * n), dtype=self.byteorder + "i4", count=n
)
val = np.frombuffer(
self.path_or_buf.read(4 * n), dtype=self.byteorder + "i4", count=n
)
ii = np.argsort(off)
off = off[ii]
val = val[ii]
txt = self.path_or_buf.read(txtlen)
self.value_label_dict[labname] = {}
for i in range(n):
end = off[i + 1] if i < n - 1 else txtlen
self.value_label_dict[labname][val[i]] = self._decode(txt[off[i] : end])
if self.format_version >= 117:
self.path_or_buf.read(6) # </lbl>
self._value_labels_read = True
def _read_strls(self) -> None:
self.path_or_buf.seek(self.seek_strls)
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
self.GSO = {"0": ""}
while True:
if self.path_or_buf.read(3) != b"GSO":
break
if self.format_version == 117:
v_o = struct.unpack(self.byteorder + "Q", self.path_or_buf.read(8))[0]
else:
buf = self.path_or_buf.read(12)
# Only tested on little endian file on little endian machine.
v_size = 2 if self.format_version == 118 else 3
if self.byteorder == "<":
buf = buf[0:v_size] + buf[4 : (12 - v_size)]
else:
# This path may not be correct, impossible to test
buf = buf[0:v_size] + buf[(4 + v_size) :]
v_o = struct.unpack("Q", buf)[0]
typ = struct.unpack("B", self.path_or_buf.read(1))[0]
length = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
va = self.path_or_buf.read(length)
if typ == 130:
decoded_va = va[0:-1].decode(self._encoding)
else:
# Stata says typ 129 can be binary, so use str
decoded_va = str(va)
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
self.GSO[str(v_o)] = decoded_va
def __next__(self) -> DataFrame:
self._using_iterator = True
return self.read(nrows=self._chunksize)
def get_chunk(self, size: int | None = None) -> DataFrame:
"""
Reads lines from Stata file and returns as dataframe
Parameters
----------
        size : int, defaults to None
            Number of lines to read. If None, uses the reader's chunksize.
Returns
-------
DataFrame
"""
if size is None:
size = self._chunksize
return self.read(nrows=size)
@Appender(_read_method_doc)
def read(
self,
nrows: int | None = None,
convert_dates: bool | None = None,
convert_categoricals: bool | None = None,
index_col: str | None = None,
convert_missing: bool | None = None,
preserve_dtypes: bool | None = None,
columns: Sequence[str] | None = None,
order_categoricals: bool | None = None,
) -> DataFrame:
# Handle empty file or chunk. If reading incrementally raise
# StopIteration. If reading the whole thing return an empty
# data frame.
if (self.nobs == 0) and (nrows is None):
self._can_read_value_labels = True
self._data_read = True
self.close()
return DataFrame(columns=self.varlist)
# Handle options
if convert_dates is None:
convert_dates = self._convert_dates
if convert_categoricals is None:
convert_categoricals = self._convert_categoricals
if convert_missing is None:
convert_missing = self._convert_missing
if preserve_dtypes is None:
preserve_dtypes = self._preserve_dtypes
if columns is None:
columns = self._columns
if order_categoricals is None:
order_categoricals = self._order_categoricals
if index_col is None:
index_col = self._index_col
if nrows is None:
nrows = self.nobs
if (self.format_version >= 117) and (not self._value_labels_read):
self._can_read_value_labels = True
self._read_strls()
# Read data
assert self._dtype is not None
dtype = self._dtype
max_read_len = (self.nobs - self._lines_read) * dtype.itemsize
read_len = nrows * dtype.itemsize
read_len = min(read_len, max_read_len)
if read_len <= 0:
# Iterator has finished, should never be here unless
# we are reading the file incrementally
if convert_categoricals:
self._read_value_labels()
self.close()
raise StopIteration
offset = self._lines_read * dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
read_lines = min(nrows, self.nobs - self._lines_read)
data = np.frombuffer(
self.path_or_buf.read(read_len), dtype=dtype, count=read_lines
)
self._lines_read += read_lines
if self._lines_read == self.nobs:
self._can_read_value_labels = True
self._data_read = True
# if necessary, swap the byte order to native here
if self.byteorder != self._native_byteorder:
data = data.byteswap().newbyteorder()
if convert_categoricals:
self._read_value_labels()
if len(data) == 0:
data = DataFrame(columns=self.varlist)
else:
data = DataFrame.from_records(data)
data.columns = self.varlist
# If index is not specified, use actual row number rather than
# restarting at 0 for each chunk.
if index_col is None:
ix = np.arange(self._lines_read - read_lines, self._lines_read)
data = data.set_index(ix)
if columns is not None:
try:
data = self._do_select_columns(data, columns)
except ValueError:
self.close()
raise
# Decode strings
for col, typ in zip(data, self.typlist):
if type(typ) is int:
data[col] = data[col].apply(self._decode, convert_dtype=True)
data = self._insert_strls(data)
cols_ = np.where([dtyp is not None for dtyp in self.dtyplist])[0]
# Convert columns (if needed) to match input type
ix = data.index
requires_type_conversion = False
data_formatted = []
for i in cols_:
if self.dtyplist[i] is not None:
col = data.columns[i]
dtype = data[col].dtype
if dtype != np.dtype(object) and dtype != self.dtyplist[i]:
requires_type_conversion = True
data_formatted.append(
(col, Series(data[col], ix, self.dtyplist[i]))
)
else:
data_formatted.append((col, data[col]))
if requires_type_conversion:
data = DataFrame.from_dict(dict(data_formatted))
del data_formatted
data = self._do_convert_missing(data, convert_missing)
if convert_dates:
def any_startswith(x: str) -> bool:
return any(x.startswith(fmt) for fmt in _date_formats)
cols = np.where([any_startswith(x) for x in self.fmtlist])[0]
for i in cols:
col = data.columns[i]
try:
data[col] = _stata_elapsed_date_to_datetime_vec(
data[col], self.fmtlist[i]
)
except ValueError:
self.close()
raise
if convert_categoricals and self.format_version > 108:
data = self._do_convert_categoricals(
data, self.value_label_dict, self.lbllist, order_categoricals
)
if not preserve_dtypes:
retyped_data = []
convert = False
for col in data:
dtype = data[col].dtype
if dtype in (np.dtype(np.float16), np.dtype(np.float32)):
dtype = np.dtype(np.float64)
convert = True
elif dtype in (
np.dtype(np.int8),
np.dtype(np.int16),
np.dtype(np.int32),
):
dtype = np.dtype(np.int64)
convert = True
retyped_data.append((col, data[col].astype(dtype)))
if convert:
data = DataFrame.from_dict(dict(retyped_data))
if index_col is not None:
data = data.set_index(data.pop(index_col))
return data
def _do_convert_missing(self, data: DataFrame, convert_missing: bool) -> DataFrame:
# Check for missing values, and replace if found
replacements = {}
for i, colname in enumerate(data):
fmt = self.typlist[i]
if fmt not in self.VALID_RANGE:
continue
fmt = cast(str, fmt) # only strs in VALID_RANGE
nmin, nmax = self.VALID_RANGE[fmt]
series = data[colname]
# appreciably faster to do this with ndarray instead of Series
svals = series._values
missing = (svals < nmin) | (svals > nmax)
if not missing.any():
continue
if convert_missing: # Replacement follows Stata notation
missing_loc = np.nonzero(np.asarray(missing))[0]
umissing, umissing_loc = np.unique(series[missing], return_inverse=True)
replacement = Series(series, dtype=object)
for j, um in enumerate(umissing):
missing_value = StataMissingValue(um)
loc = missing_loc[umissing_loc == j]
replacement.iloc[loc] = missing_value
else: # All replacements are identical
dtype = series.dtype
if dtype not in (np.float32, np.float64):
dtype = np.float64
replacement = Series(series, dtype=dtype)
                # Note: operating on ._values is much faster than operating
                # on the Series directly.  TODO: can we fix that?
replacement._values[missing] = np.nan
replacements[colname] = replacement
if replacements:
columns = data.columns
replacement_df = DataFrame(replacements, copy=False)
replaced = concat(
[data.drop(replacement_df.columns, axis=1), replacement_df],
axis=1,
copy=False,
)
data = replaced[columns]
return data
def _insert_strls(self, data: DataFrame) -> DataFrame:
if not hasattr(self, "GSO") or len(self.GSO) == 0:
return data
for i, typ in enumerate(self.typlist):
if typ != "Q":
continue
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
data.iloc[:, i] = [self.GSO[str(k)] for k in data.iloc[:, i]]
return data
def _do_select_columns(self, data: DataFrame, columns: Sequence[str]) -> DataFrame:
if not self._column_selector_set:
column_set = set(columns)
if len(column_set) != len(columns):
raise ValueError("columns contains duplicate entries")
unmatched = column_set.difference(data.columns)
if unmatched:
joined = ", ".join(list(unmatched))
raise ValueError(
"The following columns were not "
f"found in the Stata data set: {joined}"
)
# Copy information for retained columns for later processing
dtyplist = []
typlist = []
fmtlist = []
lbllist = []
for col in columns:
i = data.columns.get_loc(col)
dtyplist.append(self.dtyplist[i])
typlist.append(self.typlist[i])
fmtlist.append(self.fmtlist[i])
lbllist.append(self.lbllist[i])
self.dtyplist = dtyplist
self.typlist = typlist
self.fmtlist = fmtlist
self.lbllist = lbllist
self._column_selector_set = True
return data[columns]
def _do_convert_categoricals(
self,
data: DataFrame,
value_label_dict: dict[str, dict[float | int, str]],
lbllist: Sequence[str],
order_categoricals: bool,
) -> DataFrame:
"""
Converts categorical columns to Categorical type.
"""
value_labels = list(value_label_dict.keys())
cat_converted_data = []
for col, label in zip(data, lbllist):
if label in value_labels:
# Explicit call with ordered=True
vl = value_label_dict[label]
keys = np.array(list(vl.keys()))
column = data[col]
key_matches = column.isin(keys)
if self._using_iterator and key_matches.all():
initial_categories: np.ndarray | None = keys
# If all categories are in the keys and we are iterating,
# use the same keys for all chunks. If some are missing
# value labels, then we will fall back to the categories
# varying across chunks.
else:
if self._using_iterator:
# warn is using an iterator
warnings.warn(
categorical_conversion_warning, CategoricalConversionWarning
)
initial_categories = None
cat_data = Categorical(
column, categories=initial_categories, ordered=order_categoricals
)
if initial_categories is None:
# If None here, then we need to match the cats in the Categorical
categories = []
for category in cat_data.categories:
if category in vl:
categories.append(vl[category])
else:
categories.append(category)
else:
# If all cats are matched, we can use the values
categories = list(vl.values())
try:
# Try to catch duplicate categories
cat_data.categories = categories
except ValueError as err:
vc = Series(categories).value_counts()
repeated_cats = list(vc.index[vc > 1])
repeats = "-" * 80 + "\n" + "\n".join(repeated_cats)
# GH 25772
msg = f"""
Value labels for column {col} are not unique. These cannot be converted to
pandas categoricals.
Either read the file with `convert_categoricals` set to False or use the
low level interface in `StataReader` to separately read the values and the
value_labels.
The repeated labels are:
{repeats}
"""
raise ValueError(msg) from err
# TODO: is the next line needed above in the data(...) method?
cat_series = Series(cat_data, index=data.index)
cat_converted_data.append((col, cat_series))
else:
cat_converted_data.append((col, data[col]))
data = DataFrame(dict(cat_converted_data), copy=False)
return data
@property
def data_label(self) -> str:
"""
Return data label of Stata file.
"""
return self._data_label
def variable_labels(self) -> dict[str, str]:
"""
Return variable labels as a dict, associating each variable name
with corresponding label.
Returns
-------
dict
"""
return dict(zip(self.varlist, self._variable_labels))
def value_labels(self) -> dict[str, dict[float | int, str]]:
"""
        Return a dict that associates each variable name with a dict mapping
        each value to its corresponding label.
Returns
-------
dict
"""
if not self._value_labels_read:
self._read_value_labels()
return self.value_label_dict
@Appender(_read_stata_doc)
def read_stata(
filepath_or_buffer: FilePathOrBuffer,
convert_dates: bool = True,
convert_categoricals: bool = True,
index_col: str | None = None,
convert_missing: bool = False,
preserve_dtypes: bool = True,
columns: Sequence[str] | None = None,
order_categoricals: bool = True,
chunksize: int | None = None,
iterator: bool = False,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
) -> DataFrame | StataReader:
reader = StataReader(
filepath_or_buffer,
convert_dates=convert_dates,
convert_categoricals=convert_categoricals,
index_col=index_col,
convert_missing=convert_missing,
preserve_dtypes=preserve_dtypes,
columns=columns,
order_categoricals=order_categoricals,
chunksize=chunksize,
storage_options=storage_options,
compression=compression,
)
if iterator or chunksize:
return reader
with reader:
return reader.read()
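# Illustrative usage sketch (the file name "example.dta" is hypothetical): read
# a file in one shot, or pass chunksize to get an iterable StataReader instead
# of a DataFrame.
def _example_read_stata(path: str = "example.dta") -> DataFrame:
    full = read_stata(path)
    with read_stata(path, chunksize=10_000) as reader:
        n_rows = sum(len(chunk) for chunk in reader)
    assert n_rows == len(full)
    return full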
def _set_endianness(endianness: str) -> str:
if endianness.lower() in ["<", "little"]:
return "<"
elif endianness.lower() in [">", "big"]:
return ">"
else: # pragma : no cover
raise ValueError(f"Endianness {endianness} not understood")
def _pad_bytes(name: AnyStr, length: int) -> AnyStr:
"""
    Take a char string and pad it with null bytes until it is ``length`` chars long.
"""
if isinstance(name, bytes):
return name + b"\x00" * (length - len(name))
return name + "\x00" * (length - len(name))
def _convert_datetime_to_stata_type(fmt: str) -> np.dtype:
"""
Convert from one of the stata date formats to a type in TYPE_MAP.
"""
if fmt in [
"tc",
"%tc",
"td",
"%td",
"tw",
"%tw",
"tm",
"%tm",
"tq",
"%tq",
"th",
"%th",
"ty",
"%ty",
]:
return np.dtype(np.float64) # Stata expects doubles for SIFs
else:
raise NotImplementedError(f"Format {fmt} not implemented")
def _maybe_convert_to_int_keys(convert_dates: dict, varlist: list[Hashable]) -> dict:
new_dict = {}
for key in convert_dates:
if not convert_dates[key].startswith("%"): # make sure proper fmts
convert_dates[key] = "%" + convert_dates[key]
if key in varlist:
new_dict.update({varlist.index(key): convert_dates[key]})
else:
if not isinstance(key, int):
raise ValueError("convert_dates key must be a column or an integer")
new_dict.update({key: convert_dates[key]})
return new_dict
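# Illustrative sketch (hypothetical names): convert_dates keys given as column
# names are replaced by their positional index, and formats gain a leading "%":
# _maybe_convert_to_int_keys({"start": "td"}, ["id", "start"]) -> {1: "%td"}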
def _dtype_to_stata_type(dtype: np.dtype, column: Series) -> int:
"""
Convert dtype types to stata types. Returns the byte of the given ordinal.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 244 are strings of this length
Pandas Stata
251 - for int8 byte
252 - for int16 int
253 - for int32 long
254 - for float32 float
255 - for double double
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
# TODO: expand to handle datetime to integer conversion
if dtype.type == np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we
# do?
        itemsize = max_len_string_array(ensure_object(column._values))
"""Generate HVTN505 dataset for Michael on statsrv"""
import pandas as pd
import numpy as np
import re
import itertools
__all__ = ['parseProcessed',
'parseRaw',
'unstackIR',
'compressSubsets',
'subset2vec',
'vec2subset',
'itersubsets',
'subset2label',
'subsetdf',
'applyResponseCriteria',
'computeMarginals',
'generateGzAPerfExceptions']
def unstackIR(df, uVars):
"""Return a response and magnitude df with one row per ptid
and columns for each combination of uVars"""
varFunc = lambda r: ' '.join(r[uVars])
tmpDf = df.copy()
tmpDf['var'] = tmpDf.apply(varFunc, axis=1)
responseDf = tmpDf.pivot(index='ptid', columns='var', values='response')
magDf = tmpDf.pivot(index='ptid', columns='var', values='mag')
return responseDf, magDf
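# Illustrative sketch (made-up assay rows): unstackIR pivots a long-format table
# into one row per ptid with one column per combination of the uVars variables.
def _example_unstackIR():
    df = pd.DataFrame({'ptid': ['1', '1', '2', '2'],
                       'antigen': ['gag', 'env', 'gag', 'env'],
                       'tcellsub': ['CD4'] * 4,
                       'response': [1, 0, 0, 1],
                       'mag': [0.2, 0.01, 0.02, 0.5]})
    return unstackIR(df, uVars=['antigen', 'tcellsub'])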
def _parsePTID(v):
"""Returns a string version of a PTID"""
if pd.isnull(v):
out = 'NA'
elif np.isreal(v):
out = '%1.0f' % v
else:
out = v
out = out.replace('-', '')
if out[-2:] == '.0':
out = out[:-2]
return out
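# Illustrative sketch: _parsePTID normalizes IDs to plain digit strings -- a
# float like 123456.0 becomes '123456', dashes are stripped, and missing values
# come back as 'NA' (e.g. _parsePTID(np.nan) -> 'NA').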
def _parseIR(fn, uVars, mag, subset={}, printUnique=False, sep=','):
    raw = pd.read_csv(fn, dtype={'ptid':str, 'Ptid':str}, skipinitialspace=True, sep=sep)
""" Test cases for DataFrame.plot """
import string
import warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Series,
date_range,
)
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase
from pandas.io.formats.printing import pprint_thing
pytestmark = pytest.mark.slow
@td.skip_if_no_mpl
class TestDataFramePlotsSubplots(TestPlotBase):
def test_subplots(self):
df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
for kind in ["bar", "barh", "line", "area"]:
axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
assert axes.shape == (3,)
for ax, column in zip(axes, df.columns):
self._check_legend_labels(ax, labels=[pprint_thing(column)])
for ax in axes[:-2]:
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
if kind != "bar":
# change https://github.com/pandas-dev/pandas/issues/26714
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
self._check_visible(ax.xaxis.get_label(), visible=False)
self._check_visible(ax.get_yticklabels())
self._check_visible(axes[-1].xaxis)
self._check_visible(axes[-1].get_xticklabels())
self._check_visible(axes[-1].get_xticklabels(minor=True))
self._check_visible(axes[-1].xaxis.get_label())
self._check_visible(axes[-1].get_yticklabels())
axes = df.plot(kind=kind, subplots=True, sharex=False)
for ax in axes:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible(ax.get_xticklabels(minor=True))
self._check_visible(ax.xaxis.get_label())
self._check_visible(ax.get_yticklabels())
axes = df.plot(kind=kind, subplots=True, legend=False)
for ax in axes:
assert ax.get_legend() is None
def test_subplots_timeseries(self):
idx = date_range(start="2014-07-01", freq="M", periods=10)
df = DataFrame(np.random.rand(10, 3), index=idx)
for kind in ["line", "area"]:
axes = df.plot(kind=kind, subplots=True, sharex=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
for ax in axes[:-2]:
# GH 7801
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
self._check_visible(ax.xaxis.get_label(), visible=False)
self._check_visible(ax.get_yticklabels())
self._check_visible(axes[-1].xaxis)
self._check_visible(axes[-1].get_xticklabels())
self._check_visible(axes[-1].get_xticklabels(minor=True))
self._check_visible(axes[-1].xaxis.get_label())
self._check_visible(axes[-1].get_yticklabels())
self._check_ticks_props(axes, xrot=0)
axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45, fontsize=7)
for ax in axes:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible(ax.get_xticklabels(minor=True))
self._check_visible(ax.xaxis.get_label())
self._check_visible(ax.get_yticklabels())
self._check_ticks_props(ax, xlabelsize=7, xrot=45, ylabelsize=7)
def test_subplots_timeseries_y_axis(self):
# GH16953
data = {
"numeric": np.array([1, 2, 5]),
"timedelta": [
pd.Timedelta(-10, unit="s"),
pd.Timedelta(10, unit="m"),
pd.Timedelta(10, unit="h"),
],
"datetime_no_tz": [
pd.to_datetime("2017-08-01 00:00:00"),
pd.to_datetime("2017-08-01 02:00:00"),
pd.to_datetime("2017-08-02 00:00:00"),
],
"datetime_all_tz": [
pd.to_datetime("2017-08-01 00:00:00", utc=True),
pd.to_datetime("2017-08-01 02:00:00", utc=True),
pd.to_datetime("2017-08-02 00:00:00", utc=True),
],
"text": ["This", "should", "fail"],
}
testdata = DataFrame(data)
y_cols = ["numeric", "timedelta", "datetime_no_tz", "datetime_all_tz"]
for col in y_cols:
ax = testdata.plot(y=col)
result = ax.get_lines()[0].get_data()[1]
expected = testdata[col].values
assert (result == expected).all()
msg = "no numeric data to plot"
with pytest.raises(TypeError, match=msg):
testdata.plot(y="text")
@pytest.mark.xfail(reason="not support for period, categorical, datetime_mixed_tz")
def test_subplots_timeseries_y_axis_not_supported(self):
"""
This test will fail for:
period:
since period isn't yet implemented in ``select_dtypes``
and because it will need a custom value converter +
tick formatter (as was done for x-axis plots)
categorical:
because it will need a custom value converter +
tick formatter (also doesn't work for x-axis, as of now)
datetime_mixed_tz:
because of the way how pandas handles ``Series`` of
``datetime`` objects with different timezone,
generally converting ``datetime`` objects in a tz-aware
form could help with this problem
"""
data = {
"numeric": np.array([1, 2, 5]),
"period": [
pd.Period("2017-08-01 00:00:00", freq="H"),
pd.Period("2017-08-01 02:00", freq="H"),
pd.Period("2017-08-02 00:00:00", freq="H"),
],
"categorical": pd.Categorical(
["c", "b", "a"], categories=["a", "b", "c"], ordered=False
),
"datetime_mixed_tz": [
pd.to_datetime("2017-08-01 00:00:00", utc=True),
pd.to_datetime("2017-08-01 02:00:00"),
pd.to_datetime("2017-08-02 00:00:00"),
],
}
testdata = DataFrame(data)
ax_period = testdata.plot(x="numeric", y="period")
assert (
ax_period.get_lines()[0].get_data()[1] == testdata["period"].values
).all()
ax_categorical = testdata.plot(x="numeric", y="categorical")
assert (
ax_categorical.get_lines()[0].get_data()[1]
== testdata["categorical"].values
).all()
ax_datetime_mixed_tz = testdata.plot(x="numeric", y="datetime_mixed_tz")
assert (
ax_datetime_mixed_tz.get_lines()[0].get_data()[1]
== testdata["datetime_mixed_tz"].values
).all()
def test_subplots_layout_multi_column(self):
# GH 6667
df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(-1, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(2, -1))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
assert axes.shape == (1, 4)
axes = df.plot(subplots=True, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
assert axes.shape == (1, 4)
axes = df.plot(subplots=True, layout=(4, -1))
self._check_axes_shape(axes, axes_num=3, layout=(4, 1))
assert axes.shape == (4, 1)
msg = "Layout of 1x1 must be larger than required size 3"
with pytest.raises(ValueError, match=msg):
df.plot(subplots=True, layout=(1, 1))
msg = "At least one dimension of layout must be positive"
with pytest.raises(ValueError, match=msg):
df.plot(subplots=True, layout=(-1, -1))
@pytest.mark.parametrize(
"kwargs, expected_axes_num, expected_layout, expected_shape",
[
({}, 1, (1, 1), (1,)),
({"layout": (3, 3)}, 1, (3, 3), (3, 3)),
],
)
def test_subplots_layout_single_column(
self, kwargs, expected_axes_num, expected_layout, expected_shape
):
# GH 6667
df = DataFrame(np.random.rand(10, 1), index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, **kwargs)
self._check_axes_shape(
axes,
axes_num=expected_axes_num,
layout=expected_layout,
)
assert axes.shape == expected_shape
def test_subplots_warnings(self):
# GH 9464
with tm.assert_produces_warning(None):
df = DataFrame(np.random.randn(100, 4))
df.plot(subplots=True, layout=(3, 2))
df = DataFrame(
np.random.randn(100, 4), index=date_range("1/1/2000", periods=100)
)
df.plot(subplots=True, layout=(3, 2))
def test_subplots_multiple_axes(self):
# GH 5353, 6970, GH 7069
fig, axes = self.plt.subplots(2, 3)
df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
returned = df.plot(subplots=True, ax=axes[0], sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
assert returned.shape == (3,)
assert returned[0].figure is fig
# draw on second row
returned = df.plot(subplots=True, ax=axes[1], sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
assert returned.shape == (3,)
assert returned[0].figure is fig
self._check_axes_shape(axes, axes_num=6, layout=(2, 3))
tm.close()
msg = "The number of passed axes must be 3, the same as the output plot"
with pytest.raises(ValueError, match=msg):
fig, axes = self.plt.subplots(2, 3)
# pass different number of axes from required
df.plot(subplots=True, ax=axes)
# pass 2-dim axes and invalid layout
        # an invalid layout should not affect the input and return value
        # (the warning that is shown is tested in
        # TestDataFrameGroupByPlots.test_grouped_box_multiple_axes)
fig, axes = self.plt.subplots(2, 2)
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
df = DataFrame(np.random.rand(10, 4), index=list(string.ascii_letters[:10]))
returned = df.plot(
subplots=True, ax=axes, layout=(2, 1), sharex=False, sharey=False
)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
assert returned.shape == (4,)
returned = df.plot(
subplots=True, ax=axes, layout=(2, -1), sharex=False, sharey=False
)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
assert returned.shape == (4,)
returned = df.plot(
subplots=True, ax=axes, layout=(-1, 2), sharex=False, sharey=False
)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
assert returned.shape == (4,)
# single column
fig, axes = self.plt.subplots(1, 1)
df = DataFrame(np.random.rand(10, 1), index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, ax=[axes], sharex=False, sharey=False)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
assert axes.shape == (1,)
def test_subplots_ts_share_axes(self):
# GH 3964
fig, axes = self.plt.subplots(3, 3, sharex=True, sharey=True)
self.plt.subplots_adjust(left=0.05, right=0.95, hspace=0.3, wspace=0.3)
df = DataFrame(
np.random.randn(10, 9),
index=date_range(start="2014-07-01", freq="M", periods=10),
)
for i, ax in enumerate(axes.ravel()):
df[i].plot(ax=ax, fontsize=5)
# Rows other than bottom should not be visible
for ax in axes[0:-1].ravel():
self._check_visible(ax.get_xticklabels(), visible=False)
# Bottom row should be visible
for ax in axes[-1].ravel():
self._check_visible(ax.get_xticklabels(), visible=True)
# First column should be visible
for ax in axes[[0, 1, 2], [0]].ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
# Other columns should not be visible
for ax in axes[[0, 1, 2], [1]].ravel():
self._check_visible(ax.get_yticklabels(), visible=False)
for ax in axes[[0, 1, 2], [2]].ravel():
self._check_visible(ax.get_yticklabels(), visible=False)
def test_subplots_sharex_axes_existing_axes(self):
# GH 9158
d = {"A": [1.0, 2.0, 3.0, 4.0], "B": [4.0, 3.0, 2.0, 1.0], "C": [5, 1, 3, 4]}
df = DataFrame(d, index=date_range("2014 10 11", "2014 10 14"))
axes = df[["A", "B"]].plot(subplots=True)
df["C"].plot(ax=axes[0], secondary_y=True)
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
for ax in axes.ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
def test_subplots_dup_columns(self):
# GH 10962
df = DataFrame(np.random.rand(5, 5), columns=list("aaaaa"))
axes = df.plot(subplots=True)
for ax in axes:
self._check_legend_labels(ax, labels=["a"])
assert len(ax.lines) == 1
tm.close()
axes = df.plot(subplots=True, secondary_y="a")
for ax in axes:
# (right) is only attached when subplots=False
self._check_legend_labels(ax, labels=["a"])
assert len(ax.lines) == 1
tm.close()
ax = df.plot(secondary_y="a")
self._check_legend_labels(ax, labels=["a (right)"] * 5)
assert len(ax.lines) == 0
assert len(ax.right_ax.lines) == 5
def test_bar_log_no_subplots(self):
# GH3254, GH3298 matplotlib/matplotlib#1882, #1892
# regressions in 1.2.1
expected = np.array([0.1, 1.0, 10.0, 100])
# no subplots
df = DataFrame({"A": [3] * 5, "B": list(range(1, 6))}, index=range(5))
ax = df.plot.bar(grid=True, log=True)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
def test_bar_log_subplots(self):
expected = np.array([0.1, 1.0, 10.0, 100.0, 1000.0, 1e4])
ax = DataFrame([Series([200, 300]), | Series([300, 500]) | pandas.Series |
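# Hypothetical usage sketch of the subplot layout behaviour exercised in the tests
# above (standard pandas/matplotlib API; the DataFrame contents are placeholders):
#
#   df = DataFrame(np.random.rand(10, 4))
#   axes = df.plot(subplots=True, layout=(2, -1))  # -1 lets pandas infer the second dimension
#   assert axes.shape == (2, 2)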
from googleapiclient.discovery import build
from datetime import datetime, timedelta
from pandas import DataFrame, Timedelta, to_timedelta
from structures import Structure
from networkdays import networkdays
from calendar import monthrange
class Timesheet:
def __init__(self, credentials, sheetid):
# The ID and range of a sample spreadsheet.
self.SAMPLE_SPREADSHEET_ID = sheetid
self.service = build('sheets', 'v4', credentials=credentials)
# Call the Sheets API
self.sheet = self.service.spreadsheets()
# Get the Sheet name
sheet_metadata = self.sheet.get(spreadsheetId=self.SAMPLE_SPREADSHEET_ID).execute()
sheetname = sheet_metadata['sheets'][0]['properties']['title']
# Get sheet version
self.SAMPLE_RANGE_NAME = sheetname + "!" + "B2:N"
# Google Credentials
self.credentials = credentials
self.values = list()
self.data = list()
self.total_hours = Timedelta("00:00:00")
# Metadata information of the sheet
self.person = ''
self.month = ''
self.year = ''
self.max_working_hours = Timedelta("00:00:00")
# Calculate correction last row data
self.row_correction = 0
correction = {
"v. 20210427": 1,
"v. 20210114": 2,
"v. 20201217": 2,
"v. 20201130": 0,
"v. 20200527": 0,
"v. 20190725": 0
}
sheet_range = {
"v. 20210427": "!B2:N",
"v. 20210114": "!B2:N",
"v. 20201217": "!B2:N",
"v. 20201130": "!A1:N",
"v. 20200527": "!A1:N",
"v. 20190725": "!A1:N"
}
result = self.sheet.values().get(spreadsheetId=self.SAMPLE_SPREADSHEET_ID,
range=sheetname + "!A:N").execute()
version = result.get('values', [])
if len(version[0]) == 0:
version = version[1][1]
else:
version = version[0][13]
self.SAMPLE_RANGE_NAME = sheetname + sheet_range[version]
self.row_correction = correction[version]
self.lastday = int()
position_project_description = {
"v. 20210427": 2,
"v. 20210114": 2,
"v. 20201217": 2,
"v. 20201130": 3,
"v. 20200527": 3,
"v. 20190725": 3
}
position_start_hours = {
"v. 20210427": 6,
"v. 20210114": 6,
"v. 20201217": 6,
"v. 20201130": 7,
"v. 20200527": 7,
"v. 20190725": 7
}
self.position_project_description = position_project_description[version]
self.position_start_hours = position_start_hours[version]
self.position_end_hours = self.position_start_hours + 1
self.position_total_hours = self.position_end_hours + 1
def get_data_from_timesheet(self):
"""Shows basic usage of the Sheets API.
Prints values from a sample spreadsheet.
"""
# Request the data
result = self.sheet.values().get(spreadsheetId=self.SAMPLE_SPREADSHEET_ID,
range=self.SAMPLE_RANGE_NAME).execute()
values = result.get('values', [])
self.service.close()
self.values = values
def data_analysis_validation(self):
df = DataFrame(self.values)
# last_data_row = df.index[df[6] == 'max working hours'].tolist()[0]
last_data_row = max(df.index[df[0] == str(self.lastday)].tolist())
first_data_row = min(df.index[df[0] == '1'].tolist()) + 1
        # We subtract 2 lines from this due to the max working hours and the sum
# TODO: This only applies to the last versions of the template...
# last_data_row -= self.row_correction
aux = self.values[first_data_row:last_data_row + 1]
# last_day = int(aux[len(aux) - 1][0])
# Get the days that are weekends
weekend = list(filter(lambda x: x[self.position_project_description] == 'WeekEnd Day', aux))
weekend = list(map(lambda x: int(x[0]), weekend))
for i in range(1, self.lastday):
# Filter list for day and extract only the corresponding columns of hours
# and the day is not weekend
temp1 = list(
filter(
lambda x: x[0] in [str(i)] and x[self.position_project_description] not in
['WeekEnd Day', 'Bank Holiday', 'NOT APPLICABLE']
, aux
)
)
# Check if the temp1 is empty, it means that we have a WeekEnd Day or Bank Holiday
# Therefore we do not need to validate the data
if len(temp1) != 0:
temp1 = list(map(lambda x: [x[self.position_start_hours], x[self.position_end_hours], x[self.position_total_hours]], temp1))
# Check difference between "End h" and "Start h"
list(map(lambda x: self.__validate_diff_hours__(day=i, hours=x), temp1))
# Check number hours per day equal to 8
temp1 = list(map(lambda x: x[2], temp1))
self.__validate_sum_hours_day__(day=i, hours=temp1, weekend=weekend)
self.data = aux
@staticmethod
def __validate_diff_hours__(day, hours):
# we specify the input and the format...
t = datetime.strptime(hours[2], "%H:%M:%S")
# ...and use datetime's hour, min and sec properties to build a timedelta
delta = timedelta(hours=t.hour, minutes=t.minute, seconds=t.second)
diff = datetime.strptime(hours[1], '%H:%M') - datetime.strptime(hours[0], '%H:%M')
if diff != delta:
if diff - delta == timedelta(minutes=45):
print(f'WARNING, the difference is not correct on day {day}, hours {hours}, diff {diff - delta}')
else:
print(f'ERROR, the difference is not correct on day {day}, hours {hours}, diff {diff - delta}')
@staticmethod
def __validate_sum_hours_day__(day, hours, weekend):
# print(day, hours)
        # convert each "HH:MM:SS" total into a timedelta before summing
        aux = []
        for value in hours:
            t = datetime.strptime(value, "%H:%M:%S")
            aux.append(timedelta(hours=t.hour, minutes=t.minute, seconds=t.second))
result = sum(aux, timedelta())
if result != timedelta(hours=8) and day not in weekend:
print(f'Error: day {day} has a sum of hours different from 8, total: {result}')
def data_extraction(self):
final_data = dict()
df = DataFrame(self.data)
# Assign the correct name to the columns
if self.position_project_description == 2:
df.columns = ['day', 'code', 'project', 'wp', 'task', 'location', 'start', 'end', 'hours', 'a', 'b', 'c', 'd']
elif self.position_project_description == 3:
df.columns = ['day', 'code', 'e', 'project', 'wp', 'task', 'location', 'start', 'end', 'hours', 'a', 'b', 'c', 'd']
# Extract a sub-dataframe of the data interested to manage ('project', 'wp', 'task', 'hours')
df = df[["project", "wp", "task", "hours"]]
# Convert column "hours" from string to timedelta
df['hours'] = to_timedelta(df['hours'])
# Get the unique list of values of the column project
projects = df.project.unique().tolist()
print()
# Calculate the sum of hours per each WP and per each task
for p in projects:
mask = df['project'].values == p
aux_df = df.loc[mask]
# aux_df.columns = ["project", "wp", "task", "hours"]
# Need to check the column [1](wp) and column [2](task),
# - Case 1: if [1] is empty then we create an array of 12 values, each per month
# with the sum of column [3]
# - Case 2: if [1] has value but [2] is empty, create a list of wps in which each
# of them is the sum of [3]
# - Case 3: if [1] and [2] have values, create a list of wps with a list of tasks
# with the array of sums of [3]
column1 = aux_df['wp'].values[0]
column2 = aux_df['task'].values[0]
if column1 == '':
# Case 1: array of sum values for project
total = aux_df['hours'].sum()
self.total_hours += total
print(f'project "{p}" total hours "{total}"')
struct = Structure(data=self)
c1 = struct.project_without_wp(project=p, total=total)
final_data.update(c1)
elif column2 == '':
# Case 2: array of sum values for each wp
wps = aux_df.wp.unique().tolist()
total_wps = dict()
for w in wps:
mask_wp = aux_df['wp'].values == w
aux_df_wp = aux_df.loc[mask_wp]
total_wps[w] = aux_df_wp['hours'].sum()
self.total_hours += total_wps[w]
print(f'project "{p}", wp "{w}", total hours "{total_wps[w]}"')
struct = Structure(data=self)
c2 = struct.project_with_wps_without_tasks(project=p, workpackages=wps, total=total_wps)
final_data.update(c2)
else:
# Case 3: array of sum values for each task
wps = aux_df.wp.unique().tolist()
total_wps = dict()
total_tasks = dict()
tasks = list()
for w in wps:
mask_wp = aux_df['wp'].values == w
aux_df_wp = aux_df.loc[mask_wp]
tasks = aux_df.task.unique().tolist()
total_tasks = dict()
for t in tasks:
mask_task = aux_df_wp['task'].values == t
aux_df_task = aux_df_wp.loc[mask_task]
total_tasks[t] = aux_df_task['hours'].sum()
self.total_hours += total_tasks[t]
total_wps[w] = total_tasks
print(f'project "{p}", wp "{w}", total hours "{total_wps[w]}"')
struct = Structure(data=self)
c3 = struct.project_with_wps_with_tasks(project=p, workpackages=wps, tasks=tasks, total=total_tasks)
final_data.update(c3)
# Need to check the hours with the expected hours
if self.total_hours != self.max_working_hours:
print(f'Error the number of Total Hours "{self.total_hours}" '
f'is different from Max Working Hours "{self.max_working_hours}"')
return {}
else:
print(f'\nTotal hours "{self.total_hours}"')
print(f'\nData generated: \n{final_data}')
return final_data
def metadata_extraction(self):
"""
Extract the metadata information of the google sheet: Name[0,4], Family Name[1,4], Month[0,8],
Year[1,8], and Max Working Hours[x,9]
:return:
"""
df = | DataFrame(self.values) | pandas.DataFrame |
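# Hypothetical usage sketch for the Timesheet class above (assumes valid Google OAuth
# credentials and a spreadsheet id; both values and the call order are assumptions):
#
#   ts = Timesheet(credentials=creds, sheetid='1AbC...')
#   ts.get_data_from_timesheet()
#   ts.metadata_extraction()        # fills the metadata fields (person, month, year, ...)
#   ts.data_analysis_validation()   # checks per-day hour totals
#   aggregated = ts.data_extraction()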
import argparse
import numpy as np
import os
import pandas as pd
import random
import sys
import time
import torch
import scan
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
class RNN(nn.Module):
def __init__(self,
input_size,
hidden_size,
output_size,
bias=True,
nonlinearity='tanh',
mode='blelloch',
rnn_type='PyTorch',
test_artifacts=None):
super(RNN, self).__init__()
self._input_size = input_size
self._hidden_size = hidden_size
assert mode in {'normal', 'blelloch', 'normal-nobp', 'blelloch-nobp'}
self._mode = mode
assert rnn_type in {'PyTorch', 'cuDNN'}
self._rnn_type = rnn_type
if rnn_type == 'PyTorch':
self._rnn_cell = nn.RNNCell(
input_size,
hidden_size,
bias=bias,
nonlinearity=nonlinearity,
)
self._rnn = None
else:
self._rnn_cell = None
self._rnn = nn.RNN(
input_size,
hidden_size,
bias=bias,
nonlinearity=nonlinearity,
batch_first=False,
)
self._linear_y = nn.Linear(hidden_size, output_size, bias=True)
self._scan_inputs = None
self._x = None
self._hx = None
self._last_hx = None
self._test_artifacts = test_artifacts
@property
def weight_hh(self):
if self._rnn_type == 'PyTorch':
return self._rnn_cell.weight_hh
else:
return self._rnn.weight_hh_l0
@property
def bias_hh(self):
if self._rnn_type == 'PyTorch':
return self._rnn_cell.bias_hh
else:
return self._rnn.bias_hh_l0
@property
def weight_ih(self):
if self._rnn_type == 'PyTorch':
return self._rnn_cell.weight_ih
else:
return self._rnn.weight_ih_l0
@property
def bias_ih(self):
if self._rnn_type == 'PyTorch':
return self._rnn_cell.bias_ih
else:
return self._rnn.bias_ih_l0
def _forward_cuDNN(self, x, hx):
if self.training and self._mode in {'blelloch', 'blelloch-nobp'}:
self._hx[-1, :, :] = hx
hx = hx.view(1, -1, self._hidden_size)
x = x.view(x.size(0), x.size(1),
self._input_size).transpose(0, 1).contiguous()
output, hx = self._rnn(x, hx.view(1, -1, self._hidden_size))
if self.training and self._mode in {'blelloch', 'blelloch-nobp'}:
scan.reverse_seq(self._hx[:-1, :, :], output[:-1, :, :])
scan.reverse_seq(self._x, x)
scan.fill_inputs2(self._scan_inputs, self.weight_hh, output)
# Debug
if self._test_artifacts is not None:
for i in range(output.size(0)):
self._test_artifacts.add_artifact('hx_{}'.format(i),
output[i, :, :])
return hx.view(-1, self._hidden_size)
def _forward_PyTorch(self, x, hx):
""" Side-effects:
1. self._hx
2. self._x
3. self._scan_inputs
4. self._test_artifacts
"""
for i in range(x.size(1)):
if self.training and self._mode in {'blelloch', 'blelloch-nobp'}:
self._hx[x.size(1) - 1 - i, :, :] = hx
self._x[x.size(1) - 1 - i, :, :] = x[:, i].view(
-1, self._input_size)
hx = self._rnn_cell(x[:, i].unsqueeze(1), hx)
if self.training and self._mode in {'blelloch', 'blelloch-nobp'}:
scan.fill_inputs(self._scan_inputs, self.weight_hh, hx, i)
# Debug.
if self._test_artifacts is not None:
self._test_artifacts.add_artifact('hx_{}'.format(i), hx)
return hx
def forward(self, x):
hx = torch.zeros((x.size(0), self._hidden_size),
device=self._linear_y.weight.device)
if self.training and self._mode in {'blelloch', 'blelloch-nobp'}:
if self._scan_inputs is None:
# x.size(1) number of fully-connected and activation,
# 1 for the grad vec.
scan_length = 2 * x.size(1) + 1
self._scan_inputs = torch.zeros(
(scan_length, x.size(0), self._hidden_size,
self._hidden_size),
dtype=x.dtype,
device=x.device,
)
else:
self._scan_inputs.zero_()
if self._x is None:
self._x = torch.zeros(
(x.size(1), x.size(0), self._input_size),
dtype=x.dtype,
device=x.device,
)
#else:
# self._x.zero_()
if self._hx is None:
self._hx = torch.zeros(
(x.size(1), x.size(0), self._hidden_size),
dtype=hx.dtype,
device=hx.device,
)
#else:
# self._hx.zero_()
if self._rnn_type == 'PyTorch':
forward_fn = lambda x, hx: self._forward_PyTorch(x, hx)
else:
forward_fn = lambda x, hx: self._forward_cuDNN(x, hx)
if self.training and self._mode in {'blelloch', 'blelloch-nobp'}:
with torch.no_grad():
hx = forward_fn(x, hx)
self._last_hx = hx
hx.requires_grad = True
else:
hx = forward_fn(x, hx)
return self._linear_y(hx)
def backward_by_scan(self, loss):
# Figure out the gradient of loss to last_hx
(self._scan_inputs[0, :, 0, :], self._linear_y.weight.grad,
self._linear_y.bias.grad) = torch.autograd.grad(
loss, [self._last_hx, self._linear_y.weight, self._linear_y.bias])
scan_results = scan.scan(self._scan_inputs)
dl_dz = scan_results[2::2, :, :].contiguous()
self.weight_hh.grad = torch.bmm(
dl_dz.unsqueeze(3).view(-1, self._hidden_size, 1),
self._hx.unsqueeze(2).view(-1, 1, self._hidden_size)).sum(dim=0)
self.weight_ih.grad = torch.bmm(
dl_dz.unsqueeze(3).view(-1, self._hidden_size, 1),
self._x.unsqueeze(2).view(-1, 1, self._input_size)).sum(dim=0)
bias_grad = dl_dz.sum(dim=(0, 1))
self.bias_hh.grad, self.bias_ih.grad = bias_grad, bias_grad
def update_test_artifacts(self, test_artifacts):
test_artifacts.add_artifact('rnn.weight_hh', self.weight_hh)
test_artifacts.add_artifact('rnn.bias_hh', self.bias_hh)
test_artifacts.add_artifact('rnn.weight_ih', self.weight_ih)
test_artifacts.add_artifact('rnn.bias_ih', self.bias_ih)
test_artifacts.add_artifact('_linear_y.weight', self._linear_y.weight)
test_artifacts.add_artifact('_linear_y.bias', self._linear_y.bias)
test_artifacts.add_artifact('rnn.weight_hh.grad', self.weight_hh.grad)
test_artifacts.add_artifact('rnn.bias_hh.grad', self.bias_hh.grad)
test_artifacts.add_artifact('rnn.weight_ih.grad', self.weight_ih.grad)
test_artifacts.add_artifact('rnn.bias_ih.grad', self.bias_ih.grad)
test_artifacts.add_artifact('_linear_y.weight.grad',
self._linear_y.weight.grad)
test_artifacts.add_artifact('_linear_y.bias.grad',
self._linear_y.bias.grad)
def build_dataloaders(save_dir, train_batch_size, test_batch_size):
train_X = torch.load(os.path.join(save_dir, 'train_X')).cuda()
train_Y = torch.load(os.path.join(
save_dir,
'train_Y',
)).type(dtype=torch.long).cuda()
#test_X = torch.load(os.path.join(save_dir, 'test_X')).cuda()
#test_Y = torch.load(os.path.join(
# save_dir,
# 'test_Y',
#)).type(dtype=torch.long).cuda()
#num_classes = (max(train_Y.max(), test_Y.max()) -
# min(train_Y.min(), test_Y.min()) + 1)
num_classes = (train_Y.max() - train_Y.min() + 1)
train_loader = torch.utils.data.DataLoader(
torch.utils.data.TensorDataset(train_X, train_Y),
batch_size=train_batch_size,
shuffle=True,
)
#test_loader = torch.utils.data.DataLoader(
# torch.utils.data.TensorDataset(test_X, test_Y),
# batch_size=test_batch_size,
# shuffle=True,
#)
test_loader = None
return train_loader, test_loader, int(num_classes.item())
class UnitTestArtifacts(object):
def __init__(self):
self._record = []
def new_timeframe(self):
self._record.append({})
def add_artifact(self, k, v):
self._record[-1][k] = v.detach().cpu()
def assert_allclose(self, expected):
for i, expected_artifacts in enumerate(expected._record):
print('************************************************')
print('****************** epoch = {} ******************'.format(i))
print('************************************************')
for k, expected_artifact in expected_artifacts.items():
print('++++++++++++++ Compare {} ++++++++++++++'.format(k))
try:
np.testing.assert_allclose(self._record[i][k].numpy(),
expected_artifact.numpy(),
rtol=1e-4)
print('Done!')
except AssertionError as e:
print(e)
def seed_this_process(seed):
random.seed(seed)
torch.manual_seed(seed)
np.random.seed(seed)
def accuracy(y, y_):
with torch.no_grad():
_, indices = y.max(1)
return (indices == y_).type(dtype=torch.float).mean().item()
def main(args):
# Redirect stdout and stderr outputs.
if args.stdout is not None:
sys.stdout = open(args.stdout, 'w', buffering=1)
if args.stderr is not None:
sys.stderr = open(args.stderr, 'w', buffering=1)
test_artifacts = None if args.unit_test is None else UnitTestArtifacts()
# Fix a seed.
seed_this_process(args.seed)
train_loader, test_loader, num_classes = build_dataloaders(
args.save_dir, args.train_batch_size, args.test_batch_size)
rnn = RNN(
1,
args.hidden_size,
num_classes,
mode=args.mode,
rnn_type=args.rnn_type,
test_artifacts=test_artifacts,
).cuda()
loss_fn = nn.CrossEntropyLoss()
if args.mode in {'blelloch', 'blelloch-nobp'}:
torch.optim.Optimizer.zero_grad = lambda self: None
optimizer = optim.Adam(rnn.parameters(), lr=args.learning_rate)
epoch_latency = {
'epoch': [],
'timestamp': [],
'latency': [],
'loss': [],
'accuracy': [],
}
epoch_events = []
def record(epoch, event_start, event_stop, loss, accuracy):
epoch_latency['epoch'].append(epoch)
epoch_events.append((event_start, event_stop))
epoch_latency['loss'].append(loss)
epoch_latency['accuracy'].append(accuracy)
def train():
for i, batch in enumerate(train_loader):
if args.num_iterations is not None and i >= args.num_iterations:
break
if args.unit_test is not None:
test_artifacts.new_timeframe()
optimizer.zero_grad()
x = batch[0]
y_ = batch[1]
y = rnn(x)
loss = loss_fn(y, y_)
if args.mode == 'blelloch':
rnn.backward_by_scan(loss)
elif args.mode == 'normal':
loss.backward()
elif args.mode in {'normal-nobp', 'blelloch-nobp'}:
pass
else:
raise RuntimeError('Impossible to reach here!')
optimizer.step()
if args.unit_test is not None:
rnn.update_test_artifacts(test_artifacts)
def train_loss():
running_loss = []
running_acc = []
rnn.eval()
with torch.no_grad():
for i, batch in enumerate(train_loader):
if args.num_iterations is not None and i >= args.num_iterations:
break
x = batch[0]
y_ = batch[1]
y = rnn(x)
loss = loss_fn(y, y_)
running_loss.append(loss.item())
running_acc.append(accuracy(y, y_))
rnn.train()
return np.mean(running_loss), np.mean(running_acc)
# The training loop:
for epoch in range(args.num_epochs):
epoch_start = torch.cuda.Event(enable_timing=True)
epoch_stop = torch.cuda.Event(enable_timing=True)
epoch_start.record()
train()
epoch_stop.record()
epoch_loss, epoch_acc = train_loss() if args.save_loss_acc else (0, 0)
record(epoch, epoch_start, epoch_stop, epoch_loss, epoch_acc)
if args.unit_test == 'expected':
torch.save(test_artifacts, args.unit_test_cache)
elif args.unit_test == 'actual':
expected = torch.load(args.unit_test_cache)
test_artifacts.assert_allclose(expected)
torch.cuda.synchronize()
clock = 0.0
for i, event_pair in enumerate(epoch_events):
lat = event_pair[0].elapsed_time(event_pair[1]) / 1000
clock += lat
epoch_latency['timestamp'].append(clock)
epoch_latency['latency'].append(lat)
print('[{}/{} @ {} s] lat: {} s; loss: {}; acc: {}'.format(
epoch_latency['epoch'][i],
args.num_epochs,
epoch_latency['timestamp'][i],
epoch_latency['latency'][i],
epoch_latency['loss'][i],
epoch_latency['accuracy'][i],
))
if args.save_epoch_latency is not None:
| pd.DataFrame(epoch_latency) | pandas.DataFrame |
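# Hypothetical invocation sketch for the training script above (flag names mirror the
# args.* attributes used in main(); the script filename and all values are placeholders):
#
#   python train_rnn.py --save_dir data/ --mode blelloch --rnn_type cuDNN \
#       --hidden_size 128 --train_batch_size 64 --test_batch_size 64 \
#       --num_epochs 10 --learning_rate 1e-3 --seed 0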
import os
import re
import sys
import time
import pandas as pd
from typing import Union
from rich import console
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver import Firefox, FirefoxOptions
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.expected_conditions import element_to_be_clickable as clickable
from Crypto.Random import random
HACKERNEWS_ROOT = 'https://news.ycombinator.com'
HACKERNEWS_LOGIN = f'{HACKERNEWS_ROOT}/login'
CRAWL_DELAY = 30
class Keywords:
RUSSIA = ['russia', 'putin', 'moscow', 'lavrov', 'oligarch']
UKRAINE = ['ukraine', 'ukrainian', 'kyiv', 'zelensky', 'kuleba']
BELARUS = 'belarus'
BALTIC_STATES = ['baltic', 'estonia', 'latvia', 'lithuania']
CHINA = ['china', 'chinese', 'beijing']
TAIWAN = 'taiwan'
NATO = 'nato'
JAPAN = ['japan', 'tokyo']
IRAQ = ['iraq', 'erbil']
IRAN = ['iran', 'khasabad']
class Log:
console = console.Console()
console.clear()
@staticmethod
def debug(*objects):
Log.console.print(*objects)
@staticmethod
def write(*text, warning=False, error=False):
if warning:
kwargs = dict(style='black on yellow')
elif error:
kwargs = dict(style='red')
else:
kwargs = dict()
Log.console.log(*text, **kwargs)
@staticmethod
def error():
Log.console.print_exception()
sys.exit(0)
def xpath(query: str) -> tuple[str, str]:
return ('xpath', query)
def randomize_crawl_delay(rand_range: int, delay=CRAWL_DELAY):
rand_range = min(rand_range, CRAWL_DELAY)
rand_range = int(rand_range / 2)
return random.randint(delay - rand_range, delay + rand_range)
def start_selenium_driver():
Log.write('starting Selenium driver')
options = FirefoxOptions()
options.headless = True
driver = Firefox(options=options)
return driver
def login(driver, wait, user, password):
Log.write(f'logging in as {user}')
driver.get(HACKERNEWS_LOGIN)
    # the only discernible difference between the login form
    # and the create account form is that the login form is
    # autofocused
login_form = xpath('//form[table/tbody/tr/td/input[@autofocus="true"]]')
wait.until(clickable(login_form))
login_form = driver.find_element(*login_form)
username_input = login_form.find_element_by_xpath('//input[@type="text"]')
password_input = login_form.find_element_by_xpath('//input[@type="password"]')
login_button = login_form.find_element_by_xpath('//input[@type="submit"]')
username_input.send_keys(user)
password_input.send_keys(password)
login_button.click()
def validate_login(driver: Firefox, wait: WebDriverWait, user: str):
Log.write('validating login')
user_anchor = xpath('//a[@id="me"]')
wait.until(clickable(user_anchor))
user_anchor = driver.find_element(*user_anchor)
assert user == user_anchor.text
def user_endpoint(user: str) -> str:
return f'https://news.ycombinator.com/user?id={user}'
def new_hackernews_session(wait_timeout=120):
user, password = os.getenv('HN_USER'), os.getenv('HN_PASSWORD')
driver = start_selenium_driver()
wait = WebDriverWait(driver, wait_timeout)
login(driver, wait, user, password)
validate_login(driver, wait, user)
return driver, wait
def extract_posts(driver: Firefox, wait: WebDriverWait):
posts = xpath('//tr[@class="athing"]')
subtexts = xpath('//td[@class="subtext"]')
wait.until(clickable(posts))
posts = driver.find_elements(*posts)
subtexts = driver.find_elements(*subtexts)
posts_w_subtext = list(zip(posts, subtexts))
records = list()
for post, subtext in posts_w_subtext:
post_id = int(post.get_attribute('id'))
rank = post.find_element_by_xpath('.//span[@class="rank"]')
rank = int(rank.text[:-1])
title = post.find_element_by_xpath('.//a[@class="titlelink"]')
link = title.get_attribute('href')
title = title.text
timestamp = subtext.find_element_by_xpath('.//span[@class="age"]')
timestamp = timestamp.get_attribute('title')
try:
user = subtext.find_element_by_xpath('.//a[@class="hnuser"]')
user = user.text
score = subtext.find_element_by_xpath('.//span[@class="score"]')
score = score.text
score = int(score.split(' ')[0])
except NoSuchElementException:
user = None
score = None
comments = f'https://news.ycombinator.com/item?id={post_id}'
user_profile = f'https://news.ycombinator.com/user?id={user}'
records.append(dict(id=post_id, rank=rank, title=title, link=link,
user=user, score=score, timestamp=timestamp,
comments=comments, user_profile=user_profile))
return records
def go_to_next_page(driver: Firefox, wait: WebDriverWait):
more_anchor = xpath('//a[@class="morelink"]')
wait.until(clickable(more_anchor))
more_anchor = driver.find_element(*more_anchor)
more_anchor.click()
def extract_data_from_hackernews(pages=5, polite=True, crawl_range=(10, 10)) -> pd.DataFrame:
# "polite" will cause the bot to adhere strictly to
# HN's crawl delay of 30 seconds.
# Turn this setting off to speed up extraction.
#
# When polite=False, crawl delays will be randomized.
    # The delay will be a random integer
    # between crawl_range[0] - (crawl_range[1] / 2)
    # and crawl_range[0] + (crawl_range[1] / 2)
    # e.g. (10, 10) -> randint(5, 15)
    #
    # Note: requesting too quickly will result in an IP ban.
    # To unban an IP: https://news.ycombinator.com/unban?ip=<ip address>
try:
existing_posts = pd.read_parquet('hackernews_posts.snappy.parquet')
except FileNotFoundError:
existing_posts = pd.DataFrame([], columns=('id', 'rank', 'title', 'link',
'user', 'score', 'timestamp',
'comments', 'user_profile'))
existing_posts = existing_posts.set_index('id')
if polite:
Log.write(f'polite scraping enabled. crawl delay set to {CRAWL_DELAY} seconds')
else:
Log.write('!!WARNING!! polite scraping is disabled', warning=True)
Log.write('crawling too quickly will result in an IP ban', warning=True)
driver, wait = new_hackernews_session()
all_posts = list()
page = 1
while True:
Log.write(f'scraping {driver.current_url}')
current_page_posts = extract_posts(driver, wait)
all_posts += current_page_posts
Log.write(f'{len(current_page_posts)} posts scraped ({len(all_posts)} total)')
if page < pages:
crawl_delay = randomize_crawl_delay(*crawl_range) if not polite else CRAWL_DELAY
Log.write(f'sleeping {crawl_delay} seconds')
time.sleep(crawl_delay)
try:
go_to_next_page(driver, wait)
except TimeoutError:
Log.write('timed out waiting for next page', error=True)
break
page += 1
continue
break
driver.quit()
posts = pd.DataFrame(all_posts)
posts = pd.concat([existing_posts, posts]).drop_duplicates()
posts.to_parquet('hackernews_posts.snappy.parquet')
return posts
def extract_users_from_posts(polite=True, crawl_range=(10, 10)) -> pd.DataFrame:
posts = pd.read_parquet('hackernews_posts.snappy.parquet')
posts = posts.set_index('id')
posters = posts['user_profile'].unique()
comments = posts['comments'].to_list()
all_users = set(posters)
driver, _ = new_hackernews_session()
n = 0
while True:
comment_section = comments[n]
Log.write(f'scraping {comment_section}')
driver.get(comment_section)
users = driver.find_elements_by_xpath('//a[@class="hnuser"]')
users = set(f'https://news.ycombinator.com/user?id={u.text}' for u in users)
all_users = all_users.union(users)
Log.write(f'{len(users)} users scraped ({len(all_users)} total)')
n += 1
if n < len(comments):
crawl_delay = randomize_crawl_delay(*crawl_range) if not polite else CRAWL_DELAY
Log.write(f'sleeping {crawl_delay} seconds')
time.sleep(crawl_delay)
continue
break
users = pd.DataFrame(all_users, columns=('users',))
users.to_parquet('hackernews_user_profiles.snappy.parquet')
driver.quit()
return users
def extract_user_profiles(sample_size: int = None, polite=True, crawl_range=(10, 10), users=None):
users = pd.read_parquet('hackernews_user_profiles.snappy.parquet') if users is None else users
existing_profiles = pd.read_parquet('hackernews_users.snappy.parquet')
if sample_size is None:
sample_size = len(users)
Log.write(f'{len(existing_profiles)} profiles loaded')
users = users[~users['users'].isin(existing_profiles['profile'])]
sample = users['users'].sample(sample_size).to_list()
driver, _ = new_hackernews_session()
all_users = list()
n = 0
while True:
user_profile = sample[n]
Log.write(f'scraping {user_profile}')
driver.get(user_profile)
about_section = '//tr[td[text()="about:"]]/td[@style="overflow:hidden;"]'
about_section = driver.find_element_by_xpath(about_section)
record = (user_profile.split('=')[-1], about_section.text, user_profile)
all_users.append(record)
n += 1
if n < len(sample):
crawl_delay = randomize_crawl_delay(*crawl_range) if not polite else CRAWL_DELAY
Log.write(f'sleeping {crawl_delay} seconds')
time.sleep(crawl_delay)
continue
break
profiles = | pd.DataFrame(all_users, columns=('user', 'about', 'profile')) | pandas.DataFrame |
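# Hypothetical run sketch for the scraper above (requires the HN_USER and HN_PASSWORD
# environment variables used by new_hackernews_session; the function names and keyword
# arguments are the ones defined in this module):
#
#   posts = extract_data_from_hackernews(pages=5, polite=True)
#   users = extract_users_from_posts(polite=True)
#   profiles = extract_user_profiles(sample_size=50, polite=True)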
import matplotlib.pyplot as plt
import pandas as pd
from numpy import object
def update_plot_params():
params = {'legend.fontsize': 'x-large',
'figure.figsize': (10, 8),
'axes.labelsize': 'x-large',
'axes.titlesize': 'x-large',
'xtick.labelsize': 'x-large',
'ytick.labelsize': 'x-large'}
plt.rcParams.update(params)
def cv2df(cv_score, calculate_mean=False, col_idx=None):
"""
Convert cross validation scores to pandas.DataFrame
"""
df = | pd.DataFrame(cv_score) | pandas.DataFrame |
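# Hypothetical usage of cv2df above (assumes a scikit-learn cross_validate result dict;
# model, X and y are placeholders, not defined in this module):
#
#   from sklearn.model_selection import cross_validate
#   scores = cross_validate(model, X, y, cv=5)
#   df_scores = cv2df(scores, calculate_mean=True)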
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Biota Technology.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import division
from unittest import TestCase, main
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sourcetracker._sourcetracker import (intersect_and_sort_samples,
collapse_source_data,
subsample_dataframe,
validate_gibbs_input,
validate_gibbs_parameters,
collate_gibbs_results,
get_samples,
generate_environment_assignments,
cumulative_proportions,
single_sink_feature_table,
ConditionalProbability,
gibbs_sampler, gibbs)
from sourcetracker._plot import plot_heatmap
class TestValidateGibbsInput(TestCase):
def setUp(self):
self.index = ['s%s' % i for i in range(5)]
self.columns = ['f%s' % i for i in range(4)]
def test_no_errors_(self):
# A table where nothing is wrong, no changes expected.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32), index=self.index,
columns=self.columns)
exp_sources = pd.DataFrame(data.astype(np.int32), index=self.index,
columns=self.columns)
obs = validate_gibbs_input(sources)
pd.util.testing.assert_frame_equal(obs, sources)
# Sources and sinks.
sinks = pd.DataFrame(data, index=self.index, columns=self.columns)
exp_sinks = pd.DataFrame(data.astype(np.int32), index=self.index,
columns=self.columns)
obs_sources, obs_sinks = validate_gibbs_input(sources, sinks)
pd.util.testing.assert_frame_equal(obs_sources, exp_sources)
pd.util.testing.assert_frame_equal(obs_sinks, exp_sinks)
def test_float_data(self):
# Data is float, expect rounding.
data = np.random.uniform(0, 1, size=20).reshape(5, 4)
sources = pd.DataFrame(data, index=self.index, columns=self.columns)
exp_sources = pd.DataFrame(np.zeros(20).reshape(5, 4).astype(np.int32),
index=self.index, columns=self.columns)
obs_sources = validate_gibbs_input(sources)
pd.util.testing.assert_frame_equal(obs_sources, exp_sources)
data = np.random.uniform(0, 1, size=20).reshape(5, 4) + 1.
sources = pd.DataFrame(data, index=self.index, columns=self.columns)
exp_sources = pd.DataFrame(np.ones(20).reshape(5, 4).astype(np.int32),
index=self.index, columns=self.columns)
obs_sources = validate_gibbs_input(sources)
pd.util.testing.assert_frame_equal(obs_sources, exp_sources)
# Sources and sinks.
data = np.random.uniform(0, 1, size=20).reshape(5, 4) + 5
sinks = pd.DataFrame(data,
index=self.index,
columns=self.columns)
exp_sinks = \
pd.DataFrame(5 * np.ones(20).reshape(5, 4).astype(np.int32),
index=self.index,
columns=self.columns)
obs_sources, obs_sinks = validate_gibbs_input(sources, sinks)
pd.util.testing.assert_frame_equal(obs_sources, exp_sources)
pd.util.testing.assert_frame_equal(obs_sinks, exp_sinks)
def test_negative_data(self):
# Values less than 0, expect errors.
data = np.random.uniform(0, 1, size=20).reshape(5, 4) - 1.
sources = pd.DataFrame(data,
index=self.index,
columns=self.columns)
self.assertRaises(ValueError, validate_gibbs_input, sources)
data = -1 * np.random.randint(0, 20, size=20).reshape(5, 4)
sources = pd.DataFrame(data,
index=self.index,
columns=self.columns)
self.assertRaises(ValueError, validate_gibbs_input, sources)
# Sources and sinks.
data = np.random.randint(0, 10, size=20).reshape(5, 4) + 1
sources = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
sinks = pd.DataFrame(-10 * data,
index=self.index,
columns=self.columns)
self.assertRaises(ValueError, validate_gibbs_input, sources, sinks)
def test_nan_data(self):
# nans, expect errors.
data = np.random.uniform(0, 1, size=20).reshape(5, 4)
data[3, 2] = np.nan
sources = pd.DataFrame(data,
index=self.index,
columns=self.columns)
self.assertRaises(ValueError, validate_gibbs_input, sources)
# Sources and sinks.
data = np.random.randint(0, 10, size=20).reshape(5, 4) + 1.
sources = pd.DataFrame(data,
index=self.index,
columns=self.columns)
data[1, 3] = np.nan
sinks = pd.DataFrame(data,
index=self.index,
columns=self.columns)
self.assertRaises(ValueError, validate_gibbs_input, sources, sinks)
def test_non_numeric_data(self):
# data contains at least some non-numeric columns, expect errors.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
sources.iloc[2, 2] = '3.a'
self.assertRaises(ValueError, validate_gibbs_input, sources)
# Sources and sinks.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
sinks = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
sinks.iloc[2, 2] = '3'
self.assertRaises(ValueError, validate_gibbs_input, sources, sinks)
def test_columns_identical(self):
# Columns are identical, no error expected.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
data = np.random.randint(0, 10, size=200).reshape(50, 4)
sinks = pd.DataFrame(data.astype(np.int32),
index=['s%s' % i for i in range(50)],
columns=self.columns)
obs_sources, obs_sinks = validate_gibbs_input(sources, sinks)
pd.util.testing.assert_frame_equal(obs_sources, sources)
pd.util.testing.assert_frame_equal(obs_sinks, sinks)
def test_columns_non_identical(self):
# Columns are not identical, error expected.
data = np.random.randint(0, 10, size=20).reshape(5, 4)
sources = pd.DataFrame(data.astype(np.int32),
index=self.index,
columns=self.columns)
data = np.random.randint(0, 10, size=200).reshape(50, 4)
sinks = pd.DataFrame(data.astype(np.int32),
index=['s%s' % i for i in range(50)],
columns=['feature%s' % i for i in range(4)])
self.assertRaises(ValueError, validate_gibbs_input, sources, sinks)
class TestValidateGibbsParams(TestCase):
def test_acceptable_inputs(self):
# All values acceptable, expect no errors.
alpha1 = .001
alpha2 = .1
beta = 10
restarts = 10
draws_per_restart = 1
burnin = 100
delay = 1
self.assertTrue(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
alpha1 = alpha2 = beta = 0
self.assertTrue(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
def test_not_acceptable_inputs(self):
# One of the float params is negative.
alpha1 = -.001
alpha2 = .1
beta = 10
restarts = 10
draws_per_restart = 1
burnin = 100
delay = 1
self.assertFalse(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
# One of the int params is 0.
alpha1 = .001
restarts = 0
self.assertFalse(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
# One of the int params is a float.
restarts = 1.34
self.assertFalse(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
# A param is a string.
restarts = '3.2232'
self.assertFalse(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
# A param is a nan.
restarts = 3
alpha1 = np.nan
self.assertFalse(validate_gibbs_parameters(alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay))
class TestIntersectAndSortSamples(TestCase):
def test_partially_overlapping_tables(self):
# Test an example where there are unshared samples present in both
# feature and sample tables. Notice that order is different between
# the samples that are shared between both tables. The order of samples
# in the returned tables is set by the ordering done in np.intersect1d.
sdata_c1 = [3.1, 'red', 5]
sdata_c2 = [3.6, 'yellow', 7]
sdata_c3 = [3.9, 'yellow', -2]
sdata_c4 = [2.5, 'red', 5]
sdata_c5 = [6.7, 'blue', 10]
samples = ['s1', 's4', 's2', 's3', 'sX']
headers = ['pH', 'color', 'day']
stable = pd.DataFrame([sdata_c1, sdata_c4, sdata_c2, sdata_c3,
sdata_c5], index=samples, columns=headers)
fdata = np.arange(90).reshape(9, 10)
samples = ['s%i' % i for i in range(3, 12)]
columns = ['o%i' % i for i in range(1, 11)]
ftable = pd.DataFrame(fdata, index=samples, columns=columns)
exp_ftable = pd.DataFrame(fdata[[1, 0], :], index=['s4', 's3'],
columns=columns)
exp_stable = pd.DataFrame([sdata_c4, sdata_c3], index=['s4', 's3'],
columns=headers)
obs_stable, obs_ftable = intersect_and_sort_samples(stable, ftable)
pd.util.testing.assert_frame_equal(obs_stable, exp_stable)
pd.util.testing.assert_frame_equal(obs_ftable, exp_ftable)
# No shared samples, expect a ValueError.
ftable.index = ['ss%i' % i for i in range(9)]
self.assertRaises(ValueError, intersect_and_sort_samples, stable,
ftable)
# All samples shared, expect no changes.
fdata = np.arange(50).reshape(5, 10)
samples = ['s1', 's4', 's2', 's3', 'sX']
columns = ['o%i' % i for i in range(10)]
ftable = pd.DataFrame(fdata, index=samples, columns=columns)
exp_ftable = ftable.loc[stable.index, :]
exp_stable = stable
obs_stable, obs_ftable = intersect_and_sort_samples(stable, ftable)
pd.util.testing.assert_frame_equal(obs_stable, exp_stable)
pd.util.testing.assert_frame_equal(obs_ftable, exp_ftable)
class TestGetSamples(TestCase):
def tests(self):
# Make a dataframe which contains mixed data to test.
col0 = ['a', 'a', 'a', 'a', 'b']
col1 = [3, 2, 3, 1, 3]
col2 = ['red', 'red', 'blue', 255, 255]
headers = ['sample_location', 'num_reps', 'color']
samples = ['s1', 's2', 's3', 's4', 's5']
sample_metadata = \
pd.DataFrame.from_dict({k: v for k, v in zip(headers,
[col0, col1, col2])})
sample_metadata.index = samples
obs = get_samples(sample_metadata, 'sample_location', 'b')
exp = pd.Index(['s5'], dtype='object')
pd.util.testing.assert_index_equal(obs, exp)
obs = get_samples(sample_metadata, 'sample_location', 'a')
exp = | pd.Index(['s1', 's2', 's3', 's4'], dtype='object') | pandas.Index |
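# Hypothetical usage of get_samples mirroring the test above: select the sample ids
# whose metadata column matches a given value.
#
#   sink_ids = get_samples(sample_metadata, 'sample_location', 'b')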
import pandas as pd, sqlite3 as sql
import datetime as dt, re, time, holidays
from dateutil.relativedelta import relativedelta
# Shift nontrading days data to next available trading day
def next_business_day(date):
    ONE_DAY = relativedelta(days=1)  # relativedelta comes from dateutil (imported above); dt has no relativedelta
HOLIDAYS_US = holidays.US()
next_day = date + ONE_DAY
while next_day.weekday() in holidays.WEEKEND or next_day in HOLIDAYS_US:
next_day += ONE_DAY
return next_day
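# Hypothetical usage of next_business_day (relies on the dateutil/holidays imports above):
#
#   next_business_day(dt.date(2024, 7, 3))  # -> 2024-07-05, skipping the July 4th US holiday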
# Time it and print to the command line to make sure this imported script does not weigh down the server, since it needs to be run first
print(time.perf_counter())
# Gather Data Sources via SQL Queries
con = sql.connect('data/processed/temp_c.db', timeout=5000)
port = pd.read_sql(f"SELECT Date date, Open, High, Low, Close, Volume, Volatility, Turnover, symbol FROM daily ORDER BY date", con=con, parse_dates={'date': '%Y-%m-%d %H:%M:%S'})\
.drop_duplicates(subset=['date', 'symbol'])
articles =pd.read_sql("SELECT date, symbol, publisher, pos_sent, neu_sent, neg_sent, comp_sent FROM (SELECT * FROM news_sentiment JOIN (SELECT * FROM articles) USING (pk))", con=con, parse_dates={'date': '%Y-%m-%d %H:%M:%S'})
recommends = pd.read_sql(f"SELECT Date date, symbol, Firm, new_grade, prev_grade, Action from recommendations ORDER BY Date", con=con, parse_dates={'date': '%Y-%m-%d %H:%M:%S'})
comments = pd.read_sql(f"SELECT date, channel, symbols, pos_sent, neu_sent, neg_sent, comp_sent from symbol_comments ORDER BY date", parse_dates={'date': '%Y-%m-%d'}, con=con)
comments.loc[:, "symbols"] = comments.symbols.apply(lambda x: x.replace('BTC', 'BTC-USD'))
companies = tuple(port.symbol.unique())
c_data = pd.read_sql(f"SELECT * from mentions WHERE symbol IN {companies}", con=con, index_col='pk')
con.close()
symbols_re = re.compile(r"\[|\]|\'|\'")
last_index = comments.index.max()
# decompose for single symbol
comments = comments.assign(symbols = lambda x: x.symbols.apply(lambda x: re.sub(symbols_re, '', x)).apply(str.split, sep=','))\
.explode('symbols').reset_index().rename(columns={'index': 'comment_index'})
comments = comments[lambda x:(~( x.comment_index.isnull()) & (x.symbols.isin(companies)))]
# Translate Ratings
recommendsDict = {"Very Bearish": 1, "Bearish": 2, "Neutral": 3, "Bullish": 4, "Very Bullish": 5}
recommends=recommends.assign(new_sent = lambda x: x.new_grade.apply(lambda g: recommendsDict[g]))\
.assign(prev_sent = lambda x: x.prev_grade.apply(lambda g: recommendsDict[g]))
# Take aggregations over the desired frequency; make buy decisions based on the frequency of data points and sentiments.
# Return the portfolio with new information: shares and cost * shares.
# Aggregate the data sources to obtain average ratings, pick stocks, and allocate the portfolio amount to match back with the security daily data.
class EAT():
def __init__(self, portfolio, articles, comments, recs, start, end, start_amount):
self.portfolio = portfolio.copy(deep=True)
self.postions = []
self.start = start
self.end = end
self.articles = articles[lambda x: (x.date >= start) & (x.date <= end)]
self.comments = comments[lambda x: (x.date >= start) & (x.date <= end)]
self.recs = recs[lambda x: (x.date >= start) & (x.date <= end)]
self.aggs = {}
self.trading_days = set(self.portfolio[lambda x: x.symbol == "DIS"].date.values)
self.starting_amt = start_amount
self.dates = []
self.share = True
self.funds_lookup = | pd.DataFrame() | pandas.DataFrame |
from django.shortcuts import render
from django.http import HttpResponse
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from . import models, serializers, utils
from django.db.models import Avg
from rest_framework import generics, status
from rest_framework.response import Response
from rest_framework.decorators import api_view
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.http import Http404
from collections import defaultdict
from . import opensmoke
from . import boolparser
from rest_framework.parsers import FileUploadParser, MultiPartParser, FormParser, JSONParser
import seaborn as sns
import io
import pandas as pd
from django.http import FileResponse
from rest_framework.views import APIView
import numpy as np
from collections import defaultdict
from pathlib import Path
import os
dict_excel_names = {"IDT": "ignition delay", "T": "temperature"}
from pint import UnitRegistry
ureg = UnitRegistry()
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
def index(request):
return render(request, '../frontend/index.html')
class FilePaperCreate(CreateView):
model = models.FilePaper
fields = ['title', 'reference_doi']
template_name = 'experimentmanager/newpaper.html'
success_url = "/"
class ExperimentCreate(CreateView):
model = models.Experiment
fields = '__all__'
template_name = "experimentmanager/newexperiment.html"
class ExperimentListAPI(generics.ListAPIView):
queryset = models.Experiment.objects.all()
serializer_class = serializers.ExperimentSerializer
class ExperimentFilteredListAPI(generics.ListAPIView):
serializer_class = serializers.ExperimentSerializer
def get_queryset(self):
"""
Optionally restricts the returned purchases to a given user,
by filtering against a `username` query parameter in the URL.
"""
queryset = models.Experiment.objects.all()
experiments = self.request.query_params.getlist('experiments[]', None)
if experiments is not None:
queryset = queryset.filter(id__in=experiments)
return queryset
def experiment_search_fields(request):
experiments = models.Experiment.objects.all()
reactors_to_types = defaultdict(list)
for e in experiments:
if e.experiment_type not in reactors_to_types[e.reactor]:
reactors_to_types[e.reactor].append(e.experiment_type)
species = [i for i in models.InitialSpecie.objects.values_list("name", flat=True).distinct()]
response = {"reactors": list(reactors_to_types.keys()), "reactors_to_types" : reactors_to_types, "species" : species}
return JsonResponse(response)
class SearchExperiments(generics.ListAPIView):
serializer_class = serializers.ExperimentSerializer
def get_queryset(self):
queryset = models.Experiment.objects.all()
reactor = self.request.query_params.get('reactor', None)
experiment_type = self.request.query_params.get('experiment_type', None)
species = self.request.query_params.getlist('species[]', None)
if reactor is not None:
queryset = queryset.filter(reactor=reactor)
if experiment_type is not None:
queryset = queryset.filter(experiment_type=experiment_type)
if species:
queryset = queryset.filter(initial_species__name__in=species)
complex_query = self.request.query_params.get('complex_query', None)
# complex query handling
if complex_query is not None and len(complex_query) > 0:
# TODO: validate complex query
# TODO: better filtering method (custom manager)
p = boolparser.BooleanParser(complex_query.upper())
result_list_ids = []
for e in queryset:
cond = False
try:
cond = p.evaluate(e.get_params_experiment())
except:
pass
if cond:
result_list_ids.append(e.id)
queryset = queryset.filter(id__in=result_list_ids)
# filter based on existence of run_type # TODO: improve
result_list_ids = [e.id for e in queryset if e.run_type() is not None]
queryset = queryset.filter(id__in=result_list_ids)
return queryset
class ChemModelListAPI(generics.ListAPIView):
queryset = models.ChemModel.objects.all()
serializer_class = serializers.ChemModelSerializer
class ExperimentDetailAPI(generics.RetrieveDestroyAPIView):
queryset = models.Experiment.objects.all()
serializer_class = serializers.ExperimentDetailSerializer
# both for experiments and experiments + chem_models
def get_curves(exp_id, chem_models):
experiment = get_object_or_404(models.Experiment, pk=exp_id)
target_executions = []
if chem_models and len(chem_models) > 0:
target_executions = experiment.executions.filter(chemModel__id__in=chem_models)
model_to_dash = dict()
if chem_models and len(chem_models) > 0:
if len(chem_models) <= 4:
model_to_dash = dict(zip([float(j) for j in chem_models], ['solid', 'dash', 'dot', 'dashdot']))
else:
model_to_dash = dict(zip([float(j) for j in chem_models], len(chem_models)*['solid']))
if experiment.run_type() == models.EType.batch_idt:
temp_column = experiment.data_columns.get(name="temperature")
idt_column = experiment.data_columns.get(name="ignition delay")
temp = [1000/float(t) for t in temp_column.data]
idt = [float(t) for t in idt_column.data]
### RE-SORTING
t_dict = dict(zip(temp, idt))
sorted_dict = sorted(t_dict.items(), key=lambda kv: kv[0])
temp, idt = zip(*sorted_dict)
###
x_axis = "1000/T [{}]".format(temp_column.units)
y_axis = "IDT [{}]".format(idt_column.units)
# TODO: ASSUME T NOT ALWAYS IN K
target_units = idt_column.units
e_curve = {"x": temp, "y": idt, "name": "Ignition Delay Time", "mode": 'markers', "type": 'scatter'}
model_curves = []
for t in target_executions:
temp_column = t.execution_columns.get(name="temperature")
idt_column = t.execution_columns.get(name="ignition delay")
temp = [1000 / float(t) for t in temp_column.data]
idt = [(float(t) * ureg.parse_expression(idt_column.units)).to(target_units).magnitude for t in idt_column.data]
model_curves.append({"x": temp, "y": idt, "name": t.chemModel.name, "mode": 'lines', "type": 'scatter', 'line': {
'dash': model_to_dash[t.chemModel.id]
}})
response = utils.curve_io_formatter([[e_curve]+model_curves], x_axis=x_axis, y_axis=y_axis, logY=True)
return JsonResponse(response)
elif experiment.run_type() == models.EType.flame_parPhi:
phi_column = experiment.data_columns.get(name="phi")
lfs_column = experiment.data_columns.get(name="laminar burning velocity")
phi = [float(t) for t in phi_column.data]
lfs = [float(t) for t in lfs_column.data]
### RE-SORTING
t_dict = dict(zip(phi, lfs))
sorted_dict = sorted(t_dict.items(), key=lambda kv: kv[0])
temp, idt = zip(*sorted_dict)
###
x_axis = "phi"
y_axis = "LFS [{}]".format(lfs_column.units)
# TODO: ASSUME T NOT ALWAYS IN K
target_units = lfs_column.units
e_curve = {"x": phi, "y": lfs, "name": "LFS", "mode": 'markers', "type": 'scatter'}
model_curves = []
for t in target_executions:
phi_column = t.execution_columns.get(name="phi")
lfs_column = t.execution_columns.get(name="laminar burning velocity")
phi = [float(t) for t in phi_column.data]
lfs = [(float(t) * ureg.parse_expression(lfs_column.units)).to(target_units).magnitude for t in
lfs_column.data]
model_curves.append(
{"x": phi, "y": lfs, "name": t.chemModel.name, "mode": 'lines', "type": 'scatter', 'line': {
'dash': model_to_dash[t.chemModel.id]
}})
response = utils.curve_io_formatter([[e_curve] + model_curves], x_axis=x_axis, y_axis=y_axis, logY=True)
return JsonResponse(response)
elif experiment.run_type() in (models.EType.stirred_parT, models.EType.flow_isothermal_parT):
temp_column = experiment.data_columns.get(name="temperature")
comp_column = experiment.data_columns.filter(name="composition")
comp_column = sorted(comp_column, key=lambda cc: max(cc.data), reverse=True)
x_axis = "Temperature [{}]".format(temp_column.units)
y_axis = "{} [{}]".format(comp_column[0].name, comp_column[0].units)
temp = [float(t) for t in temp_column.data]
colors = sns.color_palette("hls", len(comp_column))
colors = ["rgb({},{},{})".format(int(i[0]*255), int(i[1]*255), int(i[2]*255)) for i in colors]
components = [cc.species[0] for cc in comp_column]
colors_dict = dict(zip(components, colors))
e_curves = []
for index, cc in enumerate(comp_column):
e_curves.append(
{"x": temp, "y": [float(c) for c in cc.data], "name": cc.species[0], "mode": 'markers', "type": 'scatter', 'legendgroup': cc.species[0],
'marker': {
'symbol': index,
'color': colors_dict[cc.species[0]]}
})
model_curves = []
for t in target_executions:
temp_column = t.execution_columns.get(name="temperature")
comp_column = t.execution_columns.filter(name="composition")
temp = [float(t) for t in temp_column.data]
for index, cc in enumerate(comp_column):
model_curves.append(
{"x": temp, "y": [float(c) for c in cc.data],
"name": "{} {}".format(cc.species[0], t.chemModel.name), "mode": 'lines',
"type": 'scatter', 'legendgroup': cc.species[0],
'marker': {
'color': colors_dict[cc.species[0]]},
'line': {
'dash': model_to_dash[cc.execution.chemModel.id]
}
})
response_curves = []
for e_curve in e_curves:
related_model_curves = [mc for mc in model_curves if mc['legendgroup'] == e_curve['legendgroup']]
response_curves.append([e_curve]+related_model_curves)
response = utils.curve_io_formatter(response_curves, x_axis=x_axis, y_axis=y_axis, logY=False)
return JsonResponse(response, safe=False)
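# Hypothetical request sketch for the endpoint wired to experiment_models_curve_API below
# (the URL path is a placeholder; the query parameter names match the lookups in the view):
#
#   GET /api/experiment-models-curve/?experiment=12&chemModels[]=1&chemModels[]=2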
@api_view(['GET'])
def experiment_models_curve_API(request):
exp_id = request.query_params.get('experiment', None)
chem_models = request.query_params.getlist('chemModels[]', None)
return get_curves(exp_id, chem_models)
@api_view(['GET'])
def experiment_curve_API(request, pk):
response = get_curves(pk, None)
if response is None:
content = 'This experiment is currently not supported'
return Response(content, status=status.HTTP_501_NOT_IMPLEMENTED)
return response
@api_view(['GET'])
def curve_matching_results_API(request):
exp_id = request.query_params.get('experiment', None)
experiment = get_object_or_404(models.Experiment, pk=exp_id)
chem_models = request.query_params.getlist('chemModels[]', None)
executions = models.Execution.objects.filter(chemModel__id__in=chem_models, experiment=experiment)
data = []
names = []
for exe in executions:
target_CM_results = models.CurveMatchingResult.objects.filter(execution_column__execution=exe)
exe_data = dict()
exe_data['model'] = exe.chemModel.name
average_index = average_error = 0
if len(target_CM_results) > 0:
averages = target_CM_results.aggregate(Avg('index'), Avg('error'))
average_index, average_error = averages['index__avg'], averages['error__avg']
exe_data['average_index'] = round(average_index, 7)
exe_data['average_error'] = round(average_error, 7)
for t in target_CM_results:
execution_column = t.execution_column
name = execution_column.name if not execution_column.species else execution_column.species[0]
names.append(name)
# exe_data[name] = {'index' : float(t.index), 'error' : float(t.error)}
exe_data[name + '_index'] = round(t.index, 7) if t.index is not None else None
exe_data[name + '_error'] = round(t.error, 7) if t.error is not None else None
data.append(exe_data)
return JsonResponse({'data': data, 'names': list(set(names))})
#deprecated
@api_view(['GET'])
def curve_matching_global_results_API_OLD(request):
exp_ids = request.query_params.getlist('experiments[]', None)
chem_models_ids = request.query_params.getlist('chemModels[]', None)
details = request.query_params.get('details', "1")
data = []
names = []
chem_models = models.ChemModel.objects.filter(id__in=chem_models_ids)
for cm in chem_models:
executions = models.Execution.objects.filter(chemModel=cm, experiment__id__in=exp_ids)
cmr = models.CurveMatchingResult.objects.filter(execution_column__execution__in=executions)
ind = defaultdict(list)
err = defaultdict(list)
result = dict()
for c in cmr:
result["model"] = cm.name
execution_column = c.execution_column
name = execution_column.name if not execution_column.species else execution_column.species[0]
ind[name].append(c.index)
err[name].append(c.error)
if details == "1":
names.append(name)
for name, i in ind.items():
result[name + '_index'] = round(float(np.mean(i)), 7)
for name, e in err.items():
result[name + '_error'] = round(float(np.mean(e)), 7)
average_index = average_error = 0
if len(cmr) > 0:
averages = cmr.aggregate(Avg('index'), Avg('error'))
average_index, average_error = averages['index__avg'], averages['error__avg']
result['average_index'] = round(average_index, 7)
result['average_error'] = round(average_error, 7)
data.append(result)
return JsonResponse({'data': data, 'names': list(set(names))})
@api_view(['GET'])
def curve_matching_global_results_API(request):
exp_ids = request.query_params.getlist('experiments[]', None)
chem_models_ids = request.query_params.getlist('chemModels[]', None)
details = request.query_params.get('details', "1")
cmr = models.CurveMatchingResult.objects.filter(execution_column__execution__chemModel__in=chem_models_ids, execution_column__execution__experiment__in=exp_ids)
result = []
for i in cmr:
if i.index is None or i.error is None:
continue
modelName = i.execution_column.execution.chemModel.name
experimentDOI = i.execution_column.execution.experiment.fileDOI
execution_column = i.execution_column
name = execution_column.name if not execution_column.species else execution_column.species[0]
r = dict()
r['model'] = modelName
r['experiment'] = experimentDOI
r['name'] = name
r['ind'] = float(i.index)
r['err'] = float(i.error)
result.append(r)
df = pd.DataFrame.from_dict(result)[['model', 'experiment', 'name', 'ind', 'err']]
df = df.groupby(["model", "name"]).mean()
data = []
names = set()
for model, new_df in df.groupby(level=0):
d = {'model' : model}
overall = new_df.groupby(['model']).mean()
d['average_index'] = round(new_df['ind'].mean(), 7)
d['average_error'] = round(new_df['err'].mean(), 7)
if details == "1":
for i, t in new_df.iterrows():
d[i[1] + "_index"] = round(t['ind'], 7)
d[i[1] + "_error"] = round(t['err'], 7)
names.add(i[1])
data.append(d)
result = {"data" : data, "names" : list(names)}
return JsonResponse(result, safe=False)
@api_view(['GET'])
def curve_matching_global_results_dict_API(request):
exp_ids = request.query_params.getlist('experiments[]', None)
chem_models_ids = request.query_params.getlist('chemModels[]', None)
cmr = models.CurveMatchingResult.objects.filter(execution_column__execution__chemModel__in=chem_models_ids, execution_column__execution__experiment__in=exp_ids)
result = []
for i in cmr:
if i.index is None or i.error is None:
continue
modelName = i.execution_column.execution.chemModel.name
experimentDOI = i.execution_column.execution.experiment.fileDOI
execution_column = i.execution_column
name = execution_column.name if not execution_column.species else execution_column.species[0]
r = dict()
r['model'] = modelName
r['experiment'] = experimentDOI
r['name'] = name
r['ind'] = float(i.index)
r['err'] = float(i.error)
result.append(r)
df = pd.DataFrame.from_dict(result)[['model', 'experiment', 'name', 'ind', 'err']]
df = df.groupby(["model", "name"]).mean()
result = []
for model, new_df in df.groupby(level=0):
        d = pd.Series(new_df.ind.values, index=new_df.index.levels[1])
import numpy as np
import pandas as pd
import pytest
from lookback import models
class TestChangeDatesGeneralCase:
@pytest.fixture
def shape_data(self):
shape_df = pd.DataFrame(
data={
'shape_key': ['uts_co_S1', 'uts_co_S2', 'uts_co_S3', 'uts_co_S4'],
'START_DATE': ['2020-02-05', '2020-02-20', '2020-02-25', '2020-03-01'],
'END_DATE': ['2020-02-19', '2020-02-24', '2020-02-28', '2021-01-01'],
},
)
shape_df['END_DATE'] = pd.to_datetime(shape_df['END_DATE'])
shape_df['START_DATE'] = pd.to_datetime(shape_df['START_DATE'])
shape_df.set_index('START_DATE', drop=False, inplace=True)
return shape_df
@pytest.fixture
def district_data(self):
district_df = pd.DataFrame(
data={
'NewDistrict': ['1', '2', '3', '4'],
'district_key': ['co_D1', 'co_D2', 'co_D3', 'co_D4'],
'StartDate': ['2020-02-10', '2020-02-15', '2020-02-25', '2020-03-01'],
'EndDate': ['2020-02-14', '2020-02-24', '2020-02-28', None],
},
)
        district_df['StartDate'] = pd.to_datetime(district_df['StartDate'])
import pandas as pd
from pandas.io.json import json_normalize
def venues_explore(client,lat,lng, limit=100, verbose=0, sort='popular', radius=2000, offset=1, day='any',query=''):
    '''Function to get n places using Foursquare's explore endpoint, where n is the limit passed when calling the function.
    Returns a pandas DataFrame with name, city, country, lat, long, address and main category as columns.
    Arguments: *client, *lat, *lng, limit (defaults to 100), radius (defaults to 2000), verbose (defaults to 0), offset (defaults to 1), day (defaults to any)'''
# create a dataframe
df_a = pd.DataFrame(columns=['Name',
'City',
'Latitude',
'Longitude',
'Category',
'Address'])
ll=lat+','+lng
if offset<=50:
for i_offset in range(0,offset):
#get venues using client https://github.com/mLewisLogic/foursquare
venues = client.venues.explore(params={'ll':ll,
'limit':limit,
'intent' : 'browse',
'sort':sort,
'radius':radius,
'offset':i_offset,
'day':day,
'query':query
})
venues=venues['groups'][0]['items']
df_venues = pd.DataFrame.from_dict(venues)
df_venues['venue'][0]
#print('limit', limit, 'sort', sort, 'radius', radius)
for i, value in df_venues['venue'].items():
if verbose==1:
print('i', i, 'name', value['name'])
venueName=value['name']
try:
venueCity=value['location']['city']
except:
venueCity=''
venueCountry=value['location']['country']
venueLat=value['location']['lat']
venueLng=value['location']['lng']
venueCountry=value['location']['country']
try:
venueAddress=value['location']['address']
except:
venueAddress=''
venueCategory=value['categories'][0]['name']
df_a=df_a.append([{'Name':venueName,
'City':venueCity,
'Country':venueCountry,
'Latitude':venueLat,
'Longitude':venueLng,
'Category':venueCategory,
'Address':venueAddress
}])
else:
print('ERROR: offset value per Foursquare API is up to 50. Please use a lower value.')
return df_a.reset_index()
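# --- Usage sketch (added for illustration; not part of the original module) ---
# Assumes an already-authenticated `client` from the foursquare package referenced above;
# the coordinates, radius and query below are made-up example values.
def example_top_coffee_places(client):
    # Fetch up to 50 popular venues within 1 km of a hypothetical point.
    df = venues_explore(client, lat='41.3851', lng='2.1734', limit=50, radius=1000, query='coffee')
    return df[['Name', 'Category', 'Address']]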
def venues_explore_near(client,near, limit=100, verbose=0, sort='popular', radius=100000, offset=1, day='any',query=''):
    '''Function to get n places using Foursquare's explore endpoint, where n is the limit passed when calling the function.
    Returns a pandas DataFrame with name, city, country, near, address and main category as columns.
    The "near" argument searches within the bounds of the geocode for a string naming a place in the world.
    Arguments: *client, *near, limit (defaults to 100), radius (defaults to 100000, max according to the API docs), verbose (defaults to 0), offset (defaults to 1), day (defaults to any)'''
# create a dataframe
df_a = pd.DataFrame(columns=['Name',
'City',
'Latitude',
'Longitude',
'Category',
'Address'])
if offset<=50:
for i_offset in range(0,offset):
#get venues using client https://github.com/mLewisLogic/foursquare
venues = client.venues.explore(params={'near':near,
'limit':limit,
'intent' : 'browse',
'sort':sort,
'radius':radius,
'offset':i_offset,
'day':day,
'query':query
})
venues=venues['groups'][0]['items']
df_venues = pd.DataFrame.from_dict(venues)
df_venues['venue'][0]
#print('limit', limit, 'sort', sort, 'radius', radius)
for i, value in df_venues['venue'].items():
if verbose==1:
print('i', i, 'name', value['name'])
venueName=value['name']
try:
venueCity=value['location']['city']
except:
venueCity=''
venueCountry=value['location']['country']
venueLat=value['location']['lat']
venueLng=value['location']['lng']
venueCountry=value['location']['country']
try:
venueAddress=value['location']['address']
except:
venueAddress=''
venueCategory=value['categories'][0]['name']
df_a=df_a.append([{'Name':venueName,
'City':venueCity,
'Country':venueCountry,
'Latitude':venueLat,
'Longitude':venueLng,
'Category':venueCategory,
'Address':venueAddress
}])
else:
print('ERROR: offset value according to Foursquare API is up to 50. Please use a lower value.')
return df_a.reset_index()
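# --- Usage sketch (illustrative only; the place name and query are made-up examples) ---
def example_places_near(client):
    # Search around a named location instead of explicit coordinates.
    return venues_explore_near(client, near='Chicago, IL', limit=20, query='museum')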
def get_categories():
'''Function to get a Pandas DataFrame of all categories in Foursquare as listed in https://developer.foursquare.com/docs/resources/categories
It uses json_normalize to get nested information and return a DataFrame with main, sub and sub-sub categories name and ID'''
df1 = pd.read_json('https://api.foursquare.com/v2/venues/categories?v=20170211&oauth_token=QEJ4AQPTMMNB413HGNZ5YDMJSHTOHZHMLZCAQCCLXIX41OMP&includeSupportedCC=true')
df1=df1.iloc[0,1]
df1 = json_normalize(df1)
#json_normalize(df1.iloc[0,0])
i=0
df_size=df1.shape[0]
    df_cat=pd.DataFrame()
import pathlib
import os.path as osp
import pandas as pd
import numpy as np
from ast import literal_eval
from .vocabulary import build_vocab, Vocabulary
from ..utils import read_lines, unpickle_data
from ..data_generation.nr3d import decode_stimulus_string
def scannet_official_train_val(valid_views=None, verbose=True):
"""
:param valid_views: None or list like ['00', '01']
:return:
"""
pre_fix = osp.split(pathlib.Path(__file__).parent.absolute())[0]
train_split = osp.join(pre_fix, 'data/scannet/splits/official/v2/scannetv2_train.txt')
train_split = read_lines(train_split)
test_split = osp.join(pre_fix, 'data/scannet/splits/official/v2/scannetv2_val.txt')
test_split = read_lines(test_split)
if valid_views is not None:
train_split = [sc for sc in train_split if sc[-2:] in valid_views]
test_split = [sc for sc in test_split if sc[-2:] in valid_views]
if verbose:
print('#train/test scans:', len(train_split), '/', len(test_split))
scans_split = dict()
scans_split['train'] = set(train_split)
scans_split['test'] = set(test_split)
return scans_split
def objects_counter_percentile(scan_ids, all_scans, prc):
all_obs_len = list()
for scan_id in all_scans:
if scan_id in scan_ids:
all_obs_len.append(len(all_scans[scan_id].three_d_objects))
return np.percentile(all_obs_len, prc)
def mean_color(scan_ids, all_scans):
mean_rgb = np.zeros((1, 3), dtype=np.float32)
n_points = 0
for scan_id in scan_ids:
color = all_scans[scan_id].color
mean_rgb += np.sum(color, axis=0)
n_points += len(color)
mean_rgb /= n_points
return mean_rgb
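# --- Usage sketch (added for illustration) ---
# `all_scans` is assumed to be a dict mapping scan-id -> scan object exposing the
# `.three_d_objects` and `.color` attributes used above; it is loaded elsewhere
# (e.g. via unpickle_data) and is not defined in this module.
def example_split_statistics(all_scans):
    scans_split = scannet_official_train_val(verbose=False)
    p90 = objects_counter_percentile(scans_split['train'], all_scans, 90)
    mean_rgb = mean_color(scans_split['train'], all_scans)
    return p90, mean_rgb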
def load_referential_data(args, referit_csv, scans_split):
"""
:param args:
:param referit_csv:
:param scans_split:
:return:
"""
referit_data = pd.read_csv(referit_csv)
if args.mentions_target_class_only:
n_original = len(referit_data)
referit_data = referit_data[referit_data['mentions_target_class']]
referit_data.reset_index(drop=True, inplace=True)
print('Dropping utterances without explicit '
'mention to the target class {}->{}'.format(n_original, len(referit_data)))
referit_data = referit_data[['tokens', 'instance_type', 'scan_id',
'dataset', 'target_id', 'utterance', 'stimulus_id']]
referit_data.tokens = referit_data['tokens'].apply(literal_eval)
# Add the is_train data to the pandas data frame (needed in creating data loaders for the train and test)
is_train = referit_data.scan_id.apply(lambda x: x in scans_split['train'])
referit_data['is_train'] = is_train
# Trim data based on token length
train_token_lens = referit_data.tokens[is_train].apply(lambda x: len(x))
print('{}-th percentile of token length for remaining (training) data'
' is: {:.1f}'.format(95, np.percentile(train_token_lens, 95)))
n_original = len(referit_data)
referit_data = referit_data[referit_data.tokens.apply(lambda x: len(x) <= args.max_seq_len)]
referit_data.reset_index(drop=True, inplace=True)
print('Dropping utterances with more than {} tokens, {}->{}'.format(args.max_seq_len, n_original, len(referit_data)))
# do this last, so that all the previous actions remain unchanged
if args.augment_with_sr3d is not None:
print('Adding Sr3D as augmentation.')
        sr3d = pd.read_csv(args.augment_with_sr3d)
import argparse
import numpy as np
import csv
import pandas as pd
import json
import scipy.sparse as sp
from sparsebm import (
SBM,
LBM,
ModelSelection,
generate_LBM_dataset,
generate_SBM_dataset,
)
from sparsebm.utils import reorder_rows, ARI
import logging
logger = logging.getLogger(__name__)
try:
import cupy
_DEFAULT_USE_GPU = True
except ImportError:
_DEFAULT_USE_GPU = False
def define_parsers():
main = argparse.ArgumentParser(prog="sparsebm")
subparsers = main.add_subparsers(
help="algorithm to use", dest="subparser_name"
)
sbm_parser = subparsers.add_parser(
"sbm", help="use the stochastic block model"
)
lbm_parser = subparsers.add_parser(
"lbm", help="use the latent block model"
)
ms_parser = subparsers.add_parser(
"modelselection", help="use the model selection with LBM or SBM"
)
input_grp = ms_parser.add_argument_group("mandatory arguments")
input_grp.add_argument(
"ADJACENCY_MATRIX", help="List of edges in CSV format"
)
input_grp.add_argument(
"-t",
"--type",
help="model to use. Either 'lbm' or 'sbm'",
required=True,
)
input_grp = ms_parser.add_argument_group("optional arguments")
input_grp.add_argument(
"-sep",
"--sep",
default=",",
help="CSV delimiter to use. Default is ',' ",
)
input_grp.add_argument(
"-gpu",
"--use_gpu",
help="specify if a GPU should be used.",
default=_DEFAULT_USE_GPU,
type=bool,
)
input_grp.add_argument(
"-idgpu",
"--gpu_index",
help="specify the gpu index if needed.",
default=None,
type=bool,
)
input_grp.add_argument(
"-s",
"--symmetric",
        help="specify if the adjacency matrix is symmetric. For sbm only",
default=False,
)
input_grp.add_argument(
"-p", "--plot", help="display model exploration plot", default=True
)
output_grp = ms_parser.add_argument_group("output")
output_grp.add_argument(
"-o",
"--output",
help="File path for the json results.",
default="results.json",
)
generate_sbm_parser = subparsers.add_parser(
"generate", help="use sparsebm to generate a data matrix"
)
subparsers_generate = generate_sbm_parser.add_subparsers(
help="model to generate data with", dest="subparsers_generate_name"
)
sbm_generation_parser = subparsers_generate.add_parser(
"sbm", help="use the stochastic block model to generate data"
)
lbm_generation_parser = subparsers_generate.add_parser(
"lbm", help="use the latent block model to generate data"
)
help_example_base = """A json configuration file that specify the parameters
of the data to generate. If no file is given a random graph is generated."""
help_sbm_gen = """\n Example of json configuration file for SBM: \n{\n
"type": "sbm",\n "number_of_nodes": 1000,\n "number_of_clusters": 4,\n
"symmetric": true,\n "connection_probabilities": [\n [\n 0.1,\n
0.036,\n 0.012,\n 0.0614\n ],\n [\n 0.036,\n
0.074,\n 0,\n 0\n ],\n [\n 0.012,\n 0,\n
0.11,\n 0.024\n ],\n [\n 0.0614,\n 0,\n
0.024,\n 0.086\n ]\n ],\n "cluster_proportions": [\n 0.25
,\n 0.25,\n 0.25,\n 0.25\n ]\n}"""
sbm_generation_parser.add_argument(
"-f",
"--file",
default=None,
help=help_example_base + help_sbm_gen,
required=False,
)
lbm_generation_parser.add_argument(
"-f", "--file", default=None, help=help_example_base, required=False
)
for parser in [sbm_parser, lbm_parser]:
input_grp = parser.add_argument_group("mandatory arguments")
input_grp.add_argument(
"ADJACENCY_MATRIX", help="List of edges in CSV format"
)
if parser == lbm_parser:
input_grp.add_argument(
"-k1",
"--n_row_clusters",
help="number of row clusters",
default=4,
type=int,
required=True,
)
input_grp.add_argument(
"-k2",
"--n_column_clusters",
                help="number of column clusters",
default=4,
type=int,
required=True,
)
if parser == sbm_parser:
input_grp.add_argument(
"-k",
"--n_clusters",
help="number of clusters",
default=4,
type=int,
required=True,
)
output_grp = parser.add_argument_group("output")
output_grp.add_argument(
"-o",
"--output",
help="File path for the json results.",
default="results.json",
)
param_grp = parser.add_argument_group("optional arguments")
param_grp.add_argument(
"-sep",
"--sep",
default=",",
help="CSV delimiter to use. Default is ',' ",
)
if parser == sbm_parser:
param_grp.add_argument(
"-s",
"--symmetric",
                help="Specify if the adjacency matrix is symmetric",
default=False,
# type=bool,
)
param_grp.add_argument(
"-niter",
"--max_iter",
help="Maximum number of EM step",
default=10000,
type=int,
)
param_grp.add_argument(
"-ninit",
"--n_init",
help="Number of initializations that will be run",
default=100,
type=int,
)
param_grp.add_argument(
"-early",
"--n_iter_early_stop",
help="Number of EM steps to perform for each initialization.",
default=10,
type=int,
)
param_grp.add_argument(
"-ninitt",
"--n_init_total_run",
help="Number of the best initializations that will be run\
until convergence.",
default=2,
type=int,
)
param_grp.add_argument(
"-t",
"--tol",
help="Tolerance of likelihood to declare convergence.",
default=1e-4,
type=float,
)
param_grp.add_argument(
"-v",
"--verbosity",
help="Degree of verbosity. Scale from 0 (no message displayed)\
to 3.",
default=1,
type=int,
)
param_grp.add_argument(
"-gpu",
"--use_gpu",
help="Specify if a GPU should be used.",
default=_DEFAULT_USE_GPU,
type=bool,
)
param_grp.add_argument(
"-idgpu",
"--gpu_index",
help="Specify the gpu index if needed.",
default=None,
type=bool,
)
return main
def graph_from_csv(file, type, sep=","):
try:
pda = pd.read_csv(file, sep=sep, header=None)
npa = pda[[0, 1]].to_numpy()
if type == "sbm":
node_i_from = np.unique(npa)
node_i_to = np.arange(node_i_from.size)
i_mapping = {
f: t for f, t in np.stack((node_i_from, node_i_to), 1)
}
rows = pda[0].map(i_mapping)
cols = pda[1].map(i_mapping)
graph = sp.coo_matrix(
(np.ones(npa.shape[0]), (rows, cols)),
shape=(node_i_from.size, node_i_from.size),
)
return graph, i_mapping, None
else:
node_i_from = np.unique(npa[:, 0])
node_i_to = np.arange(node_i_from.size)
i_mapping = {
f: t for f, t in np.stack((node_i_from, node_i_to), 1)
}
rows = pda[0].map(i_mapping)
node_j_from = np.unique(npa[:, 1])
node_j_to = np.arange(node_j_from.size)
j_mapping = {
f: t for f, t in np.stack((node_j_from, node_j_to), 1)
}
cols = pda[1].map(j_mapping)
graph = sp.coo_matrix(
(np.ones(npa.shape[0]), (rows, cols)),
shape=(node_i_from.size, node_j_from.size),
)
return graph, i_mapping, j_mapping
except Exception as e:
logger.error(e)
raise e
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
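# --- Wiring sketch (illustrative; the package's real entry point may differ) ---
# Shows how the parsers above can be combined with the process_* handlers defined
# below; "edges.csv" and the cluster count are placeholder example values.
def example_cli_run():
    parser = define_parsers()
    args = vars(parser.parse_args(["sbm", "edges.csv", "-k", "4"]))
    if args["subparser_name"] == "sbm":
        process_sbm(args)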
def process_sbm(args):
graph, row_from_to, _ = graph_from_csv(
args["ADJACENCY_MATRIX"], args["subparser_name"], sep=args["sep"]
)
model = SBM(
max_iter=args["max_iter"],
n_clusters=args["n_clusters"],
n_init=args["n_init"],
n_iter_early_stop=args["n_iter_early_stop"],
n_init_total_run=args["n_init_total_run"],
verbosity=args["verbosity"],
atol=args["tol"],
use_gpu=args["use_gpu"],
gpu_index=args["gpu_index"],
)
symmetric = str2bool(args["symmetric"])
    logger.info(
        "Running with symmetric adjacency matrix: {}".format(symmetric)
    )
model.fit(graph, symmetric=symmetric)
if not model.trained_successfully:
logger.error("FAILED, model has not been trained successfully.")
return None
logger.info("Model has been trained successfully.")
logger.info(
"Value of the Integrated Completed Loglikelihood is {:.4f}".format(
model.get_ICL()
)
)
labels = model.labels
groups = [
np.argwhere(labels == q).flatten() for q in range(args["n_clusters"])
]
row_to_from = {v: k for k, v in row_from_to.items()}
groups = [pd.Series(g).map(row_to_from).tolist() for g in groups]
results = {
"ILC": model.get_ICL(),
"edge_probability_between_groups": model.pi_.tolist(),
"group_membership_probability": model.group_membership_probability.flatten().tolist(),
"node_ids_clustered": groups,
}
with open(args["output"], "w") as outfile:
json.dump(results, outfile)
logger.info("Results saved in {}".format(args["output"]))
def process_lbm(args):
graph, row_from_to, col_from_to = graph_from_csv(
args["ADJACENCY_MATRIX"], args["subparser_name"], sep=args["sep"]
)
model = LBM(
max_iter=args["max_iter"],
n_row_clusters=args["n_row_clusters"],
n_column_clusters=args["n_column_clusters"],
n_init=args["n_init"],
n_iter_early_stop=args["n_iter_early_stop"],
n_init_total_run=args["n_init_total_run"],
verbosity=args["verbosity"],
atol=args["tol"],
use_gpu=args["use_gpu"],
gpu_index=args["gpu_index"],
)
model.fit(graph)
if not model.trained_successfully:
logger.error("FAILED, model has not been trained successfully.")
return None
logger.info("Model has been trained successfully.")
logger.info(
"Value of the Integrated Completed Loglikelihood is {:.4f}".format(
model.get_ICL()
)
)
row_labels = model.row_labels
row_groups = [
np.argwhere(row_labels == q).flatten()
for q in range(args["n_row_clusters"])
]
row_to_from = {v: k for k, v in row_from_to.items()}
row_groups = [pd.Series(g).map(row_to_from).tolist() for g in row_groups]
col_labels = model.column_labels
col_groups = [
np.argwhere(col_labels == q).flatten()
for q in range(args["n_column_clusters"])
]
col_to_from = {v: k for k, v in col_from_to.items()}
col_groups = [pd.Series(g).map(col_to_from).tolist() for g in col_groups]
results = {
"ILC": model.get_ICL(),
"edge_probability_between_groups": model.pi_.tolist(),
"row_group_membership_probability": model.row_group_membership_probability.flatten().tolist(),
"column_group_membership_probability": model.column_group_membership_probability.flatten().tolist(),
"node_type_1_ids_clustered": row_groups,
"node_type_2_ids_clustered": col_groups,
}
with open(args["output"], "w") as outfile:
json.dump(results, outfile)
logger.info("Results saved in {}".format(args["output"]))
def generate_sbm(args):
if "JSON_FILE" in args:
with open(args["JSON_FILE"]) as f:
conf = json.load(f)
else:
conf = {}
number_of_nodes = (
conf["number_of_nodes"] if "number_of_nodes" in conf else None
)
number_of_clusters = (
conf["number_of_clusters"] if "number_of_clusters" in conf else None
)
connection_probabilities = (
np.array(conf["connection_probabilities"])
if "connection_probabilities" in conf
else None
)
cluster_proportions = (
np.array(conf["cluster_proportions"])
if "cluster_proportions" in conf
else None
)
symmetric = conf["symmetric"] if "symmetric" in conf else False
dataset = generate_SBM_dataset(
number_of_nodes,
number_of_clusters,
connection_probabilities,
cluster_proportions,
symmetric=symmetric,
)
graph = dataset["data"]
graph = np.stack((graph.row, graph.col), 1)
cluster_indicator = dataset["cluster_indicator"]
labels = cluster_indicator.argmax(1)
number_of_clusters = cluster_indicator.shape[1]
groups = [
np.argwhere(labels == q).flatten().tolist()
for q in range(number_of_clusters)
]
results = {
"node_ids_grouped": groups,
"number_of_nodes": number_of_nodes,
"number_of_clusters": number_of_clusters,
"connection_probabilities": connection_probabilities.flatten().tolist()
        if connection_probabilities is not None
else None,
"cluster_proportions": cluster_proportions.tolist()
        if cluster_proportions is not None
else None,
}
file_groups = "./groups.json"
file_edges = "./edges.csv"
with open(file_groups, "w") as outfile:
json.dump(results, outfile)
logger.info("\n Groups and params saved in {}".format(file_groups))
np.savetxt(file_edges, graph, delimiter=",")
logger.info("Edges saved in {}".format(file_edges))
def generate_lbm(args):
if "JSON_FILE" in args:
with open(args["JSON_FILE"]) as f:
conf = json.load(f)
else:
conf = {}
number_of_rows = (
conf["number_of_rows"] if "number_of_rows" in conf else None
)
number_of_columns = (
conf["number_of_columns"] if "number_of_columns" in conf else None
)
nb_row_clusters = (
conf["nb_row_clusters"] if "nb_row_clusters" in conf else None
)
nb_column_clusters = (
conf["nb_column_clusters"] if "nb_column_clusters" in conf else None
)
connection_probabilities = (
np.array(conf["connection_probabilities"])
if "connection_probabilities" in conf
else None
)
row_cluster_proportions = (
np.array(conf["row_cluster_proportions"])
if "row_cluster_proportions" in conf
else None
)
column_cluster_proportions = (
np.array(conf["column_cluster_proportions"])
if "column_cluster_proportions" in conf
else None
)
dataset = generate_LBM_dataset(
number_of_rows,
number_of_columns,
nb_row_clusters,
nb_column_clusters,
connection_probabilities,
row_cluster_proportions,
column_cluster_proportions,
)
graph = dataset["data"]
number_of_rows, number_of_columns = graph.shape
graph = np.stack((graph.row, graph.col), 1)
row_cluster_indicator = dataset["row_cluster_indicator"]
column_cluster_indicator = dataset["column_cluster_indicator"]
row_labels = row_cluster_indicator.argmax(1)
col_labels = column_cluster_indicator.argmax(1)
nb_row_clusters = row_cluster_indicator.shape[1]
nb_column_clusters = column_cluster_indicator.shape[1]
row_groups = [
np.argwhere(row_labels == q).flatten().tolist()
for q in range(nb_row_clusters)
]
col_groups = [
np.argwhere(col_labels == q).flatten().tolist()
for q in range(nb_column_clusters)
]
results = {
"row_ids_grouped": row_groups,
"column_ids_grouped": col_groups,
"number_of_rows": number_of_rows,
"number_of_columns": number_of_columns,
"nb_row_clusters": nb_row_clusters,
"nb_column_clusters": nb_column_clusters,
"connection_probabilities": connection_probabilities.flatten().tolist()
        if connection_probabilities is not None
else None,
"row_cluster_proportions": row_cluster_proportions.tolist()
        if row_cluster_proportions is not None
else None,
"column_cluster_proportions": column_cluster_proportions.tolist()
        if column_cluster_proportions is not None
else None,
}
file_groups = "./groups.json"
file_edges = "./edges.csv"
with open(file_groups, "w") as outfile:
json.dump(results, outfile)
logger.info("\nGroups and params saved in {}".format(file_groups))
np.savetxt(file_edges, graph, delimiter=",")
logger.info("Edges saved in {}".format(file_edges))
def process_model_selection(args):
if args["type"].upper() not in ["SBM", "LBM"]:
raise Exception("Invalid type argument. Must be 'SBM' or 'LBM'")
graph, row_from_to, col_from_to = graph_from_csv(
args["ADJACENCY_MATRIX"], args["type"].lower(), sep=args["sep"]
)
model_selection = ModelSelection(
model_type=args["type"].upper(),
use_gpu=args["use_gpu"],
gpu_index=args["gpu_index"],
plot=args["plot"],
)
model = model_selection.fit(graph, symmetric=args["symmetric"])
if not model.trained_successfully:
logger.error("FAILED, model has not been trained successfully.")
return None
logger.info("Model has been trained successfully.")
logger.info(
"Value of the Integrated Completed Loglikelihood is {:.4f}".format(
model.get_ICL()
)
)
if args["type"] == "lbm":
logger.info(
"The model selection picked {} row classes".format(
model.n_row_clusters
)
)
logger.info(
"The model selection picked {} column classes".format(
model.n_column_clusters
)
)
nb_row_clusters = model.n_row_clusters
nb_column_clusters = model.n_column_clusters
row_labels = model.row_labels
row_groups = [
np.argwhere(row_labels == q).flatten()
for q in range(nb_row_clusters)
]
row_to_from = {v: k for k, v in row_from_to.items()}
row_groups = [
pd.Series(g).map(row_to_from).tolist() for g in row_groups
]
col_labels = model.column_labels
col_groups = [
np.argwhere(col_labels == q).flatten()
for q in range(nb_column_clusters)
]
col_to_from = {v: k for k, v in col_from_to.items()}
col_groups = [
pd.Series(g).map(col_to_from).tolist() for g in col_groups
]
results = {
"ILC": model.get_ICL(),
"nb_row_clusters": nb_row_clusters,
"nb_column_clusters": nb_column_clusters,
"edge_probability_between_groups": model.pi_.tolist(),
"row_group_membership_probability": model.row_group_membership_probability.flatten().tolist(),
"column_group_membership_probability": model.column_group_membership_probability.flatten().tolist(),
"node_type_1_ids_clustered": row_groups,
"node_type_2_ids_clustered": col_groups,
}
else:
logger.info(
"The model selection picked {} classes".format(model.n_clusters)
)
nb_clusters = model.n_clusters
labels = model.labels
groups = [
np.argwhere(labels == q).flatten() for q in range(nb_clusters)
]
row_to_from = {v: k for k, v in row_from_to.items()}
        groups = [pd.Series(g).map(row_to_from).tolist() for g in groups]
# Copyright 2020 (c) Cognizant Digital Business, Evolutionary AI. All rights reserved. Issued under the Apache 2.0 License.
import numpy as np
import pandas as pd
ID_COLS = ['CountryName',
'RegionName',
'Date']
NPI_COLUMNS = ['C1_School closing',
'C2_Workplace closing',
'C3_Cancel public events',
'C4_Restrictions on gatherings',
'C5_Close public transport',
'C6_Stay at home requirements',
'C7_Restrictions on internal movement',
'C8_International travel controls',
'H1_Public information campaigns',
'H2_Testing policy',
'H3_Contact tracing']
# From https://github.com/OxCGRT/covid-policy-tracker/blob/master/documentation/codebook.md
MAX_NPIS = [3, 3, 2, 4, 2, 3, 2, 4, 2, 3, 2] # Sum is 30
def generate_scenario(start_date_str, end_date_str, raw_df, countries=None, scenario="Freeze"):
"""
Generates a scenario: a list of intervention plans, with history since 1/1/2020.
By default returns historical data.
Args:
start_date_str: start_date from which to apply the scenario
end_date_str: end_date of the data
raw_df: the original data frame containing the raw data
countries: a list of CountryName, or None for all countries
scenario:
- "Freeze" to keep the last known IP for every future date
- "MIN" to set all future IP to 0 (i.e. plan is to take no measures)
- "MAX" to set all future IP to maximum values (i.e. plan is to do everything possible)
- an array of integers of NPI_COLUMNS lengths: uses this array as the IP to use.
Returns: a Pandas DataFrame
"""
start_date = pd.to_datetime(start_date_str, format='%Y-%m-%d')
end_date = pd.to_datetime(end_date_str, format='%Y-%m-%d')
ips_df = raw_df[ID_COLS + NPI_COLUMNS]
    # Add RegionID column that combines CountryName and RegionName for easier manipulation of data
# hist_ips_df['GeoID'] = hist_ips_df['CountryName'] + '__' + hist_ips_df['RegionName'].astype(str)
# Filter on countries
if countries:
ips_df = ips_df[ips_df.CountryName.isin(countries)]
# Check the dates
# Remove any date that is after the requested end_date
ips_df = ips_df[ips_df.Date <= end_date]
# Fill any missing NPIs by assuming they are the same as previous day, or 0 if none is available
for npi_col in NPI_COLUMNS:
ips_df.update(ips_df.groupby(['CountryName', 'RegionName'])[npi_col].ffill().fillna(0))
future_rows = []
# Make up IP for dates in the future
for g in ips_df.CountryName.unique():
ips_gdf = ips_df[ips_df.CountryName == g]
last_known_date = ips_gdf.Date.max()
if scenario == "MIN":
zero_npis = [0] * len(NPI_COLUMNS)
future_row_values = list(ips_gdf[ips_gdf.Date == last_known_date][ID_COLS].values[0]) + zero_npis
elif scenario == "MAX":
future_row_values = list(ips_gdf[ips_gdf.Date == last_known_date][ID_COLS].values[0]) + MAX_NPIS
elif scenario == "Freeze":
future_row_values = ips_gdf[ips_gdf.Date == last_known_date].values[0]
else:
future_row_values = list(ips_gdf[ips_gdf.Date == last_known_date][ID_COLS].values[0]) + scenario
current_date = last_known_date + np.timedelta64(1, 'D')
while current_date <= end_date:
new_row = future_row_values.copy()
new_row[ID_COLS.index("Date")] = current_date
future_rows.append(new_row)
current_date = current_date + np.timedelta64(1, 'D')
if future_rows:
        future_rows_df = pd.DataFrame(future_rows, columns=ips_df.columns)
#Importing the required packages
from flask import Flask, render_template, request
import os
import pandas as pd
from pandas import ExcelFile
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import *
from sklearn.metrics import *
from sklearn.model_selection import cross_val_score
import itertools
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
app = Flask(__name__)
#Routing to initial home page
@app.route('/')
def home():
return render_template('home.html')
@app.route('/admin_login')
def admin_login():
return render_template('admin_login.html')
@app.route('/admin', methods=['GET','POST'])
def admin():
user=request.form['un']
pas=request.form['pw']
cr=pd.read_excel('admin_cred.xlsx')
un=np.asarray(cr['Username']).tolist()
pw=np.asarray(cr['Password']).tolist()
cred = dict(zip(un, pw))
if user in un:
if(cred[user]==pas):
return render_template('admin.html')
else:
k=1
return render_template('admin_login.html',k=k)
else:
k=1
return render_template('admin_login.html',k=k)
@app.route('/admin_printed', methods=['GET','POST'])
def admin_printed():
trainfile=request.files['admin_doc']
t=pd.read_excel(trainfile)
t.to_excel('trainfile.xlsx')
return render_template('admin_printed.html')
@app.route('/login')
def login():
return render_template('login.html')
@app.route('/index', methods=['GET','POST'])
def index():
user=request.form['un']
pas=request.form['pw']
cr=pd.read_excel('cred.xlsx')
un=np.asarray(cr['Username']).tolist()
pw=np.asarray(cr['Password']).tolist()
cred = dict(zip(un, pw))
if user in un:
if(cred[user]==pas):
return render_template('index.html')
else:
k=1
return render_template('login.html',k=k)
else:
k=1
return render_template('login.html',k=k)
#Routing to page when File Upload is selected
@app.route('/file_upload')
def file_upload():
return render_template("file_upload.html")
@app.route('/upload_printed', methods=['GET','POST'])
def upload_printed():
abc=request.files['printed_doc']
    test1=pd.read_excel(abc)
# complete
# The primary (as of the current moment) feature selection method.
from cabi.prepare_data.utils import bal, get_and_adjust_data
import datetime
import numpy as np
import pandas as pd
from pandas.tseries.offsets import Hour
def complete(
db_engine, station_id, start, end, sample_size=int(1.0e5),
balance=None):
"""
sample_size will be ignored if balance is not None.
"""
data = get_and_adjust_data(
db_engine, station_id, start, end)
# Balance or set to sample_size
if balance is None:
if data.size > sample_size:
data = data.sample(n=sample_size)
else:
data = bal(data, balance)
# Ensure shuffling.
data = data.iloc[np.random.permutation(len(data))]
X = []
yempty = []
yfull = []
weather_isd = pd.read_sql_query(
"SELECT * FROM weather_isd", db_engine, index_col="ts")
weather = pd.read_sql_query(
"SELECT * FROM weather", db_engine, index_col="ts")
weather = pd.concat([weather_isd, weather])
weather.index = weather.index.tz_localize(None)
# Get rid of duplicates
weather = weather.groupby(level=0).first()
weather = weather.asfreq(Hour(), method="pad")
no_weather_count = 0
for row in data.iteritems():
hour = row[0].replace(
minute=0, second=0, microsecond=0, tzinfo=None)
try:
temp_hour = hour
temp = float(weather.loc[temp_hour].temp)
            while pd.isnull(temp):
"""
Base class for a runnable script
"""
import pandas as pd
import numpy as np
from .. import api as mhapi
import os
from ..utility import logger
class Processor:
def __init__(self, verbose=True, violate=False, independent=True):
self.verbose = verbose
self.independent = independent
self.violate = violate
self.name = 'BaseProcessor'
def run_on_file(self, file, prev_file=None, next_file=None):
self.file = file
if self.independent:
prev_file = None
next_file = None
self._extract_meta(file)
data, prev_data, next_data = self._load_file(file, prev_file=prev_file, next_file=next_file)
combined_data, data_start_indicator, data_stop_indicator = self._merge_data(data, prev_data=prev_data, next_data=next_data)
result_data = self._run_on_data(combined_data, data_start_indicator, data_stop_indicator)
result_data = self._post_process(result_data)
return result_data
def set_meta(self, meta):
self.meta = meta
def _extract_meta(self, file):
file = os.path.normpath(os.path.abspath(file))
pid = mhapi.extract_pid(file)
if not self.violate:
data_type = mhapi.extract_datatype(file)
file_type = mhapi.extract_file_type(file)
sensor_type = mhapi.extract_sensortype(file)
sid = mhapi.extract_id(file)
date = mhapi.extract_date(file)
hour = mhapi.extract_hour(file)
meta = dict(
pid=pid,
data_type=data_type,
file_type=file_type,
sensor_type=sensor_type,
sid=sid,
date=date,
hour=hour
)
else:
meta = dict(
pid=pid
)
self.meta = meta
def _load_file(self, file, prev_file=None, next_file=None):
raise NotImplementedError("Subclass must implement this method")
def _merge_data(self, data, prev_data=None, next_data=None):
raise NotImplementedError("Subclass must implement this method")
def _run_on_data(self, combined_data, data_start_indicator, data_stop_indicator):
raise NotImplementedError("Subclass must implement this method")
def _post_process(self, result_data):
return result_data
def __str__(self):
return self.name
class SensorProcessor(Processor):
def __init__(self, verbose=True, violate=False, independent=True):
Processor.__init__(self, verbose=verbose, violate=violate, independent=independent)
self.name = 'SensorProcessor'
def _load_file(self, file, prev_file=None, next_file=None):
file = os.path.normpath(os.path.abspath(file))
df = mhapi.helpers.importer.import_sensor_file_mhealth(file)
if self.verbose:
logger.info("Current file: " + file)
logger.info("Previous file: " + str(prev_file))
logger.info("Next file: " + str(next_file))
if prev_file is not None and prev_file != "None":
prev_file = os.path.normpath(os.path.abspath(prev_file))
prev_df = mhapi.helpers.importer.import_sensor_file_mhealth(prev_file)
else:
            prev_df = pd.DataFrame()
# flake8: noqa: F841
import tempfile
from typing import Any, Dict, List, Union
from pandas.io.parsers import TextFileReader
import numpy as np
import pandas as pd
from . import check_series_result, check_dataframe_result
def test_types_to_datetime() -> None:
df = pd.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
r1: pd.Series = pd.to_datetime(df)
r2: pd.Series = pd.to_datetime(df, unit="s", origin="unix", infer_datetime_format=True)
r3: pd.Series = pd.to_datetime(df, unit="ns", dayfirst=True, utc=None, format="%M:%D", exact=False)
r4: pd.DatetimeIndex = pd.to_datetime([1, 2], unit="D", origin=pd.Timestamp("01/01/2000"))
r5: pd.DatetimeIndex = pd.to_datetime([1, 2], unit="D", origin=3)
r6: pd.DatetimeIndex = pd.to_datetime(["2022-01-03", "2022-02-22"])
r7: pd.DatetimeIndex = pd.to_datetime(pd.Index(["2022-01-03", "2022-02-22"]))
r8: pd.Series = pd.to_datetime({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
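# --- Additional illustrative case (not part of the original test module) ---
# Unparsable strings become NaT when errors="coerce" is used.
def test_types_to_datetime_coerce_example() -> None:
    r9: pd.DatetimeIndex = pd.to_datetime(["2022-01-03", "not-a-date"], errors="coerce")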
def test_types_concat() -> None:
s = pd.Series([0, 1, -10])
s2 = pd.Series([7, -5, 10])
check_series_result(pd.concat([s, s2]))
check_dataframe_result(pd.concat([s, s2], axis=1))
check_series_result(pd.concat([s, s2], keys=["first", "second"], sort=True))
check_series_result(pd.concat([s, s2], keys=["first", "second"], names=["source", "row"]))
# Depends on the axis
rs1: Union[pd.Series, pd.DataFrame] = pd.concat({"a": s, "b": s2})
rs1a: Union[pd.Series, pd.DataFrame] = pd.concat({"a": s, "b": s2}, axis=1)
rs2: Union[pd.Series, pd.DataFrame] = pd.concat({1: s, 2: s2})
rs2a: Union[pd.Series, pd.DataFrame] = pd.concat({1: s, 2: s2}, axis=1)
rs3: Union[pd.Series, pd.DataFrame] = pd.concat({1: s, None: s2})
rs3a: Union[pd.Series, pd.DataFrame] = pd.concat({1: s, None: s2}, axis=1)
df = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
df2 = pd.DataFrame(data={"col1": [10, 20], "col2": [30, 40]})
check_dataframe_result(pd.concat([df, df2]))
check_dataframe_result(pd.concat([df, df2], axis=1))
check_dataframe_result(pd.concat([df, df2], keys=["first", "second"], sort=True))
check_dataframe_result(pd.concat([df, df2], keys=["first", "second"], names=["source", "row"]))
result: pd.DataFrame = pd.concat({"a": pd.DataFrame([1, 2, 3]), "b": pd.DataFrame([4, 5, 6])}, axis=1)
result2: Union[pd.DataFrame, pd.Series] = pd.concat({"a": pd.Series([1, 2, 3]), "b": pd.Series([4, 5, 6])}, axis=1)
rdf1: pd.DataFrame = pd.concat({"a": df, "b": df2})
    rdf2: pd.DataFrame = pd.concat({1: df, 2: df2})
import pandas as pd
from pandas._testing import assert_frame_equal
#from fopy.database._handle_input_formulas_dtype import _Handle_input_dtype
from fopy import Formulas
d_list = ['d = v*t', 'f = m*a']
d_tuple = tuple(d_list)
d_set = set(d_list)
d_dict_fos = {'Formula': d_list}
d_dict_fos_id = {'ID':[1,2], **d_dict_fos}
d_dict_num = {1:d_list[0], 2:d_list[1]}
d_df = pd.DataFrame(data=d_dict_fos)
good_df = pd.DataFrame(data={'ID':[1, 2], 'Formula':d_list})
def test_load_data():
h_list = Formulas(data=d_list)
assert_frame_equal(h_list.data, good_df)
h_tuple = Formulas(data=d_tuple)
assert_frame_equal(h_tuple.data, good_df)
#h_set = _Handle_input_dtype(data=d_set)
#assert_frame_equal(h_set.data, good_df)
h_dict_fos = Formulas(data=d_dict_fos)
assert_frame_equal(h_dict_fos.data, good_df)
h_dict_fos_id = Formulas(data=d_dict_fos_id)
assert_frame_equal(h_dict_fos_id.data, good_df)
h_dict_num = Formulas(data=d_dict_num)
    assert_frame_equal(h_dict_num.data, good_df)
'''
Python reducer function
Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
'''
'''
Modified by <EMAIL> for AWS lambda map-reduce test.
This reducer function takes in multiple files which are mapper phase outputs , writes back to one parquet file in s3
'''
import boto3
import json
import random
import resource
from io import StringIO
import time
import awswrangler as wr
import pandas as pd
# create an S3 & Dynamo session
s3 = boto3.resource('s3')
s3_client = boto3.client('s3')
# constants
TASK_MAPPER_PREFIX = "task/mapper/";
TASK_REDUCER_PREFIX = "task/reducer/";
def write_to_s3(bucket, key, data, metadata):
# Write to S3 Bucket
s3.Bucket(bucket).put_object(Key=key, Body=data, Metadata=metadata)
def write_pandas_parquet_to_s3(df, bucketName,fname, keyName):
path = "s3://" + str(bucketName) + "/parquet/" + str(keyName)
# dummy dataframe
wr.s3.to_parquet(
df=df,
path=path,
compression='gzip'
)
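# --- Usage sketch (illustrative only; bucket and key names are placeholders) ---
def example_write(df):
    # Persist a reducer output frame as gzip-compressed parquet under the parquet/ prefix.
    write_pandas_parquet_to_s3(df, "my-job-bucket", "part-0.parquet", "job1/part-0.parquet")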
def lambda_handler(event, context):
start_time = time.time()
job_bucket = event['jobBucket']
bucket = event['bucket']
reducer_keys = event['keys']
job_id = event['jobId']
r_id = event['reducerId']
step_id = event['stepId']
n_reducers = event['nReducers']
# aggr
results = {}
line_count = 0
final_df = pd.DataFrame()
# INPUT CSV => OUTPUT PARQUET
# Download and process all keys
for key in reducer_keys:
response = s3_client.get_object(Bucket=job_bucket, Key=key)
contents = response['Body'].read().decode('utf-8')
data = contents.split('\n')[1:-1]
df = pd.DataFrame(data,columns=['row'])
#print(df.shape)
df[['row_number','VendorID','tpep_pickup_datetime','tpep_dropoff_datetime','passenger_count','trip_distance','pickup_longitude','pickup_latitude','RatecodeID','store_and_fwd_flag','dropoff_longitude','dropoff_latitude','payment_type','fare_amount','extra','mta_tax','tip_amount','tolls_amount','improvement_surcharge','total_amount','LocationID']] = df.row.str.split(",",expand=True)
df.drop(['row','row_number'],axis=1,inplace=True)
#type convert
df['VendorID'] = pd.to_numeric(df['VendorID'])
df['passenger_count'] = pd.to_numeric(df['passenger_count'])
df['trip_distance'] = pd.to_numeric(df['trip_distance'])
df['pickup_latitude'] = pd.to_numeric(df['pickup_latitude'])
df['pickup_longitude'] = pd.to_numeric(df['pickup_longitude'])
        df['RatecodeID'] = pd.to_numeric(df['RatecodeID'])
from datetime import date, timedelta
import pandas as pd
from point import Point
import os
from urllib.error import HTTPError
import datetime
import numpy as np
class County:
def __init__(self, county_name): #( county_list, data_list, label_list):
self.name = county_name
def get_data(self, labels_list):
sdate = datetime.date(2020, 3, 22) # start date || this is the first day that JHU posted data for MDC
edate = datetime.date.today() # end date || currently set to yesterday's date because it turned to midnight and I was getting an error cause JHU did not publish it yet for 3/28
delta = edate - sdate # as timedelta
county = self.name
big_list = []
values_list =[]
x = []
# labels_list = ['FIPS', 'Admin2', 'Province_State', 'Country_Region', 'Last_Update', 'Lat', 'Long_', 'Confirmed', 'Deaths', 'Recovered', 'Active', 'Combined_Key']
# labels_list = [ 'Last_Update', 'Confirmed', 'Deaths', 'Combined_Key']
# big_list += labels_list + ['|||']
for i in range(delta.days + 1):
date = sdate + timedelta(days=i)
printable_date = str(date)
month = date.month
if len(str(month))<2:
month = '0'+str(month)
day = date.day
if len(str(day))<2:
day = '0'+str(day)
url = f'''https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{month}-{day}-2020.csv'''
#https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/04-17-2020.csv
print(day)
print(month)
try:
                db = pd.read_csv(url, error_bad_lines=False)
import pandas as pd
from hooqu.analyzers.analyzer import COUNT_COL
from hooqu.analyzers.grouping_analyzers import FrequencyBasedAnalyzer
class TestBaseGroupingAnalyzer:
def test_frequency_based_asnalyzers_computes_correct_frequencies(self,):
df = pd.DataFrame({"att1": ["A", "B", "B"]})
state = FrequencyBasedAnalyzer.compute_frequencies(df, ["att1"])
assert state.num_rows == 3
expected = pd.DataFrame({"att1": ["A", "B"], f"{COUNT_COL}": [1, 2]})
        pd.testing.assert_frame_equal(expected, state.frequencies)
import pandas as pd
import numpy as np
import re
import math
import codecs
import csv
# Estimated remaining total number of movies: about 200k to 220k
data=pd.read_csv("Website_ETL.CSV")
data=np.array(data)
dic={}
dic["Jan"]="1"
dic["Feb"]="2"
dic["Mar"]="3"
dic["Apr"]="4"
dic["May"]="5"
dic["Jun"]="6"
dic["Jul"]="7"
dic["Aug"]="8"
dic["Sep"]="9"
dic["Oct"]="10"
dic["Nov"]="11"
dic["Dec"]="12"
fileFirstMenu=open("ASIN.txt","r",encoding="utf-8")
isbnBanned=fileFirstMenu.readlines()
bannedIsbn={}
for i in isbnBanned:
bannedIsbn[i]=1
turn=0
for frame in data:
    Drop=False # whether this record should be discarded
    loss=0 # number of missing field values
    # convert the numpy array row to a list
j = 0
for i in frame:
        if (pd.isna(i)):
import datetime
import json
import os
import pathlib
import tempfile
from unittest import mock
import numpy as np
import pandas as pd
import pytest
from etna.datasets import TSDataset
from etna.loggers import LocalFileLogger
from etna.loggers import S3FileLogger
from etna.loggers import tslogger
from etna.metrics import MAE
from etna.metrics import MSE
from etna.metrics import SMAPE
from etna.models import NaiveModel
from etna.pipeline import Pipeline
DATETIME_FORMAT = "%Y-%m-%dT%H-%M-%S"
def test_local_file_logger_init_new_dir():
"""Test that LocalFileLogger creates subfolder during init."""
with tempfile.TemporaryDirectory() as dirname:
assert len(os.listdir(dirname)) == 0
_ = LocalFileLogger(experiments_folder=dirname)
assert len(os.listdir(dirname)) == 1
def test_local_file_logger_save_config():
"""Test that LocalFileLogger creates folder with config during init."""
with tempfile.TemporaryDirectory() as dirname:
cur_dir = pathlib.Path(dirname)
example_config = {"key": "value"}
_ = LocalFileLogger(experiments_folder=dirname, config=example_config)
experiment_folder_name = os.listdir(dirname)[0]
experiment_folder = cur_dir.joinpath(experiment_folder_name)
assert len(os.listdir(experiment_folder)) == 1
with open(experiment_folder.joinpath("config.json")) as inf:
read_config = json.load(inf)
assert read_config == example_config
def test_local_file_logger_start_experiment():
"""Test that LocalFileLogger creates new subfolder according to the parameters."""
with tempfile.TemporaryDirectory() as dirname:
cur_dir = pathlib.Path(dirname)
# get rid of seconds fractions
start_datetime = datetime.datetime.strptime(datetime.datetime.now().strftime(DATETIME_FORMAT), DATETIME_FORMAT)
logger = LocalFileLogger(experiments_folder=dirname)
experiment_folder_name = os.listdir(dirname)[0]
experiment_folder = cur_dir.joinpath(experiment_folder_name)
# get rid of seconds fractions
end_datetime = datetime.datetime.strptime(datetime.datetime.now().strftime(DATETIME_FORMAT), DATETIME_FORMAT)
folder_creation_datetime = datetime.datetime.strptime(experiment_folder_name, DATETIME_FORMAT)
assert end_datetime >= folder_creation_datetime >= start_datetime
assert len(os.listdir(experiment_folder)) == 0
logger.start_experiment(job_type="test", group="1")
assert len(os.listdir(experiment_folder)) == 1
assert experiment_folder.joinpath("test").joinpath("1").exists()
def test_local_file_logger_fail_save_table():
"""Test that LocalFileLogger can't save table before starting the experiment."""
with tempfile.TemporaryDirectory() as dirname:
logger = LocalFileLogger(experiments_folder=dirname)
example_df = pd.DataFrame({"keys": [1, 2, 3], "values": ["1", "2", "3"]})
with pytest.raises(ValueError, match="You should start experiment before"):
logger._save_table(example_df, "example")
def test_local_file_logger_save_table():
"""Test that LocalFileLogger saves table after starting the experiment."""
with tempfile.TemporaryDirectory() as dirname:
cur_dir = pathlib.Path(dirname)
logger = LocalFileLogger(experiments_folder=dirname, gzip=False)
experiment_folder_name = os.listdir(dirname)[0]
experiment_folder = cur_dir.joinpath(experiment_folder_name)
logger.start_experiment(job_type="example", group="example")
example_df = pd.DataFrame({"keys": [1, 2, 3], "values": ["first", "second", "third"]})
logger._save_table(example_df, "example")
experiment_subfolder = experiment_folder.joinpath("example").joinpath("example")
assert "example.csv" in os.listdir(experiment_subfolder)
read_example_df = pd.read_csv(experiment_subfolder.joinpath("example.csv"))
assert np.all(read_example_df == example_df)
def test_local_file_logger_fail_save_dict():
"""Test that LocalFileLogger can't save dict before starting the experiment."""
with tempfile.TemporaryDirectory() as dirname:
logger = LocalFileLogger(experiments_folder=dirname)
example_dict = {"keys": [1, 2, 3], "values": ["first", "second", "third"]}
with pytest.raises(ValueError, match="You should start experiment before"):
logger._save_dict(example_dict, "example")
def test_local_file_logger_save_dict():
"""Test that LocalFileLogger saves dict after starting the experiment."""
with tempfile.TemporaryDirectory() as dirname:
cur_dir = pathlib.Path(dirname)
logger = LocalFileLogger(experiments_folder=dirname)
experiment_folder_name = os.listdir(dirname)[0]
experiment_folder = cur_dir.joinpath(experiment_folder_name)
logger.start_experiment(job_type="example", group="example")
example_dict = {"keys": [1, 2, 3], "values": ["first", "second", "third"]}
logger._save_dict(example_dict, "example")
experiment_subfolder = experiment_folder.joinpath("example").joinpath("example")
assert "example.json" in os.listdir(experiment_subfolder)
with open(experiment_subfolder.joinpath("example.json")) as inf:
read_example_dict = json.load(inf)
assert read_example_dict == example_dict
def test_base_file_logger_log_backtest_run(example_tsds: TSDataset):
"""Test that BaseLogger correctly works in log_backtest_run on LocalFileLogger example."""
with tempfile.TemporaryDirectory() as dirname:
cur_dir = pathlib.Path(dirname)
logger = LocalFileLogger(experiments_folder=dirname, gzip=False)
experiment_folder_name = os.listdir(dirname)[0]
experiment_folder = cur_dir.joinpath(experiment_folder_name)
idx = tslogger.add(logger)
metrics = [MAE(), MSE(), SMAPE()]
pipeline = Pipeline(model=NaiveModel(), horizon=10)
n_folds = 5
pipeline.backtest(ts=example_tsds, metrics=metrics, n_jobs=1, n_folds=n_folds)
for fold_number in range(n_folds):
fold_folder = experiment_folder.joinpath("crossval").joinpath(str(fold_number))
assert "metrics.csv" in os.listdir(fold_folder)
assert "forecast.csv" in os.listdir(fold_folder)
assert "test.csv" in os.listdir(fold_folder)
# check metrics summary
with open(fold_folder.joinpath("metrics_summary.json"), "r") as inf:
metrics_summary = json.load(inf)
statistic_keys = [
"median",
"mean",
"std",
"percentile_5",
"percentile_25",
"percentile_75",
"percentile_95",
]
assert len(metrics_summary.keys()) == len(metrics) * len(statistic_keys)
tslogger.remove(idx)
@pytest.mark.parametrize("aggregate_metrics", [True, False])
def test_base_file_logger_log_backtest_metrics(example_tsds: TSDataset, aggregate_metrics: bool):
"""Test that BaseFileLogger correctly works in log_backtest_metrics on LocaFileLogger example."""
with tempfile.TemporaryDirectory() as dirname:
cur_dir = pathlib.Path(dirname)
logger = LocalFileLogger(experiments_folder=dirname, gzip=False)
experiment_folder_name = os.listdir(dirname)[0]
experiment_folder = cur_dir.joinpath(experiment_folder_name)
idx = tslogger.add(logger)
metrics = [MAE(), MSE(), SMAPE()]
pipeline = Pipeline(model=NaiveModel(), horizon=10)
n_folds = 5
metrics_df, forecast_df, fold_info_df = pipeline.backtest(
ts=example_tsds, metrics=metrics, n_jobs=1, n_folds=n_folds, aggregate_metrics=aggregate_metrics
)
crossval_results_folder = experiment_folder.joinpath("crossval_results").joinpath("all")
# check metrics_df
metrics_df = metrics_df.reset_index(drop=True)
metrics_df_saved = pd.read_csv(crossval_results_folder.joinpath("metrics.csv"))
assert np.all(metrics_df_saved["segment"] == metrics_df["segment"])
assert np.allclose(metrics_df_saved.drop(columns=["segment"]), metrics_df.drop(columns=["segment"]))
# check forecast_df
forecast_df = TSDataset.to_flatten(forecast_df)
forecast_df_saved = pd.read_csv(
crossval_results_folder.joinpath("forecast.csv"), parse_dates=["timestamp"], infer_datetime_format=True
)
assert np.all(
forecast_df_saved[["timestamp", "fold_number", "segment"]]
== forecast_df[["timestamp", "fold_number", "segment"]]
)
assert np.allclose(forecast_df_saved["target"], forecast_df["target"])
# check fold_info_df
fold_info_df = fold_info_df.reset_index(drop=True)
fold_info_df_saved = pd.read_csv(
crossval_results_folder.joinpath("fold_info.csv"),
parse_dates=["train_start_time", "train_end_time", "test_start_time", "test_end_time"],
infer_datetime_format=True,
)
assert np.all(fold_info_df_saved == fold_info_df)
# check metrics summary
with open(crossval_results_folder.joinpath("metrics_summary.json"), "r") as inf:
metrics_summary = json.load(inf)
statistic_keys = ["median", "mean", "std", "percentile_5", "percentile_25", "percentile_75", "percentile_95"]
assert len(metrics_summary.keys()) == len(metrics) * len(statistic_keys)
tslogger.remove(idx)
def test_s3_file_logger_fail_init_endpoint_url(monkeypatch):
"""Test that S3FileLogger can't be created without setting 'endpoint_url' environment variable."""
monkeypatch.delenv("endpoint_url", raising=False)
monkeypatch.setenv("aws_access_key_id", "example")
monkeypatch.setenv("aws_secret_access_key", "example")
with pytest.raises(OSError, match="Environment variable `endpoint_url` should be specified"):
_ = S3FileLogger(bucket="example", experiments_folder="experiments_folder")
def test_s3_file_logger_fail_init_aws_access_key_id(monkeypatch):
"""Test that S3FileLogger can't be created without setting 'aws_access_key_id' environment variable."""
monkeypatch.setenv("endpoint_url", "https://s3.example.com")
monkeypatch.delenv("aws_access_key_id", raising=False)
monkeypatch.setenv("aws_secret_access_key", "example")
with pytest.raises(OSError, match="Environment variable `aws_access_key_id` should be specified"):
_ = S3FileLogger(bucket="example", experiments_folder="experiments_folder")
def test_s3_file_logger_fail_init_aws_secret_access_key(monkeypatch):
"""Test that S3FileLogger can't be created without setting 'aws_secret_access_key' environment variable."""
monkeypatch.setenv("endpoint_url", "https://s3.example.com")
monkeypatch.setenv("aws_access_key_id", "example")
monkeypatch.delenv("aws_secret_access_key", raising=False)
with pytest.raises(OSError, match="Environment variable `aws_secret_access_key` should be specified"):
_ = S3FileLogger(bucket="example", experiments_folder="experiments_folder")
@mock.patch("etna.loggers.S3FileLogger._check_bucket", return_value=None)
@mock.patch("etna.loggers.S3FileLogger._get_s3_client", return_value=None)
def test_s3_file_logger_fail_save_table(check_bucket_fn, get_s3_client_fn):
"""Test that S3FileLogger can't save table before starting the experiment."""
logger = S3FileLogger(bucket="example", experiments_folder="experiments_folder")
    example_df = pd.DataFrame({"keys": [1, 2, 3], "values": ["first", "second", "third"]})
# -*- coding: utf-8 -*-
""" # CREDITS
Software developed in the plant biology laboratory located on the Antumapu campus of the Universidad de Chile.
- Authors:
- <NAME>.
- <NAME>.
- Contact:
- <EMAIL>
- <EMAIL> """
#package imports
import pandas as pd
import os
import errno
import pyqrcode
from pathlib import Path
import filecmp
import shutil
from python_firebase_url_shortener.url_shortener import UrlShortener
import time
import sys
import easygui as eg
import numpy
from PIL import Image
# TODO: auto-detect the CSV separator (; or ,)
class file_manager:
def file_opener(self):
        # check whether a csv file has been created previously
try:
            data=pd.read_csv('dataframe.csv',header=0,sep=';') # TODO: allow varying between ; and ,
except:
            file_path=eg.fileopenbox(msg='pick the file which contains your data',title='directory',default='*',filetypes=None,multiple=False)
if file_path.endswith('.xlsx') or file_path.endswith('.xls'):
data=pd.read_excel(file_path,sheet_name='Hoja1',header=0)
elif file_path.endswith('.csv'):
                data=pd.read_csv(file_path,header=0,sep=';') # TODO: allow varying between ; and ,
columns_df=data.columns.tolist()
msg='select a column to be the index of the dataframe'
title='select index'
indexo=eg.choicebox(msg,title,columns_df)
data=data.set_index(indexo, drop = True)
og_data=data.copy()
og_columns_df=og_data.columns.tolist()
        columns_dwc=pd.read_csv('documents\dwc_terms\simple_dwc_horizontal.csv',header=0,sep=';').columns.tolist() # TODO: allow varying between ; and ,
columns_difference=list(set(columns_df)-set(columns_dwc))
if not columns_difference:
pass
else:
            msg='the following columns do not belong to DwC, select the ones you wish to delete'
title='select to delete'
choicebox=eg.multchoicebox(msg,title,columns_difference)
try:
for label in choicebox:
data.drop(label,axis=1,inplace=True)
except:
pass
empty_columns_drop_answer=eg.ynbox(msg='Do you wish to delete the empty columns?...',title='Drop empty columns') #a way to drop fully empty columns
if empty_columns_drop_answer==True:
data.dropna(axis=1, how='all',inplace=True)
og_data.dropna(axis=1, how='all',inplace=True)
og_data.to_csv('online_dataframe.csv',sep=',')
else:
pass
return og_data,data,indexo,og_columns_df
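    # --- Usage sketch (illustrative only) ---
    # file_opener() falls back to a file-picker dialog when no cached dataframe.csv
    # exists, so it is meant to be run interactively, e.g.:
    #   fm = file_manager()
    #   og_data, data, index_col, og_columns = fm.file_opener()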
def file_creation(self):
Record_level=pd.read_csv('documents\dwc_terms\Record_level.csv',header=0,sep=';',encoding = 'unicode_escape')
Ocurrence=pd.read_csv('documents\dwc_terms\Ocurrence.csv',header=0,sep=';',encoding = 'unicode_escape')
Organism=pd.read_csv('documents\dwc_terms\organism.csv',header=0,sep=';',encoding = 'unicode_escape')
Material_sample=pd.read_csv('documents\dwc_terms\MaterialSample.csv',header=0,sep=';',encoding = 'unicode_escape')
Event=pd.read_csv('documents\dwc_terms\event.csv',header=0,sep=';',encoding = 'unicode_escape')
Location=pd.read_csv('documents\dwc_terms\location.csv',header=0,sep=';',encoding = 'unicode_escape')
        Geological_Context=pd.read_csv('documents\dwc_terms\GeologicalContext.csv',header=0,sep=';',encoding = 'unicode_escape')
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
class TestSeriesCombine:
def test_combine_scalar(self):
# GH 21248
# Note - combine() with another Series is tested elsewhere because
# it is used when testing operators
s = pd.Series([i * 10 for i in range(5)])
result = s.combine(3, lambda x, y: x + y)
expected = pd.Series([i * 10 + 3 for i in range(5)])
tm.assert_series_equal(result, expected)
result = s.combine(22, lambda x, y: min(x, y))
expected = pd.Series([min(i * 10, 22) for i in range(5)])
tm.assert_series_equal(result, expected)
def test_update(self):
s = Series([1.5, np.nan, 3.0, 4.0, np.nan])
s2 = Series([np.nan, 3.5, np.nan, 5.0])
s.update(s2)
expected = Series([1.5, 3.5, 3.0, 5.0, np.nan])
tm.assert_series_equal(s, expected)
# GH 3217
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df["c"] = np.nan
df["c"].update(Series(["foo"], index=[0]))
expected = DataFrame(
[[1, np.nan, "foo"], [3, 2.0, np.nan]], columns=["a", "b", "c"]
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"other, dtype, expected",
[
# other is int
([61, 63], "int32", pd.Series([10, 61, 12], dtype="int32")),
([61, 63], "int64", pd.Series([10, 61, 12])),
([61, 63], float, pd.Series([10.0, 61.0, 12.0])),
([61, 63], object, pd.Series([10, 61, 12], dtype=object)),
# other is float, but can be cast to int
([61.0, 63.0], "int32", pd.Series([10, 61, 12], dtype="int32")),
([61.0, 63.0], "int64", pd.Series([10, 61, 12])),
            ([61.0, 63.0], float, pd.Series([10.0, 61.0, 12.0])),
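        ],
    )
    def test_update_dtypes(self, other, dtype, expected):
        # Hedged reconstruction: the remaining parametrized cases and this test
        # body are truncated in the source; the body below follows the usual
        # Series.update dtype-test pattern and is an assumption.
        ser = Series([10, 11, 12], dtype=dtype)
        other = Series(other, index=[1, 3])
        ser.update(other)
        tm.assert_series_equal(ser, expected)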
"""Console script for scribbles."""
import os
import sys
import click
import numpy as np
import pandas as pd
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
from collections import defaultdict
from pathlib import Path
from scribbles.datasets.synthetic import synthetic_sinusoidal, make_regression_dataset
tf.disable_v2_behavior()
tfd = tfp.distributions
kernels = tfp.math.psd_kernels
tf.logging.set_verbosity(tf.logging.INFO)
# TODO: add support for option
kernel_cls = kernels.ExponentiatedQuadratic
NUM_TRAIN = 512
NUM_FEATURES = 1
NUM_INDUCING_POINTS = 16
NUM_QUERY_POINTS = 256
JITTER = 1e-6
NOISE_VARIANCE = 1e-1
NUM_EPOCHS = 1000
BATCH_SIZE = 64
LEARNING_RATE = 1e-3
BETA1 = 0.9
BETA2 = 0.99
CHECKPOINT_DIR = "models/"
SUMMARY_DIR = "logs/"
CHECKPOINT_PERIOD = 100
SUMMARY_PERIOD = 5
LOG_PERIOD = 1
SEED = 42
SHUFFLE_BUFFER_SIZE = 256
def inducing_index_points_history_to_dataframe(inducing_index_points_history):
# TODO: this will fail for `num_features > 1`
return pd.DataFrame(np.hstack(inducing_index_points_history).T)
def variational_scale_history_to_dataframe(variational_scale_history,
num_epochs):
a = np.stack(variational_scale_history, axis=0).reshape(num_epochs, -1)
    return pd.DataFrame(a)
"""
Summary: Pandas extension for converting 15-character Salesforce IDs to 18-character Salesforce IDs
Date: 2020-10-12
Contributor(s):
<NAME>
"""
from functools import lru_cache
from pandas import DataFrame
from pandas.api.extensions import register_series_accessor
@register_series_accessor("sf")
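class SalesforceIdAccessor:
    """Hedged sketch: the accessor body is truncated in the source.

    The conversion below implements the standard Salesforce 15-to-18 character
    checksum algorithm; the class name, the ``to_18`` method name, and the use
    of ``lru_cache`` are assumptions rather than the original author's code.
    Usage (assumed): ``df["account_id"].sf.to_18()``.
    """

    _ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ012345"

    def __init__(self, pandas_obj):
        self._obj = pandas_obj

    @staticmethod
    @lru_cache(maxsize=None)
    def _convert(sf_id):
        # Pass through anything that is not a 15-character string.
        if not isinstance(sf_id, str) or len(sf_id) != 15:
            return sf_id
        suffix = ""
        for start in (0, 5, 10):
            chunk = sf_id[start:start + 5]
            # Bit i of the checksum index is set when character i is uppercase.
            bits = sum(1 << i for i, ch in enumerate(chunk) if ch.isupper())
            suffix += SalesforceIdAccessor._ALPHABET[bits]
        return sf_id + suffix

    def to_18(self):
        """Return a copy of the Series with 15-char IDs extended to 18 chars."""
        return self._obj.map(self._convert)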
#
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Testing out the datacompy functionality
"""
import io
import logging
import sys
from datetime import datetime
from decimal import Decimal
from unittest import mock
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal
from pytest import raises
import datacompy
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def test_numeric_columns_equal_abs():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_numeric_columns_equal_rel():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |False
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |False
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|True
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_date_columns_equal():
data = """a|b|expected
2017-01-01|2017-01-01|True
2017-01-02|2017-01-02|True
2017-10-01|2017-10-10|False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_unequal():
"""I want datetime fields to match with dates stored as strings
"""
df = pd.DataFrame([{"a": "2017-01-01", "b": "2017-01-02"}, {"a": "2017-01-01"}])
df["a_dt"] = pd.to_datetime(df["a"])
df["b_dt"] = pd.to_datetime(df["b"])
assert datacompy.columns_equal(df.a, df.a_dt).all()
assert datacompy.columns_equal(df.b, df.b_dt).all()
assert datacompy.columns_equal(df.a_dt, df.a).all()
assert datacompy.columns_equal(df.b_dt, df.b).all()
assert not datacompy.columns_equal(df.b_dt, df.a).any()
assert not datacompy.columns_equal(df.a_dt, df.b).any()
assert not datacompy.columns_equal(df.a, df.b_dt).any()
assert not datacompy.columns_equal(df.b, df.a_dt).any()
def test_bad_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[{"a": "2017-01-01", "b": "2017-01-01"}, {"a": "2017-01-01", "b": "217-01-01"}]
)
df["a_dt"] = pd.to_datetime(df["a"])
assert not datacompy.columns_equal(df.a_dt, df.b).any()
def test_rounded_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.000000", "exp": True},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.123456", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:01.000000", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00", "exp": True},
]
)
df["a_dt"] = pd.to_datetime(df["a"])
actual = datacompy.columns_equal(df.a_dt, df.b)
expected = df["exp"]
assert_series_equal(actual, expected, check_names=False)
def test_decimal_float_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": False},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_float_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": True},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": False},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_infinity_and_beyond():
df = pd.DataFrame(
[
{"a": np.inf, "b": np.inf, "expected": True},
{"a": -np.inf, "b": -np.inf, "expected": True},
{"a": -np.inf, "b": np.inf, "expected": False},
{"a": np.inf, "b": -np.inf, "expected": False},
{"a": 1, "b": 1, "expected": True},
{"a": 1, "b": 0, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1", "expected": False},
{"a": 1, "b": "yo", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column_with_ignore_spaces():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi ", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1 ", "expected": False},
{"a": 1, "b": "yo ", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column_with_ignore_spaces_and_case():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi ", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1 ", "expected": False},
{"a": 1, "b": "yo ", "expected": False},
{"a": "Hi", "b": "hI ", "expected": True},
{"a": "HI", "b": "HI ", "expected": True},
{"a": "hi", "b": "hi ", "expected": True},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True, ignore_case=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_compare_df_setter_bad():
df = pd.DataFrame([{"a": 1, "A": 2}, {"a": 2, "A": 2}])
with raises(TypeError, match="df1 must be a pandas DataFrame"):
compare = datacompy.Compare("a", "a", ["a"])
with raises(ValueError, match="df1 must have all columns from join_columns"):
compare = datacompy.Compare(df, df.copy(), ["b"])
with raises(ValueError, match="df1 must have unique column names"):
compare = datacompy.Compare(df, df.copy(), ["a"])
df_dupe = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 3}])
assert datacompy.Compare(df_dupe, df_dupe.copy(), ["a", "b"]).df1.equals(df_dupe)
def test_compare_df_setter_good():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"A": 1, "B": 2}, {"A": 2, "B": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
assert compare.join_columns == ["a"]
compare = datacompy.Compare(df1, df2, ["A", "b"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
assert compare.join_columns == ["a", "b"]
def test_compare_df_setter_different_cases():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"A": 1, "b": 2}, {"A": 2, "b": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
def test_compare_df_setter_bad_index():
df = pd.DataFrame([{"a": 1, "A": 2}, {"a": 2, "A": 2}])
with raises(TypeError, match="df1 must be a pandas DataFrame"):
compare = datacompy.Compare("a", "a", on_index=True)
with raises(ValueError, match="df1 must have unique column names"):
compare = datacompy.Compare(df, df.copy(), on_index=True)
def test_compare_on_index_and_join_columns():
df = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
with raises(Exception, match="Only provide on_index or join_columns"):
compare = datacompy.Compare(df, df.copy(), on_index=True, join_columns=["a"])
def test_compare_df_setter_good_index():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 3}])
compare = datacompy.Compare(df1, df2, on_index=True)
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
def test_columns_overlap():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1_unq_columns() == set()
assert compare.df2_unq_columns() == set()
assert compare.intersect_columns() == {"a", "b"}
def test_columns_no_overlap():
df1 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "yo"}])
df2 = pd.DataFrame([{"a": 1, "b": 2, "d": "oh"}, {"a": 2, "b": 3, "d": "ya"}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1_unq_columns() == {"c"}
assert compare.df2_unq_columns() == {"d"}
assert compare.intersect_columns() == {"a", "b"}
def test_10k_rows():
df1 = pd.DataFrame(np.random.randint(0, 100, size=(10000, 2)), columns=["b", "c"])
df1.reset_index(inplace=True)
df1.columns = ["a", "b", "c"]
df2 = df1.copy()
df2["b"] = df2["b"] + 0.1
compare_tol = datacompy.Compare(df1, df2, ["a"], abs_tol=0.2)
assert compare_tol.matches()
assert len(compare_tol.df1_unq_rows) == 0
assert len(compare_tol.df2_unq_rows) == 0
assert compare_tol.intersect_columns() == {"a", "b", "c"}
assert compare_tol.all_columns_match()
assert compare_tol.all_rows_overlap()
assert compare_tol.intersect_rows_match()
compare_no_tol = datacompy.Compare(df1, df2, ["a"])
assert not compare_no_tol.matches()
assert len(compare_no_tol.df1_unq_rows) == 0
assert len(compare_no_tol.df2_unq_rows) == 0
assert compare_no_tol.intersect_columns() == {"a", "b", "c"}
assert compare_no_tol.all_columns_match()
assert compare_no_tol.all_rows_overlap()
assert not compare_no_tol.intersect_rows_match()
@mock.patch("datacompy.logging.debug")
def test_subset(mock_debug):
df1 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "yo"}])
df2 = pd.DataFrame([{"a": 1, "c": "hi"}])
comp = datacompy.Compare(df1, df2, ["a"])
assert comp.subset()
assert mock_debug.called_with("Checking equality")
@mock.patch("datacompy.logging.info")
def test_not_subset(mock_info):
df1 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "yo"}])
df2 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "great"}])
comp = datacompy.Compare(df1, df2, ["a"])
assert not comp.subset()
assert mock_info.called_with("Sample c mismatch: a: 2, df1: yo, df2: great")
def test_large_subset():
df1 = pd.DataFrame(np.random.randint(0, 100, size=(10000, 2)), columns=["b", "c"])
df1.reset_index(inplace=True)
df1.columns = ["a", "b", "c"]
df2 = df1[["a", "b"]].sample(50).copy()
comp = datacompy.Compare(df1, df2, ["a"])
assert not comp.matches()
assert comp.subset()
def test_string_joiner():
df1 = pd.DataFrame([{"ab": 1, "bc": 2}, {"ab": 2, "bc": 2}])
df2 = pd.DataFrame([{"ab": 1, "bc": 2}, {"ab": 2, "bc": 2}])
compare = datacompy.Compare(df1, df2, "ab")
assert compare.matches()
def test_decimal_with_joins():
df1 = pd.DataFrame([{"a": Decimal("1"), "b": 2}, {"a": Decimal("2"), "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
compare = datacompy.Compare(df1, df2, "a")
assert compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert compare.intersect_rows_match()
def test_decimal_with_nulls():
df1 = pd.DataFrame([{"a": 1, "b": Decimal("2")}, {"a": 2, "b": Decimal("2")}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}, {"a": 3, "b": 2}])
compare = datacompy.Compare(df1, df2, "a")
assert not compare.matches()
assert compare.all_columns_match()
assert not compare.all_rows_overlap()
assert compare.intersect_rows_match()
def test_strings_with_joins():
    df1 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
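    # Hedged completion: the rest of this test is truncated in the source; the
    # body below mirrors test_decimal_with_joins above and is an assumption.
    df2 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
    compare = datacompy.Compare(df1, df2, "a")
    assert compare.matches()
    assert compare.all_columns_match()
    assert compare.all_rows_overlap()
    assert compare.intersect_rows_match()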
from multiprocessing import Pool
import requests
import re
from bs4 import BeautifulSoup
from itertools import chain
from collections import Counter
from timeit import default_timer as timer
import pandas as pd
from datetime import datetime
def get_table_rows(fname="stats.html"):
"""
    Extract the table rows from the saved statistics page
:param fname: string name of the file stored in `data` directory
:return table_rows: list of BeautifulSoup objects to be passed to `process_in_parallel`
"""
soup = BeautifulSoup(open(f"data/{fname}", "r", encoding="utf8"), features="lxml")
table_rows = soup.find_all(attrs={"class": "sortableTable-row js-statsTableRow"})
print(f"Found {len(table_rows)} entries in table.")
return table_rows
def convert_timestamp(ts: int, tz: str):
"""Convert a unix timestamp to a date timestamp"""
return (
pd.to_datetime(ts, origin="unix", unit="ms")
.tz_localize("UTC")
.tz_convert(tz)
.tz_localize(None)
)
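# Example (values assumed for illustration): 1_546_300_800_000 ms is
# 2019-01-01 00:00:00 UTC, so convert_timestamp(1_546_300_800_000, "America/Chicago")
# gives Timestamp('2018-12-31 18:00:00'), i.e. the same instant in Chicago wall time.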
def process_entry(entry, parallel=True, tz="America/Chicago"):
"""
Extract data from one entry in table
:param entry: BeautifulSoup tag
:param parallel: Boolean for whether function is being run in parallel
:param tz: string representing timezone for started and published time
:return entry_dict: dictionary with data about entry
"""
# Convert to soup when running in parallel
if parallel:
entry = BeautifulSoup(entry, features="lxml").body.tr
entry_dict = {}
# Extract information
for value, key in zip(
entry.find_all(attrs={"class": "sortableTable-value"}),
["published_date", "views", "reads", "ratio", "fans"],
):
entry_dict[key] = float(value.text) if key == "ratio" else int(value.text)
entry_dict["read_time"] = int(
entry.find_all(attrs={"class": "readingTime"})[0].get("title").split(" ")[0]
)
# Unlisted vs published
entry_dict["type"] = (
"unlisted" if len(entry.find_all(text=" Unlisted")) > 0 else "published"
)
# Publication
publication = entry.find_all(attrs={"class": "sortableTable-text"})
if "In" in publication[0].text:
entry_dict["publication"] = publication[0].text.split("In ")[1].split("View")[0]
else:
entry_dict["publication"] = "None"
# Convert datetimes
entry_dict["published_date"] = convert_timestamp(
entry_dict["published_date"], tz=tz
)
entry_dict["started_date"] = convert_timestamp(entry.get("data-timestamp"), tz=tz)
# Get the link
link = entry.find_all(text="View story", attrs={"class": "sortableTable-link"})[
0
].get("href")
entry_dict["link"] = link
# Retrieve the article and create a soup
entry = requests.get(link).content
entry_soup = BeautifulSoup(entry, features="lxml")
# Get the title
try:
title = entry_soup.h1.text
except:
title = "response"
title_word_count = len(re.findall(r"[\w']+|[.,!?;]", title))
# Main text entries
entry_text = [
p.text for p in entry_soup.find_all(["h1", "h2", "h3", "p", "blockquote"])
]
# Make sure to catch everything
entry_text.extend(
s.text
for s in entry_soup.find_all(attrs={"class": "graf graf--li graf-after--li"})
)
entry_text.extend(
s.text
for s in entry_soup.find_all(attrs={"class": "graf graf--li graf-after--p"})
)
entry_text.extend(
s.text
for s in entry_soup.find_all(
attrs={"class": "graf graf--li graf-after--blockquote"}
)
)
entry_text.extend(
s.text
for s in entry_soup.find_all(
attrs={"class": "graf graf--li graf-after--pullquote"}
)
)
entry_text = " ".join(entry_text)
# Word count
word_count = len(re.findall(r"[\w']+|[.,!?;]", entry_text))
# Number of claps
clap_pattern = re.compile(
"^[0-9]{1,} claps|^[0-9]{1,}.[0-9]{1,}K claps|^[0-9]{1,}K claps"
)
claps = entry_soup.find_all(text=clap_pattern)
if len(claps) > 0:
if "K" in claps[0]:
clap_number = int(1e3 * float(claps[0].split("K")[0]))
else:
clap_number = int(claps[0].split(" ")[0])
else:
clap_number = 0
# Post tags
tags = entry_soup.find_all(attrs={"class": "tags tags--postTags tags--borderless"})
tags = [li.text for li in tags[0].find_all("li")]
# Responses to entry
responses = entry_soup.find_all(
attrs={
"class": "button button--chromeless u-baseColor--buttonNormal u-marginRight12",
"data-action": "scroll-to-responses",
}
)
num_responses = int(responses[0].text) if len(responses) > 0 else 0
# Store in dictionary
entry_dict["title"] = title
entry_dict["title_word_count"] = title_word_count
entry_dict["text"] = entry_text
entry_dict["word_count"] = word_count
entry_dict["claps"] = clap_number
entry_dict["tags"] = tags
entry_dict["num_responses"] = num_responses
# Time since publication
entry_dict["days_since_publication"] = (
datetime.now() - entry_dict["published_date"]
).total_seconds() / (3600 * 24)
return entry_dict
def process_in_parallel(table_rows, processes=20):
"""
Process all the stats in a table in parallel
:note: make sure to set the correct time zone in `process_entry`
:note: running on Mac may first require setting
export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES
from the command line to enable parallel processing
:param table_rows: BeautifulSoup table rows
:param processes: integer number of processes (threads) to use in parallel
:return df: dataframe of information about each post
"""
# Convert to strings for multiprocessing
table_rows_str = [str(r) for r in table_rows]
    # Process each article in parallel
pool = Pool(processes=processes)
results = []
start = timer()
for i, r in enumerate(pool.imap_unordered(process_entry, table_rows_str)):
# Report progress
print(f"{100 * i / len(table_rows_str):.2f}% complete.", end="\r")
results.append(r)
pool.close()
pool.join()
end = timer()
print(f"Processed {len(table_rows_str)} articles in {end-start:.2f} seconds.")
# Convert to dataframe
    df = pd.DataFrame(results)
    return df
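

if __name__ == "__main__":
    # Minimal usage sketch (assumptions): the stats page is already saved to
    # data/stats.html and the output CSV path is illustrative; neither detail
    # is confirmed by the original script.
    rows = get_table_rows()
    stats_df = process_in_parallel(rows, processes=20)
    stats_df.to_csv("data/medium_stats.csv", index=False)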
import os
import pandas as pd
from datetime import datetime, timedelta
# Global variable
PIE_PATH="/Users/fabrice/Documents/chartJS/tutoChartJS/chart/datas"
# Get Pie data
def get_data(path=PIE_PATH, filename="sample-pie-data.csv", separator=','):
csv_path = os.path.join(path, filename)
return pd.read_csv(csv_path, sep=separator)
#####################
# MAIN #
#####################
if __name__ == "__main__":
data = get_data(filename='sample-bar-data.csv', separator=";")
print(data.head())
    # Convert the Date field from string to a pandas datetime and sort by it
    data['Date'] = pd.to_datetime(data['Date'])
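    # Hedged completion: the sort mentioned in the comment above is cut off in
    # the source; sorting ascending by Date is an assumption.
    data = data.sort_values('Date')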
import numpy as np
from scipy.special import expit as sigmoid
import numpyro.handlers as numpyro
import pandas as pd
import pytest
import torch
from jax import random
import pyro.poutine as poutine
from brmp import define_model, brm, makedesc
from brmp.backend import data_from_numpy
from brmp.design import (Categorical, CategoricalCoding, Integral,
NumericCoding, RealValued, code_lengths, code_terms,
coef_names, dummy_df, make_column_lookup, makedata,
metadata_from_cols, metadata_from_df)
from brmp.family import (LKJ, Bernoulli, Binomial, HalfCauchy, HalfNormal,
Normal, StudentT, Poisson)
from brmp.fit import Samples
from brmp.formula import Formula, OrderedSet, Term, _1, allfactors, parse
from brmp.model import parameters, scalar_parameter_map, scalar_parameter_names
from brmp.model_pre import build_model_pre
from brmp.numpyro_backend import backend as numpyro_backend
from brmp.priors import Prior, build_prior_tree
from brmp.pyro_backend import backend as pyro_backend
from pyro.distributions import Independent
def assert_equal(a, b):
assert type(a) == np.ndarray or type(a) == torch.Tensor
assert type(a) == type(b)
if type(a) == np.ndarray:
assert (a == b).all()
else:
assert torch.equal(a, b)
default_params = dict(
Normal=dict(loc=0., scale=1.),
Cauchy=dict(loc=0., scale=1.),
HalfCauchy=dict(scale=3.),
HalfNormal=dict(scale=1.),
LKJ=dict(eta=1.),
Beta=dict(concentration1=1., concentration0=1.),
StudentT=dict(df=3., loc=0., scale=1.),
)
# Makes list of columns metadata that includes an entry for every
# factor in `formula`. Any column not already in `cols` is assumed to
# be `RealValued`.
def expand_columns(formula, cols):
lookup = make_column_lookup(cols)
return [lookup.get(factor, RealValued(factor))
for factor in allfactors(formula)]
codegen_cases = [
# TODO: This (and similar examples below) can't be expressed with
# the current parser. Is it useful to fix this (`y ~ -1`?), or can
# these be dropped?
# (Formula('y', [], []), [], [], ['sigma']),
('y ~ 1 + x', [], {}, Normal, [],
[('b_0', 'Cauchy', {}),
('sigma', 'HalfCauchy', {})]),
# Integer valued predictor.
('y ~ 1 + x', [Integral('x', min=0, max=10)], {}, Normal, [],
[('b_0', 'Cauchy', {}),
('sigma', 'HalfCauchy', {})]),
('y ~ 1 + x1 + x2', [], {}, Normal, [],
[('b_0', 'Cauchy', {}),
('sigma', 'HalfCauchy', {})]),
('y ~ x1:x2',
[Categorical('x1', list('ab')), Categorical('x2', list('cd'))],
{}, Normal, [],
[('b_0', 'Cauchy', {}),
('sigma', 'HalfCauchy', {})]),
# (Formula('y', [], [Group([], 'z', True)]), [Categorical('z', list('ab'))], [], ['sigma', 'z_1']),
# Groups with fewer than two terms don't sample the (Cholesky
# decomp. of the) correlation matrix.
# (Formula('y', [], [Group([], 'z', True)]), [Categorical('z', list('ab'))], [], ['sigma', 'z_1']),
('y ~ 1 | z', [Categorical('z', list('ab'))], {}, Normal, [],
[('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {})]),
# Integers as categorical levels.
('y ~ 1 | z', [Categorical('z', [10, 20])], {}, Normal, [],
[('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {})]),
('y ~ x | z', [Categorical('z', list('ab'))], {}, Normal, [],
[('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {})]),
('y ~ x | z',
[Categorical('x', list('ab')), Categorical('z', list('ab'))],
{}, Normal, [],
[('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {}),
('L_0', 'LKJ', {})]),
('y ~ 1 + x1 + x2 + (1 + x3 | z)', [Categorical('z', list('ab'))], {}, Normal, [],
[('b_0', 'Cauchy', {}),
('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {}),
('L_0', 'LKJ', {})]),
('y ~ 1 + x1 + x2 + (1 + x3 || z)', [Categorical('z', list('ab'))], {}, Normal, [],
[('b_0', 'Cauchy', {}),
('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {})]),
('y ~ 1 + x1 + x2 + (1 + x3 + x4 | z1) + (1 + x5 | z2)',
[Categorical('z1', list('ab')), Categorical('z2', list('ab'))],
{},
Normal,
[],
[('b_0', 'Cauchy', {}),
('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {}),
('L_0', 'LKJ', {}),
('z_1', 'Normal', {}),
('sd_1_0', 'HalfCauchy', {}),
('L_1', 'LKJ', {})]),
('y ~ 1 | a:b',
[Categorical('a', ['a1', 'a2']), Categorical('b', ['b1', 'b2'])],
{},
Normal,
[],
[('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {})]),
# Custom priors.
('y ~ 1 + x1 + x2',
[], {},
Normal,
[Prior(('b',), Normal(0., 100.))],
[('b_0', 'Normal', {'loc': 0., 'scale': 100.}),
('sigma', 'HalfCauchy', {})]),
('y ~ 1 + x1 + x2',
[], {},
Normal,
[Prior(('b', 'intercept'), Normal(0., 100.))],
[('b_0', 'Normal', {'loc': 0., 'scale': 100.}),
('b_1', 'Cauchy', {}),
('sigma', 'HalfCauchy', {})]),
('y ~ 1 + x1 + x2',
[], {},
Normal,
[Prior(('b', 'x1'), Normal(0., 100.))],
[('b_0', 'Cauchy', {}),
('b_1', 'Normal', {'loc': 0., 'scale': 100.}),
('b_2', 'Cauchy', {}),
('sigma', 'HalfCauchy', {})]),
('y ~ 1',
[], {},
Normal,
[Prior(('b',), StudentT(3., 0., 1.))],
[('b_0', 'StudentT', {}),
('sigma', 'HalfCauchy', {})]),
# Prior on coef of a factor.
('y ~ 1 + x',
[Categorical('x', list('ab'))],
{},
Normal,
[Prior(('b', 'x[b]'), Normal(0., 100.))],
[('b_0', 'Cauchy', {}),
('b_1', 'Normal', {'loc': 0., 'scale': 100.}),
('sigma', 'HalfCauchy', {})]),
# Prior on coef of an interaction.
('y ~ x1:x2',
[Categorical('x1', list('ab')), Categorical('x2', list('cd'))],
{},
Normal,
[Prior(('b', 'x1[b]:x2[c]'), Normal(0., 100.))],
[('b_0', 'Cauchy', {}),
('b_1', 'Normal', {'loc': 0., 'scale': 100.}),
('b_2', 'Cauchy', {}),
('sigma', 'HalfCauchy', {})]),
# Prior on group level `sd` choice.
('y ~ 1 + x2 + x3 | x1',
[Categorical('x1', list('ab'))],
{},
Normal,
[Prior(('sd', 'x1', 'intercept'), HalfCauchy(4.))],
[('sigma', 'HalfCauchy', {}),
('sd_0_0', 'HalfCauchy', {'scale': 4.}),
('sd_0_1', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('L_0', 'LKJ', {})]),
('y ~ 1 + x2 + x3 || x1',
[Categorical('x1', list('ab'))],
{},
Normal,
[Prior(('sd', 'x1', 'intercept'), HalfNormal(4.))],
[('sigma', 'HalfCauchy', {}),
('sd_0_0', 'HalfNormal', {'scale': 4.}),
('sd_0_1', 'HalfCauchy', {}),
('z_0', 'Normal', {})]),
('y ~ 1 + x || a:b',
[Categorical('a', ['a1', 'a2']), Categorical('b', ['b1', 'b2'])],
{},
Normal,
[Prior(('sd', 'a:b', 'intercept'), HalfNormal(4.))],
[('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfNormal', {'scale': 4.}),
('sd_0_1', 'HalfCauchy', {})]),
# Prior on L.
('y ~ 1 + x2 | x1',
[Categorical('x1', list('ab'))],
{},
Normal,
[Prior(('cor',), LKJ(2.))],
[('sigma', 'HalfCauchy', {}),
('sd_0_0', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('L_0', 'LKJ', {'eta': 2.})]),
('y ~ 1 + x | a:b',
[Categorical('a', ['a1', 'a2']), Categorical('b', ['b1', 'b2'])],
{},
Normal,
[Prior(('cor', 'a:b'), LKJ(2.))],
[('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {}),
('L_0', 'LKJ', {'eta': 2.})]),
# Prior on parameter of response distribution.
('y ~ x',
[],
{},
Normal,
[Prior(('resp', 'sigma'), HalfCauchy(4.))],
[('b_0', 'Cauchy', {}),
('sigma', 'HalfCauchy', {'scale': 4.})]),
# Custom response family.
('y ~ x',
[],
{},
Normal(sigma=0.5),
[],
[('b_0', 'Cauchy', {})]),
('y ~ x',
[Categorical('y', list('AB'))],
{},
Bernoulli,
[],
[('b_0', 'Cauchy', {})]),
('y ~ x',
[Integral('y', min=0, max=1)],
{},
Bernoulli,
[],
[('b_0', 'Cauchy', {})]),
('y ~ x',
[Integral('y', min=0, max=10)],
{},
Binomial(num_trials=10),
[],
[('b_0', 'Cauchy', {})]),
('y ~ 1 + x',
[Integral('y', min=0, max=10), Integral('x', min=0, max=10)],
{},
Poisson,
[],
[('b_0', 'Cauchy', {})]),
# Contrasts
('y ~ a',
[Categorical('a', ['a1', 'a2'])],
{'a': np.array([[-1, -1, -1], [1, 1, 1]])},
Normal,
[Prior(('b', 'a[custom.1]'), Normal(0., 1.))],
[('b_0', 'Cauchy', {}),
('b_1', 'Normal', {}),
('b_2', 'Cauchy', {}),
('sigma', 'HalfCauchy', {})]),
('y ~ a + (a | b)',
[Categorical('a', ['a1', 'a2']), Categorical('b', ['b1', 'b2'])],
{'a': np.array([[-1, -1, -1], [1, 1, 1]])},
Normal, [
Prior(('b', 'a[custom.1]'), Normal(0., 1.)),
Prior(('sd', 'b', 'a[custom.0]'), HalfCauchy(4.))
],
[('b_0', 'Cauchy', {}),
('b_1', 'Normal', {}),
('b_2', 'Cauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {'scale': 4.}),
('sd_0_1', 'HalfCauchy', {}),
('L_0', 'LKJ', {}),
('sigma', 'HalfCauchy', {})]),
]
# Map generic family names to backend specific names.
def pyro_family_name(name):
return dict(LKJ='LKJCorrCholesky').get(name, name)
def numpyro_family_name(name):
return dict(LKJ='LKJCholesky',
Bernoulli='BernoulliProbs',
Binomial='BinomialProbs').get(name, name)
@pytest.mark.parametrize('N', [1, 5])
@pytest.mark.parametrize('formula_str, non_real_cols, contrasts, family, priors, expected', codegen_cases)
def test_pyro_codegen(N, formula_str, non_real_cols, contrasts, family, priors, expected):
# Make dummy data.
formula = parse(formula_str)
cols = expand_columns(formula, non_real_cols)
# Generate the model from the column information rather than from
# the metadata extracted from `df`. Since N is small, the metadata
# extracted from `df` might loose information compared to the full
# metadata derived from `cols` (e.g. levels of a categorical
# column) leading to unexpected results. e.g. Missing levels might
# cause correlations not to be modelled, even thought they ought
# to be given the full metadata.
metadata = metadata_from_cols(cols)
desc = makedesc(formula, metadata, family, priors, code_lengths(contrasts))
# Generate model function and data.
modelfn = pyro_backend.gen(desc).fn
df = dummy_df(cols, N, allow_non_exhaustive=True)
data = data_from_numpy(pyro_backend, makedata(formula, df, metadata, contrasts))
trace = poutine.trace(modelfn).get_trace(**data)
# Check that y is correctly observed.
y_node = trace.nodes['y']
assert y_node['is_observed']
assert type(y_node['fn']).__name__ == family.name
assert_equal(y_node['value'], data['y_obs'])
# Check sample sites.
expected_sites = [site for (site, _, _) in expected]
assert set(trace.stochastic_nodes) - {'obs'} == set(expected_sites)
for (site, family_name, maybe_params) in expected:
fn = unwrapfn(trace.nodes[site]['fn'])
params = maybe_params or default_params[family_name]
assert type(fn).__name__ == pyro_family_name(family_name)
for (name, expected_val) in params.items():
val = fn.__getattribute__(name)
assert_equal(val, torch.tensor(expected_val).expand(val.shape))
def unwrapfn(fn):
return unwrapfn(fn.base_dist) if type(fn) == Independent else fn
@pytest.mark.parametrize('N', [1, 5])
@pytest.mark.parametrize('formula_str, non_real_cols, contrasts, family, priors, expected', codegen_cases)
def test_numpyro_codegen(N, formula_str, non_real_cols, contrasts, family, priors, expected):
# Make dummy data.
formula = parse(formula_str)
cols = expand_columns(formula, non_real_cols)
metadata = metadata_from_cols(cols)
desc = makedesc(formula, metadata, family, priors, code_lengths(contrasts))
# Generate model function and data.
modelfn = numpyro_backend.gen(desc).fn
df = dummy_df(cols, N, allow_non_exhaustive=True)
data = data_from_numpy(numpyro_backend, makedata(formula, df, metadata, contrasts))
rng = random.PRNGKey(0)
trace = numpyro.trace(numpyro.seed(modelfn, rng)).get_trace(**data)
# Check that y is correctly observed.
y_node = trace['y']
assert y_node['is_observed']
assert type(y_node['fn']).__name__ == numpyro_family_name(family.name)
assert_equal(y_node['value'], data['y_obs'])
# Check sample sites.
expected_sites = [site for (site, _, _) in expected]
sample_sites = [name for name, node in trace.items() if not node['is_observed']]
assert set(sample_sites) == set(expected_sites)
for (site, family_name, maybe_params) in expected:
fn = trace[site]['fn']
params = maybe_params or default_params[family_name]
assert type(fn).__name__ == numpyro_family_name(family_name)
for (name, expected_val) in params.items():
if family_name == 'LKJ':
assert name == 'eta'
name = 'concentration'
val = fn.__getattribute__(name)
assert_equal(val._value, np.broadcast_to(expected_val, val.shape))
@pytest.mark.parametrize('formula_str, cols, expected', [
('y ~ 1 + x',
[],
lambda df, coef: coef('b_intercept') + df['x'] * coef('b_x')),
('y ~ a',
[Categorical('a', ['a0', 'a1', 'a2'])],
lambda df, coef: ((df['a'] == 'a0') * coef('b_a[a0]') +
(df['a'] == 'a1') * coef('b_a[a1]') +
(df['a'] == 'a2') * coef('b_a[a2]'))),
('y ~ 1 + a',
[Categorical('a', ['a0', 'a1', 'a2'])],
lambda df, coef: (coef('b_intercept') +
(df['a'] == 'a1') * coef('b_a[a1]') +
(df['a'] == 'a2') * coef('b_a[a2]'))),
('y ~ x1:x2',
[],
lambda df, coef: df['x1'] * df['x2'] * coef('b_x1:x2')),
('y ~ a:x',
[Categorical('a', ['a0', 'a1'])],
lambda df, coef: (((df['a'] == 'a0') * df['x'] * coef('b_a[a0]:x')) +
((df['a'] == 'a1') * df['x'] * coef('b_a[a1]:x')))),
('y ~ 1 + x | a',
[Categorical('a', ['a0', 'a1'])],
lambda df, coef: ((df['a'] == 'a0') * (coef('r_a[a0,intercept]') + df['x'] * coef('r_a[a0,x]')) +
(df['a'] == 'a1') * (coef('r_a[a1,intercept]') + df['x'] * coef('r_a[a1,x]')))),
('y ~ 1 + x | a:b',
[Categorical('a', ['a0', 'a1']), Categorical('b', ['b0', 'b1'])],
lambda df, coef: (((df['a'] == 'a0') & (df['b'] == 'b0')) *
(coef('r_a:b[a0_b0,intercept]') + df['x'] * coef('r_a:b[a0_b0,x]')) +
((df['a'] == 'a1') & (df['b'] == 'b0')) *
(coef('r_a:b[a1_b0,intercept]') + df['x'] * coef('r_a:b[a1_b0,x]')) +
((df['a'] == 'a0') & (df['b'] == 'b1')) *
(coef('r_a:b[a0_b1,intercept]') + df['x'] * coef('r_a:b[a0_b1,x]')) +
((df['a'] == 'a1') & (df['b'] == 'b1')) *
(coef('r_a:b[a1_b1,intercept]') + df['x'] * coef('r_a:b[a1_b1,x]')))),
('y ~ 1 + (x1 | a) + (x2 | b)',
[Categorical('a', ['a0', 'a1']), Categorical('b', ['b0', 'b1'])],
lambda df, coef: (coef('b_intercept') +
(df['a'] == 'a0') * df['x1'] * coef('r_a[a0,x1]') +
(df['a'] == 'a1') * df['x1'] * coef('r_a[a1,x1]') +
(df['b'] == 'b0') * df['x2'] * coef('r_b[b0,x2]') +
(df['b'] == 'b1') * df['x2'] * coef('r_b[b1,x2]'))),
])
@pytest.mark.parametrize('backend', [pyro_backend, numpyro_backend])
def test_mu_correctness(formula_str, cols, backend, expected):
df = dummy_df(expand_columns(parse(formula_str), cols), 10)
fit = brm(formula_str, df).prior(num_samples=1, backend=backend)
# Pick out the one (and only) sample drawn.
actual_mu = fit.fitted(what='linear')[0]
# `expected` is assumed to return a data frame.
expected_mu = expected(df, fit.get_scalar_param).to_numpy(np.float32)
assert np.allclose(actual_mu, expected_mu)
@pytest.mark.parametrize('cols, family, expected', [
([],
Normal,
lambda mu: mu),
([Integral('y', min=0, max=1)],
Bernoulli,
lambda mu: sigmoid(mu)),
([Integral('y', min=0, max=5)],
Binomial(num_trials=5),
lambda mu: sigmoid(mu) * 5),
([Integral('y', min=0, max=5)],
Poisson,
lambda mu: np.exp(mu)),
])
@pytest.mark.parametrize('backend', [pyro_backend, numpyro_backend])
def test_expectation_correctness(cols, family, expected, backend):
formula_str = 'y ~ 1 + x'
df = dummy_df(expand_columns(parse(formula_str), cols), 10)
fit = brm(formula_str, df, family=family).prior(num_samples=1, backend=backend)
actual_expectation = fit.fitted(what='expectation')[0]
# We assume (since it's tested elsewhere) that `mu` is computed
# correctly by `fitted`. So given that, we check that `fitted`
# computes the correct expectation.
expected_expectation = expected(fit.fitted('linear')[0])
assert np.allclose(actual_expectation, expected_expectation)
@pytest.mark.parametrize('N', [0, 5])
@pytest.mark.parametrize('backend', [pyro_backend, numpyro_backend])
@pytest.mark.parametrize('formula_str, non_real_cols, contrasts, family, priors, expected', codegen_cases)
def test_sampling_from_prior_smoke(N, backend, formula_str, non_real_cols, contrasts, family, priors, expected):
formula = parse(formula_str)
cols = expand_columns(formula, non_real_cols)
metadata = metadata_from_cols(cols) # Use full metadata for same reason given in comment in codegen test.
desc = makedesc(formula, metadata, family, priors, code_lengths(contrasts))
model = backend.gen(desc)
df = dummy_df(cols, N, allow_non_exhaustive=True)
data = data_from_numpy(backend, makedata(formula, df, metadata, contrasts))
samples = backend.prior(data, model, num_samples=10, seed=None)
assert type(samples) == Samples
@pytest.mark.parametrize('formula_str, non_real_cols, contrasts, family, priors, expected', codegen_cases)
@pytest.mark.parametrize('fitargs', [
dict(backend=pyro_backend, num_samples=1, algo='prior'),
dict(backend=numpyro_backend, num_samples=1, algo='prior'),
])
def test_parameter_shapes(formula_str, non_real_cols, contrasts, family, priors, expected, fitargs):
# Make dummy data.
N = 5
formula = parse(formula_str)
cols = expand_columns(formula, non_real_cols)
df = dummy_df(cols, N, allow_non_exhaustive=True)
# Define model, and generate a single posterior sample.
metadata = metadata_from_cols(cols)
model = define_model(formula_str, metadata, family, priors, contrasts).gen(fitargs['backend'])
data = model.encode(df)
fit = model.run_algo('prior', data, num_samples=1, seed=None)
num_chains = fitargs.get('num_chains', 1)
# Check parameter sizes.
for parameter in parameters(fit.model_desc):
expected_param_shape = parameter.shape
samples = fit.get_param(parameter.name)
# A single sample is collected by each chain for all cases.
assert samples.shape == (num_chains,) + expected_param_shape
samples_with_chain_dim = fit.get_param(parameter.name, True)
assert samples_with_chain_dim.shape == (num_chains, 1) + expected_param_shape
def test_scalar_param_map_consistency():
formula = parse('y ~ 1 + x1 + (1 + x2 + b | a) + (1 + x1 | a:b)')
non_real_cols = [
Categorical('a', ['a1', 'a2', 'a3']),
Categorical('b', ['b1', 'b2', 'b3']),
]
cols = expand_columns(formula, non_real_cols)
desc = makedesc(formula, metadata_from_cols(cols), Normal, [], {})
params = parameters(desc)
spmap = scalar_parameter_map(desc)
# Check that each entry in the map points to a unique parameter
# position.
param_and_indices_set = set(param_and_indices
for (_, param_and_indices) in spmap)
assert len(param_and_indices_set) == len(spmap)
# Ensure that we have enough entries in the map to cover all of
# the scalar parameters. (The L_i parameters have a funny status.
# We consider them to be parameters, but not scalar parameters.
    # This was not planned; rather, things just evolved this way. It
    # does make some sense though, since we usually look at R_i
# instead.)
num_scalar_params = sum(np.product(shape)
for name, shape in params
if not name.startswith('L_'))
assert num_scalar_params == len(spmap)
# Check that all indices are valid. (i.e. Within the shape of the
# parameter.)
for scalar_param_name, (param_name, indices) in spmap:
ss = [shape for (name, shape) in params if name == param_name]
assert len(ss) == 1
param_shape = ss[0]
assert len(indices) == len(param_shape)
assert all(i < s for (i, s) in zip(indices, param_shape))
@pytest.mark.parametrize('formula_str, non_real_cols, contrasts, family, priors, expected', codegen_cases)
def test_scalar_parameter_names_smoke(formula_str, non_real_cols, contrasts, family, priors, expected):
formula = parse(formula_str)
cols = expand_columns(formula, non_real_cols)
metadata = metadata_from_cols(cols)
model = define_model(formula_str, metadata, family, priors, contrasts)
names = scalar_parameter_names(model.desc)
assert type(names) == list
@pytest.mark.parametrize('formula_str, non_real_cols, family, priors', [
('y ~ x', [], Bernoulli, []),
('y ~ x', [Integral('y', min=0, max=2)], Bernoulli, []),
('y ~ x', [Categorical('y', list('abc'))], Bernoulli, []),
('y ~ x', [Categorical('y', list('ab'))], Normal, []),
('y ~ x', [Integral('y', min=0, max=1)], Normal, []),
('y ~ x', [], Binomial(num_trials=1), []),
('y ~ x', [Integral('y', min=-1, max=1)], Binomial(num_trials=1), []),
('y ~ x',
[Integral('y', min=0, max=3)],
Binomial(num_trials=2),
[]),
('y ~ x', [Categorical('y', list('abc'))], Binomial(num_trials=1), []),
('y ~ x', [], Poisson, []),
])
def test_family_and_response_type_checks(formula_str, non_real_cols, family, priors):
formula = parse(formula_str)
cols = expand_columns(formula, non_real_cols)
metadata = metadata_from_cols(cols)
with pytest.raises(Exception, match='not compatible'):
build_model_pre(formula, metadata, family, {})
@pytest.mark.parametrize('formula_str, non_real_cols, family, priors, expected_error', [
('y ~ x',
[],
Normal,
[Prior(('resp', 'sigma'), Normal(0., 1.))],
r'(?i)invalid prior'),
('y ~ x1 | x2',
[Categorical('x2', list('ab'))],
Normal,
[Prior(('sd', 'x2'), Normal(0., 1.))],
r'(?i)invalid prior'),
('y ~ 1 + x1 | x2',
[Categorical('x2', list('ab'))],
Normal,
[Prior(('cor', 'x2'), Normal(0., 1.))],
r'(?i)invalid prior'),
('y ~ x',
[],
Normal,
[Prior(('b',), Bernoulli(.5))],
r'(?i)invalid prior'),
    # This hasn't passed since I moved the family/response checks into
    # the pre-model. The problem is that the support of the Binomial
    # response depends on its parameters, which aren't fully specified
    # in this case, meaning that the family/response check can't happen,
    # and the prior test that ought to flag that a prior is missing is
    # never reached. It's not clear that a "prior missing" error is the
    # most helpful error to raise for this case, and it's possible that
    # having the family/response test suggest that extra parameters
    # ought to be specified is a better idea. It's tricky to say though,
    # since this case is a bit of a one-off, so figuring out a good
    # general solution is hard. Since it's not clear how best to
    # proceed, I'll punt for now.
pytest.param(
'y ~ x',
[Integral('y', 0, 1)],
Binomial,
[],
r'(?i)prior missing', marks=pytest.mark.xfail),
])
def test_prior_checks(formula_str, non_real_cols, family, priors, expected_error):
formula = parse(formula_str)
cols = expand_columns(formula, non_real_cols)
metadata = metadata_from_cols(cols)
design_metadata = build_model_pre(formula, metadata, family, {})
with pytest.raises(Exception, match=expected_error):
build_prior_tree(design_metadata, priors)
@pytest.mark.parametrize('formula_str, df, metadata_cols, contrasts, expected', [
# (Formula('y', [], []),
# pd.DataFrame(dict(y=[1, 2, 3])),
# dict(X=torch.tensor([[],
# [],
# []]),
# y_obs=torch.tensor([1., 2., 3.]))),
('y ~ 1',
pd.DataFrame(dict(y=[1., 2., 3.])),
None,
{},
dict(X=np.array([[1.],
[1.],
[1.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ x',
pd.DataFrame(dict(y=[1., 2., 3.],
x=[4., 5., 6.])),
None,
{},
dict(X=np.array([[4.],
[5.],
[6.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ 1 + x',
pd.DataFrame(dict(y=[1., 2., 3.],
x=[4., 5., 6.])),
None,
{},
dict(X=np.array([[1., 4.],
[1., 5.],
[1., 6.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ x + 1',
pd.DataFrame(dict(y=[1., 2., 3.],
x=[4., 5., 6.])),
None,
{},
dict(X=np.array([[1., 4.],
[1., 5.],
[1., 6.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ x',
pd.DataFrame(dict(y=[1., 2., 3.],
x=pd.Categorical(list('AAB')))),
None,
{},
dict(X=np.array([[1., 0.],
[1., 0.],
[0., 1.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ 1 + x',
pd.DataFrame(dict(y=[1., 2., 3.],
x=pd.Categorical(list('AAB')))),
None,
{},
dict(X=np.array([[1., 0.],
[1., 0.],
[1., 1.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ x1 + x2',
pd.DataFrame(dict(y=[1., 2., 3.],
x1=pd.Categorical(list('AAB')),
x2=pd.Categorical(list('ABC')))),
None,
{},
dict(X=np.array([[1., 0., 0., 0.],
[1., 0., 1., 0.],
[0., 1., 0., 1.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ 1 + x',
pd.DataFrame(dict(y=[1., 2., 3.],
x=pd.Categorical(list('ABC')))),
None,
{},
dict(X=np.array([[1., 0., 0.],
[1., 1., 0.],
[1., 0., 1.]]),
y_obs=np.array([1., 2., 3.]))),
# (Formula('y', [], [Group([], 'x', True)]),
# pd.DataFrame(dict(y=[1, 2, 3],
# x=pd.Categorical(list('ABC')))),
# dict(X=np.array([[],
# [],
# []]),
# y_obs=np.array([1., 2., 3.]),
# J_1=np.array([0, 1, 2]),
# Z_1=np.array([[],
# [],
# []]))),
('y ~ 1 + (1 + x1 | x2)',
pd.DataFrame(dict(y=[1., 2., 3.],
x1=pd.Categorical(list('AAB')),
x2=pd.Categorical(list('ABC')))),
None,
{},
dict(X=np.array([[1.],
[1.],
[1.]]),
y_obs=np.array([1., 2., 3.]),
J_0=np.array([0, 1, 2]),
Z_0=np.array([[1., 0.],
[1., 0.],
[1., 1.]]))),
    # This matches brms modulo 0- vs. 1-based indexing.
('y ~ 1 | a:b:c',
pd.DataFrame(dict(y=[1., 2., 3.],
a=pd.Categorical([0, 0, 1]),
b=pd.Categorical([2, 1, 0]),
c=pd.Categorical([0, 1, 2]))),
None,
{},
dict(X=np.array([[], [], []]),
y_obs=np.array([1., 2., 3.]),
J_0=np.array([1, 0, 2]),
Z_0=np.array([[1.], [1.], [1.]]))),
# Interactions
# --------------------------------------------------
('y ~ x1:x2',
pd.DataFrame(dict(y=[1., 2., 3., 4.],
x1=pd.Categorical(list('ABAB')),
x2=pd.Categorical(list('CCDD')))),
None,
{},
# AC BC AD BD
dict(X=np.array([[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.]]),
y_obs=np.array([1., 2., 3., 4.]))),
('y ~ 1 + x1:x2',
pd.DataFrame(dict(y=[1., 2., 3., 4.],
x1=pd.Categorical(list('ABAB')),
x2=pd.Categorical(list('CCDD')))),
None,
{},
# 1 D BC BD
dict(X=np.array([[1., 0., 0., 0.],
[1., 0., 1., 0.],
[1., 1., 0., 0.],
[1., 1., 0., 1.]]),
y_obs=np.array([1., 2., 3., 4.]))),
('y ~ 1 + x1 + x2 + x1:x2',
pd.DataFrame(dict(y=[1., 2., 3., 4.],
x1=pd.Categorical(list('ABAB')),
x2=pd.Categorical(list('CCDD')))),
None,
{},
# 1 B D BD
dict(X=np.array([[1., 0., 0., 0.],
[1., 1., 0., 0.],
[1., 0., 1., 0.],
[1., 1., 1., 1.]]),
y_obs=np.array([1., 2., 3., 4.]))),
# real-real
('y ~ x1:x2',
pd.DataFrame(dict(y=[1., 2., 3., 4.],
x1=np.array([1., 2., 1., 2.]),
x2=np.array([-10., 0., 10., 20.]))),
None,
{},
dict(X=np.array([[-10.],
[0.],
[10.],
[40.]]),
y_obs=np.array([1., 2., 3., 4.]))),
# real-int
('y ~ x1:x2',
pd.DataFrame(dict(y=[1., 2., 3., 4.],
x1=np.array([1., 2., 1., 2.]),
x2=np.array([-10, 0, 10, 20]))),
None,
{},
dict(X=np.array([[-10.],
[0.],
[10.],
[40.]]),
y_obs=np.array([1., 2., 3., 4.]))),
# real-categorical
('y ~ x1:x2',
pd.DataFrame(dict(y=[1., 2., 3., 4.],
x1=np.array([1., 2., 3., 4.]),
x2=pd.Categorical(list('ABAB')))),
None,
{},
dict(X=np.array([[1., 0.],
[0., 2.],
[3., 0.],
[0., 4.]]),
y_obs=np.array([1., 2., 3., 4.]))),
# This example is taken from here:
# https://patsy.readthedocs.io/en/latest/R-comparison.html
('y ~ a:x + a:b',
pd.DataFrame(dict(y=[1., 2., 3., 4.],
a=pd.Categorical(list('ABAB')),
b=pd.Categorical(list('CCDD')),
x=np.array([1., 2., 3., 4.]))),
None,
{},
dict(X=np.array([[1., 0., 0., 0., 1., 0.],
[0., 1., 0., 0., 0., 2.],
[0., 0., 1., 0., 3., 0.],
[0., 0., 0., 1., 0., 4.]]),
y_obs=np.array([1., 2., 3., 4.]))),
# Integer-valued Factors
# --------------------------------------------------
('y ~ x1 + x2',
pd.DataFrame(dict(y=[1, 2, 3],
x1=[4, 5, 6],
x2=[7., 8., 9.])),
None,
{},
dict(X=np.array([[4., 7.],
[5., 8.],
[6., 9.]]),
y_obs=np.array([1., 2., 3.]))),
# Categorical Response
# --------------------------------------------------
('y ~ x',
pd.DataFrame(dict(y=pd.Categorical(list('AAB')),
x=[1., 2., 3.])),
None,
{},
dict(X=np.array([[1.],
[2.],
[3.]]),
y_obs=np.array([0., 0., 1.]))),
# Contrasts
# --------------------------------------------------
('y ~ a',
pd.DataFrame(dict(y=[1., 2., 3.],
a=pd.Categorical(['a1', 'a1', 'a2']))),
None,
{'a': np.array([[-1], [1]])},
dict(X=np.array([[-1.],
[-1.],
[1.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ a',
pd.DataFrame(dict(y=[1., 2., 3.],
a=pd.Categorical(['a1', 'a1', 'a2']))),
[RealValued('y'), Categorical('a', levels=['a0', 'a1', 'a2'])],
{'a': np.array([[0], [-1], [1]])},
dict(X=np.array([[-1.],
[-1.],
[1.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ a',
pd.DataFrame(dict(y=[1., 2., 3.],
a=pd.Categorical(['a1', 'a1', 'a2']))),
None,
{'a': np.array([[-1, -2], [0, 1]])},
dict(X=np.array([[-1., -2.],
[-1., -2.],
[0., 1.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ 1 + a + b + a:b',
pd.DataFrame(dict(y=[1., 2., 3.],
a=pd.Categorical(['a1', 'a1', 'a2']),
                       b=pd.Categorical(['b1', 'b2', 'b2'])
import re
import warnings
from datetime import datetime, timedelta
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
from pandas.testing import (
assert_frame_equal,
assert_index_equal,
assert_series_equal,
)
from woodwork.logical_types import Double, Integer
from rayml.exceptions import (
MethodPropertyNotFoundError,
MissingComponentError,
ParameterNotUsedWarning,
)
from rayml.pipelines import ComponentGraph
from rayml.pipelines.components import (
DateTimeFeaturizer,
DropRowsTransformer,
ElasticNetClassifier,
Estimator,
Imputer,
LogisticRegressionClassifier,
NaturalLanguageFeaturizer,
OneHotEncoder,
RandomForestClassifier,
SelectColumns,
StandardScaler,
TargetImputer,
Transformer,
Undersampler,
)
from rayml.problem_types import is_classification
from rayml.utils import infer_feature_types
class DummyTransformer(Transformer):
name = "Dummy Transformer"
def __init__(self, parameters=None, random_seed=0):
parameters = parameters or {}
super().__init__(
parameters=parameters, component_obj=None, random_seed=random_seed
)
def fit(self, X, y):
return self
def transform(self, X, y=None):
return X
class TransformerA(DummyTransformer):
"""copy class"""
class TransformerB(DummyTransformer):
"""copy class"""
class TransformerC(DummyTransformer):
"""copy class"""
class DummyEstimator(Estimator):
name = "Dummy Estimator"
model_family = None
supported_problem_types = None
def __init__(self, parameters=None, random_seed=0):
parameters = parameters or {}
super().__init__(
parameters=parameters, component_obj=None, random_seed=random_seed
)
def fit(self, X, y):
return self
class EstimatorA(DummyEstimator):
"""copy class"""
class EstimatorB(DummyEstimator):
"""copy class"""
class EstimatorC(DummyEstimator):
"""copy class"""
@pytest.fixture
def dummy_components():
return TransformerA, TransformerB, TransformerC, EstimatorA, EstimatorB, EstimatorC
def test_init(example_graph):
comp_graph = ComponentGraph()
assert len(comp_graph.component_dict) == 0
graph = example_graph
comp_graph = ComponentGraph(graph)
assert len(comp_graph.component_dict) == 6
expected_order = [
"Imputer",
"OneHot_ElasticNet",
"Elastic Net",
"OneHot_RandomForest",
"Random Forest",
"Logistic Regression Classifier",
]
assert comp_graph.compute_order == expected_order
def test_init_str_components():
graph = {
"Imputer": ["Imputer", "X", "y"],
"OneHot_RandomForest": ["One Hot Encoder", "Imputer.x", "y"],
"OneHot_ElasticNet": ["One Hot Encoder", "Imputer.x", "y"],
"Random Forest": ["Random Forest Classifier", "OneHot_RandomForest.x", "y"],
"Elastic Net": ["Elastic Net Classifier", "OneHot_ElasticNet.x", "y"],
"Logistic Regression Classifier": [
"Logistic Regression Classifier",
"Random Forest.x",
"Elastic Net.x",
"y",
],
}
comp_graph = ComponentGraph(graph)
assert len(comp_graph.component_dict) == 6
expected_order = [
"Imputer",
"OneHot_ElasticNet",
"Elastic Net",
"OneHot_RandomForest",
"Random Forest",
"Logistic Regression Classifier",
]
assert comp_graph.compute_order == expected_order
def test_init_instantiated():
graph = {
"Imputer": [
Imputer(numeric_impute_strategy="constant", numeric_fill_value=0),
"X",
"y",
]
}
component_graph = ComponentGraph(graph)
component_graph.instantiate(
{"Imputer": {"numeric_fill_value": 10, "categorical_fill_value": "Fill"}}
)
cg_imputer = component_graph.get_component("Imputer")
assert graph["Imputer"][0] == cg_imputer
assert cg_imputer.parameters["numeric_fill_value"] == 0
assert cg_imputer.parameters["categorical_fill_value"] is None
def test_invalid_init():
invalid_graph = {"Imputer": [Imputer, "X", "y"], "OHE": OneHotEncoder}
with pytest.raises(
ValueError, match="All component information should be passed in as a list"
):
ComponentGraph(invalid_graph)
graph = {
"Imputer": [
None,
"X",
"y",
]
}
with pytest.raises(
ValueError, match="may only contain str or ComponentBase subclasses"
):
ComponentGraph(graph)
graph = {
"Fake": ["Fake Component", "X", "y"],
"Estimator": [ElasticNetClassifier, "Fake.x", "y"],
}
with pytest.raises(MissingComponentError):
ComponentGraph(graph)
def test_init_bad_graphs():
graph_with_cycle = {
"Imputer": [Imputer, "X", "y"],
"OHE": [OneHotEncoder, "Imputer.x", "Estimator.x", "y"],
"Estimator": [RandomForestClassifier, "OHE.x", "y"],
}
with pytest.raises(ValueError, match="given graph contains a cycle"):
ComponentGraph(graph_with_cycle)
graph_with_more_than_one_final_component = {
"Imputer": ["Imputer", "X", "y"],
"OneHot_RandomForest": ["One Hot Encoder", "Imputer.x", "y"],
"OneHot_ElasticNet": ["One Hot Encoder", "Imputer.x", "y"],
"Random Forest": ["Random Forest Classifier", "OneHot_RandomForest.x", "y"],
"Elastic Net": ["Elastic Net Classifier", "X", "y"],
"Logistic Regression Classifier": [
"Logistic Regression Classifier",
"Random Forest.x",
"Elastic Net.x",
"y",
],
}
with pytest.raises(ValueError, match="graph has more than one final"):
ComponentGraph(graph_with_more_than_one_final_component)
graph_with_unconnected_imputer = {
"Imputer": ["Imputer", "X", "y"],
"DateTime": ["DateTime Featurizer", "X", "y"],
"Logistic Regression Classifier": [
"Logistic Regression Classifier",
"DateTime.x",
"y",
],
}
with pytest.raises(ValueError, match="The given graph is not completely connected"):
ComponentGraph(graph_with_unconnected_imputer)
def test_order_x_and_y():
graph = {
"Imputer": [Imputer, "X", "y"],
"OHE": [OneHotEncoder, "Imputer.x", "y"],
"Random Forest": [RandomForestClassifier, "OHE.x", "y"],
}
component_graph = ComponentGraph(graph).instantiate()
assert component_graph.compute_order == ["Imputer", "OHE", "Random Forest"]
def test_list_raises_error():
component_list = ["Imputer", "One Hot Encoder", RandomForestClassifier]
with pytest.raises(
ValueError,
match="component_dict must be a dictionary which specifies the components and edges between components",
):
ComponentGraph(component_list)
def test_instantiate_with_parameters(example_graph):
graph = example_graph
component_graph = ComponentGraph(graph)
assert not isinstance(component_graph.get_component("Imputer"), Imputer)
assert not isinstance(
component_graph.get_component("Elastic Net"), ElasticNetClassifier
)
parameters = {
"OneHot_RandomForest": {"top_n": 3},
"OneHot_ElasticNet": {"top_n": 5},
"Elastic Net": {"max_iter": 100},
}
component_graph.instantiate(parameters)
expected_order = [
"Imputer",
"OneHot_ElasticNet",
"Elastic Net",
"OneHot_RandomForest",
"Random Forest",
"Logistic Regression Classifier",
]
assert component_graph.compute_order == expected_order
assert isinstance(component_graph.get_component("Imputer"), Imputer)
assert isinstance(
component_graph.get_component("Random Forest"), RandomForestClassifier
)
assert isinstance(
component_graph.get_component("Logistic Regression Classifier"),
LogisticRegressionClassifier,
)
assert component_graph.get_component("OneHot_RandomForest").parameters["top_n"] == 3
assert component_graph.get_component("OneHot_ElasticNet").parameters["top_n"] == 5
assert component_graph.get_component("Elastic Net").parameters["max_iter"] == 100
@pytest.mark.parametrize("parameters", [None, {}])
def test_instantiate_without_parameters(parameters, example_graph):
graph = example_graph
component_graph = ComponentGraph(graph)
if parameters is not None:
component_graph.instantiate(parameters)
else:
component_graph.instantiate()
assert (
component_graph.get_component("OneHot_RandomForest").parameters["top_n"] == 10
)
assert component_graph.get_component("OneHot_ElasticNet").parameters["top_n"] == 10
assert component_graph.get_component(
"OneHot_RandomForest"
) is not component_graph.get_component("OneHot_ElasticNet")
expected_order = [
"Imputer",
"OneHot_ElasticNet",
"Elastic Net",
"OneHot_RandomForest",
"Random Forest",
"Logistic Regression Classifier",
]
assert component_graph.compute_order == expected_order
def test_reinstantiate(example_graph):
component_graph = ComponentGraph(example_graph)
component_graph.instantiate()
with pytest.raises(ValueError, match="Cannot reinstantiate a component graph"):
component_graph.instantiate({"OneHot": {"top_n": 7}})
def test_bad_instantiate_can_reinstantiate(example_graph):
component_graph = ComponentGraph(example_graph)
with pytest.raises(ValueError, match="Error received when instantiating component"):
component_graph.instantiate(
parameters={"Elastic Net": {"max_iter": 100, "fake_param": None}}
)
component_graph.instantiate({"Elastic Net": {"max_iter": 22}})
assert component_graph.get_component("Elastic Net").parameters["max_iter"] == 22
def test_get_component(example_graph):
graph = example_graph
component_graph = ComponentGraph(graph)
assert component_graph.get_component("OneHot_ElasticNet") == OneHotEncoder
assert (
component_graph.get_component("Logistic Regression Classifier")
== LogisticRegressionClassifier
)
with pytest.raises(ValueError, match="not in the graph"):
component_graph.get_component("Fake Component")
component_graph.instantiate(
{
"OneHot_RandomForest": {"top_n": 3},
"Random Forest": {"max_depth": 4, "n_estimators": 50},
}
)
assert component_graph.get_component("OneHot_ElasticNet") == OneHotEncoder()
assert component_graph.get_component("OneHot_RandomForest") == OneHotEncoder(
top_n=3
)
assert component_graph.get_component("Random Forest") == RandomForestClassifier(
n_estimators=50, max_depth=4
)
def test_get_estimators(example_graph):
component_graph = ComponentGraph(example_graph)
with pytest.raises(ValueError, match="Cannot get estimators until"):
component_graph.get_estimators()
component_graph.instantiate()
assert component_graph.get_estimators() == [
RandomForestClassifier(),
ElasticNetClassifier(),
LogisticRegressionClassifier(),
]
component_graph = ComponentGraph({"Imputer": ["Imputer", "X", "y"]})
component_graph.instantiate()
assert component_graph.get_estimators() == []
def test_parents(example_graph):
graph = example_graph
component_graph = ComponentGraph(graph)
assert component_graph.get_inputs("Imputer") == ["X", "y"]
assert component_graph.get_inputs("OneHot_RandomForest") == ["Imputer.x", "y"]
assert component_graph.get_inputs("OneHot_ElasticNet") == ["Imputer.x", "y"]
assert component_graph.get_inputs("Random Forest") == ["OneHot_RandomForest.x", "y"]
assert component_graph.get_inputs("Elastic Net") == ["OneHot_ElasticNet.x", "y"]
assert component_graph.get_inputs("Logistic Regression Classifier") == [
"Random Forest.x",
"Elastic Net.x",
"y",
]
with pytest.raises(ValueError, match="not in the graph"):
component_graph.get_inputs("Fake component")
component_graph.instantiate()
assert component_graph.get_inputs("Imputer") == ["X", "y"]
assert component_graph.get_inputs("OneHot_RandomForest") == ["Imputer.x", "y"]
assert component_graph.get_inputs("OneHot_ElasticNet") == ["Imputer.x", "y"]
assert component_graph.get_inputs("Random Forest") == ["OneHot_RandomForest.x", "y"]
assert component_graph.get_inputs("Elastic Net") == ["OneHot_ElasticNet.x", "y"]
assert component_graph.get_inputs("Logistic Regression Classifier") == [
"Random Forest.x",
"Elastic Net.x",
"y",
]
with pytest.raises(ValueError, match="not in the graph"):
component_graph.get_inputs("Fake component")
def test_get_last_component(example_graph):
component_graph = ComponentGraph()
with pytest.raises(
ValueError, match="Cannot get last component from edgeless graph"
):
component_graph.get_last_component()
component_graph = ComponentGraph(example_graph)
assert component_graph.get_last_component() == LogisticRegressionClassifier
component_graph.instantiate()
assert component_graph.get_last_component() == LogisticRegressionClassifier()
component_graph = ComponentGraph({"Imputer": [Imputer, "X", "y"]})
assert component_graph.get_last_component() == Imputer
component_graph = ComponentGraph(
{"Imputer": [Imputer, "X", "y"], "OneHot": [OneHotEncoder, "Imputer.x", "y"]}
)
assert component_graph.get_last_component() == OneHotEncoder
@patch("rayml.pipelines.components.Transformer.fit_transform")
@patch("rayml.pipelines.components.Estimator.fit")
@patch("rayml.pipelines.components.Estimator.predict_proba")
def test_fit_component_graph(
mock_predict_proba, mock_fit, mock_fit_transform, example_graph, X_y_binary
):
X, y = X_y_binary
mock_fit_transform.return_value = pd.DataFrame(X)
mock_predict_proba.return_value = pd.DataFrame(y)
mock_predict_proba.return_value.ww.init()
component_graph = ComponentGraph(example_graph).instantiate()
component_graph.fit(X, y)
assert mock_fit_transform.call_count == 3
assert mock_fit.call_count == 3
assert mock_predict_proba.call_count == 2
@patch("rayml.pipelines.components.TargetImputer.fit_transform")
@patch("rayml.pipelines.components.OneHotEncoder.fit_transform")
def test_fit_correct_inputs(
mock_ohe_fit_transform, mock_imputer_fit_transform, X_y_binary
):
X, y = X_y_binary
X = pd.DataFrame(X)
y = pd.Series(y)
graph = {
"Target Imputer": [TargetImputer, "X", "y"],
"OHE": [OneHotEncoder, "Target Imputer.x", "Target Imputer.y"],
}
expected_x = pd.DataFrame(index=X.index, columns=X.columns).fillna(1.0)
expected_x.ww.init()
expected_y = pd.Series(index=y.index).fillna(0)
mock_imputer_fit_transform.return_value = tuple((expected_x, expected_y))
mock_ohe_fit_transform.return_value = expected_x
component_graph = ComponentGraph(graph).instantiate()
component_graph.fit(X, y)
assert_frame_equal(expected_x, mock_ohe_fit_transform.call_args[0][0])
assert_series_equal(expected_y, mock_ohe_fit_transform.call_args[0][1])
@patch("rayml.pipelines.components.Transformer.fit_transform")
@patch("rayml.pipelines.components.Estimator.fit")
@patch("rayml.pipelines.components.Estimator.predict_proba")
def test_component_graph_fit_and_transform_all_but_final(
mock_predict_proba, mock_fit, mock_fit_transform, example_graph, X_y_binary
):
X, y = X_y_binary
component_graph = ComponentGraph(example_graph)
component_graph.instantiate()
mock_X_t = pd.DataFrame(np.ones(pd.DataFrame(X).shape))
mock_fit_transform.return_value = mock_X_t
mock_fit.return_value = Estimator
mock_predict_proba.return_value = pd.DataFrame(y)
mock_predict_proba.return_value.ww.init()
component_graph.fit_and_transform_all_but_final(X, y)
assert mock_fit_transform.call_count == 3
assert mock_fit.call_count == 2
assert mock_predict_proba.call_count == 2
@patch("rayml.pipelines.components.Estimator.fit")
@patch("rayml.pipelines.components.Estimator.predict_proba")
@patch("rayml.pipelines.components.Estimator.predict")
def test_predict(mock_predict, mock_predict_proba, mock_fit, example_graph, X_y_binary):
X, y = X_y_binary
mock_predict_proba.return_value = pd.DataFrame(y)
mock_predict_proba.return_value.ww.init()
mock_predict.return_value = pd.Series(y)
component_graph = ComponentGraph(example_graph).instantiate()
component_graph.fit(X, y)
component_graph.predict(X)
assert (
mock_predict_proba.call_count == 4
) # Called twice when fitting pipeline, twice when predicting
assert mock_predict.call_count == 1 # Called once during predict
assert mock_fit.call_count == 3 # Only called during fit, not predict
@patch("rayml.pipelines.components.Estimator.fit")
@patch("rayml.pipelines.components.Estimator.predict_proba")
@patch("rayml.pipelines.components.Estimator.predict")
def test_predict_multiclass(
mock_predict, mock_predict_proba, mock_fit, example_graph, X_y_multi
):
X, y = X_y_multi
mock_predict_proba.return_value = pd.DataFrame(
{
0: np.full(X.shape[0], 0.33),
1: np.full(X.shape[0], 0.33),
2: np.full(X.shape[0], 0.33),
}
)
mock_predict_proba.return_value.ww.init()
mock_predict.return_value = pd.Series(y)
component_graph = ComponentGraph(example_graph).instantiate()
component_graph.fit(X, y)
final_estimator_input = component_graph.transform_all_but_final(X, y)
assert final_estimator_input.columns.to_list() == [
"Col 0 Random Forest.x",
"Col 1 Random Forest.x",
"Col 2 Random Forest.x",
"Col 0 Elastic Net.x",
"Col 1 Elastic Net.x",
"Col 2 Elastic Net.x",
]
for col in final_estimator_input:
assert np.array_equal(
final_estimator_input[col].to_numpy(), np.full(X.shape[0], 0.33)
)
component_graph.predict(X)
assert (
mock_predict_proba.call_count == 6
) # Called twice when fitting pipeline, twice to compute final features, and twice when predicting
assert mock_predict.call_count == 1 # Called once during predict
assert mock_fit.call_count == 3 # Only called during fit, not predict
@patch("rayml.pipelines.components.Estimator.fit")
@patch("rayml.pipelines.components.Estimator.predict_proba")
@patch("rayml.pipelines.components.Estimator.predict")
def test_predict_regression(
mock_predict, mock_predict_proba, mock_fit, example_regression_graph, X_y_multi
):
X, y = X_y_multi
mock_predict.return_value = pd.Series(y)
mock_predict_proba.side_effect = MethodPropertyNotFoundError
component_graph = ComponentGraph(example_regression_graph).instantiate()
component_graph.fit(X, y)
final_estimator_input = component_graph.transform_all_but_final(X, y)
assert final_estimator_input.columns.to_list() == [
"Random Forest.x",
"Elastic Net.x",
]
component_graph.predict(X)
assert (
mock_predict_proba.call_count == 6
) # Called twice when fitting pipeline, twice to compute final features, and twice when predicting
assert (
mock_predict.call_count == 7
    ) # Called because `predict_proba` does not exist for regressions
assert mock_fit.call_count == 3 # Only called during fit, not predict
@patch("rayml.pipelines.components.Estimator.fit")
@patch("rayml.pipelines.components.Estimator.predict_proba")
@patch("rayml.pipelines.components.Estimator.predict")
def test_predict_repeat_estimator(
mock_predict, mock_predict_proba, mock_fit, X_y_binary
):
X, y = X_y_binary
mock_predict_proba.return_value = pd.DataFrame(y)
mock_predict_proba.return_value.ww.init()
mock_predict.return_value = pd.Series(y)
graph = {
"Imputer": [Imputer, "X", "y"],
"OneHot_RandomForest": [OneHotEncoder, "Imputer.x", "y"],
"OneHot_Logistic": [OneHotEncoder, "Imputer.x", "y"],
"Random Forest": [RandomForestClassifier, "OneHot_RandomForest.x", "y"],
"Logistic Regression Classifier": [
LogisticRegressionClassifier,
"OneHot_Logistic.x",
"y",
],
"Final Estimator": [
LogisticRegressionClassifier,
"Random Forest.x",
"Logistic Regression Classifier.x",
"y",
],
}
component_graph = ComponentGraph(graph)
component_graph.instantiate()
component_graph.fit(X, y)
assert (
not component_graph.get_component(
"Logistic Regression Classifier"
)._component_obj
== component_graph.get_component("Final Estimator")._component_obj
)
component_graph.predict(X)
assert mock_predict_proba.call_count == 4
assert mock_predict.call_count == 1
assert mock_fit.call_count == 3
@patch("rayml.pipelines.components.Imputer.transform")
@patch("rayml.pipelines.components.OneHotEncoder.transform")
@patch("rayml.pipelines.components.RandomForestClassifier.predict_proba")
@patch("rayml.pipelines.components.ElasticNetClassifier.predict_proba")
def test_transform_all_but_final(
mock_en_predict_proba,
mock_rf_predict_proba,
mock_ohe,
mock_imputer,
example_graph,
X_y_binary,
):
X, y = X_y_binary
mock_imputer.return_value = pd.DataFrame(X)
mock_ohe.return_value = pd.DataFrame(X)
mock_en_predict_proba.return_value = pd.DataFrame(
({0: np.zeros(X.shape[0]), 1: np.ones(X.shape[0])})
)
mock_en_predict_proba.return_value.ww.init()
mock_rf_predict_proba.return_value = pd.DataFrame(
({0: np.ones(X.shape[0]), 1: np.zeros(X.shape[0])})
)
mock_rf_predict_proba.return_value.ww.init()
X_expected = pd.DataFrame(
{
"Col 1 Random Forest.x": np.zeros(X.shape[0]),
"Col 1 Elastic Net.x": np.ones(X.shape[0]),
}
)
component_graph = ComponentGraph(example_graph).instantiate()
component_graph.fit(X, y)
X_t = component_graph.transform_all_but_final(X)
assert_frame_equal(X_expected, X_t)
assert mock_imputer.call_count == 2
assert mock_ohe.call_count == 4
@patch(f"{__name__}.DummyTransformer.transform")
def test_transform_all_but_final_single_component(mock_transform, X_y_binary):
X, y = X_y_binary
X = pd.DataFrame(X)
mock_transform.return_value = X
component_graph = ComponentGraph(
{"Dummy Component": [DummyTransformer, "X", "y"]}
).instantiate()
component_graph.fit(X, y)
X_t = component_graph.transform_all_but_final(X)
assert_frame_equal(X, X_t)
@patch("rayml.pipelines.components.Imputer.fit_transform")
def test_fit_y_parent(mock_fit_transform, X_y_binary):
X, y = X_y_binary
graph = {
"Imputer": [Imputer, "X", "y"],
"OHE": [OneHotEncoder, "Imputer.x", "y"],
"Random Forest": [RandomForestClassifier, "OHE.x", "y"],
}
component_graph = ComponentGraph(graph).instantiate()
mock_fit_transform.return_value = tuple((pd.DataFrame(X), pd.Series(y)))
component_graph.fit(X, y)
mock_fit_transform.assert_called_once()
def test_predict_empty_graph(X_y_binary):
X, y = X_y_binary
X = pd.DataFrame(X)
component_graph = ComponentGraph()
component_graph.instantiate()
component_graph.fit(X, y)
X_t = component_graph.transform(X, y)
assert_frame_equal(X, X_t)
X_pred = component_graph.predict(X)
assert_frame_equal(X, X_pred)
def test_no_instantiate_before_fit(X_y_binary):
X, y = X_y_binary
graph = {
"Imputer": [Imputer, "X", "y"],
"OHE": [OneHotEncoder, "Imputer.x", "y"],
"Estimator": [RandomForestClassifier, "OHE.x", "y"],
}
component_graph = ComponentGraph(graph)
with pytest.raises(
ValueError,
match="All components must be instantiated before fitting or predicting",
):
component_graph.fit(X, y)
def test_multiple_y_parents():
graph = {
"Imputer": [Imputer, "X", "y"],
"TargetImputer": [Imputer, "Imputer.x", "y"],
"Estimator": [RandomForestClassifier, "Imputer.x", "y", "TargetImputer.y"],
}
with pytest.raises(ValueError, match="All components must have exactly one target"):
ComponentGraph(graph)
def test_component_graph_order(example_graph):
component_graph = ComponentGraph(example_graph)
expected_order = [
"Imputer",
"OneHot_ElasticNet",
"Elastic Net",
"OneHot_RandomForest",
"Random Forest",
"Logistic Regression Classifier",
]
assert expected_order == component_graph.compute_order
component_graph = ComponentGraph({"Imputer": [Imputer, "X", "y"]})
expected_order = ["Imputer"]
assert expected_order == component_graph.compute_order
@pytest.mark.parametrize(
"index",
[
list(range(-5, 0)),
list(range(100, 105)),
[f"row_{i}" for i in range(5)],
pd.date_range("2020-09-08", periods=5),
],
)
@pytest.mark.parametrize("with_estimator_last_component", [True, False])
def test_component_graph_transform_and_predict_with_custom_index(
index,
with_estimator_last_component,
example_graph,
example_graph_with_transformer_last_component,
):
X = pd.DataFrame(
{"categories": [f"cat_{i}" for i in range(5)], "numbers": np.arange(5)},
index=index,
)
y = pd.Series([1, 2, 1, 2, 1], index=index)
X.ww.init(logical_types={"categories": "categorical"})
graph_to_use = (
example_graph
if with_estimator_last_component
else example_graph_with_transformer_last_component
)
component_graph = ComponentGraph(graph_to_use)
component_graph.instantiate()
component_graph.fit(X, y)
if with_estimator_last_component:
predictions = component_graph.predict(X)
assert_index_equal(predictions.index, X.index)
assert not predictions.isna().any(axis=None)
else:
X_t = component_graph.transform(X)
        assert_index_equal(X_t.index, X.index)
#!C:\Users\willi\AppData\Local\Programs\Python\Python38-32\python.exe
#!/usr/bin/python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import psycopg2
import time
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.holtwinters import ExponentialSmoothing as HWES
import statsmodels.api as sm
#=============================================#
# Database Conn #
#=============================================#
conn = psycopg2.connect(database = "machine_learning", user = "postgres", password = "<PASSWORD>", host = "localhost", port = "5432")
cur = conn.cursor()
cur.execute("SELECT * from arima")
rowsArima = cur.fetchall()
dataArima = pd.DataFrame(rowsArima,columns = ['Month','Value'])
dataArima.set_index('Month',inplace = True)
dataArima.index = pd.to_datetime(dataArima.index)
cur.execute("SELECT * from hwes")
rowsHwes = cur.fetchall()
dataHwes = pd.DataFrame(rowsHwes,columns = ['Month','Value'])
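# Illustrative sketch (not a definitive implementation) of how dataHwes could
# feed the imported HWES model, mirroring the dataArima index handling above;
# seasonal_periods=12 is an assumption for monthly data:
# dataHwes.set_index('Month', inplace=True)
# dataHwes.index = pd.to_datetime(dataHwes.index)
# hwes_fit = HWES(dataHwes['Value'], trend='add', seasonal='add',
#                 seasonal_periods=12).fit()
# hwes_forecast = hwes_fit.forecast(steps=12)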
# Read the Northwind.txt text data into a DataFrame.
# (1) Find the names of products that were sold in March 1997.
# (2) Find the two products whose sales are most strongly correlated.
# (3) Find the product with the smallest fluctuation in sales performance.
import numpy as np
import pandas as pd
data = pd.read_table('Northwind.txt', sep=',')
# (1) Find the names of products that were sold in March 1997.
data1 = data[((data.OrderYear == 1997) & \
(data.OrderMonth == 3))]
print(data1.ProductName.sort_values().unique())
# (2) Find the two products whose sales are most strongly correlated. (Solution 1)
grouped = data.loc[:,\
['OrderYear','OrderMonth','Quantity']].groupby( \
data.ProductName)
carr = []
for (pn1), group1 in grouped:
g1 = pd.DataFrame(group1, \
columns=['OrderYear','OrderMonth','Quantity'])
g1 = g1.Quantity.groupby( \
[g1.OrderYear,g1.OrderMonth]).sum()
g1 = pd.DataFrame(g1)
for (pn2), group2 in grouped:
if pn1 == pn2:
continue
g2 = pd.DataFrame(group2, \
columns=['OrderYear','OrderMonth','Quantity'])
g2 = g2.Quantity.groupby(\
[g2.OrderYear,g2.OrderMonth]).sum()
        g2 = pd.DataFrame(g2)
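# (3) Illustrative sketch for the volatility task, assuming "fluctuation" means
# the standard deviation of monthly Quantity totals per product:
# monthly = data.Quantity.groupby(
#     [data.ProductName, data.OrderYear, data.OrderMonth]).sum()
# volatility = monthly.groupby(level='ProductName').std()
# print(volatility.idxmin())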
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import os
import operator
import unittest
import numpy as np
from pandas.core.api import (Index, Series, TimeSeries, DataFrame, isnull)
import pandas.core.datetools as datetools
from pandas.util.testing import assert_series_equal
import pandas.util.testing as common
#-------------------------------------------------------------------------------
# Series test cases
class TestSeries(unittest.TestCase):
def setUp(self):
self.ts = common.makeTimeSeries()
self.series = common.makeStringSeries()
self.objSeries = common.makeObjectSeries()
self.empty = Series([], index=[])
def test_constructor(self):
# Recognize TimeSeries
self.assert_(isinstance(self.ts, TimeSeries))
# Pass in Series
derived = Series(self.ts)
self.assert_(isinstance(derived, TimeSeries))
self.assert_(common.equalContents(derived.index, self.ts.index))
# Ensure new index is not created
self.assertEquals(id(self.ts.index), id(derived.index))
# Pass in scalar
scalar = Series(0.5)
self.assert_(isinstance(scalar, float))
# Mixed type Series
mixed = Series(['hello', np.NaN], index=[0, 1])
self.assert_(mixed.dtype == np.object_)
self.assert_(mixed[1] is np.NaN)
self.assertRaises(Exception, Series, [0, 1, 2], index=None)
self.assert_(not isinstance(self.empty, TimeSeries))
self.assert_(not isinstance(Series({}), TimeSeries))
self.assertRaises(Exception, Series, np.random.randn(3, 3),
index=np.arange(3))
def test_constructor_corner(self):
df = common.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
self.assert_(isinstance(s, Series))
def test_fromDict(self):
data = {'a' : 0, 'b' : 1, 'c' : 2, 'd' : 3}
series = Series(data)
self.assert_(common.is_sorted(series.index))
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : datetime.now()}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : '3'}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : '0', 'b' : '1'}
series = Series(data, dtype=float)
self.assert_(series.dtype == np.float64)
def test_setindex(self):
# wrong type
series = self.series.copy()
self.assertRaises(TypeError, series._set_index, None)
# wrong length
series = self.series.copy()
self.assertRaises(AssertionError, series._set_index,
np.arange(len(series) - 1))
# works
series = self.series.copy()
series.index = np.arange(len(series))
self.assert_(isinstance(series.index, Index))
def test_array_finalize(self):
pass
def test_fromValue(self):
nans = Series.fromValue(np.NaN, index=self.ts.index)
self.assert_(nans.dtype == np.float_)
strings = Series.fromValue('foo', index=self.ts.index)
self.assert_(strings.dtype == np.object_)
d = datetime.now()
dates = Series.fromValue(d, index=self.ts.index)
self.assert_(dates.dtype == np.object_)
def test_contains(self):
common.assert_contains_all(self.ts.index, self.ts)
def test_save_load(self):
self.series.save('tmp1')
self.ts.save('tmp3')
unp_series = Series.load('tmp1')
unp_ts = Series.load('tmp3')
os.remove('tmp1')
os.remove('tmp3')
assert_series_equal(unp_series, self.series)
assert_series_equal(unp_ts, self.ts)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assert_(self.series.get(-1) is None)
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - datetools.bday
self.assertRaises(Exception, self.ts.__getitem__, d),
def test_fancy(self):
slice1 = self.series[[1,2,3]]
slice2 = self.objSeries[[1,2,3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assert_(self.series.index[9] not in numSlice.index)
self.assert_(self.objSeries.index[9] not in objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assert_(common.equalContents(numSliceEnd,
np.array(self.series)[-10:]))
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1,2,17]] = np.NaN
self.ts[6] = np.NaN
self.assert_(np.isnan(self.ts[6]))
self.assert_(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assert_(not np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(common.makeIntIndex(20).astype(float),
index=common.makeIntIndex(20))
series[::2] = 0
self.assert_((series[::2] == 0).all())
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertEqual(len(sl.index.indexMap), len(sl.index))
def test_repr(self):
str(self.ts)
str(self.series)
str(self.series.astype(int))
str(self.objSeries)
str(Series(common.randn(1000), index=np.arange(1000)))
# empty
str(self.empty)
# with NaNs
self.series[5:7] = np.NaN
str(self.series)
def test_toString(self):
from cStringIO import StringIO
self.ts.toString(buffer=StringIO())
def test_iter(self):
for i, val in enumerate(self.series):
self.assertEqual(val, self.series[i])
for i, val in enumerate(self.ts):
self.assertEqual(val, self.ts[i])
def test_keys(self):
self.assert_(self.ts.keys() is self.ts.index)
def test_values(self):
self.assert_(np.array_equal(self.ts, self.ts.values))
def test_iteritems(self):
for idx, val in self.series.iteritems():
self.assertEqual(val, self.series[idx])
for idx, val in self.ts.iteritems():
self.assertEqual(val, self.ts[idx])
def test_stats(self):
self.series[5:15] = np.NaN
s1 = np.array(self.series)
s1 = s1[-np.isnan(s1)]
self.assertEquals(np.min(s1), self.series.min())
self.assertEquals(np.max(s1), self.series.max())
self.assertEquals(np.sum(s1), self.series.sum())
self.assertEquals(np.mean(s1), self.series.mean())
self.assertEquals(np.std(s1, ddof=1), self.series.std())
self.assertEquals(np.var(s1, ddof=1), self.series.var())
try:
from scipy.stats import skew
common.assert_almost_equal(skew(s1, bias=False),
self.series.skew())
except ImportError:
pass
self.assert_(not np.isnan(np.sum(self.series)))
self.assert_(not np.isnan(np.mean(self.series)))
self.assert_(not np.isnan(np.std(self.series)))
self.assert_(not np.isnan(np.var(self.series)))
self.assert_(not np.isnan(np.min(self.series)))
self.assert_(not np.isnan(np.max(self.series)))
        self.assert_(np.isnan(Series([1.], index=[1])
from flowsa.common import WITHDRAWN_KEYWORD
from flowsa.flowbyfunctions import assign_fips_location_system
from flowsa.location import US_FIPS
import math
import pandas as pd
import io
from flowsa.settings import log
from string import digits
YEARS_COVERED = {
"asbestos": "2014-2018",
"barite": "2014-2018",
"bauxite": "2013-2017",
"beryllium": "2014-2018",
"boron": "2014-2018",
"chromium": "2014-2018",
"clay": "2015-2016",
"cobalt": "2013-2017",
"copper": "2011-2015",
"diatomite": "2014-2018",
"feldspar": "2013-2017",
"fluorspar": "2013-2017",
"fluorspar_inports": ["2016", "2017"],
"gallium": "2014-2018",
"garnet": "2014-2018",
"gold": "2013-2017",
"graphite": "2013-2017",
"gypsum": "2014-2018",
"iodine": "2014-2018",
"ironore": "2014-2018",
"kyanite": "2014-2018",
"lead": "2012-2018",
"lime": "2014-2018",
"lithium": "2013-2017",
"magnesium": "2013-2017",
"manganese": "2012-2016",
"manufacturedabrasive": "2017-2018",
"mica": "2014-2018",
"molybdenum": "2014-2018",
"nickel": "2012-2016",
"niobium": "2014-2018",
"peat": "2014-2018",
"perlite": "2013-2017",
"phosphate": "2014-2018",
"platinum": "2014-2018",
"potash": "2014-2018",
"pumice": "2014-2018",
"rhenium": "2014-2018",
"salt": "2013-2017",
"sandgravelconstruction": "2013-2017",
"sandgravelindustrial": "2014-2018",
"silver": "2012-2016",
"sodaash": "2010-2017",
"sodaash_t4": ["2016", "2017"],
"stonecrushed": "2013-2017",
"stonedimension": "2013-2017",
"strontium": "2014-2018",
"talc": "2013-2017",
"titanium": "2013-2017",
"tungsten": "2013-2017",
"vermiculite": "2014-2018",
"zeolites": "2014-2018",
"zinc": "2013-2017",
"zirconium": "2013-2017",
}
def usgs_myb_year(years, current_year_str):
"""
    Sets the data-column name for the given year. Checks that the year
    you picked falls within the range covered by the file.
    :param years: string, year range separated by a hyphen (e.g. "2014-2018")
:param current_year_str: string, year of interest
:return: string, year
"""
years_array = years.split("-")
lower_year = int(years_array[0])
upper_year = int(years_array[1])
current_year = int(current_year_str)
if lower_year <= current_year <= upper_year:
column_val = current_year - lower_year + 1
return "year_" + str(column_val)
else:
log.info("Your year is out of scope. Pick a year between %s and %s",
lower_year, upper_year)
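# Illustrative example, using the asbestos range from YEARS_COVERED above:
# usgs_myb_year("2014-2018", "2016") returns "year_3" (the third data column of
# the five-year span); a year outside the range only logs a message, so the
# function implicitly returns None.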
def usgs_myb_name(USGS_Source):
"""
Takes the USGS source name and parses it so it can be used in other parts
of Flow by activity.
:param USGS_Source: string, usgs source name
:return:
"""
source_split = USGS_Source.split("_")
name_cc = str(source_split[2])
name = ""
for char in name_cc:
if char.isupper():
name = name + " " + char
else:
name = name + char
name = name.lower()
name = name.strip()
return name
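# Illustrative example for a source string following the USGS_MYB_<Commodity>
# naming pattern (hypothetical input):
# usgs_myb_name("USGS_MYB_SodaAsh") returns "soda ash".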
def usgs_myb_static_variables():
"""
Populates the data values for Flow by activity that are the same
    for all of the USGS_MYB files
    :return: dictionary of static Flow-By-Activity fields
"""
data = {}
data["Class"] = "Geological"
data['FlowType'] = "ELEMENTARY_FLOWS"
data["Location"] = US_FIPS
data["Compartment"] = "ground"
data["Context"] = None
data["ActivityConsumedBy"] = None
return data
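# Each parse function below starts from this dict and then fills in the
# source-specific fields (SourceName, Year, Unit, FlowName, FlowAmount,
# Description, ActivityProducedBy); the static keys above stay fixed.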
def usgs_myb_remove_digits(value_string):
"""
Eliminates numbers in a string
    :param value_string: string that may contain digits
    :return: string with all digits removed
"""
remove_digits = str.maketrans('', '', digits)
return_string = value_string.translate(remove_digits)
return return_string
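# Illustrative example: usgs_myb_remove_digits("Imports for consumption:3")
# returns "Imports for consumption:" -- footnote digits are stripped while
# letters and punctuation are kept.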
def usgs_myb_url_helper(*, build_url, **_):
"""
This helper function uses the "build_url" input from flowbyactivity.py,
which is a base url for data imports that requires parts of the url text
string to be replaced with info specific to the data year. This function
does not parse the data, only modifies the urls from which data is
obtained.
:param build_url: string, base url
:param config: dictionary, items in FBA method yaml
:param args: dictionary, arguments specified when running flowbyactivity.py
flowbyactivity.py ('year' and 'source')
:return: list, urls to call, concat, parse, format into Flow-By-Activity
format
"""
return [build_url]
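# Note: for these MYB sources no per-year substitution is needed, so the helper
# just wraps the already-built url in a one-element list, the shape expected by
# flowbyactivity.py for its list of urls to call.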
def usgs_asbestos_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[4:11]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 12:
for x in range(12, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
if len(df_data. columns) == 12:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['asbestos'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_asbestos_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
product = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
elif df.iloc[index]["Production"].strip() == \
"Exports and reexports:":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['asbestos'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "nan":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(dataframe,
str(year))
return dataframe
def usgs_barite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(
io.BytesIO(resp.content), sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:14]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data. columns) == 11:
df_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['barite'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_barite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['barite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:3":
product = "imports"
elif df.iloc[index]["Production"].strip() == \
"Crude, sold or used by producers:":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports:2":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['barite'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_bauxite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[6:14]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one. columns) == 11:
df_data_one.columns = ["Production", "space_2", "year_1", "space_3",
"year_2", "space_4", "year_3", "space_5",
"year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['bauxite'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_bauxite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Total"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['bauxite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Production":
prod = "production"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption, as shipped:":
prod = "import"
elif df.iloc[index]["Production"].strip() == \
"Exports, as shipped:":
prod = "export"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
flow_amount = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = flow_amount
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_beryllium_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T4')
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data_two.loc[6:9]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data.loc[12:12]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
if len(df_data_2.columns) > 11:
for x in range(11, len(df_data_2.columns)):
col_name = "Unnamed: " + str(x)
del df_data_2[col_name]
if len(df_data_1. columns) == 11:
df_data_1.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
if len(df_data_2. columns) == 11:
df_data_2.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['beryllium'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_beryllium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["United States6", "Mine shipments1",
"Imports for consumption, beryl2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['beryllium'], year)
for df in df_list:
for index, row in df.iterrows():
prod = "production"
if df.iloc[index]["Production"].strip() == \
"Imports for consumption, beryl2":
prod = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
data["Description"] = name
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_boron_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data.loc[8:8]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
df_data_two = pd.DataFrame(df_raw_data.loc[21:22]).reindex()
df_data_two = df_data_two.reset_index()
del df_data_two["index"]
df_data_three = pd.DataFrame(df_raw_data.loc[27:28]).reindex()
df_data_three = df_data_three.reset_index()
del df_data_three["index"]
if len(df_data_one. columns) == 11:
df_data_one.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
df_data_two.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
df_data_three.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['boron'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
del df_data_two[col]
del df_data_three[col]
frames = [df_data_one, df_data_two, df_data_three]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_boron_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["B2O3 content", "Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['boron'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "B2O3 content" or \
df.iloc[index]["Production"].strip() == "Quantity":
product = "production"
if df.iloc[index]["Production"].strip() == "Colemanite:4":
des = "Colemanite"
elif df.iloc[index]["Production"].strip() == "Ulexite:4":
des = "Ulexite"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
if des == name:
data['FlowName'] = name + " " + product
else:
data['FlowName'] = name + " " + product + " " + des
data["Description"] = des
data["ActivityProducedBy"] = name
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_chromium_call(*, resp, year, **_):
""""
Convert response for calling url to pandas dataframe,
begin parsing df into FBA format
:param url: string, url
:param resp: df, response from url call
:param args: dictionary, arguments specified when running
flowbyactivity.py ('year' and 'source')
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[4:24]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data. columns) == 12:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
elif len(df_data. columns) == 13:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5", "space_6"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['chromium'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_chromium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Secondary2", "Total"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['chromium'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Imports:":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Secondary2":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports:":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['chromium'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_clay_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_ball = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T3')
df_data_ball = pd.DataFrame(df_raw_data_ball.loc[19:19]).reindex()
df_data_ball = df_data_ball.reset_index()
del df_data_ball["index"]
df_raw_data_bentonite = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T4 ')
df_data_bentonite = pd.DataFrame(
df_raw_data_bentonite.loc[28:28]).reindex()
df_data_bentonite = df_data_bentonite.reset_index()
del df_data_bentonite["index"]
df_raw_data_common = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T5 ')
df_data_common = pd.DataFrame(df_raw_data_common.loc[40:40]).reindex()
df_data_common = df_data_common.reset_index()
del df_data_common["index"]
df_raw_data_fire = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T6 ')
df_data_fire = pd.DataFrame(df_raw_data_fire.loc[12:12]).reindex()
df_data_fire = df_data_fire.reset_index()
del df_data_fire["index"]
df_raw_data_fuller = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T7 ')
df_data_fuller = pd.DataFrame(df_raw_data_fuller.loc[17:17]).reindex()
df_data_fuller = df_data_fuller.reset_index()
del df_data_fuller["index"]
df_raw_data_kaolin = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T8 ')
df_data_kaolin = pd.DataFrame(df_raw_data_kaolin.loc[18:18]).reindex()
df_data_kaolin = df_data_kaolin.reset_index()
del df_data_kaolin["index"]
df_raw_data_export = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T13')
df_data_export = pd.DataFrame(df_raw_data_export.loc[6:15]).reindex()
df_data_export = df_data_export.reset_index()
del df_data_export["index"]
df_raw_data_import = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T14')
df_data_import = pd.DataFrame(df_raw_data_import.loc[6:13]).reindex()
df_data_import = df_data_import.reset_index()
del df_data_import["index"]
df_data_ball.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_bentonite.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_common.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_fire.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_fuller.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_kaolin.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2"]
df_data_export.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2", "space_5", "extra"]
df_data_import.columns = ["Production", "space_1", "year_1", "space_2",
"value_1", "space_3", "year_2", "space_4",
"value_2", "space_5", "extra"]
df_data_ball["type"] = "Ball clay"
df_data_bentonite["type"] = "Bentonite"
df_data_common["type"] = "Common clay"
df_data_fire["type"] = "Fire clay"
df_data_fuller["type"] = "Fuller’s earth"
df_data_kaolin["type"] = "Kaolin"
df_data_export["type"] = "export"
df_data_import["type"] = "import"
col_to_use = ["Production", "type"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['clay'], year))
for col in df_data_import.columns:
if col not in col_to_use:
del df_data_import[col]
del df_data_export[col]
for col in df_data_ball.columns:
if col not in col_to_use:
del df_data_ball[col]
del df_data_bentonite[col]
del df_data_common[col]
del df_data_fire[col]
del df_data_fuller[col]
del df_data_kaolin[col]
frames = [df_data_import, df_data_export, df_data_ball, df_data_bentonite,
df_data_common, df_data_fire, df_data_fuller, df_data_kaolin]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_clay_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Ball clay", "Bentonite", "Fire clay", "Kaolin",
"Fuller’s earth", "Total", "Grand total",
"Artificially activated clay and earth",
"Clays, not elsewhere classified",
"Clays, not elsewhere classified"]
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["type"].strip() == "import":
product = "imports"
elif df.iloc[index]["type"].strip() == "export":
product = "exports"
else:
product = "production"
if str(df.iloc[index]["Production"]).strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
if product == "production":
data['FlowName'] = \
df.iloc[index]["type"].strip() + " " + product
data["Description"] = df.iloc[index]["type"].strip()
data["ActivityProducedBy"] = df.iloc[index]["type"].strip()
else:
data['FlowName'] = \
df.iloc[index]["Production"].strip() + " " + product
data["Description"] = df.iloc[index]["Production"].strip()
data["ActivityProducedBy"] = \
df.iloc[index]["Production"].strip()
col_name = usgs_myb_year(YEARS_COVERED['clay'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)" or \
str(df.iloc[index][col_name]) == "(2)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_cobalt_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T8')
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data_two.loc[6:11]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data.loc[23:23]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
if len(df_data_2.columns) > 11:
for x in range(11, len(df_data_2.columns)):
col_name = "Unnamed: " + str(x)
del df_data_2[col_name]
if len(df_data_1. columns) == 12:
df_data_1.columns = ["Production", "space_6", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
if len(df_data_2. columns) == 11:
df_data_2.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['cobalt'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_cobalt_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
row_to_use = ["United Statese, 16, 17", "Mine productione",
"Imports for consumption", "Exports"]
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
prod = "production"
if df.iloc[index]["Production"].strip() == \
"United Statese, 16, 17":
prod = "production"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Exports":
prod = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
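                # strip numeric footnote markers (digits) from the row label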
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['cobalt'], year)
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
data["FlowAmount"] = str(df.iloc[index][col_name])
remove_rows = ["(18)", "(2)"]
if data["FlowAmount"] not in remove_rows:
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
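# Illustrative sketch (editorial addition, not part of the original module):
# each call/parse pair here is normally driven by flowbyactivity.py, roughly as
# in the hypothetical, commented-out snippet below. The url variable and the
# source name are assumptions for illustration only.
#
#   resp = requests.get(cobalt_myb_url)
#   df_list = [usgs_cobalt_call(resp=resp, year="2016")]
#   fba = usgs_cobalt_parse(df_list=df_list, source="USGS_MYB_Cobalt",
#                           year="2016")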
def usgs_copper_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data.loc[12:12]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data.loc[30:31]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
    if len(df_data_1.columns) == 12:
df_data_1.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
df_data_2.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production", "Unit"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['copper'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_copper_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
if product == "Total":
prod = "production"
elif product == "Exports, refined":
prod = "exports"
elif product == "Imports, refined":
prod = "imports"
data["ActivityProducedBy"] = "Copper; Mine"
data['FlowName'] = name + " " + prod
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['copper'], year)
data["Description"] = "Copper; Mine"
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_diatomite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[7:10]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one.columns) == 10:
df_data_one.columns = ["Production", "year_1", "space_2", "year_2",
"space_3", "year_3", "space_4", "year_4",
"space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['diatomite'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_diatomite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Exports2", "Imports for consumption2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports2":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption2":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Quantity":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand metric tons"
col_name = usgs_myb_year(YEARS_COVERED['diatomite'], year)
data["FlowAmount"] = str(df.iloc[index][col_name])
data["Description"] = name
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_feldspar_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_two = pd.DataFrame(df_raw_data_two.loc[4:8]).reindex()
df_data_two = df_data_two.reset_index()
del df_data_two["index"]
df_data_one = pd.DataFrame(df_raw_data_two.loc[10:15]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
    if len(df_data_two.columns) == 13:
df_data_two.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
df_data_one.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['feldspar'], year))
for col in df_data_two.columns:
if col not in col_to_use:
del df_data_two[col]
del df_data_one[col]
frames = [df_data_two, df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_feldspar_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Quantity3"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports, feldspar:4":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:4":
prod = "imports"
elif df.iloc[index]["Production"].strip() == \
"Production, feldspar:e, 2":
prod = "production"
elif df.iloc[index]["Production"].strip() == "Nepheline syenite:":
prod = "production"
des = "Nepheline syenite"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['feldspar'], year)
data["FlowAmount"] = str(df.iloc[index][col_name])
data["Description"] = des
data["ActivityProducedBy"] = name
if name == des:
data['FlowName'] = name + " " + prod
else:
data['FlowName'] = name + " " + prod + " " + des
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_fluorspar_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
if year in YEARS_COVERED['fluorspar_inports']:
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T2')
df_raw_data_three = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T7')
df_raw_data_four = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T8')
df_data_one = pd.DataFrame(df_raw_data_one.loc[5:15]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if year in YEARS_COVERED['fluorspar_inports']:
df_data_two = pd.DataFrame(df_raw_data_two.loc[7:8]).reindex()
df_data_three = pd.DataFrame(df_raw_data_three.loc[19:19]).reindex()
df_data_four = pd.DataFrame(df_raw_data_four.loc[11:11]).reindex()
if len(df_data_two.columns) == 13:
df_data_two.columns = ["Production", "space_1", "not_1", "space_2",
"not_2", "space_3", "not_3", "space_4",
"not_4", "space_5", "year_4", "space_6",
"year_5"]
if len(df_data_three.columns) == 9:
df_data_three.columns = ["Production", "space_1", "year_4",
"space_2", "not_1", "space_3", "year_5",
"space_4", "not_2"]
df_data_four.columns = ["Production", "space_1", "year_4",
"space_2", "not_1", "space_3", "year_5",
"space_4", "not_2"]
    if len(df_data_one.columns) == 13:
df_data_one.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['fluorspar'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
if year in YEARS_COVERED['fluorspar_inports']:
for col in df_data_two.columns:
if col not in col_to_use:
del df_data_two[col]
for col in df_data_three.columns:
if col not in col_to_use:
del df_data_three[col]
for col in df_data_four.columns:
if col not in col_to_use:
del df_data_four[col]
df_data_one["type"] = "data_one"
if year in YEARS_COVERED['fluorspar_inports']:
# aluminum fluoride
# cryolite
df_data_two["type"] = "data_two"
df_data_three["type"] = "Aluminum Fluoride"
df_data_four["type"] = "Cryolite"
frames = [df_data_one, df_data_two, df_data_three, df_data_four]
else:
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_fluorspar_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Quantity3", "Total", "Hydrofluoric acid",
"Metallurgical", "Production"]
prod = ""
name = usgs_myb_name(source)
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports:3":
prod = "exports"
des = name
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:3":
prod = "imports"
des = name
elif df.iloc[index]["Production"].strip() == "Fluorosilicic acid:":
prod = "production"
des = "Fluorosilicic acid:"
if str(df.iloc[index]["type"]).strip() == "data_two":
prod = "imports"
des = df.iloc[index]["Production"].strip()
elif str(df.iloc[index]["type"]).strip() == \
"Aluminum Fluoride" or \
str(df.iloc[index]["type"]).strip() == "Cryolite":
prod = "imports"
des = df.iloc[index]["type"].strip()
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['fluorspar'], year)
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_gallium_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[5:7]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 11:
for x in range(11, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
if len(df_data.columns) == 11:
df_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['gallium'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_gallium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production, primary crude", "Metal"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['gallium'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
elif df.iloc[index]["Production"].strip() == \
"Production, primary crude":
product = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Kilograms"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['gallium'], year)
if str(df.iloc[index][col_name]).strip() == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "nan":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_garnet_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_two = pd.DataFrame(df_raw_data_two.loc[4:5]).reindex()
df_data_two = df_data_two.reset_index()
del df_data_two["index"]
df_data_one = pd.DataFrame(df_raw_data_two.loc[10:14]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one.columns) > 13:
for x in range(13, len(df_data_one.columns)):
col_name = "Unnamed: " + str(x)
del df_data_one[col_name]
del df_data_two[col_name]
    if len(df_data_two.columns) == 13:
df_data_two.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
df_data_one.columns = ["Production", "space_1", "unit", "space_2",
"year_1", "space_3", "year_2", "space_4",
"year_3", "space_5", "year_4", "space_6",
"year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['garnet'], year))
for col in df_data_two.columns:
if col not in col_to_use:
del df_data_two[col]
del df_data_one[col]
frames = [df_data_two, df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_garnet_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports:2":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption: 3":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Crude production:":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['garnet'], year)
data["FlowAmount"] = str(df.iloc[index][col_name])
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_gold_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[6:14]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) == 13:
df_data.columns = ["Production", "Space", "Units", "space_1",
"year_1", "space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['gold'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_gold_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Exports, refined bullion",
"Imports for consumption, refined bullion"]
dataframe = pd.DataFrame()
product = "production"
name = usgs_myb_name(source)
des = name
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Quantity":
product = "production"
elif df.iloc[index]["Production"].strip() == \
"Exports, refined bullion":
product = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption, refined bullion":
product = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "kilograms"
data['FlowName'] = name + " " + product
data["Description"] = des
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['gold'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_graphite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[5:9]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 13:
df_data.columns = ["Production", "space_1", "Unit", "space_6",
"year_1", "space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['graphite'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_graphite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantiy", "Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['graphite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Exports:":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['graphite'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "nan":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_gypsum_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[7:10]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one.columns) > 11:
for x in range(11, len(df_data_one.columns)):
col_name = "Unnamed: " + str(x)
del df_data_one[col_name]
if len(df_data_one.columns) == 11:
df_data_one.columns = ["Production", "space_1", "year_1", "space_3",
"year_2", "space_4", "year_3", "space_5",
"year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['gypsum'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_gypsum_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Imports for consumption"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['gypsum'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Quantity":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data["FlowAmount"] = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_iodine_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[6:10]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 11:
df_data.columns = ["Production", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5"]
    elif len(df_data.columns) == 13:
df_data.columns = ["Production", "unit", "space_1", "year_1", "space_2",
"year_2", "space_3", "year_3", "space_4",
"year_4", "space_5", "year_5", "space_6"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['iodine'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_iodine_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Quantity, for consumption", "Exports2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['iodine'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Imports:2":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Production":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports2":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['iodine'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_iron_ore_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:25]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Units", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production", "Units"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['ironore'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_iron_ore_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
row_to_use = ["Gross weight", "Quantity"]
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Production:":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports:":
product = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:":
product = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
data['FlowName'] = "Iron Ore " + product
data["Description"] = "Iron Ore"
data["ActivityProducedBy"] = "Iron Ore"
col_name = usgs_myb_year(YEARS_COVERED['ironore'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_kyanite_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[4:13]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
    if len(df_data_one.columns) == 12:
df_data_one.columns = ["Production", "unit", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['kyanite'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_kyanite_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity", "Quantity2"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['kyanite'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Exports of kyanite concentrate:3":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption, all kyanite minerals:3":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Production:":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data["FlowAmount"] = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_lead_url_helper(*, year, **_):
"""
    This helper function selects the url from which lead data is obtained.
    The Mineral Yearbook lead workbook is published under a different file
    name depending on the data year, so the full url is chosen by year rather
    than modified from a base url. This function does not parse the data,
    only provides the urls from which data is obtained.
    :param year: year
:return: list, urls to call, concat, parse, format into Flow-By-Activity
format
"""
if int(year) < 2013:
build_url = ('https://d9-wret.s3.us-west-2.amazonaws.com/assets/'
'palladium/production/atoms/files/myb1-2016-lead.xls')
elif int(year) < 2014:
build_url = ('https://d9-wret.s3.us-west-2.amazonaws.com/assets/'
'palladium/production/atoms/files/myb1-2017-lead.xls')
else:
build_url = ('https://d9-wret.s3.us-west-2.amazonaws.com/assets/'
'palladium/production/s3fs-public/media/files/myb1-2018-lead-advrel.xlsx')
url = build_url
return [url]
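# Illustrative note (editorial addition), derived directly from the branches above:
#   usgs_lead_url_helper(year=2012) -> ['...myb1-2016-lead.xls']          (year < 2013)
#   usgs_lead_url_helper(year=2013) -> ['...myb1-2017-lead.xls']
#   usgs_lead_url_helper(year=2018) -> ['...myb1-2018-lead-advrel.xlsx']  (2014 onwards)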
def usgs_lead_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[8:15]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 12:
for x in range(12, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Units", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production", "Units"]
if int(year) == 2013:
modified_sy = "2013-2018"
col_to_use.append(usgs_myb_year(modified_sy, year))
elif int(year) > 2013:
modified_sy = "2014-2018"
col_to_use.append(usgs_myb_year(modified_sy, year))
else:
col_to_use.append(usgs_myb_year(YEARS_COVERED['lead'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
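# Editorial note: usgs_myb_year (defined elsewhere in this source) is assumed to
# map a covered span such as "2014-2018" plus the requested year to the matching
# positional column label used above, e.g. usgs_myb_year("2014-2018", "2016")
# would be expected to return "year_3". The hard-coded "2013-2018" and
# "2014-2018" spans account for the lead workbooks covering a different year
# range than YEARS_COVERED['lead'].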
def usgs_lead_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
name = usgs_myb_name(source)
des = name
row_to_use = ["Primary lead, refined content, "
"domestic ores and base bullion",
"Secondary lead, lead content",
"Lead ore and concentrates", "Lead in base bullion"]
import_export = ["Exports, lead content:",
"Imports for consumption, lead content:"]
dataframe = pd.DataFrame()
product = "production"
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() in import_export:
if df.iloc[index]["Production"].strip() == \
"Exports, lead content:":
product = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption, lead content:":
product = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["ActivityProducedBy"] = df.iloc[index]["Production"]
if int(year) == 2013:
modified_sy = "2013-2018"
col_name = usgs_myb_year(modified_sy, year)
elif int(year) > 2013:
modified_sy = "2014-2018"
col_name = usgs_myb_year(modified_sy, year)
else:
col_name = usgs_myb_year(YEARS_COVERED['lead'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_lime_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_two = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_1 = pd.DataFrame(df_raw_data_two.loc[16:16]).reindex()
df_data_1 = df_data_1.reset_index()
del df_data_1["index"]
df_data_2 = pd.DataFrame(df_raw_data_two.loc[28:32]).reindex()
df_data_2 = df_data_2.reset_index()
del df_data_2["index"]
if len(df_data_1.columns) > 12:
for x in range(12, len(df_data_1.columns)):
col_name = "Unnamed: " + str(x)
del df_data_1[col_name]
del df_data_2[col_name]
    if len(df_data_1.columns) == 12:
df_data_1.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
df_data_2.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['lime'], year))
for col in df_data_1.columns:
if col not in col_to_use:
del df_data_1[col]
for col in df_data_2.columns:
if col not in col_to_use:
del df_data_2[col]
frames = [df_data_1, df_data_2]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_lime_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Total", "Quantity"]
import_export = ["Exports:7", "Imports for consumption:7"]
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
prod = "production"
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports:7":
prod = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption:7":
prod = "imports"
if df.iloc[index]["Production"].strip() in row_to_use:
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Production"].strip().translate(remove_digits)
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Thousand Metric Tons"
col_name = usgs_myb_year(YEARS_COVERED['lime'], year)
data["Description"] = des
data["ActivityProducedBy"] = name
if product.strip() == "Total":
data['FlowName'] = name + " " + prod
elif product.strip() == "Quantity":
data['FlowName'] = name + " " + prod
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_lithium_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing
df into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[6:8]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
if len(df_data_one.columns) > 11:
for x in range(11, len(df_data_one.columns)):
col_name = "Unnamed: " + str(x)
del df_data_one[col_name]
    if len(df_data_one.columns) == 11:
df_data_one.columns = ["Production", "space_2", "year_1", "space_3",
"year_2", "space_4", "year_3", "space_5",
"year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['lithium'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_lithium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Exports3", "Imports3", "Production"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['lithium'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports3":
prod = "exports"
elif df.iloc[index]["Production"].strip() == "Imports3":
prod = "imports"
elif df.iloc[index]["Production"].strip() == "Production":
prod = "production"
if df.iloc[index]["Production"].strip() in row_to_use:
product = df.iloc[index]["Production"].strip()
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data["FlowAmount"] = str(df.iloc[index][col_name])
if str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
data["Description"] = des
data["ActivityProducedBy"] = name
data['FlowName'] = name + " " + prod
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_magnesium_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:15]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Units", "space_1", "year_1",
"space_2", "year_2", "space_3", "year_3",
"space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['magnesium'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_magnesium_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Secondary", "Primary", "Exports", "Imports for consumption"]
dataframe = pd.DataFrame()
name = usgs_myb_name(source)
des = name
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == "Exports":
product = "exports"
elif df.iloc[index]["Production"].strip() == \
"Imports for consumption":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Secondary" or \
df.iloc[index]["Production"].strip() == "Primary":
product = "production" + " " + \
df.iloc[index]["Production"].strip()
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['magnesium'], year)
if str(df.iloc[index][col_name]) == "--":
data["FlowAmount"] = str(0)
elif str(df.iloc[index][col_name]) == "W":
data["FlowAmount"] = WITHDRAWN_KEYWORD
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_manganese_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
:param resp: df, response from url call
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data = pd.DataFrame(df_raw_data.loc[7:9]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 12:
for x in range(12, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
    if len(df_data.columns) == 12:
df_data.columns = ["Production", "Unit", "space_1", "year_1",
"space_2", "year_2", "space_3",
"year_3", "space_4", "year_4", "space_5", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['manganese'], year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_manganese_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Production", "Exports", "Imports for consumption"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
col_name = usgs_myb_year(YEARS_COVERED['manganese'], year)
for df in df_list:
for index, row in df.iterrows():
if df.iloc[index]["Production"].strip() == \
"Imports for consumption":
product = "imports"
elif df.iloc[index]["Production"].strip() == "Production":
product = "production"
elif df.iloc[index]["Production"].strip() == "Exports":
product = "exports"
if df.iloc[index]["Production"].strip() in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data["Unit"] = "Metric Tons"
data['FlowName'] = name + " " + product
data["Description"] = name
data["ActivityProducedBy"] = name
col_name = usgs_myb_year(YEARS_COVERED['manganese'], year)
if str(df.iloc[index][col_name]) == "--" or \
str(df.iloc[index][col_name]) == "(3)":
data["FlowAmount"] = str(0)
else:
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_ma_call(*, resp, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
    :param resp: df, response from url call
    :param year: year
:return: pandas dataframe of original source data
"""
df_raw_data = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T2')
df_data = pd.DataFrame(df_raw_data.loc[6:7]).reindex()
df_data = df_data.reset_index()
del df_data["index"]
if len(df_data.columns) > 9:
for x in range(9, len(df_data.columns)):
col_name = "Unnamed: " + str(x)
del df_data[col_name]
    if len(df_data.columns) == 9:
        df_data.columns = ["Product", "space_1", "quality_year_1", "space_2",
                           "value_year_1", "space_3",
                           "quality_year_2", "space_4", "value_year_2"]
col_to_use = ["Product"]
col_to_use.append("quality_"
+ usgs_myb_year(YEARS_COVERED['manufacturedabrasive'],
year))
for col in df_data.columns:
if col not in col_to_use:
del df_data[col]
return df_data
def usgs_ma_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
    :param source: source
    :param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Silicon carbide"]
name = usgs_myb_name(source)
des = name
dataframe = pd.DataFrame()
for df in df_list:
for index, row in df.iterrows():
remove_digits = str.maketrans('', '', digits)
product = df.iloc[index][
"Product"].strip().translate(remove_digits)
if product in row_to_use:
data = usgs_myb_static_variables()
data["SourceName"] = source
data["Year"] = str(year)
data['FlowName'] = "Silicon carbide"
data["ActivityProducedBy"] = "Silicon carbide"
data["Unit"] = "Metric Tons"
col_name = ("quality_"
+ usgs_myb_year(
YEARS_COVERED['manufacturedabrasive'], year))
col_name_array = col_name.split("_")
data["Description"] = product + " " + col_name_array[0]
data["FlowAmount"] = str(df.iloc[index][col_name])
dataframe = dataframe.append(data, ignore_index=True)
dataframe = assign_fips_location_system(
dataframe, str(year))
return dataframe
def usgs_mica_call(*, resp, source, year, **_):
"""
Convert response for calling url to pandas dataframe, begin parsing df
into FBA format
    :param resp: df, response from url call
    :param source: source
:param year: year
:return: pandas dataframe of original source data
"""
df_raw_data_one = pd.io.excel.read_excel(io.BytesIO(resp.content),
sheet_name='T1')
df_data_one = pd.DataFrame(df_raw_data_one.loc[4:6]).reindex()
df_data_one = df_data_one.reset_index()
del df_data_one["index"]
name = usgs_myb_name(source)
des = name
    if len(df_data_one.columns) == 12:
df_data_one.columns = ["Production", "Unit", "space_2", "year_1",
"space_3", "year_2", "space_4", "year_3",
"space_5", "year_4", "space_6", "year_5"]
col_to_use = ["Production"]
col_to_use.append(usgs_myb_year(YEARS_COVERED['mica'], year))
for col in df_data_one.columns:
if col not in col_to_use:
del df_data_one[col]
frames = [df_data_one]
df_data = pd.concat(frames)
df_data = df_data.reset_index()
del df_data["index"]
return df_data
def usgs_mica_parse(*, df_list, source, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param source: source
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
data = {}
row_to_use = ["Quantity"]
prod = ""
name = usgs_myb_name(source)
des = name
dataframe = | pd.DataFrame() | pandas.DataFrame |
import sys
sys.path.append('../')
# the call below is used to deal with special characters in the file path during read_csv()
sys._enablelegacywindowsfsencoding()
import numpy as np
import seaborn as sns
import pandas as pd
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt  # Matplotlib used to draw the graph created with NetworkX
# Import PySwarms
import pyswarms as ps
'''
### Generating a toy dataset using scikit-learn
We'll be using `sklearn.datasets.make_classification` to generate a 500-sample, 20-dimensional dataset with two classes.
We will then plot the distribution of the features in order to give us a qualitative assessment of the feature-space.
For our toy dataset, we will be rigging some parameters a bit. Out of the 20 features,
we'll have only 5 that are informative; the remaining 15 carry no additional signal.
Hopefully, we get to have Binary PSO select those that are informative, and prune those that are not.
'''
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
X, y = make_classification(n_samples=500, n_features=20, n_classes=2,
n_informative=5, n_redundant=0, n_repeated=0,
random_state=None, shuffle=True)
#X, X_test, y, y_test = train_test_split(X, y, test_size=0.20, random_state=None)
#X, y = make_classification(n_samples=100, n_features=15, n_classes=3,
# n_informative=4, n_redundant=1, n_repeated=2,
# random_state=1)
df = pd.DataFrame(X)
df['labels'] = | pd.Series(y) | pandas.Series |
import logging
from datetime import datetime
from timeit import default_timer as timer
from io import StringIO
import pandas as pd
import pytz
import requests
from celery.schedules import crontab
from celery.task import Task
from api.models import ConfirmedData, DeadData, RecoveredData, CovidData, \
ImportsUpdate
from covidapi.celery import app
logger = logging.getLogger('api_tasks')
class CalculateOrder(Task):
def run(self, order_id, task_caller='checkout', *args, **kwargs):
t0 = timer()
t1 = timer()
logger.info('Calculations finished in {}'.format(t1 - t0))
def save_import_log(endpoint, columns, rows_count, cols_count,
total_import_time):
ImportsUpdate.objects.create(
endpoint=endpoint,
columns=columns,
rows_count=rows_count,
cols_count=cols_count,
total_import_time=total_import_time
)
def update_john_hopkins():
t0 = timer()
url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/'
data_types = [
'time_series_covid19_confirmed_global.csv',
'time_series_covid19_deaths_global.csv',
'time_series_covid19_recovered_global.csv'
]
columns = ['confirmed', 'deaths', 'recovered']
urls = [url + endpoint for endpoint in data_types]
endpoints = zip(columns, urls)
data = {}
with requests.Session() as s:
for col, endpoint in endpoints:
data[col] = s.get(endpoint).content.decode('utf-8')
tables = {
'confirmed': ConfirmedData,
'deaths': DeadData,
'recovered': RecoveredData,
}
t1 = timer()
for key, value in data.items():
t2 = timer()
data_content = StringIO(str(value))
df = pd.read_csv(data_content)
df_tot = df.groupby('Country/Region').sum()
df_tot.drop(['Lat', 'Long'], axis=1, inplace=True)
df_tot = df_tot.T
df_tot.index = pd.to_datetime(df_tot.index)
models = []
for idx, row in df_tot.iterrows():
timestamp = idx
for country, value in row.items():
model = tables[key]()
model.count = value
model.timestamp = timestamp
model.country = country
models.append(model)
tables[key].objects.all().delete()
tables[key].objects.bulk_create(models, batch_size=500)
t3 = timer()
total_execution_time = (t1-t0) + (t3-t2)
save_import_log(
url,
df_tot.columns.to_list(),
df_tot.shape[0],
df_tot.shape[1],
total_execution_time
)
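# --- Illustrative sketch (editorial addition) ---
# The `app` and `crontab` imports above suggest these importers are meant to run
# as periodic Celery tasks. A minimal wiring could look like the hypothetical,
# commented-out snippet below; the task module path and schedule are assumptions.
#
#   @app.task
#   def update_john_hopkins_task():
#       update_john_hopkins()
#
#   app.conf.beat_schedule = {
#       'update-john-hopkins-daily': {
#           'task': 'api.tasks.update_john_hopkins_task',
#           'schedule': crontab(hour=3, minute=0),
#       },
#   }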
def update_convidbr():
t0 = timer()
url = 'https://raw.githubusercontent.com/wcota/covid19br/master/cases-brazil-cities-time.csv'
with requests.Session() as s:
data = s.get(url).content.decode('utf-8')
data_content = StringIO(str(data))
df = | pd.read_csv(data_content) | pandas.read_csv |
import os
import pandas as pd
from ... import fileleaf as fl
class DatabaseSources:
"""
    This class is used to handle all the data sources and retrieve the applicable data.
    It keeps track of all sources and handles the fast-access library functionality.
"""
__default_supported_extensions = {
'.csv': {
'path': 'filepath_or_buffer',
'function': pd.read_csv
},
'.ftr': {
'path': 'path',
'function': pd.read_feather
},
'.sas7bdat': {
'path': 'filepath_or_buffer',
'function': pd.read_sas
}
}
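    # Illustrative usage (editorial sketch; the folder paths and config values
    # below are hypothetical):
    #   sources = DatabaseSources(
    #       folders={'raw': r'C:/data/raw'},
    #       default_configs={'.csv': {'sep': ';'}},
    #       fast_access_lib_ref=r'C:/data/fast_lib',
    #   )
    #   print(sources.list)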
def __init__(
self, supported_extensions=None, folders=None, default_configs=None, fast_access_lib_ref=None,
column_types=None
):
"""
        In initialization, every piece of data is saved in the appropriate place.
        :param supported_extensions: This class is built around being able to read data from different sources
        into dataframes; this input defines the supported formats and their respective treatments.
        It is a dictionary with a 'function' to read the data, and a named parameter 'path' to specify the path
Example:
>>> supported_extensions = {
>>> '.csv': {
>>> 'path': 'filepath_or_buffer',
>>> 'function': pd.read_csv
>>> },
>>> '.ftr': {
>>> 'path': 'path',
>>> 'function': pd.read_feather
>>> }
>>>}
        :param folders: dictionary of reference names and folders (paths) to be considered as sources
Example:
>>> folder = {
>>> 'important sources': r'C:/Users/some_user/some_sources'
>>> 'other sources': r'C:/Users/some_user/other_sources'
>>>}
:param default_configs: defines the default configs for each supported extension
"""
self.supported_extensions = supported_extensions or self.__default_supported_extensions
self.folders = folders
self.default_configs = {**{extension: {} for extension in self.supported_extensions}, **(default_configs or {})}
self.default_configs['.csv']['dtype'] = column_types
self.fast_access_lib_ref = fast_access_lib_ref
def __str__(self):
        return str(self.list)
@property
def list(self):
tables = {
'database_ref': [], 'name': [], 'extension': [],
'size_MB': [], 'in_fast_lib': [], 'folder': [], 'specific_configs': []
}
tables = self.__load_folders(tables)
df_tables = | pd.DataFrame(tables) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class CoercionBase(object):
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
self.assertEqual(left.dtype, dtype)
self.assertEqual(right.dtype, dtype)
def test_has_comprehensive_tests(self):
for klass in self.klasses:
for dtype in self.dtypes:
method_name = 'test_{0}_{1}_{2}'.format(self.method,
klass, dtype)
if not hasattr(self, method_name):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.format(type(self), method_name))
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
def _assert_setitem_series_conversion(self, original_series, loc_value,
expected_series, expected_dtype):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
self.assertEqual(temp.dtype, expected_dtype)
# .loc works different rule, temporary disable
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
def test_setitem_series_object(self):
obj = pd.Series(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Series(['a', 1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Series(['a', 1.1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = pd.Series(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = pd.Series(['a', True, 'c', 'd'])
self._assert_setitem_series_conversion(obj, True, exp, np.object)
def test_setitem_series_int64(self):
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1, exp, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
# int + complex -> complex
exp = pd.Series([1, 1 + 1j, 3, 4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# int + bool -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, True, exp, np.int64)
def test_setitem_series_float64(self):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Series([1.1, 1.1, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.float64)
# float + complex -> complex
exp = pd.Series([1.1, 1 + 1j, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp,
np.complex128)
# float + bool -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, True, exp, np.float64)
def test_setitem_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
def test_setitem_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1, exp, np.bool)
# TODO_GH12747 The result must be int
# assigning int greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 3, exp, np.bool)
# bool + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.bool)
# bool + complex -> complex (buggy, results in bool)
# TODO_GH12747 The result must be complex
# tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
# self.assertEqual(temp.dtype, np.complex128)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.bool)
# bool + bool -> bool
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, True, exp, np.bool)
def test_setitem_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, 1, exp, 'datetime64[ns]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-02', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz -> datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_setitem_series_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_setitem_series_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_timedelta64(self):
pass
def test_setitem_series_period(self):
pass
def _assert_setitem_index_conversion(self, original_series, loc_key,
expected_index, expected_dtype):
""" test index's coercion triggered by assign key """
temp = original_series.copy()
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
def test_setitem_index_object(self):
obj = pd.Series([1, 2, 3, 4], index=list('abcd'))
self.assertEqual(obj.index.dtype, np.object)
# object + object -> object
exp_index = pd.Index(list('abcdx'))
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
# object + int -> IndexError, regarded as location
temp = obj.copy()
with tm.assertRaises(IndexError):
temp[5] = 5
# object + float -> object
exp_index = pd.Index(['a', 'b', 'c', 'd', 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.object)
def test_setitem_index_int64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.index.dtype, np.int64)
# int + int -> int
exp_index = pd.Index([0, 1, 2, 3, 5])
self._assert_setitem_index_conversion(obj, 5, exp_index, np.int64)
# int + float -> float
exp_index = pd.Index([0, 1, 2, 3, 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.float64)
# int + object -> object
exp_index = pd.Index([0, 1, 2, 3, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_float64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
self.assertEqual(obj.index.dtype, np.float64)
# float + int -> int
temp = obj.copy()
# TODO_GH12747 The result must be float
with tm.assertRaises(IndexError):
temp[5] = 5
# float + float -> float
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 5.1])
self._assert_setitem_index_conversion(obj, 5.1, exp_index, np.float64)
# float + object -> object
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_complex128(self):
pass
def test_setitem_index_bool(self):
pass
def test_setitem_index_datetime64(self):
pass
def test_setitem_index_datetime64tz(self):
pass
def test_setitem_index_timedelta64(self):
pass
def test_setitem_index_period(self):
pass
class TestInsertIndexCoercion(CoercionBase, tm.TestCase):
klasses = ['index']
method = 'insert'
def _assert_insert_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by insert """
target = original.copy()
res = target.insert(1, value)
tm.assert_index_equal(res, expected)
self.assertEqual(res.dtype, expected_dtype)
def test_insert_index_object(self):
obj = pd.Index(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Index(['a', 1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Index(['a', 1.1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1.1, exp, np.object)
# object + bool -> object
res = obj.insert(1, False)
tm.assert_index_equal(res, pd.Index(['a', False, 'b', 'c', 'd']))
self.assertEqual(res.dtype, np.object)
# object + object -> object
exp = pd.Index(['a', 'x', 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_int64(self):
obj = pd.Int64Index([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Index([1, 1, 2, 3, 4])
self._assert_insert_conversion(obj, 1, exp, np.int64)
# int + float -> float
exp = pd.Index([1, 1.1, 2, 3, 4])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# int + bool -> int
exp = pd.Index([1, 0, 2, 3, 4])
self._assert_insert_conversion(obj, False, exp, np.int64)
# int + object -> object
exp = pd.Index([1, 'x', 2, 3, 4])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_float64(self):
obj = pd.Float64Index([1., 2., 3., 4.])
self.assertEqual(obj.dtype, np.float64)
# float + int -> int
exp = pd.Index([1., 1., 2., 3., 4.])
self._assert_insert_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Index([1., 1.1, 2., 3., 4.])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# float + bool -> float
exp = pd.Index([1., 0., 2., 3., 4.])
self._assert_insert_conversion(obj, False, exp, np.float64)
# float + object -> object
exp = pd.Index([1., 'x', 2., 3., 4.])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_complex128(self):
pass
def test_insert_index_bool(self):
pass
def test_insert_index_datetime64(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'])
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_datetime64tz(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'], tz='US/Eastern')
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'], tz='US/Eastern')
val = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_insert_conversion(obj, val, exp,
'datetime64[ns, US/Eastern]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='Asia/Tokyo'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_timedelta64(self):
obj = pd.TimedeltaIndex(['1 day', '2 day', '3 day', '4 day'])
self.assertEqual(obj.dtype, 'timedelta64[ns]')
# timedelta64 + timedelta64 => timedelta64
exp = pd.TimedeltaIndex(['1 day', '10 day', '2 day', '3 day', '4 day'])
self._assert_insert_conversion(obj, pd.Timedelta('10 day'),
exp, 'timedelta64[ns]')
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_period(self):
obj = pd.PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'],
freq='M')
self.assertEqual(obj.dtype, 'period[M]')
# period + period => period
exp = pd.PeriodIndex(['2011-01', '2012-01', '2011-02',
'2011-03', '2011-04'], freq='M')
self._assert_insert_conversion(obj, pd.Period('2012-01', freq='M'),
exp, 'period[M]')
# period + datetime64 => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
pd.Timestamp('2012-01-01'),
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, np.object)
# period + int => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
1,
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 1, exp, np.object)
# period + object => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
'x',
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 'x', exp, np.object)
class TestWhereCoercion(CoercionBase, tm.TestCase):
method = 'where'
def _assert_where_conversion(self, original, cond, values,
expected, expected_dtype):
""" test coercion triggered by where """
target = original.copy()
res = target.where(cond, values)
self._assert(res, expected, expected_dtype)
def _where_object_common(self, klass):
obj = klass(list('abcd'))
self.assertEqual(obj.dtype, np.object)
cond = klass([True, False, True, False])
# object + int -> object
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, 1, exp, np.object)
values = klass([5, 6, 7, 8])
exp = klass(['a', 6, 'c', 8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.object)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass(['a', 6.6, 'c', 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.object)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass(['a', 6 + 6j, 'c', 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.object)
if klass is pd.Series:
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', 0, 'c', 1])
self._assert_where_conversion(obj, cond, values, exp, np.object)
elif klass is pd.Index:
# object + bool -> object
exp = klass(['a', True, 'c', True])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', False, 'c', True])
self._assert_where_conversion(obj, cond, values, exp, np.object)
else:
            raise NotImplementedError
def test_where_series_object(self):
self._where_object_common(pd.Series)
def test_where_index_object(self):
self._where_object_common(pd.Index)
def _where_int64_common(self, klass):
obj = klass([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
cond = klass([True, False, True, False])
# int + int -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = klass([5, 6, 7, 8])
exp = klass([1, 6, 3, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# int + float -> float
exp = klass([1, 1.1, 3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1, 6.6, 3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# int + complex -> complex
if klass is pd.Series:
exp = klass([1, 1 + 1j, 3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1, 6 + 6j, 3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# int + bool -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, True, exp, np.int64)
values = klass([True, False, True, True])
exp = klass([1, 0, 3, 1])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
def test_where_series_int64(self):
self._where_int64_common(pd.Series)
def test_where_index_int64(self):
self._where_int64_common(pd.Index)
def _where_float64_common(self, klass):
obj = klass([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
cond = klass([True, False, True, False])
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, 1, exp, np.float64)
values = klass([5, 6, 7, 8])
exp = klass([1.1, 6.0, 3.3, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1.1, 6.6, 3.3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + complex -> complex
if klass is pd.Series:
exp = klass([1.1, 1 + 1j, 3.3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1.1, 6 + 6j, 3.3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, True, exp, np.float64)
values = klass([True, False, True, True])
exp = klass([1.1, 0.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
def test_where_series_float64(self):
self._where_float64_common(pd.Series)
def test_where_index_float64(self):
self._where_float64_common(pd.Index)
def test_where_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
cond = pd.Series([True, False, True, False])
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.complex128)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1 + 1j, 6.0, 3 + 3j, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.complex128)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1 + 1j, 6.6, 3 + 3j, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1 + 1j, 6 + 6j, 3 + 3j, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, True, exp, np.complex128)
values = pd.Series([True, False, True, True])
exp = pd.Series([1 + 1j, 0, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
def test_where_index_complex128(self):
pass
def test_where_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
cond = pd.Series([True, False, True, False])
# bool + int -> int
exp = pd.Series([1, 1, 1, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1, 6, 1, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# bool + float -> float
exp = pd.Series([1.0, 1.1, 1.0, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1.0, 6.6, 1.0, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# bool + complex -> complex
exp = pd.Series([1, 1 + 1j, 1, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1, 6 + 6j, 1, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# bool + bool -> bool
exp = pd.Series([True, True, True, True])
self._assert_where_conversion(obj, cond, True, exp, np.bool)
values = pd.Series([True, False, True, True])
exp = pd.Series([True, False, True, True])
self._assert_where_conversion(obj, cond, values, exp, np.bool)
def test_where_index_bool(self):
pass
def test_where_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
cond = pd.Series([True, False, True, False])
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-01')])
self._assert_where_conversion(obj, cond, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
values = pd.Series([pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2012-01-03'),
pd.Timestamp('2012-01-04')])
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
# ToDo: coerce to object
msg = "cannot coerce a Timestamp with a tz on a naive Block"
with tm.assertRaisesRegexp(TypeError, msg):
obj.where(cond, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: do not coerce to UTC, must be object
values = pd.Series([pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2012-01-02', tz='US/Eastern'),
pd.Timestamp('2012-01-03', tz='US/Eastern'),
pd.Timestamp('2012-01-04', tz='US/Eastern')])
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02 05:00'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04 05:00')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
def test_where_index_datetime64(self):
obj = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
cond = pd.Index([True, False, True, False])
# datetime64 + datetime64 -> datetime64
# must support scalar
msg = "cannot coerce a Timestamp with a tz on a naive Block"
with tm.assertRaises(TypeError):
obj.where(cond, pd.Timestamp('2012-01-01'))
values = pd.Index([pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2012-01-03'),
pd.Timestamp('2012-01-04')])
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
# ToDo: coerce to object
msg = ("Index\\(\\.\\.\\.\\) must be called with a collection "
"of some kind")
with tm.assertRaisesRegexp(TypeError, msg):
obj.where(cond, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: do not ignore timezone, must be object
values = pd.Index([pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2012-01-02', tz='US/Eastern'),
pd.Timestamp('2012-01-03', tz='US/Eastern'),
pd.Timestamp('2012-01-04', tz='US/Eastern')])
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
def test_where_series_datetime64tz(self):
pass
def test_where_series_timedelta64(self):
pass
def test_where_series_period(self):
pass
def test_where_index_datetime64tz(self):
pass
def test_where_index_timedelta64(self):
pass
def test_where_index_period(self):
pass
class TestFillnaSeriesCoercion(CoercionBase, tm.TestCase):
    # not indexing, but place here for consistency
method = 'fillna'
def _assert_fillna_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by fillna """
target = original.copy()
res = target.fillna(value)
self._assert(res, expected, expected_dtype)
def _fillna_object_common(self, klass):
obj = klass(['a', np.nan, 'c', 'd'])
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = klass(['a', 1, 'c', 'd'])
self._assert_fillna_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 'd'])
self._assert_fillna_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 'd'])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = klass(['a', True, 'c', 'd'])
self._assert_fillna_conversion(obj, True, exp, np.object)
def test_fillna_series_object(self):
self._fillna_object_common(pd.Series)
def test_fillna_index_object(self):
self._fillna_object_common(pd.Index)
def test_fillna_series_int64(self):
# int can't hold NaN
pass
def test_fillna_index_int64(self):
pass
def _fillna_float64_common(self, klass):
obj = klass([1.1, np.nan, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1.1, exp, np.float64)
if klass is pd.Series:
# float + complex -> complex
exp = klass([1.1, 1 + 1j, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.complex128)
elif klass is pd.Index:
# float + complex -> object
exp = klass([1.1, 1 + 1j, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.object)
else:
            raise NotImplementedError
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 4.4])
self._assert_fillna_conversion(obj, True, exp, np.float64)
def test_fillna_series_float64(self):
self._fillna_float64_common(pd.Series)
def test_fillna_index_float64(self):
self._fillna_float64_common(pd.Index)
def test_fillna_series_complex128(self):
obj = pd.Series([1 + 1j, np.nan, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, True, exp, np.complex128)
def test_fillna_index_complex128(self):
self._fillna_float64_common(pd.Index)
def test_fillna_series_bool(self):
# bool can't hold NaN
pass
def test_fillna_index_bool(self):
pass
def test_fillna_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.NaT,
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + datetime64tz => object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
value = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64 + int => object
# ToDo: must be coerced to object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, 1, exp, 'datetime64[ns]')
# datetime64 + object => object
exp = pd.Series([pd.Timestamp('2011-01-01'),
'x',
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, 'x', exp, np.object)
def test_fillna_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.NaT,
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_fillna_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64 => object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64tz + datetime64tz(different tz) => object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz='Asia/Tokyo'),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz='Asia/Tokyo')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64tz + int => datetime64tz
# ToDo: must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_fillna_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# datetime64tz + object => object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
'x',
                         pd.Timestamp('2011-01-03', tz=tz),
                         pd.Timestamp('2011-01-04', tz=tz)])
        self._assert_fillna_conversion(obj, 'x', exp, np.object)
'''
This code will clean the OB datasets and combine all the cleaned data into one
Dataset name: O-27-Da Yan
semi-automated code, needs some hand work. LOL But God is so good to me.
1. 9 different buildings in this dataset, and each building has different rooms
2. each room has different window, door, ac, indoor, outdoor info
3. I processed building A to F by hand, then figured out that I can rename the files first, then use code to process
4. rename the files by type and number, such as window1, indoor1, ac1, door1, etc.
5. code automated G, H, I
6. the folder has multiple types of data, csv and xlsx, figure out the file type, then read into pandas
7. concat the outdoor datetime and temperature with ac data, then judge if the ac is on or off
'''
import os
import glob
import string
import datetime
import pandas as pd
import matplotlib.pyplot as plt
# specify the path
data_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/2021-05-28-1130-raw-data/Annex 79 Data Collection/O-27-Da Yan/_yapan_processing/processed/'
template_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/OB Database Consolidation/Templates/'
save_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/2021-05-28-1130-raw-data/Annex 79 Data Collection/O-27-Da Yan/_yapan_processing/_sql/'
# generate the name of different building folders
alphabet_string = string.ascii_uppercase
alphabet_list = list(alphabet_string)
building_names = alphabet_list[:9]
''' 1. process data by folders '''
begin_time = datetime.datetime.now()
# create dataframe to store the data
combined_window = pd.DataFrame()
combined_door = pd.DataFrame()
combined_hvac = pd.DataFrame()
combined_indoor = pd.DataFrame()
combined_outdoor = pd.DataFrame()
''' process outdoor data '''
print(f'Process outdoor data')
os.chdir(data_path) # pwd
sub_folders = next(os.walk('.'))[1] # get the names of the child directories, different rooms
root_files = next(os.walk('.'))[2] # get the files under root path
outdoor_files = list(filter(lambda name: 'outdoor_building' in name, root_files)) # filter out the outdoor files
combined_outdoor = pd.concat([pd.read_csv(f) for f in outdoor_files])
''' manual processed data '''
print(f'Process manually processed data')
building_names_1 = building_names[:6]
# unit test
# i = 0
# folder_name = building_names_1[i]
for index, bld_name in enumerate(building_names_1):
print(f'Reading the data under building folder {bld_name}')
building_path = data_path + bld_name + '/'
os.chdir(building_path) # pwd
sub_folders = next(os.walk('.'))[1] # get the names of the child directories, different rooms
root_files = next(os.walk('.'))[2] # get the files under root path
# combine
indoor_files = list(filter(lambda name: 'indoor' in name, root_files)) # filter out the indoor files
window_files = list(filter(lambda name: 'window' in name, root_files)) # filter out the window files
hvac_files = list(filter(lambda name: 'hvac' in name, root_files)) # filter out the ac files
door_files = list(filter(lambda name: 'door_status' in name, root_files)) # filter out the door status files
# read anc combine the files under this folder
if indoor_files: # make sure it is not empty
indoor_temp_df = pd.concat([pd.read_csv(f) for f in indoor_files])
combined_indoor = pd.concat([combined_indoor, indoor_temp_df], ignore_index=True) # concat the data
else:
pass
if window_files:
window_temp_df = pd.concat([pd.read_csv(f) for f in window_files])
combined_window = pd.concat([combined_window, window_temp_df], ignore_index=True) # concat the data
else:
pass
if hvac_files:
hvac_temp_df = pd.concat([pd.read_csv(f) for f in hvac_files])
combined_hvac = pd.concat([combined_hvac, hvac_temp_df], ignore_index=True) # concat the data
# print(combined_hvac.isnull().sum())
# print(index)
else:
pass
if door_files:
door_temp_df = pd.concat([pd.read_csv(f) for f in door_files])
combined_door = pd.concat([combined_door, door_temp_df], ignore_index=True) # concat the data
# print(combined_door.isnull().sum())
# print(index)
else:
pass
''' auto mated process by building level '''
building_names = ['G', 'H', 'I']
building_ids = [7, 8, 9]
for index, bld_name in enumerate(building_names):
print(f'Dealing with data under building folder {bld_name}')
building_path = data_path + bld_name + '/'
os.chdir(building_path) # pwd
sub_folders = next(os.walk('.'))[1] # get the names of the child directories, different rooms
root_files = next(os.walk('.'))[2] # get the files under root path
    ''' room level '''
for room_id in sub_folders:
print(f'Dealing with data under room folder {room_id}')
room_path = building_path + room_id + '/'
os.chdir(room_path) # pwd
file_names = os.listdir() # get all the file names
window_files = list(filter(lambda name: 'window' in name, file_names)) # filter out the window files
hvac_files = list(filter(lambda name: 'ac' in name, file_names)) # filter out the ac files
door_files = list(filter(lambda name: 'door' in name, file_names)) # filter out the door files
# read and combine files
if window_files:
for window_name in window_files:
name, extension = os.path.splitext(window_name) # get the path and extension of a file
if extension == '.CSV': # if the file is csv file
temp_df = pd.read_csv(window_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'Window_Status'] # rename the columns
else:
temp_df = pd.read_excel(window_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'Window_Status']
temp_df['Window_ID'] = int(name.split('_')[0][6:])
temp_df['Room_ID'] = int(room_id) # assign Room_ID
temp_df['Building_ID'] = building_ids[index] # assign Building_ID
combined_window = pd.concat([combined_window, temp_df], ignore_index=True) # concat the data
else:
pass
if door_files:
for door_name in door_files:
name, extension = os.path.splitext(door_name) # get the path and extension of a file
if extension == '.CSV': # if the file is csv file
temp_df = pd.read_csv(door_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'Door_Status'] # rename the columns
else:
temp_df = pd.read_excel(door_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'Door_Status']
temp_df['Door_ID'] = int(name.split('_')[0][4:])
temp_df['Room_ID'] = int(room_id) # assign Room_ID
temp_df['Building_ID'] = building_ids[index] # assign Building_ID
combined_door = pd.concat([combined_door, temp_df], ignore_index=True) # concat the data
else:
pass
if hvac_files:
for hvac_name in hvac_files:
name, extension = os.path.splitext(hvac_name) # get the path and extension of a file
if extension == '.CSV': # if the file is csv file
temp_df = pd.read_csv(hvac_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'yapan_supply _t']
else:
temp_df = pd.read_excel(hvac_name, usecols=[0, 1])
temp_df.columns = ['Date_Time', 'yapan_supply _t']
temp_df['HVAC_Zone_ID'] = int(name.split('_')[0][2:]) # get the number of ac
temp_df['Room_ID'] = int(room_id) # assign Room_ID
temp_df['Building_ID'] = building_ids[index] # assign Building_ID
combined_hvac = pd.concat([combined_hvac, temp_df], ignore_index=True) # concat the data
else:
pass
# drop na rows when specific column is null
combined_indoor = combined_indoor[combined_indoor['Date_Time'].notnull()]
combined_outdoor = combined_outdoor[combined_outdoor['Date_Time'].notnull()]
combined_window = combined_window[combined_window['Date_Time'].notnull()]
combined_door = combined_door[combined_door['Date_Time'].notnull()]
combined_hvac = combined_hvac[combined_hvac['Date_Time'].notnull()]
# process windows, door open/close data
combined_door['Door_Status'] = combined_door['Door_Status'].replace([0, 1, 2], [1, 0, 0])
combined_window['Window_Status'] = combined_window['Window_Status'].replace([0, 1, 2], [1, 0, 0])
# format datetime
print("Formatting datetime!")
combined_indoor['Date_Time'] = pd.to_datetime(combined_indoor['Date_Time'], format='%m/%d/%Y %H:%M')
combined_outdoor['Date_Time'] = pd.to_datetime(combined_outdoor['Date_Time'], format='%m/%d/%Y %H:%M')
combined_window['Date_Time'] = pd.to_datetime(combined_window['Date_Time'], infer_datetime_format=True)
combined_door['Date_Time'] = pd.to_datetime(combined_door['Date_Time'], infer_datetime_format=True)
combined_hvac['Date_Time'] = pd.to_datetime(combined_hvac['Date_Time'], infer_datetime_format=True)
# format data type
print(combined_indoor.dtypes)
print(combined_outdoor.dtypes)
print(combined_window.dtypes)
print(combined_door.dtypes)
print(combined_hvac.dtypes)
combined_indoor['Building_ID'] = combined_indoor['Building_ID'].astype(int)
combined_indoor['Room_ID'] = combined_indoor['Room_ID'].astype(int)
combined_outdoor['Building_ID'] = combined_outdoor['Building_ID'].astype(int)
combined_window['Building_ID'] = combined_window['Building_ID'].astype(int)
combined_window['Room_ID'] = combined_window['Room_ID'].astype(int)
combined_window['Window_ID'] = combined_window['Window_ID'].astype(int)
combined_door['Building_ID'] = combined_door['Building_ID'].astype(int)
combined_door['Room_ID'] = combined_door['Room_ID'].astype(int)
combined_door['Door_ID'] = combined_door['Door_ID'].astype(int)
combined_hvac['Building_ID'] = combined_hvac['Building_ID'].astype(int)
combined_hvac['Room_ID'] = combined_hvac['Room_ID'].astype(int)
combined_hvac['HVAC_Zone_ID'] = combined_hvac['HVAC_Zone_ID'].astype(int)
# replace null with empty
# # check combined data
# print('check null values')
# print(combined_window.isnull().sum())
# print(combined_door.isnull().sum())
# print(combined_hvac.isnull().sum())
#
# # check the unique IDs
# print(combined_window.Window_ID.unique())
# print(combined_door.Door_ID.unique())
# print(combined_hvac.HVAC_Zone_ID.unique())
#
# print(combined_hvac.Building_ID.unique())
# print(combined_window.Building_ID.unique())
# print(combined_door.Building_ID.unique())
# save data
combined_indoor.to_csv(save_path + 'combined_indoor.csv', index=False)
combined_outdoor.to_csv(save_path + 'combined_outdoor.csv', index=False)
combined_window.to_csv(save_path + 'combined_window.csv', index=False)
combined_door.to_csv(save_path + 'combined_door.csv', index=False)
combined_hvac.to_csv(save_path + 'combined_hvac.csv', index=False)
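# The last step described at the top of this file (judging whether the ac is on or off) is not
# implemented here; a minimal sketch of one possible approach (the outdoor temperature column name
# and the 5-degree threshold are assumptions, not from the original data):
#   merged = pd.merge_asof(combined_hvac.sort_values('Date_Time'),
#                          combined_outdoor.sort_values('Date_Time'),
#                          on='Date_Time', direction='nearest')
#   merged['AC_Status'] = (merged['Outdoor_Temp'] - merged['yapan_supply _t'] > 5).astype(int)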
''' read templates and save data into the standard templates '''
# data
combined_indoor = pd.read_csv(save_path + 'combined_indoor.csv')
combined_outdoor = pd.read_csv(save_path + 'combined_outdoor.csv')
combined_window = pd.read_csv(save_path + 'combined_window.csv')
combined_door = pd.read_csv(save_path + 'combined_door.csv')
combined_hvac = pd.read_csv(save_path + 'combined_hvac.csv')
# templates
# read templates into pandas
template_window = pd.read_csv(template_path+'Window_Status.csv')
template_door = pd.read_csv(template_path+'Door_Status.csv')
template_hvac = pd.read_csv(template_path+'HVAC_Measurement.csv')
template_indoor = pd.read_csv(template_path+'Indoor_Measurement.csv')
template_outdoor = pd.read_csv(template_path+'Outdoor_Measurement.csv')
# columns
print(template_window.columns)
print(combined_window.columns)
print(template_door.columns)
print(combined_door.columns)
print(template_hvac.columns)
print(combined_hvac.columns)
print(template_indoor.columns)
print(combined_indoor.columns)
print(template_outdoor.columns)
print(combined_outdoor.columns)
# concat data
template_window = pd.concat([template_window, combined_window], ignore_index=True)
import csv
import re
import string
import math
import warnings
import pandas as pd
import numpy as np
import ipywidgets as wg
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.ticker as mtick
from itertools import product
from scipy.optimize import curve_fit
from plate_mapping import plate_mapping as pm
# define custom errors
class DataError(Exception):
pass
class PlateSizeError(Exception):
pass
class DataTypeError(Exception):
pass
# define well plate dimensions
plate_dim = {96:(8, 12), 384:(16, 24)}
# define header names for platemapping module
pm.header_names = {'Well ID': {'dtype':str, 'long':True, 'short_row': False, 'short_col':False},
'Type': {'dtype':str, 'long':True, 'short_row': True, 'short_col':True},
'Contents': {'dtype':str, 'long':True, 'short_row': True, 'short_col':True},
'Protein Name': {'dtype':str, 'long':True, 'short_row': True, 'short_col':True},
'Protein Concentration': {'dtype':float, 'long':True, 'short_row': True, 'short_col':True},
'Tracer Name': {'dtype':str, 'long':True, 'short_row': True, 'short_col':True},
'Tracer Concentration': {'dtype':float, 'long':True, 'short_row': True, 'short_col':True},
'Competitor Name': {'dtype':str, 'long':True, 'short_row': True, 'short_col':True},
'Competitor Concentration': {'dtype':float, 'long':True, 'short_row': True, 'short_col':True},
'Concentration Units':{'dtype':str, 'long':True, 'short_row': True, 'short_col':True},
}
class FA:
"""Class used for the analysis of fluorescence anisotropy data.
    :param data_dict: A dictionary containing data frames with pre-processed data and metadata
:type data_dict: dict
:param g_factor: G-factor
:type g_factor: float
:param plate_map: dataframe from a plate map csv file that defines each and every well
:type plate_map: pandas df"""
def __init__(self, data_dict, g_factor, plate_map):
self.data_dict = data_dict
self.g_factor = g_factor
self.plate_map = plate_map
# create list of all p and s data frames to run some stats
frames = []
for repeat in self.data_dict.values():
metadata, data = repeat.values()
p_channel, s_channel = data.values()
frames.append(p_channel)
frames.append(s_channel)
new = pd.concat(frames, axis=1) # join all p and s data frames into one df
nan = new.size - new.describe().loc['count'].sum() # find number of 'nan' cells
# create a data frame to store the final fitting parameters
p_names = self.plate_map['Protein Name'].dropna().unique() # get all protein names
        t_names = self.plate_map['Tracer Name'].dropna().unique() # get all tracer names
final_fit = pd.DataFrame(index=pd.MultiIndex.from_product([p_names, t_names]),
columns=['rmin', 'rmin error', 'rmax', 'rmax error', 'lambda', 'Kd', 'Kd error'])
final_fit["lambda"] = 1 # set the default lambda value as 1
        FA.final_fit = final_fit # add the final_fit df as a class variable
print("Data has been uploaded!\n")
print(f"Number of repeats: {len(self.data_dict)} \nValue of g-factor: {self.g_factor} \nOverall number of empty cells is {int(nan)} in {len(frames)} data frames.\nProteins: {p_names}\nTracers: {t_names}")
@classmethod
def read_in_envision(cls, data_csv, platemap_csv, data_type='plate', size=384):
"""Reads in the raw data from csv file along with a platemap and constructs the FA class boject.
:param data_csv: File path of the raw data file in .csv format.
:type data_csv: str
:param platemap_csv: File path of the platemap file in .csv format.
:type platemap_csv: str
:param data_type: Format in which the raw data was exported (plate or list), defaults to plate.
:type data_type: str
:param size: Size of the well plate (384 or 96), defaults to 384.
:type size: int
        :return: A dictionary containing data frames with pre-processed data, g-factor, data frame containing platemap.
:rtype: dict, float, pandas df """
# ensure the plate size is either 384 or 96
if size not in plate_dim:
raise PlateSizeError('Invalid size of the well plate, should be 384 or 96.')
# try to read in data in plate format
if data_type == 'plate':
try:
data_dict, g_factor = FA._read_in_plate(data_csv, size=size)
plate_map_df = pm.plate_map(platemap_csv, size=size)
return cls(data_dict, g_factor, plate_map_df)
except (UnboundLocalError, IndexError, ValueError):
raise DataError(f"Error occured during data read in. Check your file contains data in the 'plate' format and plate size is {size}.")
# try to read in data in list format
if data_type == 'list':
try:
data_dict, g_factor = FA._read_in_list(data_csv, size=size)
plate_map_df = pm.plate_map(platemap_csv, size=size)
return cls(data_dict, g_factor, plate_map_df)
except (UnboundLocalError, IndexError):
raise DataError("Error occured during data read in. Check your file contains data in the 'list' format.")
else:
raise DataTypeError(f"'{data_type}' is not one of the two valid data types: plate or list.")
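    # Example of constructing the class from an Envision export (file names are hypothetical;
    # a usage sketch, not taken from the original documentation):
    #   fa = FA.read_in_envision(data_csv='raw_data.csv', platemap_csv='platemap.csv',
    #                            data_type='plate', size=384)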
def _read_in_plate(csv_file, size):
"""Reads the raw data file and finds the information needed to extract data. Passes those parameters to pre_process_plate function and executes it.
        Returns a tuple of two elements: dictionary of data frames and g-factor.
:param csv_file: File path of the raw data file in .csv format
:type csv_file: str
        :param size: Size of the well plate (384 or 96)
        :type size: int
:return: A tuple of dictionary of data frames and the g-factor
:rtype: tuple """
with open(csv_file) as file:
all_data_lines = list(csv.reader(file, delimiter=',')) # read the csv file and cast it into a list containing all lines
blank_indexes = list(index for index, item in enumerate(all_data_lines) if item == []) # list containing indices of all blank rows
if blank_indexes == []:
blank_indexes = list(index for index, item in enumerate(all_data_lines) if set(item) == {''})
blanks = np.array(blank_indexes) # convert the list of blank indices to a numpy array
read_in_info = [] # list to store the tuples with parameters needed for pandas to read in the csv file
for index, item in enumerate(all_data_lines): # iterate over all lines in the csv file
if item != [] and re.findall(r"Plate information", item[0]) == ['Plate information'] and re.search(r'Results for', all_data_lines[index + 9][0]) == None and re.findall(r"Formula", all_data_lines[index+1][10]) != ['Formula']:
skiprows = index + 9 # Set the skiprows parameter for raw data table
skiprows_meta = index + 1 # Set the skiprows parameter for metadata table
end_of_data = blanks[blanks > skiprows].min() # Calculate the end of data table by finding the smallest blank index after the beginning of data table
read_in_info.append((skiprows, end_of_data - skiprows + 1, skiprows_meta)) # add the skiprows, caculated number of data lines and skiprows for metadata parameters to the list as a tuple
data_format = 'plate1'
if item != [] and re.findall(r"Plate information", item[0]) == ['Plate information'] and re.search(r'Results for', all_data_lines[index + 9][0]) != None:
skiprows = index + 10
skiprows_meta = index + 1
end_of_data = blanks[blanks > skiprows].min()
read_in_info.append((skiprows, end_of_data - skiprows - 1, skiprows_meta))
data_format = 'plate2'
if item != [] and len(item) > 1 and re.fullmatch(r"G-factor", item[0]):
g_factor = float(item[4])
return FA._pre_process_plate(csv_file, read_in_info, data_format, size), g_factor
def _pre_process_plate(csv_file, read_in_info, data_format, size):
"""Extracts the data and metadata from the csv file, processes it and returns a nested dictionary containing data and metadata for each repeat and channel.
:param csv_file: File path of the raw data file in .csv format
:type csv_file: str
:param read_in_info: Tuples with read in parameters for each channel.
:type read_in_info: list
:param data_format: Plate type (plate1 or plate2)
:type data_format: str
        :param size: Size of the well plate (384 or 96)
        :type size: int
:return: A dictionary containing data and metadata
:rtype: dict """
data_frames = {} # dictionary to store data frames
counter = 1 # counter incremented by 0.5 to enable alternating labelling of data frames as 'p' or 's'
row_letters = list(string.ascii_uppercase)[0: plate_dim[size][0]] # generate a list of letters for well IDs
col_numbers = list(np.arange(1, plate_dim[size][1] + 1).astype(str)) # generate a list of numbers for well IDs
well_ids = ['%s%s' % (item[0], item[1]) for item in product(row_letters, col_numbers)] # generate a list of well IDs for the pre-processed data frames
for index, item in enumerate(read_in_info): # iterate over all tuples in the list, each tuple contains skiprows, nrows and skiprows_meta for one channel
if data_format == 'plate1': # raw data table does not have row and column names so 'names' parameter passed to omit the last column
raw_data = pd.read_csv(csv_file, sep=',', names=col_numbers, index_col=False, engine='python', skiprows=item[0], nrows=item[1], encoding='utf-8')
if data_format == 'plate2': # raw data table has row and column names, so index_col=0 to set the first column as row labels
                raw_data = pd.read_csv(csv_file, sep=',', index_col=0, engine='python', skiprows=item[0], nrows=item[1], encoding='utf-8')
############################################# IMPORT STUFF #############################################
import pandas as pd
import numpy as np
import importlib.util
from spellchecker import SpellChecker
# helper function to help load things from BERT folder
def module_from_file(module_name, file_path):
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
# load function from BERT to process things
MASKEDLM_MODULE_PATH = "../../../../code/BERT/bert_generation.py"
bert_generation = module_from_file('predict_prefix_probability', MASKEDLM_MODULE_PATH)
predict_prefix_probability = bert_generation.predict_prefix_probability
spell = SpellChecker()
########################################## END OF IMPORT STUFF ##########################################
FILES = ["batch_0_raw.csv"]
PROBABILITY_PATHS = ["batch_0_with_probs.csv"]
DFS = [pd.read_csv(f, sep=",") for f in FILES]
TEMPLATE = "The (WORD) is"
def clean_df(df, filterNonsense=True, filterNoncommonsense=False, lower=True, fixTypo=True, saveToPath=True):
# first, filter out nonsense
new_df = df.copy()
log = "og: " + str(df.shape[0]) + ". "
if filterNonsense:
new_df = new_df[new_df["makeSense"] == "Yes"]
log += "filtered nonsense, remaining: " + str(new_df.shape[0]) + ". "
if filterNoncommonsense:
new_df = new_df[new_df["frequentAssociation"]]
log += "filtered non frequent association, remaining: " + str(new_df.shape[0]) + ". "
cols = ["antonym1", "antonym2", "antonym3"]
if lower:
def lower_item(i):
if pd.isnull(i): return ""
return i.lower()
for col in cols:
new_df[col] = new_df[col].apply(lambda x: lower_item(x))
if fixTypo:
def fix_typo(i):
if pd.isnull(i) or i == "": return ""
return spell.correction(i)
for col in cols:
new_df[col] = new_df[col].apply(lambda x: fix_typo(x))
return new_df, log
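# Example usage (a sketch; assumes the batch CSV contains the columns referenced above):
#   cleaned_df, cleaning_log = clean_df(DFS[0], filterNonsense=True, fixTypo=True)
#   print(cleaning_log)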
def add_probabilities(df, i, loadFromFile=True):
if loadFromFile:
        return pd.read_csv(PROBABILITY_PATHS[i], sep="\t")
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
import unittest.mock as mock
from datetime import datetime, timedelta
from unittest import TestCase
import numpy as np
import pandas as pd
from kats.consts import TimeSeriesData
from kats.detectors.stat_sig_detector import (
MultiStatSigDetectorModel,
StatSigDetectorModel,
SeasonalityHandler,
)
from kats.utils.simulator import Simulator
from parameterized.parameterized import parameterized
from operator import attrgetter
_SERIALIZED = b'{"n_control": 20, "n_test": 7, "time_unit": "s"}'
_SERIALIZED2 = b'{"n_control": 20, "n_test": 7, "time_unit": "s", "rem_season": false, "seasonal_period": "weekly", "use_corrected_scores": true, "max_split_ts_length": 500}'
class TestStatSigDetector(TestCase):
def setUp(self) -> None:
date_start_str = "2020-03-01"
date_start = datetime.strptime(date_start_str, "%Y-%m-%d")
previous_seq = [date_start + timedelta(days=x) for x in range(60)]
values = np.random.randn(len(previous_seq))
self.ts_init = TimeSeriesData(
pd.DataFrame({"time": previous_seq[0:30], "value": values[0:30]})
)
self.ts_later = TimeSeriesData(
pd.DataFrame({"time": previous_seq[30:35], "value": values[30:35]})
)
self.ss_detect = StatSigDetectorModel(n_control=20, n_test=7)
def test_detector(self) -> None:
np.random.seed(100)
pred_later = self.ss_detect.fit_predict(
historical_data=self.ts_init, data=self.ts_later
)
self.ss_detect.visualize()
# prediction returns scores of same length
self.assertEqual(len(pred_later.scores), len(self.ts_later))
def test_logging(self) -> None:
np.random.seed(100)
date_start_str = "2020-03-01"
date_start = datetime.strptime(date_start_str, "%Y-%m-%d")
num_seq = 3
previous_seq = [date_start + timedelta(days=x) for x in range(60)]
values = [np.random.randn(len(previous_seq)) for _ in range(num_seq)]
ts_init = TimeSeriesData(
pd.DataFrame(
{
**{"time": previous_seq[0:30]},
**{f"value_{i}": values[i][0:30] for i in range(num_seq)},
}
)
)
ts_later = TimeSeriesData(
pd.DataFrame(
{
**{"time": previous_seq[30:35]},
**{f"value_{i}": values[i][30:35] for i in range(num_seq)},
}
)
)
self.assertEqual(self.ss_detect.n_test, 7)
with self.assertRaises(ValueError):
self.ss_detect.fit_predict(historical_data=ts_init, data=ts_later)
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
@parameterized.expand(
[
["n_control", 20],
["n_test", 7],
["time_unit", "s"],
["rem_season", False],
["seasonal_period", "weekly"],
]
)
def test_load_from_serialized(self, attribute:str, expected:object) -> None:
detector = StatSigDetectorModel(serialized_model=_SERIALIZED)
self.assertEqual(attrgetter(attribute)(detector), expected)
def test_serialize(self) -> None:
detector = StatSigDetectorModel(n_control=20, n_test=7, time_unit="s")
self.assertEqual(_SERIALIZED2, detector.serialize())
def test_missing_values(self) -> None:
with self.assertRaises(ValueError):
_ = StatSigDetectorModel()
def test_visualize_unpredicted(self) -> None:
detector = StatSigDetectorModel(n_control=20, n_test=7)
with self.assertRaises(ValueError):
detector.visualize()
def test_missing_time_unit(self) -> None:
detector = StatSigDetectorModel(n_control=20, n_test=7)
with mock.patch.object(detector, "_set_time_unit"):
with self.assertRaises(ValueError):
detector.fit_predict(data=self.ts_later, historical_data=self.ts_init)
def test_no_update(self) -> None:
detector = StatSigDetectorModel(n_control=20, n_test=7)
with mock.patch.object(detector, "_should_update") as su:
su.return_value = False
result = detector.fit_predict(
data=self.ts_later, historical_data=self.ts_init
)
self.assertEqual(detector.response, result)
def test_fallback_on_historical_time_unit(self) -> None:
data = TimeSeriesData(
pd.DataFrame(
{
"time": [
datetime(2021, 1, 1),
datetime(2021, 1, 2),
datetime(2021, 2, 1),
],
"values": [0, 1, 2],
}
)
)
detector = StatSigDetectorModel(n_control=20, n_test=7)
detector.fit_predict(data=data, historical_data=self.ts_init)
self.assertEqual("D", detector.time_unit)
def test_remove_season(self) -> None:
sim3 = Simulator(n=120, start="2018-01-01")
ts3 = sim3.level_shift_sim(
cp_arr=[60],
level_arr=[1.35, 1.05],
noise=0.05,
seasonal_period=7,
seasonal_magnitude=0.575,
)
n_control = 14 * 86400
n_test = 14 * 86400
ss_detect5 = StatSigDetectorModel(
n_control=n_control,
n_test=n_test,
time_unit="sec",
rem_season=True,
seasonal_period="biweekly",
)
anom3 = ss_detect5.fit_predict(data=ts3)
self.assertEqual(np.min(anom3.scores.value.values) < -5, True)
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
@parameterized.expand(
[
["weekly", 0.1],
["daily"],
]
)
def test_season_handler(self, period:str, lpj_factor:float=0.1) -> None:
sim3 = Simulator(n=120, start="2018-01-01")
ts3 = sim3.level_shift_sim(
cp_arr=[60],
level_arr=[1.35, 1.05],
noise=0.05,
seasonal_period=7,
seasonal_magnitude=0.575,
)
with self.assertRaises(ValueError):
if period == "weekly":
SeasonalityHandler(data=ts3, seasonal_period=period, lpj_factor=lpj_factor)
else:
SeasonalityHandler(data=ts3, seasonal_period=period)
class TestStatSigDetectorPMM(TestCase):
def setUp(self) -> None:
random.seed(100)
time_unit = 86400
hist_data_time = [x * time_unit for x in range(0, 28)]
data_time = [x * time_unit for x in range(28, 35)]
hist_data_value = [random.normalvariate(100, 10) for _ in range(0, 28)]
data_value = [random.normalvariate(130, 10) for _ in range(28, 35)]
self.hist_ts = TimeSeriesData(
time=pd.Series(hist_data_time),
value=pd.Series(hist_data_value),
use_unix_time=True,
unix_time_units="s",
)
self.data_ts = TimeSeriesData(
time= | pd.Series(data_time) | pandas.Series |
from keras.models import Sequential
from keras.optimizers import SGD,adam
from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Dropout, Flatten, merge, Reshape, Activation, LeakyReLU
from sklearn.metrics import log_loss
import numpy as np
import json
import matplotlib.pyplot as plt
import pandas as pd
from natsort import natsorted
import glob
import pathlib
from keras.callbacks import EarlyStopping,ModelCheckpoint, ReduceLROnPlateau, TensorBoard
from keras import regularizers
import tensorflow as tf
import configparser
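# Fully connected regressor over 26 engineered features: four Dense(4096) blocks with
# L2 regularization, LeakyReLU and dropout, ending in a sigmoid output trained with the
# squared-log-error loss defined below.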
def model():
model = Sequential()
model.add(Dense(4096, kernel_regularizer=regularizers.l2(0.001), kernel_initializer='he_normal', input_shape=(26,)))
model.add(LeakyReLU())
model.add(Dropout(0.5))
model.add(Dense(4096, kernel_regularizer=regularizers.l2(0.001), kernel_initializer='he_normal'))
model.add(LeakyReLU())
model.add(Dropout(0.5))
model.add(Dense(4096, kernel_regularizer=regularizers.l2(0.001), kernel_initializer='he_normal'))
model.add(LeakyReLU())
model.add(Dropout(0.5))
model.add(Dense(4096, kernel_regularizer=regularizers.l2(0.001), kernel_initializer='he_normal'))
model.add(LeakyReLU())
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss=custom_loss)
#model.compile(optimizer='adam', loss=custom_loss)
return model
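# Squared-log-error loss computed after undoing the target normalization; the hard-coded
# 80000000 is assumed to match normalize_num in config.ini.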
def custom_loss(y_true, y_pred):
normalize_num = 80000000
y_true = y_true * normalize_num
y_pred = y_pred * normalize_num
out = tf.square(tf.log(y_true + 1) - tf.log(y_pred + 1))
return out
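# Plot training and validation loss curves on the supplied matplotlib axes.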
def plot_history_loss(history,axL):
axL.plot(history['loss'],label="loss for training")
axL.plot(history['val_loss'],label="loss for validation")
axL.set_title('model loss')
axL.set_xlabel('epoch')
axL.set_ylabel('loss')
axL.legend(loc='upper right')
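# Element-wise squared log error; despite the name, the square root is not taken here.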
def calc_RMSLE(Y_train, Y_pred):
RMSLE = np.square(np.log(Y_train + 1) - np.log(Y_pred + 1))
return RMSLE
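# JSON encoder that converts numpy integers, floats and arrays to native Python types
# so the Keras history dict can be serialized.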
class MyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(MyEncoder, self).default(obj)
if __name__ == '__main__':
channel = 3
num_classes = 1
# SETTING
ini = configparser.ConfigParser()
ini.read('./config.ini', 'UTF-8')
image_size = int(ini['common']['image_size'])
img_rows, img_cols = image_size, image_size
batch_size = int(ini['Train']['batch_size'])
nb_epoch = int(ini['Train']['nb_epoch'])
normalize_num = int(ini['Train']['normalize_num'])
dir_prep = str(ini['Train']['dir_prep'])
dir_result = str(ini['Train']['dir_result_stats'])
dir_data = str(ini['Train']['dir_data'])
dir_tflog = str(ini['Train']['dir_tflog'])
dir_eval_image = str(ini['common']['dir_ori_data']) + str(ini['common']['dir_eval_image'])
    # Load the data
X_train = np.load(dir_prep + 'train_images_stats.npy', allow_pickle=True)
X_valid = np.load(dir_prep + 'test_images_stats.npy', allow_pickle=True)
X_eval = np.load(dir_prep + 'eval_images_stats.npy', allow_pickle=True)
Y_train = np.load(dir_prep + 'train_anno.npy', allow_pickle=True)/normalize_num
Y_valid = np.load(dir_prep + 'test_anno.npy', allow_pickle=True)/normalize_num
print("!!!!",X_train.shape,Y_train.shape,X_valid.shape,Y_valid.shape,X_eval.shape)
    # Build the model
model = model()
    # Train the model
es_cb = EarlyStopping(monitor='val_loss', patience=50, verbose=1, mode='min')
cp = ModelCheckpoint(dir_result + "best.hdf5", monitor="val_loss", verbose=1,
save_best_only=True, save_weights_only=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3)
tb_cb = TensorBoard(log_dir=dir_tflog, histogram_freq=1)
history = model.fit(X_train, Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
shuffle=True,
verbose=1,
validation_data=(X_valid, Y_valid),
callbacks=[cp, es_cb, reduce_lr, tb_cb]
)
model.save_weights(dir_result + 'param.hdf5')
with open(dir_result + 'history.json', 'w') as f:
json.dump(history.history, f, cls = MyEncoder)
    # Write out the training log
f = open(dir_result + 'history.json', 'r')
history = json.load(f)
f.close()
fig, (axL) = plt.subplots(ncols=1, figsize=(10,4))
plot_history_loss(history, axL)
fig.savefig(dir_result + 'loss.png')
plt.close()
    # Load the trained weights
model.load_weights(dir_result + "best.hdf5")
    # Output predictions for the training data
Y_train = Y_train * normalize_num
train_pred = model.predict(X_train,
batch_size=batch_size,
verbose=1).reshape(-1) * normalize_num
RMSLE_train_cal = calc_RMSLE(Y_train, train_pred)
train = np.stack([Y_train, train_pred, RMSLE_train_cal])
df_train = | pd.DataFrame(train.T, columns=['TRUE', 'MODEL', 'RMSLE_cal']) | pandas.DataFrame |
"""
Filter and combine various peptide/MHC datasets to derive a composite training set,
optionally including eluted peptides identified by mass-spec.
"""
import sys
import argparse
import os
import json
import collections
from six.moves import StringIO
import pandas
from mhcflurry.common import normalize_allele_name
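# Wrapper around normalize_allele_name that returns "UNKNOWN" instead of raising on
# unparseable allele names.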
def normalize_allele_name_or_return_unknown(s):
return normalize_allele_name(
s,
raise_on_error=False,
default_value="UNKNOWN")
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument(
"--ms-item",
nargs="+",
action="append",
metavar="PMID FILE, ... FILE",
default=[],
help="Mass spec item to curate: PMID and list of files")
parser.add_argument(
"--expression-item",
nargs="+",
action="append",
metavar="LABEL FILE, ... FILE",
default=[],
help="Expression data to curate: dataset label and list of files")
parser.add_argument(
"--ms-out",
metavar="OUT.csv",
help="Out file path (MS data)")
parser.add_argument(
"--expression-out",
metavar="OUT.csv",
help="Out file path (RNA-seq expression)")
parser.add_argument(
"--expression-metadata-out",
metavar="OUT.csv",
help="Out file path for expression metadata, i.e. which samples used")
parser.add_argument(
"--debug",
action="store_true",
default=False,
help="Leave user in pdb if PMID is unsupported")
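# Registries mapping PMIDs and expression dataset labels to their handler functions.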
PMID_HANDLERS = {}
EXPRESSION_HANDLERS = {}
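# Read each input file into a dict keyed by filename: CSV and Excel files are parsed
# with pandas, anything else is passed through as a raw path.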
def load(filenames, **kwargs):
result = {}
for filename in filenames:
if filename.endswith(".csv"):
result[filename] = pandas.read_csv(filename, **kwargs)
elif filename.endswith(".xlsx") or filename.endswith(".xls"):
result[filename] = pandas.read_excel(filename, **kwargs)
else:
result[filename] = filename
return result
def debug(*filenames):
loaded = load(filenames)
import ipdb
ipdb.set_trace()
def handle_pmid_27600516(filename):
"""Gloger, ..., Neri Cancer Immunol Immunother 2016 [PMID 27600516]"""
df = pandas.read_csv(filename)
sample_to_peptides = {}
current_sample = None
for peptide in df.peptide:
if peptide.startswith("#"):
current_sample = peptide[1:]
sample_to_peptides[current_sample] = []
else:
assert current_sample is not None
sample_to_peptides[current_sample].append(peptide.strip().upper())
rows = []
for (sample, peptides) in sample_to_peptides.items():
for peptide in sorted(set(peptides)):
rows.append([sample, peptide])
result_df = pandas.DataFrame(rows, columns=["sample_id", "peptide"])
result_df["sample_type"] = "melanoma_cell_line"
result_df["cell_line"] = result_df.sample_id
result_df["mhc_class"] = "I"
result_df["pulldown_antibody"] = "W6/32"
result_df["format"] = "multiallelic"
result_df["hla"] = result_df.sample_id.map({
"FM-82": "HLA-A*02:01 HLA-A*01:01 HLA-B*08:01 HLA-B*15:01 HLA-C*03:04 HLA-C*07:01",
"FM-93/2": "HLA-A*02:01 HLA-A*26:01 HLA-B*40:01 HLA-B*44:02 HLA-C*03:04 HLA-C*05:01",
"Mel-624": "HLA-A*02:01 HLA-A*03:01 HLA-B*07:02 HLA-B*14:01 HLA-C*07:02 HLA-C*08:02",
"MeWo": "HLA-A*02:01 HLA-A*26:01 HLA-B*14:02 HLA-B*38:01 HLA-C*08:02 HLA-C*12:03",
"SK-Mel-5": "HLA-A*02:01 HLA-A*11:01 HLA-B*40:01 HLA-C*03:03",
})
return result_df
def handle_pmid_23481700(filename):
"""Hassan, ..., <NAME> Mol Cell Proteomics 2015 [PMID 23481700]"""
df = pandas.read_excel(filename, skiprows=10)
assert df["Peptide sequence"].iloc[0] == "TPSLVKSTSQL"
assert df["Peptide sequence"].iloc[-1] == "LPHSVNSKL"
hla = {
"JY": "HLA-A*02:01 HLA-B*07:02 HLA-C*07:02",
"HHC": "HLA-A*02:01 HLA-B*07:02 HLA-B*44:02 HLA-C*05:01 HLA-C*07:02",
}
results = []
for sample_id in ["JY", "HHC"]:
hits_df = df.loc[
df["Int %s" % sample_id].map(
lambda x: {"n.q.": 0, "n.q": 0}.get(x, x)).astype(float) > 0
]
result_df = pandas.DataFrame({
"peptide": hits_df["Peptide sequence"].dropna().values,
})
result_df["sample_id"] = sample_id
result_df["cell_line"] = "B-LCL-" + sample_id
result_df["hla"] = hla[sample_id]
result_df["sample_type"] = "B-LCL"
result_df["mhc_class"] = "I"
result_df["format"] = "multiallelic"
result_df["pulldown_antibody"] = "W6/32"
results.append(result_df)
result_df = pandas.concat(results, ignore_index=True)
# Rename samples to avoid a collision with the JY sample in PMID 25576301.
result_df.sample_id = result_df.sample_id.map({
"JY": "JY.2015",
"HHC": "HHC.2015",
})
return result_df
def handle_pmid_24616531(filename):
"""Mommen, ..., Heck PNAS 2014 [PMID 24616531]"""
df = pandas.read_excel(filename, sheet_name="EThcD")
peptides = df.Sequence.values
assert peptides[0] == "APFLRIAF"
assert peptides[-1] == "WRQAGLSYIRYSQI"
result_df = pandas.DataFrame({
"peptide": peptides,
})
result_df["sample_id"] = "24616531"
result_df["sample_type"] = "B-LCL"
result_df["cell_line"] = "GR"
result_df["pulldown_antibody"] = "W6/32"
# Note: this publication lists hla as "HLA-A*01,-03, B*07,-27, and -C*02,-07"
# we are guessing the exact 4 digit alleles based on this.
result_df["hla"] = "HLA-A*01:01 HLA-A*03:01 HLA-B*07:02 HLA-B*27:05 HLA-C*02:02 HLA-C*07:01"
result_df["mhc_class"] = "I"
result_df["format"] = "multiallelic"
return result_df
def handle_pmid_25576301(filename):
"""Bassani-Sternberg, ..., Mann Mol Cell Proteomics 2015 [PMID 25576301]"""
df = pandas.read_excel(filename, sheet_name="Peptides")
peptides = df.Sequence.values
assert peptides[0] == "AAAAAAAQSVY"
assert peptides[-1] == "YYYNGKAVY"
column_to_sample = {}
for s in [c for c in df if c.startswith("Intensity ")]:
assert s[-2] == "-"
column_to_sample[s] = s.replace("Intensity ", "")[:-2].strip()
intensity_columns = list(column_to_sample)
rows = []
for _, row in df.iterrows():
x1 = row[intensity_columns]
x2 = x1[x1 > 0].index.map(column_to_sample).value_counts()
x3 = x2[x2 >= 2] # require at least two replicates for each peptide
for sample in x3.index:
rows.append((row.Sequence, sample))
result_df = pandas.DataFrame(rows, columns=["peptide", "sample_id"])
result_df["pulldown_antibody"] = "W6/32"
result_df["mhc_class"] = "I"
result_df["format"] = "multiallelic"
allele_map = {
'Fib': "HLA-A*03:01 HLA-A*23:01 HLA-B*08:01 HLA-B*15:18 HLA-C*07:02 HLA-C*07:04",
'HCC1937': "HLA-A*23:01 HLA-A*24:02 HLA-B*07:02 HLA-B*40:01 HLA-C*03:04 HLA-C*07:02",
'SupB15WT': None, # four digit alleles unknown, will drop sample
'SupB15RT': None,
'HCT116': "HLA-A*01:01 HLA-A*02:01 HLA-B*45:01 HLA-B*18:01 HLA-C*05:01 HLA-C*07:01",
# Homozygous at HLA-A:
'HCC1143': "HLA-A*31:01 HLA-A*31:01 HLA-B*35:08 HLA-B*37:01 HLA-C*04:01 HLA-C*06:02",
# Homozygous everywhere:
'JY': "HLA-A*02:01 HLA-A*02:01 HLA-B*07:02 HLA-B*07:02 HLA-C*07:02 HLA-C*07:02",
}
sample_type = {
'Fib': "fibroblast",
'HCC1937': "basal like breast cancer",
'SupB15WT': None,
'SupB15RT': None,
'HCT116': "colon carcinoma",
'HCC1143': "basal like breast cancer",
'JY': "B-cell",
}
cell_line = {
'Fib': None,
'HCC1937': "HCC1937",
'SupB15WT': None,
'SupB15RT': None,
'HCT116': "HCT116",
'HCC1143': "HCC1143",
'JY': "JY",
}
result_df["hla"] = result_df.sample_id.map(allele_map)
print("Entries before dropping samples with unknown alleles", len(result_df))
result_df = result_df.loc[~result_df.hla.isnull()]
print("Entries after dropping samples with unknown alleles", len(result_df))
result_df["sample_type"] = result_df.sample_id.map(sample_type)
result_df["cell_line"] = result_df.sample_id.map(cell_line)
print(result_df.head(3))
return result_df
def handle_pmid_26992070(*filenames):
"""Ritz, ..., Fugmann Proteomics 2016 [PMID 26992070]"""
# Although this publication seems to suggest that HEK293 are C*07:02
# (figure 3B), in a subsequent publication [PMID 28834231] this group
# gives the HEK293 HLA type as HLA‐A*03:01, HLA‐B*07:02, and HLA‐C*07:01.
# We are therefore using the HLA‐C*07:01 (i.e. the latter) typing results
# here.
allele_text = """
Cell line HLA-A 1 HLA-A 2 HLA-B 1 HLA-B 2 HLA-C 1 HLA-C 2
HEK293 03:01 03:01 07:02 07:02 07:01 07:01
HL-60 01:01 01:01 57:01 57:01 06:02 06:02
RPMI8226 30:01 68:02 15:03 15:10 02:10 03:04
MAVER-1 24:02 26:01 38:01 44:02 05:01 12:03
THP-1 02:01 24:02 15:11 35:01 03:03 03:03
"""
allele_info = pandas.read_csv(
StringIO(allele_text), sep="\t", index_col=0)
allele_info.index = allele_info.index.str.strip()
for gene in ["A", "B", "C"]:
for num in ["1", "2"]:
allele_info[
"HLA-%s %s" % (gene, num)
] = "HLA-" + gene + "*" + allele_info["HLA-%s %s" % (gene, num)]
cell_line_to_allele = allele_info.apply(" ".join, axis=1)
sheets = {}
for f in filenames:
if f.endswith(".xlsx"):
d = pandas.read_excel(f, sheet_name=None, skiprows=1)
sheets.update(d)
dfs = []
for cell_line in cell_line_to_allele.index:
# Using data from DeepQuanTR, which appears to be a consensus between
# two other methods used.
sheet = sheets[cell_line + "_DeepQuanTR"]
replicated = sheet.loc[
sheet[[c for c in sheet if "Sample" in c]].fillna(0).sum(1) > 1
]
df = pandas.DataFrame({
'peptide': replicated.Sequence.values
})
df["sample_id"] = cell_line
df["hla"] = cell_line_to_allele.get(cell_line)
dfs.append(df)
result_df = pandas.concat(dfs, ignore_index=True)
result_df["pulldown_antibody"] = "W6/32"
result_df["cell_line"] = result_df["sample_id"]
result_df["sample_type"] = result_df.sample_id.map({
"HEK293": "hek",
"HL-60": "neutrophil",
"RPMI8226": "b-cell",
"MAVER-1": "b-LCL",
"THP-1": "monocyte",
})
result_df["mhc_class"] = "I"
result_df["format"] = "multiallelic"
return result_df
def handle_pmid_27412690(filename):
"""Shraibman, ..., Admon Mol Cell Proteomics 2016 [PMID 27412690]"""
hla_types = {
"U-87": "HLA-A*02:01 HLA-B*44:02 HLA-C*05:01",
"T98G": "HLA-A*02:01 HLA-B*39:06 HLA-C*07:02",
"LNT-229": "HLA-A*03:01 HLA-B*35:01 HLA-C*04:01",
}
sample_id_to_cell_line = {
"U-87": "U-87",
"T98G": "T98G",
"LNT-229": "LNT-229",
"U-87+DAC": "U-87",
"T98G+DAC": "T98G",
"LNT-229+DAC": "LNT-229",
}
df = pandas.read_excel(filename)
assert df.Sequence.iloc[0] == "AAAAAAGSGTPR"
intensity_col_to_sample_id = {}
for col in df:
if col.startswith("Intensity "):
sample_id = col.split()[1]
assert sample_id in sample_id_to_cell_line, (col, sample_id)
intensity_col_to_sample_id[col] = sample_id
dfs = []
for (sample_id, cell_line) in sample_id_to_cell_line.items():
intensity_cols = [
c for (c, v) in intensity_col_to_sample_id.items()
if v == sample_id
]
hits_df = df.loc[
(df[intensity_cols] > 0).sum(1) > 1
]
result_df = pandas.DataFrame({
"peptide": hits_df.Sequence.values,
})
result_df["sample_id"] = sample_id
result_df["cell_line"] = cell_line
result_df["hla"] = hla_types[cell_line]
dfs.append(result_df)
result_df = pandas.concat(dfs, ignore_index=True)
result_df["sample_type"] = "glioblastoma"
result_df["pulldown_antibody"] = "W6/32"
result_df["mhc_class"] = "I"
result_df["format"] = "multiallelic"
return result_df
def handle_pmid_28832583(*filenames):
"""Bassani-Sternberg, ..., Gfeller PLOS Comp. Bio. 2017 [PMID 28832583]"""
# This work also reanalyzes data from
# Pearson, ..., <NAME> Invest 2016 [PMID 27841757]
(filename_dataset1, filename_dataset2) = sorted(filenames)
dataset1 = pandas.read_csv(filename_dataset1, sep="\t")
dataset2 = pandas.read_csv(filename_dataset2, sep="\t")
df = pandas.concat([dataset1, dataset2], ignore_index=True, sort=False)
info_text = """
cell_line origin original_pmid allele1 allele2 allele3 allele4 allele5 allele6
CD165 B-cell 28832583 HLA-A*02:05 HLA-A*24:02 HLA-B*15:01 HLA-B*50:01 HLA-C*03:03 HLA-C*06:02
CM467 B-cell 28832583 HLA-A*01:01 HLA-A*24:02 HLA-B*13:02 HLA-B*39:06 HLA-C*06:02 HLA-C*12:03
GD149 B-cell 28832583 HLA-A*01:01 HLA-A*24:02 HLA-B*38:01 HLA-B*44:03 HLA-C*06:02 HLA-C*12:03
MD155 B-cell 28832583 HLA-A*02:01 HLA-A*24:02 HLA-B*15:01 HLA-B*18:01 HLA-C*03:03 HLA-C*07:01
PD42 B-cell 28832583 HLA-A*02:06 HLA-A*24:02 HLA-B*07:02 HLA-B*55:01 HLA-C*01:02 HLA-C*07:02
RA957 B-cell 28832583 HLA-A*02:20 HLA-A*68:01 HLA-B*35:03 HLA-B*39:01 HLA-C*04:01 HLA-C*07:02
TIL1 TIL 28832583 HLA-A*02:01 HLA-A*02:01 HLA-B*18:01 HLA-B*38:01 HLA-C*05:01
TIL3 TIL 28832583 HLA-A*01:01 HLA-A*23:01 HLA-B*07:02 HLA-B*15:01 HLA-C*12:03 HLA-C*14:02
Apher1 Leukapheresis 28832583 HLA-A*03:01 HLA-A*29:02 HLA-B*44:02 HLA-B*44:03 HLA-C*12:03 HLA-C*16:01
Apher6 Leukapheresis 28832583 HLA-A*02:01 HLA-A*03:01 HLA-B*07:02 HLA-C*07:02
pat_AC2 B-LCL 27841757 HLA-A*03:01 HLA-A*32:01 HLA-B*27:05 HLA-B*45:01
pat_C B-LCL 27841757 HLA-A*02:01 HLA-A*03:01 HLA-B*07:02 HLA-C*07:02
pat_CELG B-LCL 27841757 HLA-A*02:01 HLA-A*24:02 HLA-B*15:01 HLA-B*73:01 HLA-C*03:03 HLA-C*15:05
pat_CP2 B-LCL 27841757 HLA-A*11:01 HLA-B*14:02 HLA-B*44:02
pat_FL B-LCL 27841757 HLA-A*03:01 HLA-A*11:01 HLA-B*44:03 HLA-B*50:01
pat_J B-LCL 27841757 HLA-A*02:01 HLA-A*03:01 HLA-B*07:02 HLA-C*07:02
pat_JPB3 B-LCL 27841757 HLA-A*02:01 HLA-A*11:01 HLA-B*27:05 HLA-B*56:01
pat_JT2 B-LCL 27841757 HLA-A*11:01 HLA-B*18:03 HLA-B*35:01
pat_M B-LCL 27841757 HLA-A*03:01 HLA-A*29:02 HLA-B*08:01 HLA-B*44:03 HLA-C*07:01 HLA-C*16:01
pat_MA B-LCL 27841757 HLA-A*02:01 HLA-A*29:02 HLA-B*44:03 HLA-B*57:01 HLA-C*07:01 HLA-C*16:01
pat_ML B-LCL 27841757 HLA-A*02:01 HLA-A*11:01 HLA-B*40:01 HLA-B*44:03
pat_NS2 B-LCL 27841757 HLA-A*02:01 HLA-B*13:02 HLA-B*41:01
pat_NT B-LCL 27841757 HLA-A*01:01 HLA-A*32:01 HLA-B*08:01
pat_PF1 B-LCL 27841757 HLA-A*01:01 HLA-A*02:01 HLA-B*07:02 HLA-B*44:03 HLA-C*07:02 HLA-C*16:01
pat_R B-LCL 27841757 HLA-A*03:01 HLA-A*29:02 HLA-B*08:01 HLA-B*44:03 HLA-C*07:01 HLA-C*16:01
pat_RT B-LCL 27841757 HLA-A*01:01 HLA-A*02:01 HLA-B*18:01 HLA-B*39:24 HLA-C*05:01 HLA-C*07:01
pat_SR B-LCL 27841757 HLA-A*02:01 HLA-A*23:01 HLA-B*18:01 HLA-B*44:03
pat_ST B-LCL 27841757 HLA-A*03:01 HLA-A*24:02 HLA-B*07:02 HLA-B*27:05
"""
info_df = pandas.read_csv(StringIO(info_text), sep="\t", index_col=0)
info_df.index = info_df.index.str.strip()
info_df["hla"] = info_df[
[c for c in info_df if c.startswith("allele")]
].fillna("").apply(" ".join, axis=1)
results = []
for col in df.columns:
if col.startswith("Intensity "):
sample_id = col.replace("Intensity ", "")
assert sample_id in info_df.index, sample_id
peptides = df.loc[df[col].fillna(0) > 0].Sequence.unique()
result_df = pandas.DataFrame({"peptide": peptides})
result_df["sample_id"] = sample_id
result_df["hla"] = info_df.loc[sample_id].hla
result_df["sample_type"] = info_df.loc[sample_id].origin
result_df["original_pmid"] = str(
info_df.loc[sample_id].original_pmid)
results.append(result_df)
result_df = pandas.concat(results, ignore_index=True)
samples = result_df.sample_id.unique()
for sample_id in info_df.index:
assert sample_id in samples, (sample_id, samples)
result_df["mhc_class"] = "I"
result_df["format"] = "multiallelic"
result_df["cell_line"] = ""
result_df["pulldown_antibody"] = "W6/32"
return result_df
PMID_31495665_SAMPLE_TYPES = {
"HLA-DR_Lung": "lung",
"HLA-DR_PBMC_HDSC": "pbmc",
"HLA-DR_PBMC_RG1095": "pbmc",
"HLA-DR_PBMC_RG1104": "pbmc",
"HLA-DR_PBMC_RG1248": "pbmc",
"HLA-DR_Spleen": "spleen",
"MAPTAC_A*02:01": "mix:a375,expi293,hek293,hela",
"MAPTAC_A*11:01": "mix:expi293,hela",
"MAPTAC_A*32:01": "mix:a375,expi293,hela",
"MAPTAC_B*07:02": "mix:a375,expi293,hela",
"MAPTAC_B*45:01": "expi293",
"MAPTAC_B*52:01": "mix:a375,expi293",
"MAPTAC_C*03:03": "expi293",
"MAPTAC_C*06:02": "mix:a375,expi293",
"MAPTAC_DPB1*06:01/DPA1*01:03_dm+": "expi293",
"MAPTAC_DPB1*06:01/DPA1*01:03_dm-": "expi293",
"MAPTAC_DQB1*06:04/DQA1*01:02_dm+": "expi293",
"MAPTAC_DQB1*06:04/DQA1*01:02_dm-": "expi293",
"MAPTAC_DRB1*01:01": "mix:a375,b721,expi293,kg1,k562",
"MAPTAC_DRB1*03:01": "expi293",
"MAPTAC_DRB1*04:01": "expi293",
"MAPTAC_DRB1*07:01": "mix:expi293,hek293",
"MAPTAC_DRB1*11:01": "mix:expi293,k562,kg1",
"MAPTAC_DRB1*12:01_dm+": "expi293",
"MAPTAC_DRB1*12:01_dm-": "expi293",
"MAPTAC_DRB1*15:01": "expi293",
"MAPTAC_DRB3*01:01_dm+": "expi293",
"MAPTAC_DRB3*01:01_dm-": "expi293",
}
CELL_LINE_MIXTURES = sorted(
set(
x for x in PMID_31495665_SAMPLE_TYPES.values()
if x.startswith("mix:")))
def handle_pmid_31495665(filename):
"""Abelin, ..., Rooney Immunity 2019 [PMID 31495665]"""
hla_type = {
"HLA-DR_A375": None,
"HLA-DR_Lung": "DRB1*01:01 DRB1*03:01 DRB3*01:01",
"HLA-DR_PBMC_HDSC": "DRB1*03:01 DRB1*11:01 DRB3*01:01 DRB3*02:02",
"HLA-DR_PBMC_RG1095": "HLA-DRA1*01:01-DRB1*03:01 HLA-DRA1*01:01-DRB1*11:01 HLA-DRA1*01:01-DRB3*01:01 HLA-DRA1*01:01-DRB3*02:02",
"HLA-DR_PBMC_RG1104": "DRB1*01:01 DRB1*11:01 DRB3*02:02",
"HLA-DR_PBMC_RG1248": "DRB1*03:01 DRB1*03:01 DRB3*01:01 DRB3*01:01",
"HLA-DR_SILAC_Donor1_10minLysate": None,
"HLA-DR_SILAC_Donor1_5hrLysate": None,
"HLA-DR_SILAC_Donor1_DConly": None,
"HLA-DR_SILAC_Donor1_UVovernight": None,
"HLA-DR_SILAC_Donor2_DC_UV_16hr": None,
"HLA-DR_SILAC_Donor2_DC_UV_24hr": None,
"HLA-DR_Spleen": "DRB1*04:01 DRB4*01:03 DRB1*15:03 DRB5*01:01",
"MAPTAC_A*02:01": "HLA-A*02:01",
"MAPTAC_A*11:01": "HLA-A*11:01",
"MAPTAC_A*32:01": "HLA-A*32:01",
"MAPTAC_B*07:02": "HLA-B*07:02",
"MAPTAC_B*45:01": "HLA-B*45:01",
"MAPTAC_B*52:01": "HLA-B*52:01",
"MAPTAC_C*03:03": "HLA-C*03:03",
"MAPTAC_C*06:02": "HLA-C*06:02",
"MAPTAC_DPB1*06:01/DPA1*01:03_dm+": "HLA-DPB1*06:01-DPA1*01:03",
"MAPTAC_DPB1*06:01/DPA1*01:03_dm-": "HLA-DPB1*06:01-DPA1*01:03",
"MAPTAC_DQB1*06:04/DQA1*01:02_dm+": "HLA-DQB1*06:04-DQA1*01:02",
"MAPTAC_DQB1*06:04/DQA1*01:02_dm-": "HLA-DQB1*06:04-DQA1*01:02",
"MAPTAC_DRB1*01:01": "HLA-DRA1*01:01-DRB1*01:01",
"MAPTAC_DRB1*03:01": "HLA-DRA1*01:01-DRB1*03:01",
"MAPTAC_DRB1*04:01": "HLA-DRA1*01:01-DRB1*04:01",
"MAPTAC_DRB1*07:01": "HLA-DRA1*01:01-DRB1*07:01",
"MAPTAC_DRB1*11:01": "HLA-DRA1*01:01-DRB1*11:01",
"MAPTAC_DRB1*12:01_dm+": "HLA-DRA1*01:01-DRB1*12:01",
"MAPTAC_DRB1*12:01_dm-": "HLA-DRA1*01:01-DRB1*12:01",
"MAPTAC_DRB1*15:01": "HLA-DRA1*01:01-DRB1*15:01",
"MAPTAC_DRB3*01:01_dm+": "HLA-DRA1*01:01-DRB3*01:01",
"MAPTAC_DRB3*01:01_dm-": "HLA-DRA1*01:01-DRB3*01:01",
}
pulldown_antibody = {
"HLA-DR_Lung": "L243 (HLA-DR)",
"HLA-DR_PBMC_HDSC": "tal1b5 (HLA-DR)",
"HLA-DR_PBMC_RG1095": "tal1b5 (HLA-DR)",
"HLA-DR_PBMC_RG1104": "tal1b5 (HLA-DR)",
"HLA-DR_PBMC_RG1248": "tal1b5 (HLA-DR)",
"HLA-DR_Spleen": "L243 (HLA-DR)",
"MAPTAC_A*02:01": "MAPTAC",
"MAPTAC_A*11:01": "MAPTAC",
"MAPTAC_A*32:01": "MAPTAC",
"MAPTAC_B*07:02": "MAPTAC",
"MAPTAC_B*45:01": "MAPTAC",
"MAPTAC_B*52:01": "MAPTAC",
"MAPTAC_C*03:03": "MAPTAC",
"MAPTAC_C*06:02": "MAPTAC",
"MAPTAC_DPB1*06:01/DPA1*01:03_dm+": "MAPTAC",
"MAPTAC_DPB1*06:01/DPA1*01:03_dm-": "MAPTAC",
"MAPTAC_DQB1*06:04/DQA1*01:02_dm+": "MAPTAC",
"MAPTAC_DQB1*06:04/DQA1*01:02_dm-": "MAPTAC",
"MAPTAC_DRB1*01:01": "MAPTAC",
"MAPTAC_DRB1*03:01": "MAPTAC",
"MAPTAC_DRB1*04:01": "MAPTAC",
"MAPTAC_DRB1*07:01": "MAPTAC",
"MAPTAC_DRB1*11:01": "MAPTAC",
"MAPTAC_DRB1*12:01_dm+": "MAPTAC",
"MAPTAC_DRB1*12:01_dm-": "MAPTAC",
"MAPTAC_DRB1*15:01": "MAPTAC",
"MAPTAC_DRB3*01:01_dm+": "MAPTAC",
"MAPTAC_DRB3*01:01_dm-": "MAPTAC",
}
format = {
"HLA-DR_Lung": "DR-specific",
"HLA-DR_PBMC_HDSC": "DR-specific",
"HLA-DR_PBMC_RG1095": "DR-specific",
"HLA-DR_PBMC_RG1104": "DR-specific",
"HLA-DR_PBMC_RG1248": "DR-specific",
"HLA-DR_Spleen": "DR-specific",
"MAPTAC_A*02:01": "monoallelic",
"MAPTAC_A*11:01": "monoallelic",
"MAPTAC_A*32:01": "monoallelic",
"MAPTAC_B*07:02": "monoallelic",
"MAPTAC_B*45:01": "monoallelic",
"MAPTAC_B*52:01": "monoallelic",
"MAPTAC_C*03:03": "monoallelic",
"MAPTAC_C*06:02": "monoallelic",
"MAPTAC_DPB1*06:01/DPA1*01:03_dm+": "monoallelic",
"MAPTAC_DPB1*06:01/DPA1*01:03_dm-": "monoallelic",
"MAPTAC_DQB1*06:04/DQA1*01:02_dm+": "monoallelic",
"MAPTAC_DQB1*06:04/DQA1*01:02_dm-": "monoallelic",
"MAPTAC_DRB1*01:01": "monoallelic",
"MAPTAC_DRB1*03:01": "monoallelic",
"MAPTAC_DRB1*04:01": "monoallelic",
"MAPTAC_DRB1*07:01": "monoallelic",
"MAPTAC_DRB1*11:01": "monoallelic",
"MAPTAC_DRB1*12:01_dm+": "monoallelic",
"MAPTAC_DRB1*12:01_dm-": "monoallelic",
"MAPTAC_DRB1*15:01": "monoallelic",
"MAPTAC_DRB3*01:01_dm+": "monoallelic",
"MAPTAC_DRB3*01:01_dm-": "monoallelic",
}
mhc_class = {
"HLA-DR_Lung": "II",
"HLA-DR_PBMC_HDSC": "II",
"HLA-DR_PBMC_RG1095": "II",
"HLA-DR_PBMC_RG1104": "II",
"HLA-DR_PBMC_RG1248": "II",
"HLA-DR_Spleen": "II",
"MAPTAC_A*02:01": "I",
"MAPTAC_A*11:01": "I",
"MAPTAC_A*32:01": "I",
"MAPTAC_B*07:02": "I",
"MAPTAC_B*45:01": "I",
"MAPTAC_B*52:01": "I",
"MAPTAC_C*03:03": "I",
"MAPTAC_C*06:02": "I",
"MAPTAC_DPB1*06:01/DPA1*01:03_dm+": "II",
"MAPTAC_DPB1*06:01/DPA1*01:03_dm-": "II",
"MAPTAC_DQB1*06:04/DQA1*01:02_dm+": "II",
"MAPTAC_DQB1*06:04/DQA1*01:02_dm-": "II",
"MAPTAC_DRB1*01:01": "II",
"MAPTAC_DRB1*03:01": "II",
"MAPTAC_DRB1*04:01": "II",
"MAPTAC_DRB1*07:01": "II",
"MAPTAC_DRB1*11:01": "II",
"MAPTAC_DRB1*12:01_dm+": "II",
"MAPTAC_DRB1*12:01_dm-": "II",
"MAPTAC_DRB1*15:01": "II",
"MAPTAC_DRB3*01:01_dm+": "II",
"MAPTAC_DRB3*01:01_dm-": "II",
}
cell_line = {
"HLA-DR_Lung": "",
"HLA-DR_PBMC_HDSC": "",
"HLA-DR_PBMC_RG1095": "",
"HLA-DR_PBMC_RG1104": "",
"HLA-DR_PBMC_RG1248": "",
"HLA-DR_Spleen": "",
"MAPTAC_A*02:01": "",
"MAPTAC_A*11:01": "",
"MAPTAC_A*32:01": "",
"MAPTAC_B*07:02": "",
"MAPTAC_B*45:01": "expi293",
"MAPTAC_B*52:01": "",
"MAPTAC_C*03:03": "expi293",
"MAPTAC_C*06:02": "",
"MAPTAC_DPB1*06:01/DPA1*01:03_dm+": "expi293",
"MAPTAC_DPB1*06:01/DPA1*01:03_dm-": "expi293",
"MAPTAC_DQB1*06:04/DQA1*01:02_dm+": "expi293", # don't actually see this in DataS1A!
"MAPTAC_DQB1*06:04/DQA1*01:02_dm-": "expi293",
"MAPTAC_DRB1*01:01": "",
"MAPTAC_DRB1*03:01": "expi293",
"MAPTAC_DRB1*04:01": "expi293",
"MAPTAC_DRB1*07:01": "",
"MAPTAC_DRB1*11:01": "",
"MAPTAC_DRB1*12:01_dm+": "expi293",
"MAPTAC_DRB1*12:01_dm-": "expi293",
"MAPTAC_DRB1*15:01": "expi293",
"MAPTAC_DRB3*01:01_dm+": "expi293",
"MAPTAC_DRB3*01:01_dm-": "expi293",
}
df = pandas.read_excel(filename, sheet_name="DataS1B")
results = []
for sample_id in df.columns:
if hla_type[sample_id] is None:
print("Intentionally skipping", sample_id)
continue
result_df = pandas.DataFrame({
"peptide": df[sample_id].dropna().values,
})
result_df["sample_id"] = sample_id
result_df["hla"] = hla_type[sample_id]
result_df["pulldown_antibody"] = pulldown_antibody[sample_id]
result_df["format"] = format[sample_id]
result_df["mhc_class"] = mhc_class[sample_id]
result_df["sample_type"] = PMID_31495665_SAMPLE_TYPES[sample_id]
result_df["cell_line"] = cell_line[sample_id]
results.append(result_df)
result_df = pandas.concat(results, ignore_index=True)
# Remove class II for now
result_df = result_df.loc[result_df.mhc_class == "I"]
return result_df
def handle_pmid_27869121(filename):
"""Bassani-Sternberg, ..., Krackhardt Nature Comm. 2016 [PMID 27869121]"""
# Although this dataset has class II data also, we are only extracting
# class I for now.
df = pandas.read_excel(filename, skiprows=1)
# Taking these from:
# Supplementary Table 2: Information of patients selected for neoepitope
# identification
# For the Mel5 sample, only two-digit alleles are shown (A*01, A*25,
# B*08, B*18) so we are skipping that sample for now.
hla_df = pandas.DataFrame([
("Mel-8", "HLA-A*01:01 HLA-A*03:01 HLA-B*07:02 HLA-B*08:01 HLA-C*07:01 HLA-C*07:02"),
("Mel-12", "HLA-A*01:01 HLA-B*08:01 HLA-C*07:01"),
("Mel-15", "HLA-A*03:01 HLA-A*68:01 HLA-B*27:05 HLA-B*35:03 HLA-C*02:02 HLA-C*04:01"),
("Mel-16", "HLA-A*01:01 HLA-A*24:02 HLA-B*07:02 HLA-B*08:01 HLA-C*07:01 HLA-C*07:02"),
], columns=["sample_id", "hla"]).set_index("sample_id")
# We assert below that none of the class I hit peptides were found in any
# of the class II pull downs.
class_ii_cols = [
c for c in df.columns if c.endswith("HLA-II (arbitrary units)")
]
class_ii_hits = set(df.loc[
(df[class_ii_cols].fillna(0.0).sum(1) > 0)
].Sequence.unique())
results = []
for (sample_id, hla) in hla_df.hla.items():
intensity_col = "Intensity %s_HLA-I (arbitrary units)" % sample_id
sub_df = df.loc[
(df[intensity_col].fillna(0.0) > 0)
]
filtered_sub_df = sub_df.loc[
(~sub_df.Sequence.isin(class_ii_hits))
]
peptides = filtered_sub_df.Sequence.unique()
assert not any(p in class_ii_hits for p in peptides)
result_df = pandas.DataFrame({
"peptide": peptides,
})
result_df["sample_id"] = sample_id
result_df["hla"] = hla_df.loc[sample_id, "hla"]
result_df["pulldown_antibody"] = "W6/32"
result_df["format"] = "multiallelic"
result_df["mhc_class"] = "I"
result_df["sample_type"] = "melanoma_met"
result_df["cell_line"] = None
results.append(result_df)
result_df = pandas.concat(results, ignore_index=True)
return result_df
def handle_pmid_31154438(*filenames):
"""Shraibman, ..., Admon Mol Cell Proteomics 2019 [PMID 31154438]"""
# Note: this publication also includes analyses of the secreted HLA
# peptidedome (sHLA) but we are using only the data from membrane-bound
# HLA.
(xls, txt) = sorted(filenames, key=lambda s: not s.endswith(".xlsx"))
info = pandas.read_excel(xls, skiprows=1)
df = pandas.read_csv(txt, sep="\t", skiprows=1)
hla_df = info.loc[
~info["mHLA tissue sample"].isnull()
].set_index("mHLA tissue sample")[["HLA typing"]]
def fix_hla(string):
result = []
alleles = string.split(";")
for a in alleles:
a = a.strip()
if "/" in a:
(a1, a2) = a.split("/")
a2 = a1[:2] + a2
lst = [a1, a2]
else:
lst = [a]
for a in lst:
normalized = normalize_allele_name_or_return_unknown(a)
result.append(normalized)
return " ".join(result)
hla_df["hla"] = hla_df["HLA typing"].map(fix_hla)
results = []
for (sample_id, hla) in hla_df.hla.items():
intensity_col = "Intensity %s" % sample_id
sub_df = df.loc[
(df[intensity_col].fillna(0.0) > 0)
]
peptides = sub_df.Sequence.unique()
result_df = pandas.DataFrame({
"peptide": peptides,
})
result_df["sample_id"] = sample_id
result_df["hla"] = hla_df.loc[sample_id, "hla"]
result_df["pulldown_antibody"] = "W6/32"
result_df["format"] = "multiallelic"
result_df["mhc_class"] = "I"
result_df["sample_type"] = "glioblastoma_tissue"
result_df["cell_line"] = None
results.append(result_df)
result_df = pandas.concat(results, ignore_index=True)
return result_df
def handle_pmid_31844290(*filenames):
"""Sarkizova, ..., Keskin Nature Biotechnology 2019 [PMID 31844290]"""
(mono_filename, multi_filename) = sorted(filenames)
# Monoallelic
mono = pandas.read_excel(mono_filename, sheet_name=None)
dfs = []
for (key, value) in mono.items():
if key == 'Sheet1':
continue
allele_before_normalization = key
if not allele_before_normalization.startswith("HLA-"):
allele_before_normalization = "HLA-" + allele_before_normalization
allele = normalize_allele_name(allele_before_normalization)
assert allele != "UNKNOWN"
df = pandas.DataFrame({"peptide": value.sequence.values})
df["sample_id"] = "keskin_%s" % key
df["hla"] = allele
df["pulldown_antibody"] = "W6/32"
df["format"] = "monoallelic"
df["mhc_class"] = "I"
df["sample_type"] = "B-CELL"
df["cell_line"] = "b721"
dfs.append(df)
# Multiallelic
multi = pandas.read_excel(multi_filename, sheet_name=None)
metadata = multi['Tissue Sample Characteristics']
allele_table = metadata.drop_duplicates(
"Clinical ID").set_index("Clinical ID").loc[
:, [c for c in metadata if c.startswith("HLA-")]
]
allele_table = allele_table.loc[~allele_table.index.isnull()]
allele_table = allele_table.loc[allele_table["HLA-A"] != 'n.d.']
allele_table = allele_table.applymap(
lambda s: s[1:] if s.startswith("-") else s)
allele_table = allele_table.applymap(
lambda s: "B5101" if s == "B51" else s)
allele_table = allele_table.applymap(normalize_allele_name_or_return_unknown)
sample_info = metadata.drop_duplicates(
"Clinical ID").set_index("Clinical ID")[['Cancer type', 'IP Ab']]
sample_info = sample_info.loc[~sample_info.index.isnull()].fillna(
method='ffill')
sample_info = sample_info.loc[sample_info.index.isin(allele_table.index)]
sample_info = sample_info.loc[allele_table.index]
sample_info["hla"] = [
" ".join(row).replace("HLA-A*31:0102", "HLA-A*31:01") # fix a typo
for _, row in allele_table.iterrows()
]
sample_info["sample_type"] = sample_info['Cancer type'].map({
'CLL': "B-CELL",
'GBM': "GLIOBLASTOMA_TISSUE",
'Melanoma': "MELANOMA",
"Ovarian": "OVARY",
'ccRCC': "KIDNEY",
})
assert not sample_info["sample_type"].isnull().any()
assert not "UNKNOWN" in sample_info["hla"].any()
for (key, value) in multi.items():
if key == 'Tissue Sample Characteristics':
continue
for (directory, sub_df) in value.groupby("directory"):
if 'Pat7' in directory or 'Pat9' in directory:
print("Skipping due to no HLA typing", directory)
continue
try:
(sample_id,) = sample_info.loc[
sample_info.index.map(
lambda idx: (
idx in directory or
idx.replace("-", "_").replace("MEL_", "") in directory or
idx.replace(" ", "_") in directory
))
].index
except ValueError as e:
print(directory, e)
import ipdb ; ipdb.set_trace()
info = sample_info.loc[sample_id]
df = pandas.DataFrame({"peptide": sub_df.sequence.values})
df["sample_id"] = "keskin_%s" % sample_id.replace(" ", "_")
df["hla"] = info['hla']
df["pulldown_antibody"] = info['IP Ab']
df["format"] = "multiallelic"
df["mhc_class"] = "I"
df["sample_type"] = info['sample_type']
df["cell_line"] = None
dfs.append(df)
result_df = pandas.concat(dfs, ignore_index=True)
result_df["peptide"] = result_df.peptide.str.upper()
return result_df
EXPRESSION_GROUPS_ROWS = []
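# Average the listed source columns into one expression column per group label, recording
# the (dataset, label, columns) triples in EXPRESSION_GROUPS_ROWS.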
def make_expression_groups(dataset_identifier, df, groups):
result_df = pandas.DataFrame(index=df.index)
for (label, columns) in groups.items():
for col in columns:
if col not in df.columns:
raise ValueError(
"Missing: %s. Available: %s" % (col, df.columns.tolist()))
result_df[label] = df[columns].mean(1)
EXPRESSION_GROUPS_ROWS.append((dataset_identifier, label, columns))
return result_df
def handle_expression_GSE113126(*filenames):
"""
Barry, ..., Krummel Nature Medicine 2018 [PMID 29942093]
This is the melanoma met RNA-seq dataset.
"""
df = pandas.read_csv(filenames[0], sep="\t", index_col=0)
df = df[[]] # no columns
for filename in filenames:
df[os.path.basename(filename)] = pandas.read_csv(
filename, sep="\t", index_col=0)["TPM"]
assert len(df.columns) == len(filenames)
groups = {
"sample_type:MELANOMA_MET": df.columns.tolist(),
}
return [make_expression_groups("GSE113126", df, groups)]
def handle_expression_expression_atlas_22460905(filename):
df = pandas.read_csv(filename, sep="\t", skiprows=4, index_col=0)
del df["Gene Name"]
df.columns = df.columns.str.lower()
df = df.fillna(0.0)
def matches(*strings):
return [c for c in df.columns if all(s in c for s in strings)]
groups = {
"sample_type:B-LCL": (
matches("b-cell", "lymphoblast") + matches("b acute lymphoblastic")),
"sample_type:B-CELL": matches("b-cell"),
"sample_type:B721-LIKE": matches("b-cell"),
"sample_type:MELANOMA_CELL_LINE": matches("melanoma"),
"sample_type:MELANOMA": matches("melanoma"),
"sample_type:A375-LIKE": matches("melanoma"),
"sample_type:KG1-LIKE": matches("myeloid leukemia"),
# Using a fibrosarcoma cell line for our fibroblast sample.
"sample_type:FIBROBLAST": ['fibrosarcoma, ht-1080'],
# For GBM tissue we are just using a mixture of cell lines.
"sample_type:GLIOBLASTOMA_TISSUE": matches("glioblastoma"),
"cell_line:THP-1": ["childhood acute monocytic leukemia, thp-1"],
"cell_line:HL-60": ["adult acute myeloid leukemia, hl-60"],
"cell_line:U-87": ['glioblastoma, u-87 mg'],
"cell_line:LNT-229": ['glioblastoma, ln-229'],
"cell_line:T98G": ['glioblastoma, t98g'],
"cell_line:SK-MEL-5": ['cutaneous melanoma, sk-mel-5'],
'cell_line:MEWO': ['melanoma, mewo'],
"cell_line:HCC1937": ['breast ductal adenocarcinoma, hcc1937'],
"cell_line:HCT116": ['colon carcinoma, hct 116'],
"cell_line:HCC1143": ['breast ductal adenocarcinoma, hcc1143'],
}
return [make_expression_groups("expression_atlas_22460905", df, groups)]
def handle_expression_human_protein_atlas(*filenames):
(cell_line_filename,) = [f for f in filenames if "celline" in f]
(blood_filename,) = [f for f in filenames if "blood" in f]
(gtex_filename,) = [f for f in filenames if "gtex" in f]
cell_line_df = pandas.read_csv(cell_line_filename, sep="\t")
blood_df = | pandas.read_csv(blood_filename, sep="\t", index_col=0) | pandas.read_csv |
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import requests
import time
from datetime import datetime
import pandas as pd
from urllib import parse
from config import ENV_VARIABLE
from os.path import getsize
fold_path = "./crawler_data/"
page_Max = 100
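# Return the substring of `url` that follows `wantStrip` (used to pull product ids out of links).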
def stripID(url, wantStrip):
loc = url.find(wantStrip)
length = len(wantStrip)
return url[loc+length:]
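# Each shop crawler below drives headless Chrome through the shop's product-list pages,
# scraping title, link, page id, image and prices for every item into dfAll, then hands
# the result to save() and upload().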
def Kklee():
shop_id = 13
name = 'kklee'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll before the next page
    dfAll = pd.DataFrame()  # accumulates every scraped row
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.kklee.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the requested page does not exist, break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='col-xs-12 ProductList-list']/a[%i]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//a[%i]/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[3]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Wishbykorea():
shop_id = 14
name = 'wishbykorea'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll before the next page
    dfAll = pd.DataFrame()  # accumulates every scraped row
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.wishbykorea.com/collection-727&pgno=" + str(p)
        # if the requested page does not exist, break out of the loop
try:
chrome.get(url)
print(url)
except:
break
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div/div/label" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/a[@href]" % (i,)).get_attribute('href')
page_id = page_link.replace("https://www.wishbykorea.com/collection-view-", "").replace("&ca=727", "")
find_href = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/a/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip('")')
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div[@class='collection_item_info']/div[2]/label" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div[@class='collection_item_info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
if(sale_price == "0"):
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Aspeed():
shop_id = 15
name = 'aspeed'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll before the next page
    dfAll = pd.DataFrame()  # accumulates every scraped row
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.aspeed.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=72"
        # if the requested page does not exist, break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 73):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 73):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 73):
p += 1
continue
i += 1
if(i == 73):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Openlady():
shop_id = 17
name = 'openlady'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll before the next page
    dfAll = pd.DataFrame()  # accumulates every scraped row
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.openlady.tw/item.html?&id=157172&page=" + \
str(p)
        # if the requested page does not exist, break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_name']/a[@class='mymy_item_link']" % (i,)).text
page_link = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_name']/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("&id=", "")
except:
close += 1
break
try:
pic_link = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_img']/a[@class='mymy_item_link']/img[@src]" % (i,)).get_attribute("src")
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[2]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[1]" % (i,)).text
ori_price = ori_price.strip('NT$ ')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[1]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Azoom():
shop_id = 20
name = 'azoom'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll before the next page
    dfAll = pd.DataFrame()  # accumulates every scraped row
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.aroom1988.com/categories/view-all?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the requested page does not exist, break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 24):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.strip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip('")')
except:
i += 1
if(i == 24):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div/div" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 24):
p += 1
continue
i += 1
if(i == 24):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Roxy():
shop_id = 21
name = 'roxy'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll before the next page
    dfAll = pd.DataFrame()  # accumulates every scraped row
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.roxytaiwan.com.tw/new-collection?p=" + \
str(p)
        # if the requested page does not exist, break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 65):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]/div[@class='product-thumb-info']/p[@class='product-title']/a" % (i,)).text
page_link = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]/div[@class='product-thumb-info']/p[@class='product-title']/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "default=")
except:
close += 1
break
try:
pic_link = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]/div[@class='product-img']/a[@class='img-link']/picture[@class='main-picture']/img[@data-src]" % (i,)).get_attribute("data-src")
except:
i += 1
if(i == 65):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]//span[@class='special-price']//span[@class='price-dollars']" % (i,)).text
sale_price = sale_price.replace('TWD', "")
ori_price = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]//span[@class='old-price']//span[@class='price-dollars']" % (i,)).text
ori_price = ori_price.replace('TWD', "")
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]//span[@class='price-dollars']" % (i,)).text
sale_price = sale_price.replace('TWD', "")
ori_price = ""
except:
i += 1
if(i == 65):
p += 1
continue
i += 1
if(i == 65):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Shaxi():
shop_id = 22
name = 'shaxi'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll before the next page
    dfAll = pd.DataFrame()  # accumulates every scraped row
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.shaxi.tw/products?page=" + str(p)
try:
chrome.get(url)
except:
break
i = 1
while(i < 49):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 49):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 49):
p += 1
continue
i += 1
if(i == 49):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Cici():
shop_id = 23
name = 'cici'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll before the next page
    dfAll = pd.DataFrame()  # accumulates every scraped row
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.cici2.tw/products?page=" + str(p)
try:
chrome.get(url)
except:
break
i = 1
while(i < 49):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 49):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 49):
p += 1
continue
i += 1
if(i == 49):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Amesoeur():
shop_id = 25
name = 'amesour'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.amesoeur.co/categories/%E5%85%A8%E9%83%A8%E5%95%86%E5%93%81?page=" + \
str(p)
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
page_id = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('product-id')
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[3]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Singular():
shop_id = 27
name = 'singular'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
i = 1
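        # the listing endpoint pages by offset, 50 items per request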
offset = (p-1) * 50
url = "https://www.singular-official.com/products?limit=50&offset=" + \
str(offset) + "&price=0%2C10000&sort=createdAt-desc"
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
while(i < 51):
try:
                title = chrome.find_element_by_xpath(
                    "//div[@class='rmq-3ab81ca3'][%i]/div[2]" % (i,)).text
except:
close += 1
# print(i, "title")
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]//a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/product/")
                pic_link = chrome.find_element_by_xpath(
                    "//div[@class='rmq-3ab81ca3'][%i]//img" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]/div[3]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
                ori_price = chrome.find_element_by_xpath(
                    "//div[@class='rmq-3ab81ca3'][%i]/div[3]/div[1]/span/s" % (i,)).text
ori_price = ori_price.strip('NT$ ')
ori_price = ori_price.split()
ori_price = ori_price[0]
except:
i += 1
if(i == 51):
p += 1
continue
i += 1
if(i == 51):
p += 1
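            # scroll down and pause briefly so items further down the page can render before the next lookup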
chrome.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
time.sleep(1)
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Folie():
shop_id = 28
name = 'folie'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.folief.com/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Corban():
shop_id = 29
name = 'corban'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
i = 1
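        # the listing endpoint pages by offset, 50 items per request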
offset = (p-1) * 50
url = "https://www.corban.com.tw/products?limit=50&offset=" + \
str(offset) + "&price=0%2C10000&sort=createdAt-desc&tags=ALL%20ITEMS"
try:
chrome.get(url)
except:
break
while(i < 51):
try:
title = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]/div[2]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]//a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/product/")
                pic_link = chrome.find_element_by_xpath(
                    "//div[@class='rmq-3ab81ca3'][%i]//img" % (i,)).get_attribute('src')
                sale_price = chrome.find_element_by_xpath(
                    "//div[@class='rmq-3ab81ca3'][%i]/div[3]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
                ori_price = chrome.find_element_by_xpath(
                    "//div[@class='rmq-3ab81ca3'][%i]/div[3]/div[1]/span/s" % (i,)).text
ori_price = ori_price.strip('NT$ ')
except:
i += 1
if(i == 51):
p += 1
continue
i += 1
if(i == 51):
p += 1
chrome.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
time.sleep(1)
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Gmorning():
shop_id = 30
name = 'gmorning'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.gmorning.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def July():
shop_id = 31
name = 'july'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.july2017.co/products?page=" + str(p)
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Per():
shop_id = 32
name = 'per'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.perdot.com.tw/categories/all?page=" + str(p)
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Cereal():
shop_id = 33
name = 'cereal'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.cerealoutfit.com/new/page/" + str(p) + "/"
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
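        # dismiss the pop-up overlay if one is shown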
try:
chrome.find_element_by_xpath(
"//button[@class='mfp-close']").click()
except:
pass
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[@data-loop='%i']/h3/a" % (i,)).text
if(title == ""):
i += 1
if(i == 25):
p += 1
continue
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@data-loop='%i']/div[1]/a[@href]" % (i,)).get_attribute('href')
page_id = chrome.find_element_by_xpath(
"//div[@data-loop='%i']" % (i,)).get_attribute('126-id')
pic_link = chrome.find_element_by_xpath(
"//div[@data-loop='%i']/div[1]/a/img" % (i,)).get_attribute('src')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@data-loop='%i']//ins//bdi" % (i,)).text
sale_price = sale_price.rstrip(' NT$')
ori_price = chrome.find_element_by_xpath(
"//div[@data-loop='%i']//del//bdi" % (i,)).text
ori_price = ori_price.rstrip(' NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@data-loop='%i']/div[2]//span[@class='woocommerce-Price-amount amount']" % (i,)).text
sale_price = sale_price.rstrip(' NT$')
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Jcjc():
shop_id = 35
name = 'jcjc'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.jcjc-dailywear.com/collections/in-stock?limit=24&page=" + \
str(p) + "&sort=featured"
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a/p[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a[1][@href]" % (i,)).get_attribute('href')
pic_link = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/span/a/img" % (i,)).get_attribute('src')
page_id = pic_link[pic_link.find("i/")+2:pic_link.find(".j")]
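                # (the slice above pulls the product id out of the image URL, between "i/" and the file extension)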
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a/p[2]/span" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a/p[2]/s/span" % (i,)).text
ori_price = ori_price.strip('NT$ ')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a/p[2]/span" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Ccshop():
shop_id = 36
name = 'ccshop'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.ccjshop.com/products?page=" + str(p)
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Iris():
shop_id = 37
name = 'iris'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.irisgarden.com.tw/products?page=" + str(p)
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[@class='boxify-item product-item ng-isolate-scope'][%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Nook():
shop_id = 39
name = 'nook'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.nooknook.me/products?page=" + str(p)
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Greenpea():
shop_id = 40
name = 'greenpea'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.greenpea-tw.com/products?page=" + str(p)
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[3]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
                    i += 1
                    if(i == 25):
                        p += 1
                    continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Queen():
shop_id = 42
name = 'queen'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.queenshop.com.tw/zh-TW/QueenShop/ProductList?item1=01&item2=all&Page=" + \
str(p) + "&View=4"
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/a/p" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "SaleID=")
pic_link = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/a/img[1]" % (i,)).get_attribute('data-src')
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/p[2]/span[2]" % (i,)).text
sale_price = sale_price.strip('NT. ')
ori_price = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/p[2]/span[1]" % (i,)).text
ori_price = ori_price.strip('NT. ')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/p[2]/span[1]" % (i,)).text
sale_price = sale_price.strip('NT. ')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Cozyfee():
shop_id = 48
name = 'cozyfee'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.cozyfee.com/product.php?page=" + \
str(p) + "&cid=55#prod_list"
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 41):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/div[2]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/div[2]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("action=detail&pid=")
pic_link = chrome.find_element_by_xpath(
"//li[%i]/div[1]/a/img[1]" % (i,)).get_attribute('data-original')
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div[3]/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 41):
p += 1
continue
i += 1
if(i == 41):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Reishop():
shop_id = 49
name = 'reishop'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.reishop.com.tw/pdlist2.asp?item1=all&item2=&item3=&keyword=&ob=A&pagex=&pageno=" + \
str(p)
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 31):
try:
title = chrome.find_element_by_xpath(
"//figcaption[%i]/a/span[2]/span[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//figcaption[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
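                # the product id lives in the query string; drop the "yano=YA" prefix and the "&color=" part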
page_id = make_id.query
page_id = page_id.lstrip("yano=YA")
page_id = page_id.replace("&color=", "")
pic_link = chrome.find_element_by_xpath(
"//figcaption[%i]/a/span/img[1]" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//figcaption[%i]/a/span[2]/span[2]/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 31):
p += 1
continue
i += 1
if(i == 31):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Yourz():
shop_id = 50
name = 'yourz'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.yourz.com.tw/product/category/34/1/" + str(p)
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 13):
try:
title = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div/table/tbody/tr/td/div/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div/table/tbody/tr/td/div/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/product/detail/")
pic_link = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div/a/img" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div[4]/p/font" % (i,)).text
sale_price = sale_price.replace('VIP價:NT$ ', '')
sale_price = sale_price.rstrip('元')
ori_price = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div[4]/p/br" % (i,)).text
ori_price = ori_price.replace('NT$ ', '')
ori_price = ori_price.rstrip('元')
except:
i += 1
if(i == 13):
p += 1
continue
i += 1
if(i == 13):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Seoulmate():
shop_id = 54
name = 'seoulmate'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.seoulmate.com.tw/catalog.php?m=115&s=249&t=0&sort=&page=" + \
str(p)
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 33):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/p[1]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//ul/li[%i]/p[1]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("m=115&s=249&t=0&id=", "")
pic_link = chrome.find_element_by_xpath(
"//ul/li[%i]/a/img[1]" % (i,)).get_attribute('src')
if(pic_link == ""):
i += 1
if(i == 33):
p += 1
continue
except:
i += 1
if(i == 33):
p += 1
continue
try:
ori_price = chrome.find_element_by_xpath(
"//ul/li[%i]/p[3]/del" % (i,)).text
ori_price = ori_price.strip('NT.')
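                # p[3] holds both prices; keep only the text after the second "NT." marker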
sale_price = chrome.find_element_by_xpath(
"//ul/li[%i]/p[3]" % (i,)).text
                sale_price = sale_price.strip('NT.')
locate = sale_price.find("NT.")
sale_price = sale_price[locate+3:len(sale_price)]
except:
try:
sale_price = chrome.find_element_by_xpath(
"//ul/li[%i]/p[3]" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 33):
p += 1
continue
i += 1
if(i == 33):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Sweesa():
shop_id = 55
name = 'sweesa'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.sweesa.com/Shop/itemList.aspx?&m=20&o=5&sa=1&smfp=" + \
str(p)
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 45):
try:
title = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[2]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[2]/a" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("mNo1=", "")
page_id = page_id.replace("&m=20", "")
pic_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]//a/img[@src]" % (i,)).get_attribute("src")
sale_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[4]/span" % (i,)).text
sale_price = sale_price.strip('TWD.')
ori_price = ""
except:
i += 1
if(i == 45):
p += 1
continue
i += 1
if(i == 45):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Pazzo():
shop_id = 56
name = 'pazzo'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.pazzo.com.tw/recent?P=" + str(p)
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 41):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/div[2]/p/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/div[2]/p/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("c=")
pic_link = chrome.find_element_by_xpath(
"//li[@class='item'][%i]/div[@class='item__images']/a/picture/img[@class='img-fluid']" % (i,)).get_attribute('src')
except:
i += 1
if(i == 41):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div[2]/p[2]/span[2]" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = chrome.find_element_by_xpath(
"//li[%i]/div[2]/p[2]/span[1]" % (i,)).text
ori_price = ori_price.strip('NT.')
except:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div[2]/p[2]/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
i += 1
if(i == 41):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Meierq():
shop_id = 57
name = 'meierq'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
page = 0
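    # walk each category listing in turn; page_Max is assumed to be a module-level page limit defined elsewhere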
prefix_urls = [
"https://www.meierq.com/zh-tw/category/bottomclothing?P=",
"https://www.meierq.com/zh-tw/category/jewelry?P=",
"https://www.meierq.com/zh-tw/category/outerclothing?P=",
"https://www.meierq.com/zh-tw/category/accessories?P=",
]
for prefix in prefix_urls:
page += 1
for i in range(1, page_Max):
url = f"{prefix}{i}"
try:
print(url)
chrome.get(url)
chrome.find_element_by_xpath("//div[@class='items__image']")
except:
print("find_element_by_xpath_break", page)
if(page == 4):
chrome.quit()
print("break")
break
break
i = 1
while(i < 41):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/div/p/a" % (i,)).text
except:
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/div/p/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "n/")
page_id = page_id[:page_id.find("?c")]
pic_link = chrome.find_element_by_xpath(
"//li[%i]/div/img" % (i,)).get_attribute('src')
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div/p/span[2]" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = chrome.find_element_by_xpath(
"//li[%i]/div/p/span" % (i,)).text
ori_price = ori_price.strip('NT.')
except:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div/p/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 41):
p += 1
continue
i += 1
if(i == 41):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Harper():
shop_id = 58
name = 'harper'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
while True:
url = "https://www.harper.com.tw/Shop/itemList.aspx?&m=13&smfp=" + \
str(p)
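        # stop after 20 pages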
if(p > 20):
chrome.quit()
break
try:
chrome.get(url)
except:
chrome.quit()
break
i = 1
while(i < 80):
try:
title = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[2]/a" % (i,)).text
except:
p += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[2]/a" % (i,)).get_attribute('href')
page_id = stripID(page_link, "cno=")
page_id = page_id.replace("&m=13", "")
pic_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]//a/img[@src]" % (i,)).get_attribute("src")
sale_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[4]/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 79):
p += 1
continue
i += 1
if(i == 79):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Lurehsu():
shop_id = 59
name = 'lurehsu'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.lurehsu.com/zh-TW/lure/productList?item1=00&item2=16&page=" + \
str(p)
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
i = 1
while(i < 28):
try:
title = chrome.find_element_by_xpath(
"//div[@class='grid-item'][%i]/a/div[2]/p" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='grid-item'][%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("SaleID=")
page_id = page_id[:page_id.find("&Color")]
pic_link = chrome.find_element_by_xpath(
"//div[@class='grid-item'][%i]/a/div/img" % (i,)).get_attribute('src')
except:
i += 1
if(i == 28):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='grid-item'][%i]/a/div[2]/div/p/span[2]" % (i,)).text
sale_price = sale_price.strip('NTD.')
ori_price = chrome.find_element_by_xpath(
"//div[@class='grid-item'][%i]/a/div[2]/div/p/span[1]" % (i,)).text
ori_price = ori_price.strip('NTD.')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='grid-item'][%i]/a/div[2]/div/p" % (i,)).text
sale_price = sale_price.strip('NTD.')
ori_price = ""
except:
i += 1
if(i == 28):
p += 1
continue
i += 1
if(i == 28):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Pufii():
shop_id = 61
name = 'pufii'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.pufii.com.tw/Shop/itemList.aspx?&m=6&smfp=" + str(p)
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 37):
try:
title = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[3]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[1]/a" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("mNo1=P", "")
page_id = page_id.replace("&m=6", "")
pic_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]//a/img[@src]" % (i,)).get_attribute("src")
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[@class='pricediv']/span[2]" % (i,)).text
sale_price = sale_price.strip('活動價NT')
ori_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[@class='pricediv']/span[1]" % (i,)).text
ori_price = ori_price.strip('NT')
except:
sale_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[@class='pricediv']/span[1]" % (i,)).text
sale_price = sale_price.strip('NT')
ori_price = ""
except:
i += 1
if(i == 37):
p += 1
continue
i += 1
if(i == 37):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Mouggan():
shop_id = 62
name = 'mouggan'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.mouggan.com/zh-tw/category/ALL-ITEM?P=" + str(p)
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
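        # close the pop-up overlay if it appears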
try:
chrome.find_element_by_xpath(
"//a[@class='close p-0']/i[@class='icon-popup-close']").click()
except:
pass
i = 1
while(i < 19):
try:
title = chrome.find_element_by_xpath(
"//div[2]/div[%i]/div[2]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[2]/div[%i]/div[2]/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "c=")
pic_link = chrome.find_element_by_xpath(
"//div[2]/div[%i]/div[1]/div/a/img" % (i,)).get_attribute('src')
except:
i += 1
if(i == 19):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[2]/div[%i]/div[2]/div[1]/span[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[2]/div[%i]/div[2]/div[1]/span[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[2]/div[%i]/div[2]/div[1]/span[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 19):
p += 1
continue
i += 1
if(i == 19):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Mercci():
shop_id = 64
name = 'mercci'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.mercci22.com/zh-tw/tag/HOTTEST?P=" + str(p)
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
# chrome.find_element_by_xpath("//a[@class='close p-0']/i[@class='icon-popup-close']").click()
i = 1
while(i < 41):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/div[@class='items__info']/div[@class='pdname']/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/div[@class='items__info']/div[@class='pdname']/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "c=")
pic_link = chrome.find_element_by_xpath(
"//li[%i]/a[@class='items__image js-loaded']/img" % (i,)).get_attribute('src')
except:
i += 1
if(i == 41):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div[@class='items__info']/div[@class='price']" % (i,)).text
sale_price = sale_price.strip('NT.')
k = sale_price.find("NT.")
sale_price = sale_price[k+3:len(sale_price)]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/div[@class='items__info']/div[@class='price']/span" % (i,)).text
ori_price = ori_price.strip('NT.')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div[@class='items__info']/p[@class='price']/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 41):
p += 1
continue
i += 1
if(i == 41):
p += 1
if(sale_price == ""):
continue
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Sivir():
shop_id = 65
name = 'sivir'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.sivir.com.tw/collections/new-all-%E6%89%80%E6%9C%89?page=" + \
str(p)
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product col-lg-3 col-sm-4 col-6'][%i]/div[2]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='product col-lg-3 col-sm-4 col-6'][%i]/div[2]/a[@href]" % (i,)).get_attribute('href')
page_id = chrome.find_element_by_xpath(
"//div[@class='product col-lg-3 col-sm-4 col-6'][%i]/div[2]/a[@data-id]" % (i,)).get_attribute('data-id')
pic_link = chrome.find_element_by_xpath(
"//div[@class='product col-lg-3 col-sm-4 col-6'][%i]/div[1]/a/img" % (i,)).get_attribute('data-src')
pic_link = f"https:{pic_link}"
sale_price = chrome.find_element_by_xpath(
"//div[@class='product col-lg-3 col-sm-4 col-6'][%i]/div[4]/span" % (i,)).text
sale_price = sale_price.replace('NT$', '')
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Nana():
shop_id = 66
name = 'nana'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when the page changes
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.2nana.tw/product.php?page=" + \
str(p) + "&cid=1#prod_list"
        # if the page is out of range (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 75):
try:
title = chrome.find_element_by_xpath(
"//div[@class='col-xs-6 col-sm-4 col-md-3'][%i]/div/div[2]/div[1]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='col-xs-6 col-sm-4 col-md-3'][%i]/div/div[1]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("action=detail&pid=")
pic_link = chrome.find_element_by_xpath(
"//div[@class='col-xs-6 col-sm-4 col-md-3'][%i]/div/div[1]/a/img" % (i,)).get_attribute('data-original')
sale_price = chrome.find_element_by_xpath(
"//div[@class='col-xs-6 col-sm-4 col-md-3'][%i]/div/div[2]/div[2]/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = chrome.find_element_by_xpath(
"//div[@class='col-xs-6 col-sm-4 col-md-3'][%i]/div/div[2]/div[2]/del" % (i,)).text
ori_price = ori_price.strip('NT.')
except:
i += 1
if(i == 75):
p += 1
continue
i += 1
if(i == 75):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Aachic():
shop_id = 70
name = 'aachic'
options = Options() # start headless mode
options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
df = pd.DataFrame() # holds the current page's rows; merged into dfAll on page change
dfAll = pd.DataFrame() # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.aachic.com/categories/all-%E6%89%80%E6%9C%89%E5%95%86%E5%93%81?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
# if the page is past the end (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//a[%i]/div[2]/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='col-xs-12 ProductList-list']/a[%i][@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//a[%i]/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//a[%i]/div[2]/div[3]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Lovso():
shop_id = 71
name = 'lovso'
options = Options() # start headless mode
options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
df = pd.DataFrame() # holds the current page's rows; merged into dfAll on page change
dfAll = pd.DataFrame() # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.lovso.com.tw/Shop/itemList.aspx?m=8&o=0&sa=0&smfp=" + \
str(p)
# if the page is past the end (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 37):
try:
title = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[2]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[1]/center/a" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("mNo1=", "")
page_id = page_id.replace("&m=8", "")
pic_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]//a/img[@src]" % (i,)).get_attribute("src")
sale_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[4]" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[3]" % (i,)).text
ori_price = ori_price.strip('NT.')
except:
i += 1
if(i == 37):
p += 1
continue
i += 1
if(i == 37):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Bowwow():
shop_id = 72
name = 'bowwow'
options = Options() # start headless mode
options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
df = pd.DataFrame() # holds the current page's rows; merged into dfAll on page change
dfAll = pd.DataFrame() # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.bowwowkorea.com/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=48"
# if the page is past the end (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 49):
try:
title = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 49):
p += 1
continue
i += 1
if(i == 49):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Suitangtang():
shop_id = 74
name = 'suitangtang'
options = Options() # start headless mode
options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
i = 1
df = pd.DataFrame() # holds the current page's rows; merged into dfAll on page change
dfAll = pd.DataFrame() # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.suitangtang.com/Catalog/WOMAN"
# if the page is past the end (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
chrome.find_element_by_tag_name('body').send_keys(Keys.END)
time.sleep(1)
while(True):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-list'][%i]/div[@class='name']" % (i,)).text
k = title.find("NT$")
title = title[0:k-1]
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='product-list'][%i]/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "/Product/")
page_id = page_id[:page_id.find("?c=")]
pic_link = chrome.find_element_by_xpath(
"//div[@class='product-list'][%i]/a/img" % (i,)).get_attribute('data-original')
except:
i += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-list'][%i]/div[2]/span" % (i,)).text
sale_price = sale_price.strip('NT$')
k = sale_price.find("NT$")
sale_price = sale_price[k+3:len(sale_price)]
ori_price = chrome.find_element_by_xpath(
"//div[@class='product-list'][%i]/div[2]/span/span" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-list'][%i]/div[2]/span" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
continue
i += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Chochobee():
shop_id = 78
name = 'chochobee'
options = Options() # start headless mode
options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
df = pd.DataFrame() # holds the current page's rows; merged into dfAll on page change
dfAll = pd.DataFrame() # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.chochobee.com/catalog.php?m=40&s=0&t=0&sort=&page=" + \
str(p)
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//section/ul/li[%i]/span[2]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//section/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("m=40&s=0&t=0&id=", "")
pic_link = chrome.find_element_by_xpath(
"//section/ul/li[%i]/a/div/img" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//section/ul/li[%i]/span[3]" % (i,)).text
sale_price = sale_price.strip('NT.$')
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Asobi():
shop_id = 80
name = 'asobi'
options = Options() # start headless mode
options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
df = pd.DataFrame() # holds the current page's rows; merged into dfAll on page change
dfAll = pd.DataFrame() # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.asobi.com.tw/Shop/itemList.aspx?undefined&smfp=" + \
str(p)
# if the page is past the end (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 34):
try:
title = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[2]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[1]/a" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("mNo1=", "")
page_id = page_id.replace("&&m=1&o=5&sa=1", "")
pic_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]//a/img[@src]" % (i,)).get_attribute("src")
sale_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[3]/div/span" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 34):
p += 1
continue
i += 1
if(i == 34):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Kiyumi():
shop_id = 81
name = 'kiyumi'
options = Options() # start headless mode
options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
df = pd.DataFrame() # holds the current page's rows; merged into dfAll on page change
dfAll = pd.DataFrame() # accumulates all scraped rows
flag = 0
while True:
if (flag == 1):
chrome.quit()
break
url = "https://www.kiyumishop.com/catalog.php?m=73&s=0&t=0&sort=&page=" + \
str(p)
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//section/ul/li[%i]/span[2]" % (i,)).text
except:
flag += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//section/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("m=73&s=0&t=0&id=", "")
pic_link = chrome.find_element_by_xpath(
"//section/ul/li[%i]/a/div/img" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//section/ul/li[%i]/span[3]" % (i,)).text
sale_price = sale_price.strip('NT.$')
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Genquo():
shop_id = 82
name = 'genquo'
options = Options() # start headless mode
options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
df = pd.DataFrame() # holds the current page's rows; merged into dfAll on page change
dfAll = pd.DataFrame() # accumulates all scraped rows
flag = 0
while True:
if (flag == 1):
chrome.quit()
break
url = "https://www.genquo.com/zh-tw/category/women?P=" + str(p)
print("處理頁面:", url)
# 如果頁面超過(找不到),直接印出completed然後break跳出迴圈
try:
chrome.get(url)
except:
break
i = 1
while(i < 37):
try:
title = chrome.find_element_by_xpath(
"//li[@class='item'][%i]/div/p/a" % (i,)).text
except:
flag += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[@class='item'][%i]/div/p/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path + '?' + make_id.query
page_id = page_id.lstrip("/zh-tw/market/n/")
page_id = page_id[:page_id.find("?c=")]
pic_link = chrome.find_element_by_xpath(
"//li[@class='item'][%i]/div/a/img" % (i,)).get_attribute('src')
except:
i += 1
if(i == 37):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item'][%i]/div/p/span[1]" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item'][%i]/div/p/span[2]" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = chrome.find_element_by_xpath(
"//li[@class='item'][%i]/div/p/span[1]" % (i,)).text
ori_price = ori_price.strip('NT.')
except:
i += 1
if(i == 37):
p += 1
continue
i += 1
if(i == 37):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Oolala():
shop_id = 86
name = 'oolala'
options = Options() # start headless mode
options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
df = pd.DataFrame() # holds the current page's rows; merged into dfAll on page change
dfAll = pd.DataFrame() # accumulates all scraped rows
flag = 0
while True:
if (flag == 1):
chrome.quit()
break
url = "https://www.styleoolala.com/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=48"
print("處理頁面:", url)
# 如果頁面超過(找不到),直接印出completed然後break跳出迴圈
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 49):
try:
title = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
flag += 1
print(p, i)
break
try:
page_link = chrome.find_element_by_xpath(
"//div[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 49):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 49):
p += 1
continue
i += 1
if(i == 49):
p += 1
if(sale_price == ""):
continue
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Pattis():
shop_id = 87
name = 'pattis'
options = Options() # start headless mode
options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
df = pd.DataFrame() # holds the current page's rows; merged into dfAll on page change
dfAll = pd.DataFrame() # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.i-pattis.com/catalog.php?m=1&s=21&t=0&sort=&page=" + \
str(p)
# if the page is past the end (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//section[@class='cataList']/ul/li[%i]/span[2]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//section[@class='cataList']/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("m=1&s=21&t=0&id=", "")
pic_link = chrome.find_element_by_xpath(
"//li[%i]/a/img" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//ul/li[%i]/span[3]" % (i,)).text
sale_price = sale_price.strip('NT.$')
ori_price = chrome.find_element_by_xpath(
"//ul/li[%i]/del" % (i,)).text
ori_price = ori_price.strip('NT.$')
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Scheminggg():
shop_id = 90
name = 'scheminggg'
options = Options() # start headless mode
options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
df = pd.DataFrame() # holds the current page's rows; merged into dfAll on page change
dfAll = pd.DataFrame() # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.scheminggg.com/productlist?page=" + str(p)
# if the page is past the end (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 37):
try:
title = chrome.find_element_by_xpath(
"//div[@class='columns']/div[%i]/a/p" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='columns']/div[%i]/a[1][@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("/products?saleid=")
page_id = page_id.rstrip("&colorid=")
pic_link = chrome.find_element_by_xpath(
"//div[@class='columns']/div[%i]/a/img" % (i,)).get_attribute('src')
if (pic_link == ""):
i += 1
if(i == 37):
p += 1
continue
except:
i += 1
if(i == 37):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='columns']/div[%i]/p[2]" % (i,)).text
sale_price = sale_price.strip('NT. ')
ori_price = chrome.find_element_by_xpath(
"//div[@class='columns']/div[%i]/p[1]" % (i,)).text
ori_price = ori_price.strip('NT. ')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='columns']/div[%i]/p[1]" % (i,)).text
sale_price = sale_price.strip('NT. ')
ori_price = ""
except:
i += 1
if(i == 37):
p += 1
continue
i += 1
if(i == 37):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
"""
Tests for CBMonthEnd, CBMonthBegin, SemiMonthEnd, and SemiMonthBegin in offsets
"""
from datetime import (
date,
datetime,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas._libs.tslibs.offsets import (
CBMonthBegin,
CBMonthEnd,
CDay,
SemiMonthBegin,
SemiMonthEnd,
)
from pandas import (
DatetimeIndex,
Series,
_testing as tm,
date_range,
)
from pandas.tests.tseries.offsets.common import (
Base,
assert_is_on_offset,
assert_offset_equal,
)
from pandas.tests.tseries.offsets.test_offsets import _ApplyCases
from pandas.tseries import offsets as offsets
from pandas.tseries.holiday import USFederalHolidayCalendar
class CustomBusinessMonthBase:
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.offset = self._offset()
self.offset1 = self.offset
self.offset2 = self._offset(2)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
assert unpickled == obj
_check_roundtrip(self._offset())
_check_roundtrip(self._offset(2))
_check_roundtrip(self._offset() * 2)
def test_copy(self):
# GH 17452
off = self._offset(weekmask="Mon Wed Fri")
assert off == off.copy()
class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
_offset = CBMonthEnd
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == "<CustomBusinessMonthEnd>"
assert repr(self.offset2) == "<2 * CustomBusinessMonthEnds>"
def test_call(self):
with tm.assert_produces_warning(FutureWarning):
# GH#34171 DateOffset.__call__ is deprecated
assert self.offset2(self.d) == datetime(2008, 2, 29)
def testRollback1(self):
assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
def testRollback2(self):
assert CBMonthEnd(10).rollback(self.d) == datetime(2007, 12, 31)
def testRollforward1(self):
assert CBMonthEnd(10).rollforward(self.d) == datetime(2008, 1, 31)
def test_roll_date_object(self):
offset = CBMonthEnd()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 8, 31)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 28)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [
(CBMonthEnd(), datetime(2008, 1, 31), True),
(CBMonthEnd(), datetime(2008, 1, 1), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, d, expected = case
assert_is_on_offset(offset, d, expected)
apply_cases: _ApplyCases = [
(
CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29),
},
),
(
2 * CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 2, 7): datetime(2008, 3, 31),
},
),
(
-CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 2, 8): datetime(2008, 1, 31),
},
),
(
-2 * CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2007, 11, 30),
datetime(2008, 2, 9): datetime(2007, 12, 31),
},
),
(
CBMonthEnd(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29),
},
),
]
@pytest.mark.parametrize("case", apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthEnd(10)
assert result == datetime(2013, 7, 31)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CBMonthEnd() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 29)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 5, 31)
assert rs == xp
def test_holidays(self):
# Define a TradingDay offset
holidays = ["2012-01-31", datetime(2012, 2, 28), np.datetime64("2012-02-29")]
bm_offset = CBMonthEnd(holidays=holidays)
dt = datetime(2012, 1, 1)
assert dt + bm_offset == datetime(2012, 1, 30)
assert dt + 2 * bm_offset == datetime(2012, 2, 27)
@pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
def test_datetimeindex(self):
from pandas.tseries.holiday import USFederalHolidayCalendar
hcal = USFederalHolidayCalendar()
freq = CBMonthEnd(calendar=hcal)
assert date_range(start="20120101", end="20130101", freq=freq).tolist()[
0
] == datetime(2012, 1, 31)
class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base):
_offset = CBMonthBegin
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == "<CustomBusinessMonthBegin>"
assert repr(self.offset2) == "<2 * CustomBusinessMonthBegins>"
def test_call(self):
with tm.assert_produces_warning(FutureWarning):
# GH#34171 DateOffset.__call__ is deprecated
assert self.offset2(self.d) == datetime(2008, 3, 3)
def testRollback1(self):
assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
def testRollback2(self):
assert CBMonthBegin(10).rollback(self.d) == datetime(2008, 1, 1)
def testRollforward1(self):
assert CBMonthBegin(10).rollforward(self.d) == datetime(2008, 1, 1)
def test_roll_date_object(self):
offset = CBMonthBegin()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 3)
result = offset.rollforward(dt)
assert result == datetime(2012, 10, 1)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [
(CBMonthBegin(), datetime(2008, 1, 1), True),
(CBMonthBegin(), datetime(2008, 1, 31), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
apply_cases: _ApplyCases = [
(
CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 2, 7): datetime(2008, 3, 3),
},
),
(
2 * CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2008, 3, 3),
datetime(2008, 2, 7): datetime(2008, 4, 1),
},
),
(
-CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2007, 12, 3),
datetime(2008, 2, 8): datetime(2008, 2, 1),
},
),
(
-2 * CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2007, 11, 1),
datetime(2008, 2, 9): datetime(2008, 1, 1),
},
),
(
CBMonthBegin(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 7): datetime(2008, 2, 1),
},
),
]
@pytest.mark.parametrize("case", apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthBegin(10)
assert result == datetime(2013, 8, 1)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CBMonthBegin() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 1)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 6, 1)
assert rs == xp
def test_holidays(self):
# Define a TradingDay offset
holidays = ["2012-02-01", datetime(2012, 2, 2), np.datetime64("2012-03-01")]
bm_offset = CBMonthBegin(holidays=holidays)
dt = datetime(2012, 1, 1)
assert dt + bm_offset == datetime(2012, 1, 2)
assert dt + 2 * bm_offset == datetime(2012, 2, 3)
@pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
def test_datetimeindex(self):
hcal = USFederalHolidayCalendar()
cbmb = CBMonthBegin(calendar=hcal)
assert date_range(start="20120101", end="20130101", freq=cbmb).tolist()[
0
] == datetime(2012, 1, 3)
class TestSemiMonthEnd(Base):
_offset = SemiMonthEnd
offset1 = _offset()
offset2 = _offset(2)
def test_offset_whole_year(self):
dates = (
datetime(2007, 12, 31),
datetime(2008, 1, 15),
datetime(2008, 1, 31),
datetime(2008, 2, 15),
datetime(2008, 2, 29),
datetime(2008, 3, 15),
datetime(2008, 3, 31),
datetime(2008, 4, 15),
datetime(2008, 4, 30),
datetime(2008, 5, 15),
datetime(2008, 5, 31),
datetime(2008, 6, 15),
datetime(2008, 6, 30),
datetime(2008, 7, 15),
datetime(2008, 7, 31),
datetime(2008, 8, 15),
datetime(2008, 8, 31),
datetime(2008, 9, 15),
datetime(2008, 9, 30),
datetime(2008, 10, 15),
datetime(2008, 10, 31),
datetime(2008, 11, 15),
datetime(2008, 11, 30),
datetime(2008, 12, 15),
datetime(2008, 12, 31),
)
for base, exp_date in zip(dates[:-1], dates[1:]):
assert_offset_equal(SemiMonthEnd(), base, exp_date)
# ensure .apply_index works as expected
s = DatetimeIndex(dates[:-1])
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = SemiMonthEnd() + s
exp = DatetimeIndex(dates[1:])
tm.assert_index_equal(result, exp)
# ensure generating a range with DatetimeIndex gives same result
result = date_range(start=dates[0], end=dates[-1], freq="SM")
exp = DatetimeIndex(dates, freq="SM")
tm.assert_index_equal(result, exp)
offset_cases = []
offset_cases.append(
(
SemiMonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 15): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 15),
datetime(2006, 12, 14): datetime(2006, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 15),
datetime(2007, 1, 1): datetime(2007, 1, 15),
datetime(2006, 12, 1): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
SemiMonthEnd(day_of_month=20),
{
datetime(2008, 1, 1): datetime(2008, 1, 20),
datetime(2008, 1, 15): datetime(2008, 1, 20),
datetime(2008, 1, 21): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 20),
datetime(2006, 12, 14): datetime(2006, 12, 20),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 20),
datetime(2007, 1, 1): datetime(2007, 1, 20),
datetime(2006, 12, 1): datetime(2006, 12, 20),
datetime(2006, 12, 15): datetime(2006, 12, 20),
},
)
)
offset_cases.append(
(
SemiMonthEnd(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 16): datetime(2008, 1, 31),
datetime(2008, 1, 15): datetime(2008, 1, 15),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 15),
},
)
)
offset_cases.append(
(
SemiMonthEnd(0, day_of_month=16),
{
datetime(2008, 1, 1): datetime(2008, 1, 16),
datetime(2008, 1, 16): datetime(2008, 1, 16),
datetime(2008, 1, 15): datetime(2008, 1, 16),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 16),
},
)
)
offset_cases.append(
(
SemiMonthEnd(2),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 15),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2007, 1, 16): datetime(2007, 2, 15),
datetime(2006, 11, 1): datetime(2006, 11, 30),
},
)
)
offset_cases.append(
(
SemiMonthEnd(-1),
{
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 6, 15),
datetime(2008, 12, 31): datetime(2008, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 15),
datetime(2006, 12, 30): datetime(2006, 12, 15),
datetime(2007, 1, 1): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
SemiMonthEnd(-1, day_of_month=4),
{
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2007, 1, 4): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 6, 4),
datetime(2008, 12, 31): datetime(2008, 12, 4),
datetime(2006, 12, 5): datetime(2006, 12, 4),
datetime(2006, 12, 30): datetime(2006, 12, 4),
datetime(2007, 1, 1): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
SemiMonthEnd(-2),
{
datetime(2007, 1, 1): datetime(2006, 12, 15),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 3, 15): datetime(2008, 2, 15),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 14): datetime(2006, 11, 15),
datetime(2007, 1, 1): datetime(2006, 12, 15),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
@pytest.mark.parametrize("case", offset_cases)
def test_apply_index(self, case):
# https://github.com/pandas-dev/pandas/issues/34580
offset, cases = case
s = DatetimeIndex(cases.keys())
exp = DatetimeIndex(cases.values())
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = offset + s
tm.assert_index_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = offset.apply_index(s)
tm.assert_index_equal(result, exp)
on_offset_cases = [
(datetime(2007, 12, 31), True),
(datetime(2007, 12, 15), True),
(datetime(2007, 12, 14), False),
(datetime(2007, 12, 1), False),
(datetime(2008, 2, 29), True),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
dt, expected = case
assert_is_on_offset(SemiMonthEnd(), dt, expected)
@pytest.mark.parametrize("klass", [Series, DatetimeIndex])
def test_vectorized_offset_addition(self, klass):
s = klass(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = s + SemiMonthEnd()
result2 = SemiMonthEnd() + s
exp = klass(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = klass(
[
Timestamp("2000-01-01 00:15:00", tz="US/Central"),
Timestamp("2000-02-01", tz="US/Central"),
],
name="a",
)
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = s + SemiMonthEnd()
result2 = SemiMonthEnd() + s
exp = klass(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
class TestSemiMonthBegin(Base):
_offset = SemiMonthBegin
offset1 = _offset()
offset2 = _offset(2)
def test_offset_whole_year(self):
dates = (
datetime(2007, 12, 15),
datetime(2008, 1, 1),
datetime(2008, 1, 15),
datetime(2008, 2, 1),
datetime(2008, 2, 15),
datetime(2008, 3, 1),
datetime(2008, 3, 15),
datetime(2008, 4, 1),
datetime(2008, 4, 15),
datetime(2008, 5, 1),
datetime(2008, 5, 15),
datetime(2008, 6, 1),
datetime(2008, 6, 15),
datetime(2008, 7, 1),
datetime(2008, 7, 15),
datetime(2008, 8, 1),
datetime(2008, 8, 15),
datetime(2008, 9, 1),
datetime(2008, 9, 15),
datetime(2008, 10, 1),
datetime(2008, 10, 15),
datetime(2008, 11, 1),
datetime(2008, 11, 15),
datetime(2008, 12, 1),
datetime(2008, 12, 15),
)
for base, exp_date in zip(dates[:-1], dates[1:]):
assert_offset_equal(SemiMonthBegin(), base, exp_date)
# ensure .apply_index works as expected
s = DatetimeIndex(dates[:-1])
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = SemiMonthBegin() + s
exp = DatetimeIndex(dates[1:])
tm.assert_index_equal(result, exp)
# ensure generating a range with DatetimeIndex gives same result
result = date_range(start=dates[0], end=dates[-1], freq="SMS")
exp = DatetimeIndex(dates, freq="SMS")
tm.assert_index_equal(result, exp)
offset_cases = [
(
SemiMonthBegin(),
{
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 15): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 14): datetime(2006, 12, 15),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 1): datetime(2007, 1, 15),
datetime(2006, 12, 1): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2007, 1, 1),
},
),
(
SemiMonthBegin(day_of_month=20),
{
datetime(2008, 1, 1): datetime(2008, 1, 20),
datetime(2008, 1, 15): datetime(2008, 1, 20),
datetime(2008, 1, 21): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 14): datetime(2006, 12, 20),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 1): datetime(2007, 1, 20),
datetime(2006, 12, 1): datetime(2006, 12, 20),
datetime(2006, 12, 15): datetime(2006, 12, 20),
},
),
(
SemiMonthBegin(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 16): datetime(2008, 2, 1),
datetime(2008, 1, 15): datetime(2008, 1, 15),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 2): datetime(2006, 12, 15),
datetime(2007, 1, 1): datetime(2007, 1, 1),
},
),
(
SemiMonthBegin(0, day_of_month=16),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 16): datetime(2008, 1, 16),
datetime(2008, 1, 15): datetime(2008, 1, 16),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 5): datetime(2007, 1, 16),
datetime(2007, 1, 1): datetime(2007, 1, 1),
},
),
(
SemiMonthBegin(2),
{
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 15),
datetime(2006, 12, 1): datetime(2007, 1, 1),
datetime(2006, 12, 29): datetime(2007, 1, 15),
datetime(2006, 12, 15): datetime(2007, 1, 15),
datetime(2007, 1, 1): datetime(2007, 2, 1),
datetime(2007, 1, 16): datetime(2007, 2, 15),
datetime(2006, 11, 1): datetime(2006, 12, 1),
},
),
(
SemiMonthBegin(-1),
{
datetime(2007, 1, 1): datetime(2006, 12, 15),
datetime(2008, 6, 30): datetime(2008, 6, 15),
datetime(2008, 6, 14): datetime(2008, 6, 1),
datetime(2008, 12, 31): datetime(2008, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2006, 12, 1),
datetime(2007, 1, 1): datetime(2006, 12, 15),
},
),
(
SemiMonthBegin(-1, day_of_month=4),
{
datetime(2007, 1, 1): datetime(2006, 12, 4),
datetime(2007, 1, 4): datetime(2007, 1, 1),
datetime(2008, 6, 30): datetime(2008, 6, 4),
datetime(2008, 12, 31): datetime(2008, 12, 4),
datetime(2006, 12, 5): datetime(2006, 12, 4),
datetime(2006, 12, 30): datetime(2006, 12, 4),
datetime(2006, 12, 2): datetime(2006, 12, 1),
datetime(2007, 1, 1): datetime(2006, 12, 4),
},
),
(
SemiMonthBegin(-2),
{
datetime(2007, 1, 1): datetime(2006, 12, 1),
datetime(2008, 6, 30): datetime(2008, 6, 1),
datetime(2008, 6, 14): datetime(2008, 5, 15),
datetime(2008, 12, 31): datetime(2008, 12, 1),
datetime(2006, 12, 29): datetime(2006, 12, 1),
datetime(2006, 12, 15): datetime(2006, 11, 15),
datetime(2007, 1, 1): datetime(2006, 12, 1),
},
),
]
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
@pytest.mark.parametrize("case", offset_cases)
def test_apply_index(self, case):
offset, cases = case
s = DatetimeIndex(cases.keys())
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = offset + s
exp = DatetimeIndex(cases.values())
tm.assert_index_equal(result, exp)
on_offset_cases = [
(datetime(2007, 12, 1), True),
(datetime(2007, 12, 15), True),
(datetime(2007, 12, 14), False),
(datetime(2007, 12, 31), False),
(datetime(2008, 2, 15), True),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
dt, expected = case
assert_is_on_offset(SemiMonthBegin(), dt, expected)
@pytest.mark.parametrize("klass", [Series, DatetimeIndex])
def test_vectorized_offset_addition(self, klass):
s = klass(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_daq as daq
from dash.dependencies import Input, Output, State
import plotly.graph_objs as go
import sqlite3
import pandas as pd
from flask_caching import Cache
import pyarrow as pa
import pyarrow.plasma as plasma
import numpy as np
import pickle
from datetime import timedelta
from datetime import datetime as dt
import dash_auth
import toml
import plotly
import os
from layoutCode import app_page_layout, header_colors
from metaData import aq_cage, aq_cage_new, ref_cage
usrpwd = toml.load("usrpwd.toml")
VALID_USERNAME_PASSWORD_PAIR = [[usrpwd["username"], usrpwd["password"]]]
app = dash.Dash(__name__)
auth = dash_auth.BasicAuth(app, VALID_USERNAME_PASSWORD_PAIR)
# cache = Cache(
# app.server,
# config={
# "CACHE_TYPE": "redis",
# "CACHE_TYPE": "filesystem",
# "CACHE_DIR": "cache-directory",
# "CACHE_THRESHOLD": 100,
# },
# )
start_project_time = 1541498400
# end_project_time = 1557496200 # 10th of may
end_project_time = 1562277600
# timeout = 1 * 60 * 60 # 1 hour timeout for filesystem cache
# Init
tz = "Europe/Oslo"
dt_format = "%Y-%m-%d %H:%M:%S"
depth_tags = tuple(
*[
list(range(10, 90, 2))
+ list(range(97, 107))
+ list(range(107, 109))
+ list(range(110, 126, 2))
]
)
acc_tags = tuple(range(11, 91, 2))
aqz_tags = tuple(*[list(range(10, 50)) + list(range(97, 107))])
aqz_depth = tuple(*[list(range(10, 50, 2)) + list(range(97, 107))])
ref_depth = tuple(
*[list(range(50, 90, 2)) + list(range(107, 109)) + list(range(110, 126, 2))]
)
ref_tags = tuple(
*[list(range(50, 90)) + list(range(107, 109)) + list(range(110, 126, 2))]
)
all_tags = tuple(sorted([*aqz_tags, *ref_tags]))
aqz_tbrs = tuple([732, 735, 837])
ref_tbrs = tuple([730, 734, 836])
all_tbrs = tuple(sorted([*aqz_tbrs, *ref_tbrs]))
tag_frequencies = (69, 71, 73)
tag_freq_69 = tuple(range(10, 90))
tag_freq_71 = tuple(
*[list(range(97, 102)) + list(range(107, 109)) + list(range(110, 116, 2))]
)
tag_freq_73 = tuple(*[list(range(102, 107)) + list(range(116, 126, 2))])
# PROBABLY LAST AVAILABLE 71/73 data timestamp: 1549949008 | 12.02.2019 06:23:28
# Parameters
showDiv = {"display": "inline-block"}
hideDiv = {"display": "none"}
marker_line_options = [
{"label": "Solid", "value": "solid"},
{"label": "dot", "value": "dot"},
{"label": "Dash", "value": "dash"},
{"label": "Long Dash", "value": "longdash"},
{"label": "Dash Dot", "value": "dashdot"},
{"label": "Long Dash Dot", "value": "longdashdot"},
]
timeseries_xaxis_dict = dict(
rangeselector=dict(
buttons=list(
[
dict(count=1, label="1h", step="hour", stepmode="backward"),
dict(count=2, label="2h", step="hour", stepmode="backward"),
dict(count=3, label="3h", step="hour", stepmode="backward"),
dict(count=6, label="6h", step="hour", stepmode="backward"),
dict(count=12, label="12h", step="hour", stepmode="backward"),
dict(count=24, label="24h", step="hour", stepmode="backward"),
dict(count=7, label="1w", step="day", stepmode="backward"),
dict(count=14, label="2w", step="day", stepmode="backward"),
dict(count=1, label="1m", step="month", stepmode="backward"),
dict(count=2, label="2m", step="month", stepmode="backward"),
dict(count=3, label="3m", step="month", stepmode="backward"),
dict(count=6, label="6m", step="month", stepmode="backward"),
dict(step="all"),
]
)
),
rangeslider=dict(visible=True),
type="date",
title="Time",
)
def db_sql_query(starts_ts, table):
# Shared filters
ts_filt = f"timestamp > {starts_ts}"
order = "timestamp ASC"
freq = (69, 71, 73) # tag and positions
if table == "tag":
comm = "S256"
columns = "timestamp, tbr_serial_id, tag_id, tag_data, snr, millisecond"
freq_filt = f"frequency IN {freq}"
comm_filt = f"comm_protocol = '{comm}'"
tag_filt = f"tag_id IN {all_tags}"
tbr_filt = f"tbr_serial_id IN {all_tbrs}"
filters = (
f"{ts_filt} AND {freq_filt} AND {comm_filt} AND {tag_filt} AND {tbr_filt}"
)
elif table == "tbr":
columns = "timestamp, tbr_serial_id, temperature, noise_avg, noise_peak"
filters = f"{ts_filt}"
elif table == "pos":
table = "positions"
columns = (
"timestamp, tag_id, frequency, millisecond, x, y, z, latitude, longitude"
)
tag_filt = f"tag_id IN {all_tags}"
freq_filt = f"frequency IN {freq}"
filters = f"{ts_filt} AND {tag_filt} AND {freq_filt}"
# query
query = f"SELECT {columns} FROM {table} WHERE {filters} ORDER BY {order};"
return query
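# For illustration, db_sql_query(start_project_time, "tbr") builds (as one line):
# SELECT timestamp, tbr_serial_id, temperature, noise_avg, noise_peak
# FROM tbr WHERE timestamp > 1541498400 ORDER BY timestamp ASC;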
def clean_df(df, name):
# Optimize and transform dataframe
df.timestamp = df.timestamp.astype("uint32")
print("Converting timestamps to datetime...")
df["Date"] = pd.to_datetime(df["timestamp"], unit="s", utc=True)
print("Converting timezone...")
df.Date = df.Date.dt.tz_convert(tz)
print("Converting datetime to str...")
df["Date_str"] = df.Date.dt.strftime(dt_format)
print("Extracting hour into new column...")
df["hour"] = df.Date_str.str.slice(11, 13, 1)
df.hour = df.hour.astype("uint8")
print("Setting datetime as index")
df = df.set_index("Date")
print("optimizing memory...")
if name == "tag":
df.tbr_serial_id = df.tbr_serial_id.astype("uint16")
df.tag_id = df.tag_id.astype("uint8")
df.tag_data = -df.tag_data
df.snr = df.snr.astype("uint8")
df.millisecond = df.millisecond.astype("uint16")
elif name == "tbr":
df.tbr_serial_id = df.tbr_serial_id.astype("uint16")
df.noise_avg = df.noise_avg.astype("uint8")
df.noise_peak = df.noise_peak.astype("uint8")
elif name == "pos":
df.tag_id = df.tag_id.astype("uint8")
df.frequency = df.frequency.astype("uint8")
df.millisecond = df.millisecond.astype("uint16")
df.z = -df.z
return df
def write_to_plasma(df, type):
with open("plasma_state.pkl", "rb") as f:
plasma_state = pickle.load(f)
# get the object ID for the dataframe
object_id = plasma_state[type]
client = plasma.connect("/tmp/plasma")
client.delete([object_id])
# Convert the Pandas DataFrame into a PyArrow RecordBatch
record_batch = pa.RecordBatch.from_pandas(df)
# Create the Plasma object from the PyArrow RecordBatch. Most of the work here
# is done to determine the size of buffer to request from the object store.
object_id = plasma.ObjectID(np.random.bytes(20))
mock_sink = pa.MockOutputStream()
stream_writer = pa.RecordBatchStreamWriter(mock_sink, record_batch.schema)
stream_writer.write_batch(record_batch)
stream_writer.close()
data_size = mock_sink.size()
buf = client.create(object_id, data_size)
# Write the PyArrow RecordBatch to Plasma
stream = pa.FixedSizeBufferWriter(buf)
stream_writer = pa.RecordBatchStreamWriter(stream, record_batch.schema)
stream_writer.write_batch(record_batch)
stream_writer.close()
# Seal the Plasma object
client.seal(object_id)
# end the client
client.disconnect()
# Write the new object ID
plasma_state[type] = object_id
with open("plasma_state.pkl", "wb") as f:
pickle.dump(plasma_state, f)
def read_from_plasma(type):
# get the current plasma_state
with open("plasma_state.pkl", "rb") as f:
plasma_state = pickle.load(f)
# get the object ID for the dataframe
object_id = plasma_state[type]
# get the client and read from it
client = plasma.connect("/tmp/plasma")
# Fetch the Plasma object
[data] = client.get_buffers([object_id]) # Get PlasmaBuffer from ObjectID
buffer = pa.BufferReader(data)
# Convert object back into an Arrow RecordBatch
reader = pa.RecordBatchStreamReader(buffer)
record_batch = reader.read_next_batch()
# Convert back into Pandas
df = record_batch.to_pandas()
# close out and finish
client.disconnect()
return df
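# Usage sketch (assumes a plasma store is listening on /tmp/plasma and that
# plasma_state.pkl already maps "tag", "tbr" and "pos" to valid object IDs):
# write_to_plasma(df_tag, "tag") # push a cleaned DataFrame into shared memory
# df_tag = read_from_plasma("tag") # any callback can then read it back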
# @cache.memoize(timeout=timeout)
def clean_data(start_ts, name):
db = "Aquatraz.db"
# db = "../backend/src/backend/dbmanager/databases/iof.db"
# create sql query
query = db_sql_query(start_ts, name)
# Read from databse
print("Reading from database")
con = sqlite3.connect(db)
df = pd.read_sql(query, con)
import pandas as pd
import numpy as np
from app.db.db_connection import get_db
def import_abandoned_vehicles(input_file: str) -> None:
""" Import the requests for abandoned vehicles to the database.
:param input_file: The file from which to load the requests for abandoned vehicles.
"""
print("Getting requests for abandoned vehicles")
db = next(get_db())
input_df = pd.read_csv(input_file, sep=',')
# 2. Use the best model
from keras.models import load_model
from sklearn import preprocessing
import numpy as np
import pandas as pd
# data set
ud = pd.read_csv('../dataset/ginseng-example.csv')
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import scipy.integrate as integrate
from scipy.optimize import brentq as root
import math
import numpy as np
import scipy.special as scp
from scipy.special import iv
# In[2]:
def rvonmises(n, mu, kappa):
vm = np.zeros(n)
a = 1 + (1 + 4 * (kappa**2))**0.5
b = (a - (2 * a)**0.5)/(2 * kappa)
r = (1 + b**2)/(2 * b)
obs = 0
while (obs < n):
U1 = np.random.uniform(0, 1, 1)
z = np.cos(np.pi * U1)
f = (1 + r * z)/(r + z)
c = kappa * (r - f)
U2 = np.random.uniform(0, 1, 1)
if (c * (2 - c) - U2 > 0):
U3 = np.random.uniform(0, 1, 1)
vm[obs] = np.sign(U3 - 0.5) * math.acos(f) + mu
vm[obs] = vm[obs] % (2 * np.pi)
obs = obs + 1
else:
if (math.log(c/U2) + 1 - c >= 0):
U3 = np.random.uniform(0, 1, 1)
vm[obs] = np.sign(U3 - 0.5) * math.acos(f) + mu
vm[obs] = vm[obs] % (2 * math.pi)
obs = obs + 1
return(vm)
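# In[ ]:
# Illustrative check of rvonmises (not part of the original notebook): the circular
# mean of a large sample should land close to mu.
# sample = rvonmises(1000, np.pi, 4)
# est_mu = np.arctan2(np.sin(sample).mean(), np.cos(sample).mean()) % (2 * np.pi)
# est_mu should be near np.pi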
# In[3]:
def dvonmises(x, mu, kappa, log = False):
if (type(x) == int):
x = [x]
if (type(x) == float):
x = [x]
vm = np.zeros(len(x))
if (log):
if (kappa == 0):
vm = np.log(np.repeat(1/(2*np.pi), len(x)))
elif (kappa < 100000):
vm = -(np.log(2*math.pi)+np.log(scp.ive(0, kappa)) + kappa) + kappa*(np.cos(np.subtract(x, mu)))
else:
vm = np.where(np.mod(np.subtract(x, mu), 2*math.pi) == 0, math.inf, -math.inf)
else:
if (kappa == 0):
vm = np.repeat(1/(2*np.pi), len(x))
elif (kappa < 100000):
vm = 1/(2 * np.pi * scp.ive(0, kappa)) * (np.exp(np.subtract(np.cos(np.subtract(x, mu)), 1)))**kappa
else:
vm = np.where(np.mod(np.subtract(x, mu), 2*np.pi) == 0, math.inf, 0)
return(vm)
# In[21]:
def pvonmises(q, mu, kappa, tol = 1e-020):
from_ = mu - np.pi
mu = (mu - from_) % (2 * np.pi)
if (type(q) == int):
q = [q]
if(type(q) == float):
q =[q]
q = np.mod(np.subtract(q, from_), (2 * np.pi))
q = np.mod(q,(2 * np.pi))
n = len(q)
mu = mu % (2 * np.pi)
def pvm_mu0(q, kappa, tol):
flag = True
p = 1
sum_ = 0
while (flag):
term = (iv(p, kappa) * np.sin(np.multiply(q, p)))/p
sum_ = sum_ + term
p = p + 1
if (abs(term) < tol):
flag = False
return(np.divide(q,(2 * np.pi)) + sum_/(np.pi * iv(0, kappa)))
result = np.repeat(np.nan, n)
if (mu == 0):
for i in range(0,n):
result[i] = pvm_mu0(q[i], kappa, tol)
else:
for i in range(0,n):
if (q[i] <= mu):
upper = (q[i] - mu) % (2 * np.pi)
if (upper == 0):
upper = 2 * np.pi
lower = (-mu) % (2 * np.pi)
result[i] = pvm_mu0(upper, kappa, tol) - pvm_mu0(lower, kappa, tol)
else:
upper = q[i] - mu
lower = mu % (2 * np.pi)
result[i] = pvm_mu0(upper, kappa, tol) + pvm_mu0(lower, kappa, tol)
return(result)
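# In[ ]:
# Illustrative sanity check (assumed example values): with mu = np.pi the CDF starts
# at mu - pi = 0, so it should reach ~1 over a full circle and agree with the density.
# pvonmises(2 * np.pi - 1e-6, np.pi, 2) # ~= 1.0
# integrate.quad(lambda t: dvonmises(t, np.pi, 2)[0], 0, 2 * np.pi)[0] # ~= 1.0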
# In[63]:
def qvonmises(p, mu = 0 , kappa = None, from_ = None, tol = np.finfo(float).eps**0.6):
epsilon = 10 * np.finfo(float).eps ##epsilon is Python equivalent of .Machine$double.eps
if (type(p) == int):
p = np.array([p])
elif (type(p) == float):
p = np.array([p])
else:
p = np.array(p)
if (np.any(p > 1)):
raise ValueError("p must be in [0,1]")
elif (np.any(p < 0)):
raise ValueError("p must be in [0,1]")
if (pd.isnull(from_)):
from_ = mu - np.pi
n = p.size
mu = (mu - from_)%(2 * np.pi)
if (pd.isnull(kappa)):
# feature importance
# local score 0.0449
# kaggle score .14106
# minimize score
import os
import sys # noqa
from time import time
from pprint import pprint # noqa
import lightgbm as lgb
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
pd.options.display.float_format = '{:.4f}'.format
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 2000)
np.set_printoptions(threshold=sys.maxsize)
is_kaggle = os.environ['HOME'] == '/tmp'
zipext = '' # if is_kaggle else '.zip'
train_file = 'train' # if is_kaggle else 'sample'
start_time = time()
last_time = time()
def timer():
global last_time
print(f'{((time() - last_time) / 60):.1f} mins\n') # noqa
last_time = time()
def evaluate(train, test, unique_id, target):
print('evaluate')
lgb_model = lgb.LGBMRegressor(nthread=4, n_jobs=-1, verbose=-1, metric='rmse')
x_train = train.drop([target, unique_id], axis=1)
y_train = train[target]
x_test = test[x_train.columns]
lgb_model.fit(x_train, y_train)
train_predictions = lgb_model.predict(x_train)
test_predictions = lgb_model.predict(x_test)
train_score = np.sqrt(mean_squared_error(train_predictions, y_train))
timer()
return test_predictions, train_score
# --- missing values
def get_many_missing_values(train, test, unique_id, target):
print(f'get_many_missing_values')
train_targets = train[target]
threshold = 0.75
train_missing = (train.isnull().sum() / len(train)).sort_values(ascending=False)
test_missing = (test.isnull().sum() / len(test)).sort_values(ascending=False)
# identify missing values above threshold
train_missing = train_missing.index[train_missing > threshold]
test_missing = test_missing.index[test_missing > threshold]
all_missing = list(set(set(train_missing) | set(test_missing)))
if len(all_missing) > 0:
print(f'columns with more than {threshold}% missing values')
pprint(all_missing)
train = train.drop(columns=all_missing, axis=1)
test = test.drop(columns=all_missing, axis=1)
train, test = train.align(test, join='inner', axis=1)
# restore after align
train[target] = train_targets
timer()
return train, test
# --- remove keys
def remove_keys(list, keys):
result = [x for x in list if x not in keys]
return result
# --- replace missing values
def replace_missing_values(train, test, unique_id, target):
print(f'replace_missing_values')
numeric_cols = [col for col in train.columns
if (train[col].dtype == 'int64') | (train[col].dtype == 'float64')]
numeric_cols = remove_keys(numeric_cols, [unique_id, target])
categorical_cols = [col for col in train.columns if train[col].dtype == 'object']
categorical_cols = remove_keys(categorical_cols, [unique_id, target])
# replace missing numericals with mean
for col in numeric_cols:
if train[col].isna().any() | test[col].isna().any():
mean = train[col].mean()
train[col].fillna(mean, inplace=True)
if col in test.columns:
test[col].fillna(mean, inplace=True)
# convert to lowercase
for col in categorical_cols:
train[col] = train[col].apply(lambda x: str(x).lower())
if col in test.columns:
test[col] = test[col].apply(lambda x: str(x).lower())
# replace string nan with np.nan
train.replace('nan', np.nan, inplace=True)
test.replace('nan', np.nan, inplace=True)
# replace missing categoricals with mode
for col in categorical_cols:
if train[col].isna().any() or test[col].isna().any():
mode = train[col].mode()[0]
train[col].fillna(mode, inplace=True)
if col in test.columns:
test[col].fillna(mode, inplace=True)
timer()
return train, test
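# Illustrative sketch (added example, not called by run()): imputation statistics are taken
# from train only and re-used for test, so no test information leaks into the fill values.
def _example_impute_from_train(train_col, test_col):
    # e.g. pd.Series([1.0, None, 3.0]) and pd.Series([None]) both get filled with 2.0
    mean = train_col.mean()
    return train_col.fillna(mean), test_col.fillna(mean)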
# --- column differences
def get_column_differences(train, test, unique_id, target):
print(f'get_column_differences')
train_without_target = train.drop(target, axis=1)
not_in_test = train_without_target.columns.difference(test.columns)
not_in_train = test.columns.difference(train_without_target.columns)
if len(not_in_test) > 0:
print(f'In train but not test\n{not_in_test}')
if len(not_in_train) > 0:
print(f'In test but not train\n{not_in_train}')
timer()
return train, test
# --- categorical data
def get_categorical_data(train, test, unique_id, target):
print(f'get_categorical_data')
train_targets = train[target]
categorical_cols = [col for col in train.columns if train[col].dtype == 'object']
if unique_id in categorical_cols:
categorical_cols.remove(unique_id)
max_categories = train.shape[0] * 0.5
too_many_value_categorical_cols = [col for col in categorical_cols
if train[col].nunique() >= max_categories]
if len(too_many_value_categorical_cols) > 0:
print('too many categorical values', too_many_value_categorical_cols)
# drop if too many values - usually a unique id column
categorical_cols = [i for i in categorical_cols if i not in too_many_value_categorical_cols]
train = train.drop(too_many_value_categorical_cols, axis=1)
test.drop([col for col in too_many_value_categorical_cols
if col in test.columns], axis=1, inplace=True)
# one-hot encode if not too many values
max_ohe_categories = 10
ohe_categorical_cols = [col for col in categorical_cols
if train[col].nunique() <= max_ohe_categories]
categorical_cols = [i for i in categorical_cols if i not in ohe_categorical_cols]
if len(ohe_categorical_cols) > 0:
print('one-hot encode', ohe_categorical_cols)
# one-hot encode & align to have same columns
train = pd.get_dummies(train, columns=ohe_categorical_cols)
test = pd.get_dummies(test, columns=ohe_categorical_cols)
train, test = train.align(test, join='inner', axis=1)
# restore after align
train[target] = train_targets
# possibly rank encode rather than ohe. see gstore.
# label encode (convert to integer)
label_encode_categorical_cols = categorical_cols
print('label encode', label_encode_categorical_cols)
for col in label_encode_categorical_cols:
lbl = LabelEncoder()
lbl.fit(list(train[col].values.astype('str')) + list(test[col].values.astype('str')))
train[col] = lbl.transform(list(train[col].values.astype('str')))
test[col] = lbl.transform(list(test[col].values.astype('str')))
timer()
return train, test
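# Illustrative sketch (added example, not called by run()): fitting the LabelEncoder on the
# union of train and test values, as done above, avoids "unseen label" errors at transform time.
def _example_combined_label_fit():
    tr = pd.Series(['red', 'blue'])
    te = pd.Series(['green'])
    lbl = LabelEncoder()
    lbl.fit(list(tr.values.astype('str')) + list(te.values.astype('str')))
    return lbl.transform(list(te.values.astype('str')))  # works because 'green' was seen during fit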
# --- feature selection
def get_feature_selection(train, test, unique_id, target):
print(f'get_feature_selection')
all_numeric_cols = [col for col in train.columns
if (train[col].dtype == 'int64') | (train[col].dtype == 'float64')]
if unique_id in all_numeric_cols:
all_numeric_cols.remove(unique_id)
if target in all_numeric_cols:
all_numeric_cols.remove(target)
# feature selection via variance
train_numeric = train[all_numeric_cols].fillna(0)
select_features = VarianceThreshold(threshold=0.2)
select_features.fit(train_numeric)
numeric_cols = train_numeric.columns[select_features.get_support(indices=True)].tolist()
# remove cols without variance
for col in all_numeric_cols:
if col not in numeric_cols:
print(f'variance drop {col}')
train.drop(col, axis=1, inplace=True)
if col in test.columns:
test.drop(col, axis=1, inplace=True)
timer()
return train, test
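# Illustrative sketch (added example, not called by run()): VarianceThreshold keeps only
# columns whose variance exceeds the threshold, so near-constant columns are dropped.
def _example_variance_filter():
    toy = pd.DataFrame({'flat': [1, 1, 1, 1], 'varied': [1, 5, 9, 2]})
    selector = VarianceThreshold(threshold=0.2)
    selector.fit(toy)
    return toy.columns[selector.get_support(indices=True)].tolist()  # ['varied']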
# --- feature importance
def get_feature_importance(train, test, unique_id, target):
print(f'get_feature_importance')
model = lgb.LGBMRegressor(nthread=4, n_jobs=-1, verbose=-1)
x_train = train.drop([unique_id, target], axis=1)
# initialize an empty array to hold feature importances
feature_importances = np.zeros(x_train.shape[1])
# fit the model twice to avoid overfitting
for i in range(2):
# split into training and validation set
train_features, valid_features, train_y, valid_y = train_test_split(x_train, train[target],
test_size=0.25, random_state=i)
# train using early stopping
model.fit(train_features, train_y, early_stopping_rounds=100,
eval_set=[(valid_features, valid_y)],
eval_metric='rmse', verbose=False)
# record the feature importances
feature_importances += model.feature_importances_
# average feature importances!
feature_importances = feature_importances / 2
feature_importances = pd.DataFrame(
{'feature': list(x_train.columns), 'importance': feature_importances}).sort_values('importance', ascending=False)
# sort features according to importance
feature_importances = feature_importances.sort_values('importance', ascending=False).reset_index()
most_important_features = feature_importances[0:10]['feature'].tolist()
# normalize the feature importances to add up to one
feature_importances['importance_normalized'] = feature_importances['importance'] / feature_importances['importance'].sum()
feature_importances['cumulative_importance'] = np.cumsum(feature_importances['importance_normalized'])
# find the features with minimal importance
# unimportant_features = list(feature_importances[feature_importances['importance'] == 0.0]['feature'])
# Threshold for cumulative importance
threshold = 0.9996
# extract the features to drop
features_to_drop = list(feature_importances[feature_importances[
'cumulative_importance'] > threshold]['feature'])
if len(features_to_drop) > 0:
print(feature_importances)
print(f'features to drop, under {threshold} importance:')
pprint(features_to_drop)
train = train.drop(features_to_drop, axis=1)
test = test.drop(features_to_drop, axis=1)
timer()
return train, test, most_important_features
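# Illustrative sketch (added example, not called by run()): how the cumulative-importance
# cut-off works on a toy importance vector; the 0.95 threshold is only for the example.
def _example_cumulative_importance(threshold=0.95):
    imp = pd.DataFrame({'feature': ['a', 'b', 'c'], 'importance': [70.0, 20.0, 10.0]})
    imp = imp.sort_values('importance', ascending=False)
    imp['importance_normalized'] = imp['importance'] / imp['importance'].sum()
    imp['cumulative_importance'] = np.cumsum(imp['importance_normalized'])
    # only the least important tail exceeds the threshold and is dropped: ['c'] here
    return imp[imp['cumulative_importance'] > threshold]['feature'].tolist()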
# --- remove collinear features
def get_collinear_features(train, test, unique_id, target):
print('get_collinear_features')
corrs = train.corr()
    upper = corrs.where(np.triu(np.ones(corrs.shape), k=1).astype(bool))
threshold = 0.8
# select columns with correlations above threshold
to_drop = [column for column in upper.columns if any(upper[column] > threshold) and column not in [unique_id, target]]
if len(to_drop) > 0:
print('collinear drop')
pprint(to_drop)
train = train.drop(columns=to_drop, axis=1)
test = test.drop(columns=to_drop, axis=1)
timer()
return train, test
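# Illustrative sketch (added example, not called by run()): keeping only the upper triangle
# of the correlation matrix counts each correlated pair once, so one column per pair is dropped.
def _example_upper_triangle_filter():
    toy = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [2, 4, 6, 8], 'z': [4, 1, 3, 2]})
    corrs = toy.corr()
    upper = corrs.where(np.triu(np.ones(corrs.shape), k=1).astype(bool))
    return [col for col in upper.columns if any(upper[col] > 0.8)]  # ['y'] - 'x' is kept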
# arithmetic features
def get_arithmetic_features(train, test, unique_id, target, cols, source_cols):
print('get_arithmetic_features')
# just choose from original columns, not encodeds
    numeric_cols = [col for col in cols
                    if (col in source_cols) & ((train[col].dtype == 'int64') | (train[col].dtype == 'float64'))]
numeric_cols = remove_keys(numeric_cols, [unique_id, target])
for i1 in range(0, len(numeric_cols)):
col1 = numeric_cols[i1]
# powers
train[f'{col1} squared'] = train[col1] ** 2
test[f'{col1} squared'] = test[col1] ** 2
train[f'{col1} cubed'] = train[col1] ** 3
test[f'{col1} cubed'] = test[col1] ** 3
train[f'{col1}^4'] = train[col1] ** 4
test[f'{col1}^4'] = test[col1] ** 4
for i2 in range(i1 + 1, len(numeric_cols)):
col2 = numeric_cols[i2]
train[f'{col1} by {col2}'] = train[col1] * train[col2]
test[f'{col1} by {col2}'] = test[col1] * test[col2]
train[f'{col1} plus {col2}'] = train[col1] + train[col2]
test[f'{col1} plus {col2}'] = test[col1] + test[col2]
train[f'{col1} minus {col2}'] = train[col1] - train[col2]
test[f'{col1} minus {col2}'] = test[col1] - test[col2]
if not (train[col2] == 0).any():
train[f'{col1} on {col2}'] = train[col1] / train[col2]
test[f'{col1} on {col2}'] = test[col1] / test[col2]
elif not (train[col1] == 0).any():
train[f'{col2} on {col1}'] = train[col2] / train[col1]
test[f'{col2} on {col1}'] = test[col2] / test[col1]
timer()
return train, test
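# Illustrative note (added example with assumed n, not called by run()): with n numeric source
# columns the function above generates 3*n power features plus up to 4*n*(n-1)/2 pairwise
# features, e.g. n=10 gives 30 + up to 180 = up to 210 new columns, so later pruning matters.
def _example_arithmetic_feature_count(n=10):
    return 3 * n + 4 * n * (n - 1) // 2  # 210 for n=10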
# custom features
def get_custom_features(train, test, unique_id, target):
print(f'get_custom_features')
timer()
return train, test
# remove skew towards a few large values by using log1p
def get_logged(train, test, target):
train[target] = np.log1p(train[target])
return train, test
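# Illustrative sketch (added example; assumption that the truncated run() below would need
# this step): predictions made against a log1p-transformed target map back with expm1.
def _example_invert_logged_predictions(predictions):
    return np.expm1(predictions)  # inverse of np.log1p used in get_logged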
# --------------------- run
def run():
unique_id = 'Id'
target = 'SalePrice'
# load data
train = | pd.read_csv(f'../input/{train_file}.csv{zipext}') | pandas.read_csv |
# -*- coding: utf-8 -*-
#
# Copyright 2017-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict, namedtuple
from typing import Iterable
import warnings
import numpy as np
import pandas as pd
from ..globalvar import SOURCE, TARGET, WEIGHT, TYPE_ATTR_NAME
from .element_data import NodeData, EdgeData
from .indexed_array import IndexedArray
from .validation import comma_sep, require_dataframe_has_columns
from .utils import (
is_real_iterable,
zero_sized_array,
smart_array_concatenate,
smart_array_index,
)
class ColumnarConverter:
"""
Convert data from a columnar representation like Pandas and Numpy into values appropriate for
element_data.py types.
Args:
name (str): the name of the argument for error messages
default_type (hashable): the default type to use for data without a type
type_column (hashable, optional): the name of the type column, if one is being used
column_defaults (dict of hashable to any): any default values for columns (using names before renaming!)
selected_columns (dict of hashable to hashable): renamings for columns, mapping original name to new name
dtype (str or numpy dtype): the data type to use for the feature matrices
transform_columns (dict of hashable to callable): column transformations, maps column name to transform
"""
def __init__(
self,
name,
default_type,
type_column,
column_defaults,
selected_columns,
transform_columns,
dtype=None,
):
if type_column is not None and type_column not in selected_columns:
raise ValueError(
f"selected_columns: expected type column ({type_column!r}) to be included when using, found only {comma_sep(list(selected_columns.keys()))}"
)
self._parent_name = name
self.type_column = type_column
self.column_defaults = column_defaults
self.selected_columns = selected_columns
self.default_type = default_type
self.transform_columns = transform_columns
self.dtype = dtype
def name(self, type_name=None):
if type_name is None:
return self._parent_name
return f"{self._parent_name}[{type_name!r}]"
def _convert_single(self, type_name, data):
if isinstance(data, pd.DataFrame):
return self._convert_pandas(type_name, data)
elif isinstance(data, (IndexedArray, np.ndarray)):
return self._convert_rowframe(type_name, data)
else:
raise TypeError(
f"{self.name(type_name)}: expected IndexedArray or pandas DataFrame, found {type(data).__name__}"
)
def _convert_pandas(self, type_name, data):
assert isinstance(data, pd.DataFrame)
existing = set(self.selected_columns).intersection(data.columns)
ids = data.index
# split the dataframe based on the columns we know about
missing_columns = []
def select_column(old_name):
if old_name in data.columns:
column = data[old_name].to_numpy()
elif old_name in self.column_defaults:
column = np.broadcast_to(self.column_defaults[old_name], len(ids))
else:
nonlocal missing_columns
missing_columns.append(old_name)
return None
transform = self.transform_columns.get(old_name)
if transform is not None:
column = transform(column)
return column
columns = {
new_name: select_column(old_name)
for old_name, new_name in self.selected_columns.items()
}
if missing_columns:
raise ValueError(
f"{self.name(type_name)}: expected {comma_sep(self.selected_columns)} columns, found: {comma_sep(data.columns)}"
)
if len(existing) != len(data.columns):
other = data.drop(columns=existing)
# to_numpy returns an unspecified order but it's Fortran in practice. Row-level bulk
            # operations are more common (e.g. slicing out a couple of rows, when sampling a few
# nodes) than column-level ones so having rows be contiguous (C order) is much more
# efficient.
features = np.ascontiguousarray(other.to_numpy(dtype=self.dtype))
else:
# if there's no extra columns we can save some effort and some memory usage by entirely
# avoiding the Pandas tricks
features = zero_sized_array((len(data), 0), self.dtype)
return ids, columns, features
def _convert_rowframe(self, type_name, data):
assert isinstance(data, (IndexedArray, np.ndarray))
if self.selected_columns:
raise ValueError(
f"{self.name(type_name)}: expected a Pandas DataFrame when selecting columns {comma_sep(self.selected_columns)}, found {type(data).__name__}"
)
if isinstance(data, np.ndarray):
try:
data = IndexedArray(data)
            except Exception as e:
                raise ValueError(
                    f"{self.name(type_name)}: could not convert NumPy array to a IndexedArray, see other error"
                ) from e
return data.index, {}, data.values
def _ids_columns_and_type_info_from_singles(self, singles):
type_info = []
type_ids = []
type_columns = defaultdict(list)
for type_name in sorted(singles.keys()):
ids, columns, data = singles[type_name]
type_info.append((type_name, data))
type_ids.append(ids)
for col_name, col_array in columns.items():
type_columns[col_name].append(col_array)
if type_ids:
ids = smart_array_concatenate(type_ids)
columns = {
col_name: smart_array_concatenate(col_arrays)
for col_name, col_arrays in type_columns.items()
}
else:
            # there were no input types and thus no input elements, so create a dummy set of columns,
# that is maximally flexible by using a "minimal"/highly-promotable type
ids = []
columns = {
name: zero_sized_array((0,), dtype=np.uint8)
for name in self.selected_columns.values()
}
return ids, columns, type_info
def _convert_with_type_column(self, data):
# we've got a type column, so there's no dictionaries or separate dataframes. We just need
# to make sure things are arranged right, i.e. nodes of each type are contiguous, and
# 'range(...)' objects describing each one.
ids, columns, features = self._convert_single(None, data)
# the column we see in `known` is after being selected/renamed
type_column_name = self.selected_columns[self.type_column]
# the shared data doesn't use the type column; that info is encoded in `type_ranges` below
type_column = columns.pop(type_column_name)
sorting = np.argsort(type_column)
# arrange everything to be sorted by type
ids = ids[sorting]
type_column = type_column[sorting]
# For many graphs these end up with values for which actually indexing would be suboptimal
# (require allocating a new array, in particular), e.g. default edge weights in columns, or
# features.size == 0.
columns = {
name: smart_array_index(array, sorting) for name, array in columns.items()
}
features = smart_array_index(features, sorting)
# deduce the type ranges based on the first index of each of the known values
        types, first_occurrence = np.unique(type_column, return_index=True)
        last_occurrence = np.append(first_occurrence[1:], len(type_column))
        type_info = [
            (type_name, features[start:stop, :])
            for type_name, start, stop in zip(types, first_occurrence, last_occurrence)
]
return ids, columns, type_info
def convert(self, elements):
if self.type_column is not None:
return self._convert_with_type_column(elements)
if isinstance(elements, (pd.DataFrame, IndexedArray, np.ndarray)):
elements = {self.default_type: elements}
if not isinstance(elements, dict):
raise TypeError(
f"{self.name()}: expected dict, found {type(elements).__name__}"
)
singles = {
type_name: self._convert_single(type_name, data)
for type_name, data in elements.items()
}
ids, columns, type_info = self._ids_columns_and_type_info_from_singles(singles)
return (ids, columns, type_info)
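# Illustrative sketch (added example, not part of this module's API): exercising
# ColumnarConverter directly with a single homogeneous node DataFrame, based only on the
# constructor and convert() signatures defined above.
def _example_columnar_converter():
    converter = ColumnarConverter(
        "example_nodes",
        default_type="default",
        type_column=None,
        column_defaults={},
        selected_columns={},
        transform_columns={},
        dtype="float32",
    )
    frame = pd.DataFrame(np.eye(3), index=["a", "b", "c"])
    ids, columns, type_info = converter.convert(frame)
    # columns is empty because nothing was selected; type_info pairs the type with its features
    return ids, columns, type_info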
def convert_nodes(data, *, name, default_type, dtype) -> NodeData:
converter = ColumnarConverter(
name,
default_type,
type_column=None,
column_defaults={},
selected_columns={},
transform_columns={},
dtype=dtype,
)
ids, columns, type_info = converter.convert(data)
assert len(columns) == 0
return NodeData(ids, type_info)
DEFAULT_WEIGHT = np.float32(1)
def convert_edges(
data,
*,
name,
default_type,
source_column,
target_column,
weight_column,
type_column,
nodes,
dtype,
):
def _node_ids_to_iloc(node_ids):
try:
return nodes.ids.to_iloc(node_ids, strict=True)
except KeyError as e:
missing_values = e.args[0]
if not is_real_iterable(missing_values):
missing_values = [missing_values]
missing_values = pd.unique(missing_values)
raise ValueError(
f"edges: expected all source and target node IDs to be contained in `nodes`, "
f"found some missing: {comma_sep(missing_values)}"
)
selected = {
source_column: SOURCE,
target_column: TARGET,
weight_column: WEIGHT,
}
if type_column is not None:
selected[type_column] = TYPE_ATTR_NAME
converter = ColumnarConverter(
name,
default_type,
type_column=type_column,
column_defaults={weight_column: DEFAULT_WEIGHT},
selected_columns=selected,
transform_columns={
source_column: _node_ids_to_iloc,
target_column: _node_ids_to_iloc,
},
dtype=dtype,
)
ids, columns, type_info = converter.convert(data)
weight_col = columns[WEIGHT]
if not | pd.api.types.is_numeric_dtype(weight_col) | pandas.api.types.is_numeric_dtype |
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from datetime import time
from os.path import abspath, dirname, join
from unittest import TestCase
import typing
import re
import functools
import itertools
import pathlib
from collections import abc
import pytest
import numpy as np
import pandas as pd
import pandas.testing as tm
from pandas import Timedelta, read_csv
from parameterized import parameterized
import pytz
from pytz import UTC
from toolz import concat
from exchange_calendars import get_calendar
from exchange_calendars.calendar_utils import (
ExchangeCalendarDispatcher,
_default_calendar_aliases,
_default_calendar_factories,
)
from exchange_calendars.errors import (
CalendarNameCollision,
InvalidCalendarName,
NoSessionsError,
)
from exchange_calendars.exchange_calendar import ExchangeCalendar, days_at_time
from .test_utils import T
class FakeCalendar(ExchangeCalendar):
name = "DMY"
tz = "Asia/Ulaanbaatar"
open_times = ((None, time(11, 13)),)
close_times = ((None, time(11, 49)),)
class CalendarRegistrationTestCase(TestCase):
def setup_method(self, method):
self.dummy_cal_type = FakeCalendar
self.dispatcher = ExchangeCalendarDispatcher({}, {}, {})
def teardown_method(self, method):
self.dispatcher.clear_calendars()
def test_register_calendar(self):
# Build a fake calendar
dummy_cal = self.dummy_cal_type()
# Try to register and retrieve the calendar
self.dispatcher.register_calendar("DMY", dummy_cal)
retr_cal = self.dispatcher.get_calendar("DMY")
self.assertEqual(dummy_cal, retr_cal)
# Try to register again, expecting a name collision
with self.assertRaises(CalendarNameCollision):
self.dispatcher.register_calendar("DMY", dummy_cal)
# Deregister the calendar and ensure that it is removed
self.dispatcher.deregister_calendar("DMY")
with self.assertRaises(InvalidCalendarName):
self.dispatcher.get_calendar("DMY")
def test_register_calendar_type(self):
self.dispatcher.register_calendar_type("DMY", self.dummy_cal_type)
retr_cal = self.dispatcher.get_calendar("DMY")
self.assertEqual(self.dummy_cal_type, type(retr_cal))
def test_both_places_are_checked(self):
dummy_cal = self.dummy_cal_type()
# if instance is registered, can't register type with same name
self.dispatcher.register_calendar("DMY", dummy_cal)
with self.assertRaises(CalendarNameCollision):
self.dispatcher.register_calendar_type("DMY", type(dummy_cal))
self.dispatcher.deregister_calendar("DMY")
# if type is registered, can't register instance with same name
self.dispatcher.register_calendar_type("DMY", type(dummy_cal))
with self.assertRaises(CalendarNameCollision):
self.dispatcher.register_calendar("DMY", dummy_cal)
def test_force_registration(self):
self.dispatcher.register_calendar("DMY", self.dummy_cal_type())
first_dummy = self.dispatcher.get_calendar("DMY")
# force-register a new instance
self.dispatcher.register_calendar("DMY", self.dummy_cal_type(), force=True)
second_dummy = self.dispatcher.get_calendar("DMY")
self.assertNotEqual(first_dummy, second_dummy)
class DefaultsTestCase(TestCase):
def test_default_calendars(self):
dispatcher = ExchangeCalendarDispatcher(
calendars={},
calendar_factories=_default_calendar_factories,
aliases=_default_calendar_aliases,
)
# These are ordered aliases first, so that we can deregister the
# canonical factories when we're done with them, and we'll be done with
# them after they've been used by all aliases and by canonical name.
for name in concat([_default_calendar_aliases, _default_calendar_factories]):
self.assertIsNotNone(
dispatcher.get_calendar(name), "get_calendar(%r) returned None" % name
)
dispatcher.deregister_calendar(name)
class DaysAtTimeTestCase(TestCase):
@parameterized.expand(
[
# NYSE standard day
(
"2016-07-19",
0,
time(9, 31),
pytz.timezone("America/New_York"),
"2016-07-19 9:31",
),
# CME standard day
(
"2016-07-19",
-1,
time(17, 1),
pytz.timezone("America/Chicago"),
"2016-07-18 17:01",
),
# CME day after DST start
(
"2004-04-05",
-1,
time(17, 1),
pytz.timezone("America/Chicago"),
"2004-04-04 17:01",
),
# ICE day after DST start
(
"1990-04-02",
-1,
time(19, 1),
pytz.timezone("America/Chicago"),
"1990-04-01 19:01",
),
]
)
def test_days_at_time(self, day, day_offset, time_offset, tz, expected):
days = pd.DatetimeIndex([pd.Timestamp(day, tz=tz)])
result = days_at_time(days, time_offset, tz, day_offset)[0]
expected = pd.Timestamp(expected, tz=tz).tz_convert(UTC)
self.assertEqual(result, expected)
class ExchangeCalendarTestBase(object):
# Override in subclasses.
answer_key_filename = None
calendar_class = None
# Affects test_start_bound. Should be set to earliest date for which
# calendar can be instantiated, or None if no start bound.
START_BOUND: pd.Timestamp | None = None
# Affects test_end_bound. Should be set to latest date for which
# calendar can be instantiated, or None if no end bound.
END_BOUND: pd.Timestamp | None = None
# Affects tests that care about the empty periods between sessions. Should
# be set to False for 24/7 calendars.
GAPS_BETWEEN_SESSIONS = True
# Affects tests that care about early closes. Should be set to False for
# calendars that don't have any early closes.
HAVE_EARLY_CLOSES = True
# Affects tests that care about late opens. Since most do not, defaulting
# to False.
HAVE_LATE_OPENS = False
# Affects test_for_breaks. True if one or more calendar sessions has a
# break.
HAVE_BREAKS = False
# Affects test_session_has_break.
SESSION_WITH_BREAK = None # None if no session has a break
SESSION_WITHOUT_BREAK = T("2011-06-15") # None if all sessions have breaks
# Affects test_sanity_check_session_lengths. Should be set to the largest
# number of hours that ever appear in a single session.
MAX_SESSION_HOURS = 0
# Affects test_minute_index_to_session_labels.
# Change these if the start/end dates of your test suite don't contain the
# defaults.
MINUTE_INDEX_TO_SESSION_LABELS_START = pd.Timestamp("2011-01-04", tz=UTC)
MINUTE_INDEX_TO_SESSION_LABELS_END = pd.Timestamp("2011-04-04", tz=UTC)
# Affects tests around daylight savings. If possible, should contain two
# dates that are not both in the same daylight savings regime.
DAYLIGHT_SAVINGS_DATES = ["2004-04-05", "2004-11-01"]
# Affects test_start_end. Change these if your calendar start/end
# dates between 2010-01-03 and 2010-01-10 don't match the defaults.
TEST_START_END_FIRST = pd.Timestamp("2010-01-03", tz=UTC)
TEST_START_END_LAST = pd.Timestamp("2010-01-10", tz=UTC)
TEST_START_END_EXPECTED_FIRST = pd.Timestamp("2010-01-04", tz=UTC)
TEST_START_END_EXPECTED_LAST = pd.Timestamp("2010-01-08", tz=UTC)
@staticmethod
def load_answer_key(filename):
"""
Load a CSV from tests/resources/{filename}.csv
"""
fullpath = join(
dirname(abspath(__file__)),
"./resources",
filename + ".csv",
)
return read_csv(
fullpath,
index_col=0,
# NOTE: Merely passing parse_dates=True doesn't cause pandas to set
# the dtype correctly, and passing all reasonable inputs to the
            # dtype kwarg causes read_csv to barf.
parse_dates=[0, 1, 2],
date_parser=lambda x: pd.Timestamp(x, tz=UTC),
)
@classmethod
def setup_class(cls):
cls.answers = cls.load_answer_key(cls.answer_key_filename)
cls.start_date = cls.answers.index[0]
cls.end_date = cls.answers.index[-1]
cls.calendar = cls.calendar_class(cls.start_date, cls.end_date)
cls.one_minute = pd.Timedelta(1, "T")
cls.one_hour = pd.Timedelta(1, "H")
cls.one_day = pd.Timedelta(1, "D")
cls.today = pd.Timestamp.now(tz="UTC").floor("D")
@classmethod
def teardown_class(cls):
cls.calendar = None
cls.answers = None
def test_bound_start(self):
if self.START_BOUND is not None:
cal = self.calendar_class(self.START_BOUND, self.today)
self.assertIsInstance(cal, ExchangeCalendar)
start = self.START_BOUND - pd.DateOffset(days=1)
with pytest.raises(ValueError, match=re.escape(f"{start}")):
self.calendar_class(start, self.today)
else:
# verify no bound imposed
cal = self.calendar_class(pd.Timestamp("1902-01-01", tz="UTC"), self.today)
self.assertIsInstance(cal, ExchangeCalendar)
def test_bound_end(self):
if self.END_BOUND is not None:
cal = self.calendar_class(self.today, self.END_BOUND)
self.assertIsInstance(cal, ExchangeCalendar)
end = self.END_BOUND + pd.DateOffset(days=1)
with pytest.raises(ValueError, match=re.escape(f"{end}")):
self.calendar_class(self.today, end)
else:
# verify no bound imposed
cal = self.calendar_class(self.today, pd.Timestamp("2050-01-01", tz="UTC"))
self.assertIsInstance(cal, ExchangeCalendar)
def test_sanity_check_session_lengths(self):
# make sure that no session is longer than self.MAX_SESSION_HOURS hours
for session in self.calendar.all_sessions:
o, c = self.calendar.open_and_close_for_session(session)
delta = c - o
self.assertLessEqual(delta.seconds / 3600, self.MAX_SESSION_HOURS)
def test_calculated_against_csv(self):
tm.assert_index_equal(self.calendar.schedule.index, self.answers.index)
def test_adhoc_holidays_specification(self):
"""adhoc holidays should be tz-naive (#33, #39)."""
dti = pd.DatetimeIndex(self.calendar.adhoc_holidays)
assert dti.tz is None
def test_is_open_on_minute(self):
one_minute = pd.Timedelta(minutes=1)
m = self.calendar.is_open_on_minute
for market_minute in self.answers.market_open[1:]:
market_minute_utc = market_minute
# The exchange should be classified as open on its first minute
self.assertTrue(m(market_minute_utc, _parse=False))
if self.GAPS_BETWEEN_SESSIONS:
# Decrement minute by one, to minute where the market was not
# open
pre_market = market_minute_utc - one_minute
self.assertFalse(m(pre_market, _parse=False))
for market_minute in self.answers.market_close[:-1]:
close_minute_utc = market_minute
# should be open on its last minute
self.assertTrue(m(close_minute_utc, _parse=False))
if self.GAPS_BETWEEN_SESSIONS:
# increment minute by one minute, should be closed
post_market = close_minute_utc + one_minute
self.assertFalse(m(post_market, _parse=False))
def _verify_minute(
self,
calendar,
minute,
next_open_answer,
prev_open_answer,
next_close_answer,
prev_close_answer,
):
next_open = calendar.next_open(minute, _parse=False)
self.assertEqual(next_open, next_open_answer)
prev_open = self.calendar.previous_open(minute, _parse=False)
self.assertEqual(prev_open, prev_open_answer)
next_close = self.calendar.next_close(minute, _parse=False)
self.assertEqual(next_close, next_close_answer)
prev_close = self.calendar.previous_close(minute, _parse=False)
self.assertEqual(prev_close, prev_close_answer)
def test_next_prev_open_close(self):
# for each session, check:
# - the minute before the open (if gaps exist between sessions)
# - the first minute of the session
# - the second minute of the session
# - the minute before the close
# - the last minute of the session
# - the first minute after the close (if gaps exist between sessions)
opens = self.answers.market_open.iloc[1:-2]
closes = self.answers.market_close.iloc[1:-2]
previous_opens = self.answers.market_open.iloc[:-1]
previous_closes = self.answers.market_close.iloc[:-1]
next_opens = self.answers.market_open.iloc[2:]
next_closes = self.answers.market_close.iloc[2:]
for (
open_minute,
close_minute,
previous_open,
previous_close,
next_open,
next_close,
) in zip(
opens, closes, previous_opens, previous_closes, next_opens, next_closes
):
minute_before_open = open_minute - self.one_minute
# minute before open
if self.GAPS_BETWEEN_SESSIONS:
self._verify_minute(
self.calendar,
minute_before_open,
open_minute,
previous_open,
close_minute,
previous_close,
)
# open minute
self._verify_minute(
self.calendar,
open_minute,
next_open,
previous_open,
close_minute,
previous_close,
)
# second minute of session
self._verify_minute(
self.calendar,
open_minute + self.one_minute,
next_open,
open_minute,
close_minute,
previous_close,
)
# minute before the close
self._verify_minute(
self.calendar,
close_minute - self.one_minute,
next_open,
open_minute,
close_minute,
previous_close,
)
# the close
self._verify_minute(
self.calendar,
close_minute,
next_open,
open_minute,
next_close,
previous_close,
)
# minute after the close
if self.GAPS_BETWEEN_SESSIONS:
self._verify_minute(
self.calendar,
close_minute + self.one_minute,
next_open,
open_minute,
next_close,
close_minute,
)
def test_next_prev_minute(self):
all_minutes = self.calendar.all_minutes
# test 20,000 minutes because it takes too long to do the rest.
for idx, minute in enumerate(all_minutes[1:20000]):
self.assertEqual(
all_minutes[idx + 2], self.calendar.next_minute(minute, _parse=False)
)
self.assertEqual(
all_minutes[idx], self.calendar.previous_minute(minute, _parse=False)
)
# test a couple of non-market minutes
if self.GAPS_BETWEEN_SESSIONS:
for open_minute in self.answers.market_open[1:]:
hour_before_open = open_minute - self.one_hour
self.assertEqual(
open_minute,
self.calendar.next_minute(hour_before_open, _parse=False),
)
for close_minute in self.answers.market_close[1:]:
hour_after_close = close_minute + self.one_hour
self.assertEqual(
close_minute,
self.calendar.previous_minute(hour_after_close, _parse=False),
)
def test_date_to_session_label(self):
m = self.calendar.date_to_session_label
sessions = self.answers.index[:30] # first 30 sessions
# test for error if request session prior to first calendar session.
date = self.answers.index[0] - self.one_day
error_msg = (
"Cannot get a session label prior to the first calendar"
f" session ('{self.answers.index[0]}'). Consider passing"
" `direction` as 'next'."
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(date, "previous", _parse=False)
# direction as "previous"
dates = pd.date_range(sessions[0], sessions[-1], freq="D")
last_session = None
for date in dates:
session_label = m(date, "previous", _parse=False)
if date in sessions:
assert session_label == date
last_session = session_label
else:
assert session_label == last_session
# direction as "next"
last_session = None
for date in dates.sort_values(ascending=False):
session_label = m(date, "next", _parse=False)
if date in sessions:
assert session_label == date
last_session = session_label
else:
assert session_label == last_session
# test for error if request session after last calendar session.
date = self.answers.index[-1] + self.one_day
error_msg = (
"Cannot get a session label later than the last calendar"
f" session ('{self.answers.index[-1]}'). Consider passing"
" `direction` as 'previous'."
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(date, "next", _parse=False)
if self.GAPS_BETWEEN_SESSIONS:
not_sessions = dates[~dates.isin(sessions)][:5]
for not_session in not_sessions:
error_msg = (
f"`date` '{not_session}' does not represent a session. Consider"
" passing a `direction`."
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(not_session, "none", _parse=False)
# test default behaviour
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(not_session, _parse=False)
# non-valid direction (can only be thrown if gaps between sessions)
error_msg = (
"'not a direction' is not a valid `direction`. Valid `direction`"
' values are "next", "previous" and "none".'
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
m(not_session, "not a direction", _parse=False)
def test_minute_to_session_label(self):
m = self.calendar.minute_to_session_label
# minute is prior to first session's open
minute_before_first_open = self.answers.iloc[0].market_open - self.one_minute
session_label = self.answers.index[0]
minutes_that_resolve_to_this_session = [
m(minute_before_first_open, _parse=False),
m(minute_before_first_open, direction="next", _parse=False),
]
unique_session_labels = set(minutes_that_resolve_to_this_session)
self.assertTrue(len(unique_session_labels) == 1)
self.assertIn(session_label, unique_session_labels)
with self.assertRaises(ValueError):
m(minute_before_first_open, direction="previous", _parse=False)
with self.assertRaises(ValueError):
m(minute_before_first_open, direction="none", _parse=False)
# minute is between first session's open and last session's close
for idx, (session_label, open_minute, close_minute, _, _) in enumerate(
self.answers.iloc[1:-2].itertuples(name=None)
):
hour_into_session = open_minute + self.one_hour
minute_before_session = open_minute - self.one_minute
minute_after_session = close_minute + self.one_minute
next_session_label = self.answers.index[idx + 2]
previous_session_label = self.answers.index[idx]
# verify that minutes inside a session resolve correctly
minutes_that_resolve_to_this_session = [
m(open_minute, _parse=False),
m(open_minute, direction="next", _parse=False),
m(open_minute, direction="previous", _parse=False),
m(open_minute, direction="none", _parse=False),
m(hour_into_session, _parse=False),
m(hour_into_session, direction="next", _parse=False),
m(hour_into_session, direction="previous", _parse=False),
m(hour_into_session, direction="none", _parse=False),
                m(close_minute, _parse=False),
m(close_minute, direction="next", _parse=False),
m(close_minute, direction="previous", _parse=False),
m(close_minute, direction="none", _parse=False),
session_label,
]
if self.GAPS_BETWEEN_SESSIONS:
minutes_that_resolve_to_this_session.append(
m(minute_before_session, _parse=False)
)
minutes_that_resolve_to_this_session.append(
m(minute_before_session, direction="next", _parse=False)
)
minutes_that_resolve_to_this_session.append(
m(minute_after_session, direction="previous", _parse=False)
)
self.assertTrue(
all(
x == minutes_that_resolve_to_this_session[0]
for x in minutes_that_resolve_to_this_session
)
)
minutes_that_resolve_to_next_session = [
m(minute_after_session, _parse=False),
m(minute_after_session, direction="next", _parse=False),
next_session_label,
]
self.assertTrue(
all(
x == minutes_that_resolve_to_next_session[0]
for x in minutes_that_resolve_to_next_session
)
)
self.assertEqual(
m(minute_before_session, direction="previous", _parse=False),
previous_session_label,
)
if self.GAPS_BETWEEN_SESSIONS:
# Make sure we use the cache correctly
minutes_that_resolve_to_different_sessions = [
m(minute_after_session, direction="next", _parse=False),
m(minute_after_session, direction="previous", _parse=False),
m(minute_after_session, direction="next", _parse=False),
]
self.assertEqual(
minutes_that_resolve_to_different_sessions,
[next_session_label, session_label, next_session_label],
)
# make sure that exceptions are raised at the right time
with self.assertRaises(ValueError):
m(open_minute, "asdf", _parse=False)
if self.GAPS_BETWEEN_SESSIONS:
with self.assertRaises(ValueError):
m(minute_before_session, direction="none", _parse=False)
# minute is later than last session's close
minute_after_last_close = self.answers.iloc[-1].market_close + self.one_minute
session_label = self.answers.index[-1]
minute_that_resolves_to_session_label = m(
minute_after_last_close, direction="previous", _parse=False
)
self.assertEqual(session_label, minute_that_resolves_to_session_label)
with self.assertRaises(ValueError):
m(minute_after_last_close, _parse=False)
with self.assertRaises(ValueError):
m(minute_after_last_close, direction="next", _parse=False)
with self.assertRaises(ValueError):
m(minute_after_last_close, direction="none", _parse=False)
@parameterized.expand(
[
(1, 0),
(2, 0),
(2, 1),
]
)
def test_minute_index_to_session_labels(self, interval, offset):
minutes = self.calendar.minutes_for_sessions_in_range(
self.MINUTE_INDEX_TO_SESSION_LABELS_START,
self.MINUTE_INDEX_TO_SESSION_LABELS_END,
)
minutes = minutes[range(offset, len(minutes), interval)]
np.testing.assert_array_equal(
pd.DatetimeIndex(minutes.map(self.calendar.minute_to_session_label)),
self.calendar.minute_index_to_session_labels(minutes),
)
def test_next_prev_session(self):
session_labels = self.answers.index[1:-2]
max_idx = len(session_labels) - 1
# the very first session
first_session_label = self.answers.index[0]
with self.assertRaises(ValueError):
self.calendar.previous_session_label(first_session_label, _parse=False)
# all the sessions in the middle
for idx, session_label in enumerate(session_labels):
if idx < max_idx:
self.assertEqual(
self.calendar.next_session_label(session_label, _parse=False),
session_labels[idx + 1],
)
if idx > 0:
self.assertEqual(
self.calendar.previous_session_label(session_label, _parse=False),
session_labels[idx - 1],
)
# the very last session
last_session_label = self.answers.index[-1]
with self.assertRaises(ValueError):
self.calendar.next_session_label(last_session_label, _parse=False)
@staticmethod
def _find_full_session(calendar):
for session_label in calendar.schedule.index:
if session_label not in calendar.early_closes:
return session_label
return None
def test_minutes_for_period(self):
# full session
# find a session that isn't an early close. start from the first
# session, should be quick.
full_session_label = self._find_full_session(self.calendar)
if full_session_label is None:
raise ValueError("Cannot find a full session to test!")
minutes = self.calendar.minutes_for_session(full_session_label)
_open, _close = self.calendar.open_and_close_for_session(full_session_label)
_break_start, _break_end = self.calendar.break_start_and_end_for_session(
full_session_label
)
if not pd.isnull(_break_start):
constructed_minutes = np.concatenate(
[
pd.date_range(start=_open, end=_break_start, freq="min"),
pd.date_range(start=_break_end, end=_close, freq="min"),
]
)
else:
constructed_minutes = pd.date_range(start=_open, end=_close, freq="min")
np.testing.assert_array_equal(
minutes,
constructed_minutes,
)
# early close period
if self.HAVE_EARLY_CLOSES:
early_close_session_label = self.calendar.early_closes[0]
minutes_for_early_close = self.calendar.minutes_for_session(
early_close_session_label
)
_open, _close = self.calendar.open_and_close_for_session(
early_close_session_label
)
np.testing.assert_array_equal(
minutes_for_early_close,
pd.date_range(start=_open, end=_close, freq="min"),
)
# late open period
if self.HAVE_LATE_OPENS:
late_open_session_label = self.calendar.late_opens[0]
minutes_for_late_open = self.calendar.minutes_for_session(
late_open_session_label
)
_open, _close = self.calendar.open_and_close_for_session(
late_open_session_label
)
np.testing.assert_array_equal(
minutes_for_late_open,
pd.date_range(start=_open, end=_close, freq="min"),
)
def test_sessions_in_range(self):
# pick two sessions
session_count = len(self.calendar.schedule.index)
first_idx = session_count // 3
second_idx = 2 * first_idx
first_session_label = self.calendar.schedule.index[first_idx]
second_session_label = self.calendar.schedule.index[second_idx]
answer_key = self.calendar.schedule.index[first_idx : second_idx + 1]
rtrn = self.calendar.sessions_in_range(
first_session_label, second_session_label, _parse=False
)
np.testing.assert_array_equal(answer_key, rtrn)
def get_session_block(self):
"""
Get an "interesting" range of three sessions in a row. By default this
tries to find and return a (full session, early close session, full
session) block.
"""
if not self.HAVE_EARLY_CLOSES:
# If we don't have any early closes, just return a "random" chunk
# of three sessions.
return self.calendar.all_sessions[10:13]
shortened_session = self.calendar.early_closes[0]
shortened_session_idx = self.calendar.schedule.index.get_loc(shortened_session)
session_before = self.calendar.schedule.index[shortened_session_idx - 1]
session_after = self.calendar.schedule.index[shortened_session_idx + 1]
return [session_before, shortened_session, session_after]
def test_minutes_in_range(self):
sessions = self.get_session_block()
first_open, first_close = self.calendar.open_and_close_for_session(sessions[0])
minute_before_first_open = first_open - self.one_minute
middle_open, middle_close = self.calendar.open_and_close_for_session(
sessions[1]
)
last_open, last_close = self.calendar.open_and_close_for_session(sessions[-1])
minute_after_last_close = last_close + self.one_minute
# get all the minutes between first_open and last_close
minutes1 = self.calendar.minutes_in_range(first_open, last_close, _parse=False)
minutes2 = self.calendar.minutes_in_range(
minute_before_first_open, minute_after_last_close, _parse=False
)
if self.GAPS_BETWEEN_SESSIONS:
np.testing.assert_array_equal(minutes1, minutes2)
else:
# if no gaps, then minutes2 should have 2 extra minutes
np.testing.assert_array_equal(minutes1, minutes2[1:-1])
# manually construct the minutes
(
first_break_start,
first_break_end,
) = self.calendar.break_start_and_end_for_session(sessions[0])
(
middle_break_start,
middle_break_end,
) = self.calendar.break_start_and_end_for_session(sessions[1])
(
last_break_start,
last_break_end,
) = self.calendar.break_start_and_end_for_session(sessions[-1])
intervals = [
(first_open, first_break_start, first_break_end, first_close),
(middle_open, middle_break_start, middle_break_end, middle_close),
(last_open, last_break_start, last_break_end, last_close),
]
all_minutes = []
for _open, _break_start, _break_end, _close in intervals:
if pd.isnull(_break_start):
all_minutes.append(
pd.date_range(start=_open, end=_close, freq="min"),
)
else:
all_minutes.append(
pd.date_range(start=_open, end=_break_start, freq="min"),
)
all_minutes.append(
pd.date_range(start=_break_end, end=_close, freq="min"),
)
all_minutes = np.concatenate(all_minutes)
np.testing.assert_array_equal(all_minutes, minutes1)
def test_minutes_for_sessions_in_range(self):
sessions = self.get_session_block()
minutes = self.calendar.minutes_for_sessions_in_range(sessions[0], sessions[-1])
# do it manually
session0_minutes = self.calendar.minutes_for_session(sessions[0])
session1_minutes = self.calendar.minutes_for_session(sessions[1])
session2_minutes = self.calendar.minutes_for_session(sessions[2])
concatenated_minutes = np.concatenate(
[session0_minutes.values, session1_minutes.values, session2_minutes.values]
)
np.testing.assert_array_equal(concatenated_minutes, minutes.values)
def test_sessions_window(self):
sessions = self.get_session_block()
np.testing.assert_array_equal(
self.calendar.sessions_window(sessions[0], len(sessions) - 1, _parse=False),
self.calendar.sessions_in_range(sessions[0], sessions[-1], _parse=False),
)
np.testing.assert_array_equal(
self.calendar.sessions_window(
sessions[-1], -1 * (len(sessions) - 1), _parse=False
),
self.calendar.sessions_in_range(sessions[0], sessions[-1], _parse=False),
)
def test_session_distance(self):
sessions = self.get_session_block()
forward_distance = self.calendar.session_distance(
sessions[0],
sessions[-1],
_parse=False,
)
self.assertEqual(forward_distance, len(sessions))
backward_distance = self.calendar.session_distance(
sessions[-1],
sessions[0],
_parse=False,
)
self.assertEqual(backward_distance, -len(sessions))
one_day_distance = self.calendar.session_distance(
sessions[0],
sessions[0],
_parse=False,
)
self.assertEqual(one_day_distance, 1)
def test_open_and_close_for_session(self):
for session_label, open_answer, close_answer, _, _ in self.answers.itertuples(
name=None
):
found_open, found_close = self.calendar.open_and_close_for_session(
session_label, _parse=False
)
# Test that the methods for just session open and close produce the
# same values as the method for getting both.
alt_open = self.calendar.session_open(session_label, _parse=False)
self.assertEqual(alt_open, found_open)
alt_close = self.calendar.session_close(session_label, _parse=False)
self.assertEqual(alt_close, found_close)
self.assertEqual(open_answer, found_open)
self.assertEqual(close_answer, found_close)
def test_session_opens_in_range(self):
found_opens = self.calendar.session_opens_in_range(
self.answers.index[0],
self.answers.index[-1],
_parse=False,
)
found_opens.index.freq = None
tm.assert_series_equal(found_opens, self.answers["market_open"])
def test_session_closes_in_range(self):
found_closes = self.calendar.session_closes_in_range(
self.answers.index[0],
self.answers.index[-1],
_parse=False,
)
found_closes.index.freq = None
tm.assert_series_equal(found_closes, self.answers["market_close"])
def test_daylight_savings(self):
# 2004 daylight savings switches:
# Sunday 2004-04-04 and Sunday 2004-10-31
# make sure there's no weirdness around calculating the next day's
# session's open time.
m = dict(self.calendar.open_times)
m[pd.Timestamp.min] = m.pop(None)
open_times = pd.Series(m)
for date in self.DAYLIGHT_SAVINGS_DATES:
next_day = pd.Timestamp(date, tz=UTC)
open_date = next_day + Timedelta(days=self.calendar.open_offset)
the_open = self.calendar.schedule.loc[next_day].market_open
localized_open = the_open.tz_localize(UTC).tz_convert(self.calendar.tz)
self.assertEqual(
(open_date.year, open_date.month, open_date.day),
(localized_open.year, localized_open.month, localized_open.day),
)
open_ix = open_times.index.searchsorted(pd.Timestamp(date), side="right")
if open_ix == len(open_times):
open_ix -= 1
self.assertEqual(open_times.iloc[open_ix].hour, localized_open.hour)
self.assertEqual(open_times.iloc[open_ix].minute, localized_open.minute)
def test_start_end(self):
"""
Check ExchangeCalendar with defined start/end dates.
"""
calendar = self.calendar_class(
start=self.TEST_START_END_FIRST,
end=self.TEST_START_END_LAST,
)
self.assertEqual(
calendar.first_trading_session,
self.TEST_START_END_EXPECTED_FIRST,
)
self.assertEqual(
calendar.last_trading_session,
self.TEST_START_END_EXPECTED_LAST,
)
def test_has_breaks(self):
has_breaks = self.calendar.has_breaks()
self.assertEqual(has_breaks, self.HAVE_BREAKS)
def test_session_has_break(self):
if self.SESSION_WITHOUT_BREAK is not None:
self.assertFalse(
self.calendar.session_has_break(self.SESSION_WITHOUT_BREAK)
)
if self.SESSION_WITH_BREAK is not None:
self.assertTrue(self.calendar.session_has_break(self.SESSION_WITH_BREAK))
# TODO remove this class when all calendars migrated. No longer required as
# `minute_index_to_session_labels` comprehensively tested under new suite.
class OpenDetectionTestCase(TestCase):
# This is an extra set of unit tests that were added during a rewrite of
# `minute_index_to_session_labels` to ensure that the existing
# calendar-generic test suite correctly covered edge cases around
# non-market minutes.
def test_detect_non_market_minutes(self):
cal = get_calendar("NYSE")
# NOTE: This test is here instead of being on the base class for all
# calendars because some of our calendars are 24/7, which means there
# aren't any non-market minutes to find.
day0 = cal.minutes_for_sessions_in_range(
pd.Timestamp("2013-07-03", tz=UTC),
pd.Timestamp("2013-07-03", tz=UTC),
)
for minute in day0:
self.assertTrue(cal.is_open_on_minute(minute))
day1 = cal.minutes_for_sessions_in_range(
pd.Timestamp("2013-07-05", tz=UTC),
pd.Timestamp("2013-07-05", tz=UTC),
)
for minute in day1:
self.assertTrue(cal.is_open_on_minute(minute))
def NYSE_timestamp(s):
return pd.Timestamp(s, tz="America/New_York").tz_convert(UTC)
non_market = [
# After close.
NYSE_timestamp("2013-07-03 16:01"),
# Holiday.
NYSE_timestamp("2013-07-04 10:00"),
# Before open.
NYSE_timestamp("2013-07-05 9:29"),
]
for minute in non_market:
self.assertFalse(cal.is_open_on_minute(minute), minute)
input_ = pd.to_datetime(
np.hstack([day0.values, minute.asm8, day1.values]),
utc=True,
)
with self.assertRaises(ValueError) as e:
cal.minute_index_to_session_labels(input_)
exc_str = str(e.exception)
self.assertIn("First Bad Minute: {}".format(minute), exc_str)
# TODO remove this class when all calendars migrated. No longer required as
# this case is handled by new test base internally.
class NoDSTExchangeCalendarTestBase(ExchangeCalendarTestBase):
def test_daylight_savings(self):
"""
Several countries in Africa / Asia do not observe DST
so we need to skip over this test for those markets
"""
pass
def get_csv(name: str) -> pd.DataFrame:
"""Get csv file as DataFrame for given calendar `name`."""
filename = name.replace("/", "-").lower() + ".csv"
path = pathlib.Path(__file__).parent.joinpath("resources", filename)
df = pd.read_csv(
path,
index_col=0,
parse_dates=[0, 1, 2, 3, 4],
infer_datetime_format=True,
)
df.index = df.index.tz_localize("UTC")
for col in df:
df[col] = df[col].dt.tz_localize("UTC")
return df
class Answers:
"""Inputs and expected output for testing a given calendar and side.
Inputs and expected outputs are provided by public instance methods and
properties. These either read directly from the corresponding .csv file
    or are evaluated from the .csv file contents. NB Properties / methods
MUST NOT make evaluations by way of repeating the code of the
ExchangeCalendar method they are intended to test!
Parameters
----------
calendar_name
Canonical name of calendar for which require answer info. For
example, 'XNYS'.
side {'both', 'left', 'right', 'neither'}
Side of sessions to treat as trading minutes.
"""
ONE_MIN = pd.Timedelta(1, "T")
TWO_MIN = pd.Timedelta(2, "T")
ONE_DAY = pd.Timedelta(1, "D")
LEFT_SIDES = ["left", "both"]
RIGHT_SIDES = ["right", "both"]
def __init__(
self,
calendar_name: str,
side: str,
):
self._name = calendar_name.upper()
self._side = side
# --- Exposed constructor arguments ---
@property
def name(self) -> str:
"""Name of corresponding calendar."""
return self._name
@property
def side(self) -> str:
"""Side of calendar for which answers valid."""
return self._side
# --- Properties read (indirectly) from csv file ---
@functools.lru_cache(maxsize=4)
def _answers(self) -> pd.DataFrame:
return get_csv(self.name)
@property
def answers(self) -> pd.DataFrame:
"""Answers as correspoding csv."""
return self._answers()
@property
def sessions(self) -> pd.DatetimeIndex:
"""Session labels."""
return self.answers.index
@property
def opens(self) -> pd.Series:
"""Market open time for each session."""
return self.answers.market_open
@property
def closes(self) -> pd.Series:
"""Market close time for each session."""
return self.answers.market_close
@property
def break_starts(self) -> pd.Series:
"""Break start time for each session."""
return self.answers.break_start
@property
def break_ends(self) -> pd.Series:
"""Break end time for each session."""
return self.answers.break_end
# --- get and helper methods ---
def get_next_session(self, session: pd.Timestamp) -> pd.Timestamp:
"""Get session that immediately follows `session`."""
assert (
session != self.last_session
), "Cannot get session later than last answers' session."
idx = self.sessions.get_loc(session) + 1
return self.sessions[idx]
def session_has_break(self, session: pd.Timestamp) -> bool:
"""Query if `session` has a break."""
return session in self.sessions_with_break
@staticmethod
def get_sessions_sample(sessions: pd.DatetimeIndex):
"""Return sample of given `sessions`.
Sample includes:
All sessions within first two years of `sessions`.
All sessions within last two years of `sessions`.
All sessions falling:
within first 3 days of any month.
from 28th of any month.
from 14th through 16th of any month.
"""
if sessions.empty:
return sessions
mask = (
(sessions < sessions[0] + pd.DateOffset(years=2))
| (sessions > sessions[-1] - pd.DateOffset(years=2))
| (sessions.day <= 3)
| (sessions.day >= 28)
| (14 <= sessions.day) & (sessions.day <= 16)
)
return sessions[mask]
def get_sessions_minutes(
self, start: pd.Timestamp, end: pd.Timestamp | int = 1
) -> pd.DatetimeIndex:
"""Get trading minutes for 1 or more consecutive sessions.
Parameters
----------
start
Session from which to get trading minutes.
end
            Session through which to get trading minutes. Can be passed as:
pd.Timestamp: return will include trading minutes for `end`
session.
int: where int represents number of consecutive sessions
inclusive of `start`, for which require trading
minutes. Default is 1, such that by default will return
trading minutes for only `start` session.
"""
idx = self.sessions.get_loc(start)
stop = idx + end if isinstance(end, int) else self.sessions.get_loc(end) + 1
indexer = slice(idx, stop)
dtis = []
for first, last, last_am, first_pm in zip(
self.first_minutes[indexer],
self.last_minutes[indexer],
self.last_am_minutes[indexer],
self.first_pm_minutes[indexer],
):
if pd.isna(last_am):
dtis.append(pd.date_range(first, last, freq="T"))
else:
dtis.append(pd.date_range(first, last_am, freq="T"))
dtis.append(pd.date_range(first_pm, last, freq="T"))
return dtis[0].union_many(dtis[1:])
# --- Evaluated general calendar properties ---
@functools.lru_cache(maxsize=4)
def _has_a_session_with_break(self) -> pd.DatetimeIndex:
return self.break_starts.notna().any()
@property
def has_a_session_with_break(self) -> bool:
"""Does any session of answers have a break."""
return self._has_a_session_with_break()
@property
def has_a_session_without_break(self) -> bool:
"""Does any session of answers not have a break."""
return self.break_starts.isna().any()
# --- Evaluated properties for first and last sessions ---
@property
def first_session(self) -> pd.Timestamp:
"""First session covered by answers."""
return self.sessions[0]
@property
def last_session(self) -> pd.Timestamp:
"""Last session covered by answers."""
return self.sessions[-1]
@property
def sessions_range(self) -> tuple[pd.Timestamp, pd.Timestamp]:
"""First and last sessions covered by answers."""
return self.first_session, self.last_session
@property
def first_session_open(self) -> pd.Timestamp:
"""Open time of first session covered by answers."""
return self.opens[0]
@property
def last_session_close(self) -> pd.Timestamp:
"""Close time of last session covered by answers."""
return self.closes[-1]
@property
def first_trading_minute(self) -> pd.Timestamp:
open_ = self.first_session_open
return open_ if self.side in self.LEFT_SIDES else open_ + self.ONE_MIN
@property
def last_trading_minute(self) -> pd.Timestamp:
close = self.last_session_close
return close if self.side in self.RIGHT_SIDES else close - self.ONE_MIN
@property
def trading_minutes_range(self) -> tuple[pd.Timestamp, pd.Timestamp]:
"""First and last trading minutes covered by answers."""
return self.first_trading_minute, self.last_trading_minute
# --- out-of-bounds properties ---
@property
def minute_too_early(self) -> pd.Timestamp:
"""Minute earlier than first trading minute."""
return self.first_trading_minute - self.ONE_MIN
@property
def minute_too_late(self) -> pd.Timestamp:
"""Minute later than last trading minute."""
return self.last_trading_minute + self.ONE_MIN
@property
def session_too_early(self) -> pd.Timestamp:
"""Date earlier than first session."""
return self.first_session - self.ONE_DAY
@property
def session_too_late(self) -> pd.Timestamp:
"""Date later than last session."""
return self.last_session + self.ONE_DAY
# --- Evaluated properties covering every session. ---
@functools.lru_cache(maxsize=4)
def _first_minutes(self) -> pd.Series:
if self.side in self.LEFT_SIDES:
minutes = self.opens.copy()
else:
minutes = self.opens + self.ONE_MIN
minutes.name = "first_minutes"
return minutes
@property
def first_minutes(self) -> pd.Series:
"""First trading minute of each session (UTC)."""
return self._first_minutes()
@property
def first_minutes_plus_one(self) -> pd.Series:
"""First trading minute of each session plus one minute."""
return self.first_minutes + self.ONE_MIN
@property
def first_minutes_less_one(self) -> pd.Series:
"""First trading minute of each session less one minute."""
return self.first_minutes - self.ONE_MIN
@functools.lru_cache(maxsize=4)
def _last_minutes(self) -> pd.Series:
if self.side in self.RIGHT_SIDES:
minutes = self.closes.copy()
else:
minutes = self.closes - self.ONE_MIN
minutes.name = "last_minutes"
return minutes
@property
def last_minutes(self) -> pd.Series:
"""Last trading minute of each session."""
return self._last_minutes()
@property
def last_minutes_plus_one(self) -> pd.Series:
"""Last trading minute of each session plus one minute."""
return self.last_minutes + self.ONE_MIN
@property
def last_minutes_less_one(self) -> pd.Series:
"""Last trading minute of each session less one minute."""
return self.last_minutes - self.ONE_MIN
@functools.lru_cache(maxsize=4)
def _last_am_minutes(self) -> pd.Series:
if self.side in self.RIGHT_SIDES:
minutes = self.break_starts.copy()
else:
minutes = self.break_starts - self.ONE_MIN
minutes.name = "last_am_minutes"
return minutes
@property
def last_am_minutes(self) -> pd.Series:
"""Last pre-break trading minute of each session.
NaT if session does not have a break.
"""
return self._last_am_minutes()
@property
def last_am_minutes_plus_one(self) -> pd.Series:
"""Last pre-break trading minute of each session plus one minute."""
return self.last_am_minutes + self.ONE_MIN
@property
def last_am_minutes_less_one(self) -> pd.Series:
"""Last pre-break trading minute of each session less one minute."""
return self.last_am_minutes - self.ONE_MIN
@functools.lru_cache(maxsize=4)
def _first_pm_minutes(self) -> pd.Series:
if self.side in self.LEFT_SIDES:
minutes = self.break_ends.copy()
else:
minutes = self.break_ends + self.ONE_MIN
minutes.name = "first_pm_minutes"
return minutes
@property
def first_pm_minutes(self) -> pd.Series:
"""First post-break trading minute of each session.
NaT if session does not have a break.
"""
return self._first_pm_minutes()
@property
def first_pm_minutes_plus_one(self) -> pd.Series:
"""First post-break trading minute of each session plus one minute."""
return self.first_pm_minutes + self.ONE_MIN
@property
def first_pm_minutes_less_one(self) -> pd.Series:
"""First post-break trading minute of each session less one minute."""
return self.first_pm_minutes - self.ONE_MIN
# --- Evaluated session sets and ranges that meet a specific condition ---
@property
def _mask_breaks(self) -> pd.Series:
return self.break_starts.notna()
@functools.lru_cache(maxsize=4)
def _sessions_with_break(self) -> pd.DatetimeIndex:
return self.sessions[self._mask_breaks]
@property
def sessions_with_break(self) -> pd.DatetimeIndex:
return self._sessions_with_break()
@functools.lru_cache(maxsize=4)
def _sessions_without_break(self) -> pd.DatetimeIndex:
return self.sessions[~self._mask_breaks]
@property
def sessions_without_break(self) -> pd.DatetimeIndex:
return self._sessions_without_break()
@property
def sessions_without_break_run(self) -> pd.DatetimeIndex:
"""Longest run of consecutive sessions without a break."""
s = self.break_starts.isna()
if s.empty:
return pd.DatetimeIndex([], tz="UTC")
trues_grouped = (~s).cumsum()[s]
group_sizes = trues_grouped.value_counts()
max_run_size = group_sizes.max()
max_run_group_id = group_sizes[group_sizes == max_run_size].index[0]
run_without_break = trues_grouped[trues_grouped == max_run_group_id].index
return run_without_break
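    # NB A minimal sketch (toy data, not a calendar) of the cumsum/value_counts
    # run-length trick used above: the group id is constant within each run of
    # True values, so value_counts() yields the run lengths.
    #   >>> import pandas as pd
    #   >>> s = pd.Series([True, True, False, True, True, True])
    #   >>> (~s).cumsum()[s].tolist()
    #   [0, 0, 1, 1, 1]
    # Here the runs have lengths 2 and 3, and group id 1 identifies the longest.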
@property
def sessions_without_break_range(self) -> tuple[pd.Timestamp, pd.Timestamp] | None:
"""Longest session range that does not include a session with a break.
Returns None if all sessions have a break.
"""
sessions = self.sessions_without_break_run
if sessions.empty:
return None
return sessions[0], sessions[-1]
@property
def _mask_sessions_without_gap_after(self) -> pd.Series:
if self.side == "neither":
            # will always have gap after if neither open nor close are trading
# minutes (assuming sessions cannot overlap)
return pd.Series(False, index=self.sessions)
elif self.side == "both":
# a trading minute cannot be a minute of more than one session.
assert not (self.closes == self.opens.shift(-1)).any()
# there will be no gap if next open is one minute after previous close
closes_plus_min = self.closes + pd.Timedelta(1, "T")
return self.opens.shift(-1) == closes_plus_min
else:
return self.opens.shift(-1) == self.closes
@property
def _mask_sessions_without_gap_before(self) -> pd.Series:
if self.side == "neither":
            # will always have gap before if neither open nor close are trading
# minutes (assuming sessions cannot overlap)
return pd.Series(False, index=self.sessions)
elif self.side == "both":
# a trading minute cannot be a minute of more than one session.
assert not (self.closes == self.opens.shift(-1)).any()
# there will be no gap if previous close is one minute before next open
opens_minus_one = self.opens - pd.Timedelta(1, "T")
return self.closes.shift(1) == opens_minus_one
else:
return self.closes.shift(1) == self.opens
@functools.lru_cache(maxsize=4)
def _sessions_without_gap_after(self) -> pd.DatetimeIndex:
mask = self._mask_sessions_without_gap_after
return self.sessions[mask][:-1]
@property
def sessions_without_gap_after(self) -> pd.DatetimeIndex:
"""Sessions not followed by a non-trading minute.
Rather, sessions immediately followed by first trading minute of
next session.
"""
return self._sessions_without_gap_after()
@functools.lru_cache(maxsize=4)
def _sessions_with_gap_after(self) -> pd.DatetimeIndex:
mask = self._mask_sessions_without_gap_after
return self.sessions[~mask][:-1]
@property
def sessions_with_gap_after(self) -> pd.DatetimeIndex:
"""Sessions followed by a non-trading minute."""
return self._sessions_with_gap_after()
@functools.lru_cache(maxsize=4)
def _sessions_without_gap_before(self) -> pd.DatetimeIndex:
mask = self._mask_sessions_without_gap_before
return self.sessions[mask][1:]
@property
    def sessions_without_gap_before(self) -> pd.DatetimeIndex:
        """Sessions not preceded by a non-trading minute.
        Rather, sessions immediately preceded by last trading minute of
previous session.
"""
return self._sessions_without_gap_before()
@functools.lru_cache(maxsize=4)
def _sessions_with_gap_before(self) -> pd.DatetimeIndex:
mask = self._mask_sessions_without_gap_before
return self.sessions[~mask][1:]
@property
    def sessions_with_gap_before(self) -> pd.DatetimeIndex:
        """Sessions preceded by a non-trading minute."""
return self._sessions_with_gap_before()
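    # NB A minimal sketch of the gap detection used by the masks above, assuming
    # side "left" (the `else` branch) and toy open/close times:
    #   >>> import pandas as pd
    #   >>> opens = pd.Series(pd.to_datetime(["2021-01-04 09:00", "2021-01-04 17:00"]))
    #   >>> closes = pd.Series(pd.to_datetime(["2021-01-04 17:00", "2021-01-05 01:00"]))
    #   >>> (opens.shift(-1) == closes).tolist()
    #   [True, False]
    # The first toy session runs straight into the second (no gap after); the
    # final element is False only because shift(-1) yields NaT there.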
# times are changing...
@functools.lru_cache(maxsize=16)
def _get_sessions_with_times_different_to_next_session(
self,
column: str, # typing.Literal["opens", "closes", "break_starts", "break_ends"]
) -> list[pd.DatetimeIndex]:
"""For a given answers column, get session labels where time differs
from time of next session.
Where `column` is a break time ("break_starts" or "break_ends"), return
will not include sessions when next session has a different `has_break`
status. For example, if session_0 has a break and session_1 does not have
        a break, or vice versa, then session_0 will not be included in the return. For
sessions followed by a session with a different `has_break` status, see
`_get_sessions_with_has_break_different_to_next_session`.
Returns
-------
        list of pd.DatetimeIndex
[0] sessions with earlier next session
[1] sessions with later next session
"""
# column takes string to allow lru_cache (Series not hashable)
is_break_col = column[0] == "b"
column_ = getattr(self, column)
if is_break_col:
if column_.isna().all():
                return [pd.DatetimeIndex([], tz="UTC")] * 2
column_ = column_.fillna(method="ffill").fillna(method="bfill")
diff = (column_.shift(-1) - column_)[:-1]
remainder = diff % pd.Timedelta(hours=24)
mask = remainder != pd.Timedelta(0)
sessions = self.sessions[:-1][mask]
next_session_earlier_mask = remainder[mask] > pd.Timedelta(hours=12)
next_session_earlier = sessions[next_session_earlier_mask]
next_session_later = sessions[~next_session_earlier_mask]
if is_break_col:
mask = next_session_earlier.isin(self.sessions_without_break)
next_session_earlier = next_session_earlier.drop(next_session_earlier[mask])
mask = next_session_later.isin(self.sessions_without_break)
next_session_later = next_session_later.drop(next_session_later[mask])
return [next_session_earlier, next_session_later]
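    # NB A minimal sketch of the modulo trick above, with toy open times: the
    # difference between consecutive sessions taken modulo 24 hours isolates a
    # change in local wall-clock time from the ordinary whole-day step.
    #   >>> import pandas as pd
    #   >>> opens = pd.Series(pd.to_datetime(
    #   ...     ["2021-01-04 14:30", "2021-01-05 14:30", "2021-01-06 13:30"]))
    #   >>> ((opens.shift(-1) - opens)[:-1] % pd.Timedelta(hours=24)).tolist()
    #   [Timedelta('0 days 00:00:00'), Timedelta('0 days 23:00:00')]
    # A zero remainder means the time is unchanged; a remainder over 12 hours
    # (as for the second session) is read as the next session being earlier.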
@property
def _sessions_with_opens_different_to_next_session(
self,
) -> list[pd.DatetimeIndex]:
return self._get_sessions_with_times_different_to_next_session("opens")
@property
def _sessions_with_closes_different_to_next_session(
self,
) -> list[pd.DatetimeIndex]:
return self._get_sessions_with_times_different_to_next_session("closes")
@property
def _sessions_with_break_start_different_to_next_session(
self,
) -> list[pd.DatetimeIndex]:
return self._get_sessions_with_times_different_to_next_session("break_starts")
@property
def _sessions_with_break_end_different_to_next_session(
self,
) -> list[pd.DatetimeIndex]:
return self._get_sessions_with_times_different_to_next_session("break_ends")
@property
def sessions_next_open_earlier(self) -> pd.DatetimeIndex:
return self._sessions_with_opens_different_to_next_session[0]
@property
def sessions_next_open_later(self) -> pd.DatetimeIndex:
return self._sessions_with_opens_different_to_next_session[1]
@property
def sessions_next_open_different(self) -> pd.DatetimeIndex:
return self.sessions_next_open_earlier.union(self.sessions_next_open_later)
@property
def sessions_next_close_earlier(self) -> pd.DatetimeIndex:
return self._sessions_with_closes_different_to_next_session[0]
@property
def sessions_next_close_later(self) -> pd.DatetimeIndex:
return self._sessions_with_closes_different_to_next_session[1]
@property
def sessions_next_close_different(self) -> pd.DatetimeIndex:
return self.sessions_next_close_earlier.union(self.sessions_next_close_later)
@property
def sessions_next_break_start_earlier(self) -> pd.DatetimeIndex:
return self._sessions_with_break_start_different_to_next_session[0]
@property
def sessions_next_break_start_later(self) -> pd.DatetimeIndex:
return self._sessions_with_break_start_different_to_next_session[1]
@property
def sessions_next_break_start_different(self) -> pd.DatetimeIndex:
earlier = self.sessions_next_break_start_earlier
later = self.sessions_next_break_start_later
return earlier.union(later)
@property
def sessions_next_break_end_earlier(self) -> pd.DatetimeIndex:
return self._sessions_with_break_end_different_to_next_session[0]
@property
def sessions_next_break_end_later(self) -> pd.DatetimeIndex:
return self._sessions_with_break_end_different_to_next_session[1]
@property
def sessions_next_break_end_different(self) -> pd.DatetimeIndex:
earlier = self.sessions_next_break_end_earlier
later = self.sessions_next_break_end_later
return earlier.union(later)
@functools.lru_cache(maxsize=4)
def _get_sessions_with_has_break_different_to_next_session(
self,
) -> tuple[pd.DatetimeIndex, pd.DatetimeIndex]:
"""Get sessions with 'has_break' different to next session.
Returns
-------
tuple[pd.DatetimeIndex, pd.DatetimeIndex]
[0] Sessions that have a break and are immediately followed by
a session which does not have a break.
[1] Sessions that do not have a break and are immediately
followed by a session which does have a break.
"""
mask = (self.break_starts.notna() & self.break_starts.shift(-1).isna())[:-1]
sessions_with_break_next_session_without_break = self.sessions[:-1][mask]
mask = (self.break_starts.isna() & self.break_starts.shift(-1).notna())[:-1]
sessions_without_break_next_session_with_break = self.sessions[:-1][mask]
return (
sessions_with_break_next_session_without_break,
sessions_without_break_next_session_with_break,
)
@property
def sessions_with_break_next_session_without_break(self) -> pd.DatetimeIndex:
return self._get_sessions_with_has_break_different_to_next_session()[0]
@property
def sessions_without_break_next_session_with_break(self) -> pd.DatetimeIndex:
return self._get_sessions_with_has_break_different_to_next_session()[1]
@functools.lru_cache(maxsize=4)
def _sessions_next_time_different(self) -> pd.DatetimeIndex:
return self.sessions_next_open_different.union_many(
[
self.sessions_next_close_different,
self.sessions_next_break_start_different,
self.sessions_next_break_end_different,
self.sessions_with_break_next_session_without_break,
self.sessions_without_break_next_session_with_break,
]
)
@property
def sessions_next_time_different(self) -> pd.DatetimeIndex:
"""Sessions where next session has a different time for any column.
Includes sessions where next session has a different `has_break`
status.
"""
return self._sessions_next_time_different()
# session blocks...
def _create_changing_times_session_block(
self, session: pd.Timestamp
) -> pd.DatetimeIndex:
"""Create block of sessions with changing times.
Given a `session` known to have at least one time (open, close,
break_start or break_end) different from the next session, returns
a block of consecutive sessions ending with the first session after
`session` that has the same times as the session that immediately
        precedes it (i.e. the last two sessions of the block will have the
same times), or the last calendar session.
"""
start_idx = self.sessions.get_loc(session)
end_idx = start_idx + 1
while self.sessions[end_idx] in self.sessions_next_time_different:
end_idx += 1
end_idx += 2 # +1 to include session with same times, +1 to serve as end index
return self.sessions[start_idx:end_idx]
def _get_normal_session_block(self) -> pd.DatetimeIndex:
"""Block of 3 sessions with unchanged timings."""
start_idx = len(self.sessions) // 3
end_idx = start_idx + 21
for i in range(start_idx, end_idx):
times_1 = self.answers.iloc[i].dt.time
times_2 = self.answers.iloc[i + 1].dt.time
times_3 = self.answers.iloc[i + 2].dt.time
one_and_two_equal = (times_1 == times_2) | (times_1.isna() & times_2.isna())
one_and_three_equal = (times_1 == times_3) | (
times_1.isna() & times_3.isna()
)
if (one_and_two_equal & one_and_three_equal).all():
break
assert i < (end_idx - 1), "Unable to evaluate a normal session block!"
return self.sessions[i : i + 3]
def _get_session_block(
self, from_session_of: pd.DatetimeIndex, to_session_of: pd.DatetimeIndex
) -> pd.DatetimeIndex:
"""Get session block with bounds defined by sessions of given indexes.
Block will start with middle session of `from_session_of`.
Block will run to the nearest subsequent session of `to_session_of`
        (or `self.last_session` if this comes first). Block will end with
        the session that immediately follows this session.
"""
i = len(from_session_of) // 2
start_session = from_session_of[i]
start_idx = self.sessions.get_loc(start_session)
end_idx = start_idx + 1
end_session = self.sessions[end_idx]
while end_session not in to_session_of and end_session != self.last_session:
end_idx += 1
end_session = self.sessions[end_idx]
return self.sessions[start_idx : end_idx + 2]
@functools.lru_cache(maxsize=4)
def _session_blocks(self) -> dict[str, pd.DatetimeIndex]:
blocks = {}
blocks["normal"] = self._get_normal_session_block()
blocks["first_three"] = self.sessions[:3]
blocks["last_three"] = self.sessions[-3:]
# blocks here include where:
# session 1 has at least one different time from session 0
# session 0 has a break and session 1 does not (and vice versa)
sessions_indexes = (
("next_open_earlier", self.sessions_next_open_earlier),
("next_open_later", self.sessions_next_open_later),
("next_close_earlier", self.sessions_next_close_earlier),
("next_close_later", self.sessions_next_close_later),
("next_break_start_earlier", self.sessions_next_break_start_earlier),
("next_break_start_later", self.sessions_next_break_start_later),
("next_break_end_earlier", self.sessions_next_break_end_earlier),
("next_break_end_later", self.sessions_next_break_end_later),
(
"with_break_to_without_break",
self.sessions_with_break_next_session_without_break,
),
(
"without_break_to_with_break",
self.sessions_without_break_next_session_with_break,
),
)
for name, index in sessions_indexes:
if index.empty:
blocks[name] = pd.DatetimeIndex([], tz="UTC")
else:
session = index[0]
blocks[name] = self._create_changing_times_session_block(session)
# blocks here move from session with gap to session without gap and vice versa
if (not self.sessions_with_gap_after.empty) and (
not self.sessions_without_gap_after.empty
):
without_gap_to_with_gap = self._get_session_block(
self.sessions_without_gap_after, self.sessions_with_gap_after
)
with_gap_to_without_gap = self._get_session_block(
self.sessions_with_gap_after, self.sessions_without_gap_after
)
else:
without_gap_to_with_gap = pd.DatetimeIndex([], tz="UTC")
with_gap_to_without_gap = pd.DatetimeIndex([], tz="UTC")
blocks["without_gap_to_with_gap"] = without_gap_to_with_gap
blocks["with_gap_to_without_gap"] = with_gap_to_without_gap
# blocks that adjoin or contain a non_session date
follows_non_session = pd.DatetimeIndex([], tz="UTC")
preceeds_non_session = pd.DatetimeIndex([], tz="UTC")
contains_non_session = pd.DatetimeIndex([], tz="UTC")
if len(self.non_sessions) > 1:
diff = self.non_sessions[1:] - self.non_sessions[:-1]
mask = diff != pd.Timedelta(
1, "D"
) # non_session dates followed by a session
valid_non_sessions = self.non_sessions[:-1][mask]
if len(valid_non_sessions) > 1:
slce = self.sessions.slice_indexer(
valid_non_sessions[0], valid_non_sessions[1]
)
sessions_between_non_sessions = self.sessions[slce]
block_length = min(2, len(sessions_between_non_sessions))
follows_non_session = sessions_between_non_sessions[:block_length]
preceeds_non_session = sessions_between_non_sessions[-block_length:]
# take session before and session after non-session
contains_non_session = self.sessions[slce.stop - 1 : slce.stop + 1]
blocks["follows_non_session"] = follows_non_session
blocks["preceeds_non_session"] = preceeds_non_session
blocks["contains_non_session"] = contains_non_session
return blocks
@property
def session_blocks(self) -> dict[str, pd.DatetimeIndex]:
"""Dictionary of session blocks of a particular behaviour.
A block comprises either a single session or multiple contiguous
sessions.
        Keys:
            "normal" - three sessions with unchanging timings.
            "first_three" - answers' first three sessions.
            "last_three" - answers' last three sessions.
"next_open_earlier" - session 1 open is earlier than session 0
open.
"next_open_later" - session 1 open is later than session 0
open.
"next_close_earlier" - session 1 close is earlier than session
0 close.
"next_close_later" - session 1 close is later than session 0
close.
"next_break_start_earlier" - session 1 break_start is earlier
than session 0 break_start.
"next_break_start_later" - session 1 break_start is later than
session 0 break_start.
"next_break_end_earlier" - session 1 break_end is earlier than
session 0 break_end.
"next_break_end_later" - session 1 break_end is later than
session 0 break_end.
"with_break_to_without_break" - session 0 has a break, session
1 does not have a break.
"without_break_to_with_break" - session 0 does not have a
break, session 1 does have a break.
"without_gap_to_with_gap" - session 0 is not followed by a
gap, session -2 is followed by a gap, session -1 is
                preceded by a gap.
"with_gap_to_without_gap" - session 0 is followed by a gap,
session -2 is not followed by a gap, session -1 is not
                preceded by a gap.
            "follows_non_session" - one or two sessions where session 0
                is preceded by a date that is a non-session.
            "preceeds_non_session" - one or two sessions where session -1
                is followed by a date that is a non-session.
            "contains_non_session" - two sessions with at least one
                non-session date in between.
If no such session block exists for any key then value will take an
empty DatetimeIndex (UTC).
"""
return self._session_blocks()
def session_block_generator(self) -> abc.Iterator[tuple[str, pd.DatetimeIndex]]:
"""Generator of session blocks of a particular behaviour."""
for name, block in self.session_blocks.items():
if not block.empty:
yield (name, block)
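    # NB A hedged usage sketch (names are illustrative): a test wanting one
    # example of each behaviour can iterate the generator rather than the dict.
    #   >>> ans = Answers("XLON", "left")            # hypothetical name/side
    #   >>> for name, block in ans.session_block_generator():
    #   ...     assert not block.empty               # empty blocks are skipped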
@functools.lru_cache(maxsize=4)
def _session_block_minutes(self) -> dict[str, pd.DatetimeIndex]:
d = {}
for name, block in self.session_blocks.items():
if block.empty:
d[name] = pd.DatetimeIndex([], tz="UTC")
continue
d[name] = self.get_sessions_minutes(block[0], len(block))
return d
@property
def session_block_minutes(self) -> dict[str, pd.DatetimeIndex]:
"""Trading minutes for each `session_block`.
Key:
Session block name as documented to `session_blocks`.
Value:
Trading minutes of corresponding session block.
"""
return self._session_block_minutes()
@property
def sessions_sample(self) -> pd.DatetimeIndex:
"""Sample of normal and unusual sessions.
Sample comprises set of sessions of all `session_blocks` (see
`session_blocks` doc). In this way sample includes at least one
        sample of every identified unique circumstance.
"""
dtis = list(self.session_blocks.values())
return dtis[0].union_many(dtis[1:])
# non-sessions...
@functools.lru_cache(maxsize=4)
def _non_sessions(self) -> pd.DatetimeIndex:
all_dates = pd.date_range(
start=self.first_session, end=self.last_session, freq="D"
)
return all_dates.difference(self.sessions)
@property
def non_sessions(self) -> pd.DatetimeIndex:
"""Dates (UTC midnight) within answers range that are not sessions."""
return self._non_sessions()
@property
def sessions_range_defined_by_non_sessions(
self,
    ) -> tuple[tuple[pd.Timestamp, pd.Timestamp], pd.DatetimeIndex] | None:
"""Range containing sessions although defined with non-sessions.
Returns
-------
        tuple[tuple[pd.Timestamp, pd.Timestamp], pd.DatetimeIndex]:
[0] tuple[pd.Timestamp, pd.Timestamp]:
[0] range start as non-session date.
[1] range end as non-session date.
[1] pd.DatetimeIndex:
Sessions in range.
"""
non_sessions = self.non_sessions
if len(non_sessions) <= 1:
return None
limit = len(self.non_sessions) - 2
i = 0
start, end = non_sessions[i], non_sessions[i + 1]
while (end - start) < pd.Timedelta(4, "D"):
i += 1
start, end = non_sessions[i], non_sessions[i + 1]
            if i == limit:
                # Unable to evaluate range from consecutive non-sessions
                # that covers >= 3 sessions. Just go with max range...
                start, end = non_sessions[0], non_sessions[-1]
                break
slice_start, slice_end = self.sessions.searchsorted((start, end))
return (start, end), self.sessions[slice_start:slice_end]
@property
def non_sessions_run(self) -> pd.DatetimeIndex:
"""Longest run of non_sessions."""
ser = self.sessions.to_series()
diff = ser.shift(-1) - ser
max_diff = diff.max()
if max_diff == pd.Timedelta(1, "D"):
return pd.DatetimeIndex([])
session_before_run = diff[diff == max_diff].index[-1]
run = pd.date_range(
start=session_before_run + pd.Timedelta(1, "D"),
periods=(max_diff // pd.Timedelta(1, "D")) - 1,
freq="D",
)
assert run.isin(self.non_sessions).all()
assert run[0] > self.first_session
assert run[-1] < self.last_session
return run
@property
def non_sessions_range(self) -> tuple[pd.Timestamp, pd.Timestamp] | None:
"""Longest range covering a period without a session."""
non_sessions_run = self.non_sessions_run
if non_sessions_run.empty:
return None
else:
return self.non_sessions_run[0], self.non_sessions_run[-1]
# --- Evaluated sets of minutes ---
@functools.lru_cache(maxsize=4)
def _evaluate_trading_and_break_minutes(self) -> tuple[tuple, tuple]:
sessions = self.sessions_sample
first_mins = self.first_minutes[sessions]
first_mins_plus_one = first_mins + self.ONE_MIN
last_mins = self.last_minutes[sessions]
last_mins_less_one = last_mins - self.ONE_MIN
trading_mins = []
break_mins = []
for session, mins_ in zip(
sessions,
zip(first_mins, first_mins_plus_one, last_mins, last_mins_less_one),
):
trading_mins.append((mins_, session))
if self.has_a_session_with_break:
last_am_mins = self.last_am_minutes[sessions]
last_am_mins = last_am_mins[last_am_mins.notna()]
first_pm_mins = self.first_pm_minutes[last_am_mins.index]
last_am_mins_less_one = last_am_mins - self.ONE_MIN
last_am_mins_plus_one = last_am_mins + self.ONE_MIN
last_am_mins_plus_two = last_am_mins + self.TWO_MIN
first_pm_mins_plus_one = first_pm_mins + self.ONE_MIN
first_pm_mins_less_one = first_pm_mins - self.ONE_MIN
first_pm_mins_less_two = first_pm_mins - self.TWO_MIN
for session, mins_ in zip(
last_am_mins.index,
zip(
last_am_mins,
last_am_mins_less_one,
first_pm_mins,
first_pm_mins_plus_one,
),
):
trading_mins.append((mins_, session))
for session, mins_ in zip(
last_am_mins.index,
zip(
last_am_mins_plus_one,
last_am_mins_plus_two,
first_pm_mins_less_one,
first_pm_mins_less_two,
),
):
break_mins.append((mins_, session))
return (tuple(trading_mins), tuple(break_mins))
@property
def trading_minutes(self) -> tuple[tuple[tuple[pd.Timestamp], pd.Timestamp]]:
"""Edge trading minutes of `sessions_sample`.
Returns
-------
tuple of tuple[tuple[trading_minutes], session]
tuple[trading_minutes] includes:
first two trading minutes of a session.
last two trading minutes of a session.
If breaks:
last two trading minutes of session's am subsession.
first two trading minutes of session's pm subsession.
session
Session of trading_minutes
"""
return self._evaluate_trading_and_break_minutes()[0]
def trading_minutes_only(self) -> abc.Iterator[pd.Timestamp]:
"""Generator of trading minutes of `self.trading_minutes`."""
for mins, _ in self.trading_minutes:
for minute in mins:
yield minute
@property
def trading_minute(self) -> pd.Timestamp:
"""A single trading minute."""
return self.trading_minutes[0][0][0]
@property
def break_minutes(self) -> tuple[tuple[tuple[pd.Timestamp], pd.Timestamp]]:
"""Sample of break minutes of `sessions_sample`.
Returns
-------
tuple of tuple[tuple[break_minutes], session]
tuple[break_minutes]:
first two minutes of a break.
last two minutes of a break.
session
Session of break_minutes
"""
return self._evaluate_trading_and_break_minutes()[1]
def break_minutes_only(self) -> abc.Iterator[pd.Timestamp]:
"""Generator of break minutes of `self.break_minutes`."""
for mins, _ in self.break_minutes:
for minute in mins:
yield minute
@functools.lru_cache(maxsize=4)
def _non_trading_minutes(
self,
) -> tuple[tuple[tuple[pd.Timestamp], pd.Timestamp, pd.Timestamp]]:
non_trading_mins = []
sessions = self.sessions_sample
sessions = prev_sessions = sessions[sessions.isin(self.sessions_with_gap_after)]
next_sessions = self.sessions[self.sessions.get_indexer(sessions) + 1]
last_mins_plus_one = self.last_minutes[sessions] + self.ONE_MIN
first_mins_less_one = self.first_minutes[next_sessions] - self.ONE_MIN
for prev_session, next_session, mins_ in zip(
prev_sessions, next_sessions, zip(last_mins_plus_one, first_mins_less_one)
):
non_trading_mins.append((mins_, prev_session, next_session))
return tuple(non_trading_mins)
@property
def non_trading_minutes(
self,
) -> tuple[tuple[tuple[pd.Timestamp], pd.Timestamp, pd.Timestamp]]:
"""non_trading_minutes that edge `sessions_sample`.
NB. Does not include break minutes.
Returns
-------
tuple of tuple[tuple[non-trading minute], previous session, next session]
tuple[non-trading minute]
Two non-trading minutes.
[0] first non-trading minute to follow a session.
[1] last non-trading minute prior to the next session.
previous session
                Session that precedes non-trading minutes.
next session
Session that follows non-trading minutes.
See Also
--------
break_minutes
"""
return self._non_trading_minutes()
def non_trading_minutes_only(self) -> abc.Iterator[pd.Timestamp]:
"""Generator of non-trading minutes of `self.non_trading_minutes`."""
for mins, _, _ in self.non_trading_minutes:
for minute in mins:
yield minute
# --- method-specific inputs/outputs ---
def prev_next_open_close_minutes(
self,
) -> abc.Iterator[
tuple[
pd.Timestamp,
tuple[
pd.Timestamp | None,
pd.Timestamp | None,
pd.Timestamp | None,
pd.Timestamp | None,
],
]
]:
"""Generator of test parameters for prev/next_open/close methods.
Inputs include following minutes of each session:
open
one minute prior to open (not included for first session)
one minute after open
close
one minute before close
one minute after close (not included for last session)
NB Assumed that minutes prior to first open and after last close
will be handled via parse_timestamp.
Yields
------
2-tuple:
            [0] Input a minute as pd.Timestamp
[1] 4 tuple of expected output of corresponding method:
[0] previous_open as pd.Timestamp | None
[1] previous_close as pd.Timestamp | None
[2] next_open as pd.Timestamp | None
[3] next_close as pd.Timestamp | None
NB None indicates that corresponding method is expected to
raise a ValueError for this input.
"""
close_is_next_open_bv = self.closes == self.opens.shift(-1)
open_was_prev_close_bv = self.opens == self.closes.shift(+1)
close_is_next_open = close_is_next_open_bv[0]
# minutes for session 0
minute = self.opens[0]
yield (minute, (None, None, self.opens[1], self.closes[0]))
minute = minute + self.ONE_MIN
yield (minute, (self.opens[0], None, self.opens[1], self.closes[0]))
minute = self.closes[0]
next_open = self.opens[2] if close_is_next_open else self.opens[1]
yield (minute, (self.opens[0], None, next_open, self.closes[1]))
minute += self.ONE_MIN
prev_open = self.opens[1] if close_is_next_open else self.opens[0]
yield (minute, (prev_open, self.closes[0], next_open, self.closes[1]))
minute = self.closes[0] - self.ONE_MIN
yield (minute, (self.opens[0], None, self.opens[1], self.closes[0]))
# minutes for sessions over [1:-1] except for -1 close and 'close + one_min'
opens = self.opens[1:-1]
closes = self.closes[1:-1]
prev_opens = self.opens[:-2]
prev_closes = self.closes[:-2]
next_opens = self.opens[2:]
next_closes = self.closes[2:]
opens_after_next = self.opens[3:]
# add dummy row to equal lengths (won't be used)
_ = pd.Series(pd.Timestamp("2200-01-01", tz="UTC"))
opens_after_next = opens_after_next.append(_)
stop = closes[-1]
for (
open_,
close,
prev_open,
prev_close,
next_open,
next_close,
open_after_next,
close_is_next_open,
open_was_prev_close,
) in zip(
opens,
closes,
prev_opens,
prev_closes,
next_opens,
next_closes,
opens_after_next,
close_is_next_open_bv[1:-2],
open_was_prev_close_bv[1:-2],
):
if not open_was_prev_close:
# only include open minutes if not otherwise duplicating
# evaluations already made for prior close.
yield (open_, (prev_open, prev_close, next_open, close))
yield (open_ - self.ONE_MIN, (prev_open, prev_close, open_, close))
yield (open_ + self.ONE_MIN, (open_, prev_close, next_open, close))
yield (close - self.ONE_MIN, (open_, prev_close, next_open, close))
if close != stop:
next_open_ = open_after_next if close_is_next_open else next_open
yield (close, (open_, prev_close, next_open_, next_close))
open_ = next_open if close_is_next_open else open_
yield (close + self.ONE_MIN, (open_, close, next_open_, next_close))
# close and 'close + one_min' for session -2
minute = self.closes[-2]
next_open = None if close_is_next_open_bv[-2] else self.opens[-1]
yield (minute, (self.opens[-2], self.closes[-3], next_open, self.closes[-1]))
minute += self.ONE_MIN
prev_open = self.opens[-1] if close_is_next_open_bv[-2] else self.opens[-2]
yield (minute, (prev_open, self.closes[-2], next_open, self.closes[-1]))
# minutes for session -1
if not open_was_prev_close_bv[-1]:
open_ = self.opens[-1]
prev_open = self.opens[-2]
prev_close = self.closes[-2]
next_open = None
close = self.closes[-1]
yield (open_, (prev_open, prev_close, next_open, close))
yield (open_ - self.ONE_MIN, (prev_open, prev_close, open_, close))
yield (open_ + self.ONE_MIN, (open_, prev_close, next_open, close))
minute = self.closes[-1]
next_open = self.opens[2] if close_is_next_open_bv[-1] else self.opens[1]
yield (minute, (self.opens[-1], self.closes[-2], None, None))
minute -= self.ONE_MIN
yield (minute, (self.opens[-1], self.closes[-2], None, self.closes[-1]))
# dunder
def __repr__(self) -> str:
return f"<Answers: calendar {self.name}, side {self.side}>"
def no_parsing(f: typing.Callable):
"""Wrap a method under test so that it skips input parsing."""
return lambda *args, **kwargs: f(*args, _parse=False, **kwargs)
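# NB A minimal usage sketch for `no_parsing`, assuming `cal` is a calendar
# instance and the wrapped method accepts a `_parse` keyword:
#   >>> minute_to_session = no_parsing(cal.minute_to_session_label)  # hypothetical method
#   >>> minute_to_session(minute)  # forwarded with _parse=False
# Tests can thereby pass already-parsed inputs without the parsing overhead.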
class ExchangeCalendarTestBaseNew:
"""Test base for an ExchangeCalendar.
Notes
-----
=== Fixtures ===
    In accordance with the pytest framework, whilst methods are required to
have `self` as their first argument, no method should use `self`.
All required inputs should come by way of fixtures received to the
test method's arguments.
Methods that are directly or indirectly dependent on the evaluation of
trading minutes should be tested against the parameterized
`all_calendars_with_answers` fixture. This fixture will execute the
test against multiple calendar instances, one for each viable `side`.
The following methods directly evaluate trading minutes:
all_minutes
_last_minute_nanos()
_last_am_minute_nanos()
_first_minute_nanos()
_first_pm_minute_nanos()
NB this list does not include methods that indirectly evaluate methods
by way of calling (directly or indirectly) one of the above methods.
Methods that are not dependent on the evaluation of trading minutes
    should be tested against only the `default_calendar_with_answers`
or `default_calendar` fixture.
Calendar instances provided by fixtures should be used exclusively to
call the method being tested. NO TEST INPUT OR EXPECTED OUTPUT SHOULD
BE EVALUATED BY WAY OF CALLING A CALENDAR METHOD. Rather, test
inputs and expected output should be taken directly, or evaluated from,
properties/methods of the corresponding Answers fixture.
Subclasses are required to override a limited number of fixtures and
may be required to override others. Refer to the block comments.
"""
# subclass must override the following fixtures
@pytest.fixture(scope="class")
def calendar_cls(self) -> abc.Iterator[typing.Type[ExchangeCalendar]]:
"""ExchangeCalendar class to be tested.
Examples:
XNYSExchangeCalendar
AlwaysOpenCalendar
"""
raise NotImplementedError("fixture must be implemented on subclass")
@pytest.fixture
def max_session_hours(self) -> abc.Iterator[int | float]:
"""Largest number of hours that can comprise a single session.
Examples:
8
6.5
"""
raise NotImplementedError("fixture must be implemented on subclass")
# if subclass has a 24h session then subclass must override this fixture.
    # Define on the subclass as here, with the only difference being that
    # ["left", "right"] is passed to the decorator's 'params' arg (24h
    # calendars cannot have a side defined as 'both' or 'neither').
@pytest.fixture(scope="class", params=["both", "left", "right", "neither"])
def all_calendars_with_answers(
self, request, calendars, answers
) -> abc.Iterator[tuple[ExchangeCalendar, Answers]]:
"""Parameterized calendars and answers for each side."""
yield (calendars[request.param], answers[request.param])
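    # NB A hedged sketch of a minimal subclass, assuming a calendar class named
    # `XLONExchangeCalendar` exists; only the two required fixtures are shown.
    #   >>> class TestXLONCalendar(ExchangeCalendarTestBaseNew):
    #   ...     @pytest.fixture(scope="class")
    #   ...     def calendar_cls(self):
    #   ...         yield XLONExchangeCalendar
    #   ...     @pytest.fixture
    #   ...     def max_session_hours(self):
    #   ...         yield 8.5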
# subclass should override the following fixtures in the event that the
# default defined here does not apply.
@pytest.fixture
def start_bound(self) -> abc.Iterator[pd.Timestamp | None]:
"""Earliest date for which calendar can be instantiated, or None if
there is no start bound."""
yield None
@pytest.fixture
def end_bound(self) -> abc.Iterator[pd.Timestamp | None]:
"""Latest date for which calendar can be instantiated, or None if
there is no end bound."""
yield None
# Subclass can optionally override the following fixtures. By overriding
# a fixture the associated test will be executed with input as yielded
    # by the fixture. Where fixtures are not overridden the associated tests
# will be skipped.
@pytest.fixture
def regular_holidays_sample(self) -> abc.Iterator[list[str]]:
"""Sample of known regular calendar holidays. Empty list if no holidays.
`test_regular_holidays_sample` will check that each date does not
represent a calendar session.
Example return:
["2020-12-25", "2021-01-01", ...]
"""
yield []
@pytest.fixture
def adhoc_holidays_sample(self) -> abc.Iterator[list[str]]:
"""Sample of adhoc calendar holidays. Empty list if no adhoc holidays.
`test_adhoc_holidays_sample` will check that each date does not
represent a calendar session.
Example return:
["2015-04-17", "2021-09-12", ...]
"""
yield []
@pytest.fixture
def non_holidays_sample(self) -> abc.Iterator[list[str]]:
"""Sample of known dates that are not holidays.
`test_non_holidays_sample` will check that each date represents a
calendar session.
        Subclass should use this fixture if it wishes to test edge cases, for
        example where a session is an exception to a rule, or where a session
        precedes/follows a holiday that is an exception to a rule.
Example return:
["2019-12-27", "2020-01-02", ...]
"""
yield []
@pytest.fixture
def late_opens_sample(self) -> abc.Iterator[list[str]]:
"""Sample of known calendar sessions with late opens.
`test_late_opens_sample` will check that each date represents a
session with a late open.
Example returns:
["2022-01-03", "2022-04-22", ...]
"""
yield []
@pytest.fixture
def early_closes_sample(self) -> abc.Iterator[list[str]]:
"""Sample of known calendar sessions with early closes.
`test_early_closes_sample` will check that each date represents a
session with an early close.
Example returns:
["2019-12-24", "2019-12-31", ...]
"""
yield []
@pytest.fixture
def early_closes_sample_time(self) -> abc.Iterator[pd.Timedelta | None]:
"""Local close time of sessions of `early_closes_sample` fixture.
`test_early_closes_sample_time` will check all sessions of
`early_closes_sample` have this close time.
Only override fixture if:
            - `early_closes_sample` is overridden by subclass
- ALL sessions of `early_closes_sample` have the same local
close time (if sessions of `early_closes_sample` have
different local close times then the subclass should
instead check close times with a test defined on the
subclass).
Example returns:
pd.Timedelta(14, "H") # 14:00 local time
pd.Timedelta(hours=13, minutes=15) # 13:15 local time
"""
yield None
@pytest.fixture
def non_early_closes_sample(self) -> abc.Iterator[list[str]]:
"""Sample of known calendar sessions with normal close times.
`test_non_early_closes_sample` will check each date does not
represent a calendar session with an early close.
Subclass should use this fixture to test edge cases, for example
where an otherwise early close is an exception to a rule.
Example return:
["2022-12-23", "2022-12-30]
"""
yield []
@pytest.fixture
def non_early_closes_sample_time(self) -> abc.Iterator[pd.Timedelta | None]:
"""Local close time of sessions of `non_early_closes_sample` fixture.
`test_non_early_closes_sample_time` will check all sessions of
`non_early_closes_sample` have this close time.
Only override fixture if:
            - `non_early_closes_sample` is overridden by subclass.
- ALL sessions of `non_early_closes_sample` have the same local
close time (if sessions of `non_early_closes_sample` have
different local close times then the subclass should
instead check close times with a test defined on the
subclass).
Example returns:
pd.Timedelta(17, "H") # 17:00 local time
pd.Timedelta(hours=16, minutes=30) # 16:30 local time
"""
yield None
    # --- NO FIXTURE BELOW THIS LINE SHOULD BE OVERRIDDEN ON A SUBCLASS ---
def test_testbase_integrity(self):
"""Ensure integrity of TestBase.
        Raises error if a reserved fixture is overridden by the subclass.
"""
cls = self.__class__
for fixture in [
"test_testbase_integrity",
"name",
"has_24h_session",
"default_side",
"sides",
"answers",
"default_answers",
"calendars",
"default_calendar",
"calendars_with_answers",
"default_calendar_with_answers",
"one_minute",
"today",
"all_directions",
"valid_overrides",
"non_valid_overrides",
"daylight_savings_dates",
"late_opens",
"early_closes",
]:
if getattr(cls, fixture) != getattr(ExchangeCalendarTestBaseNew, fixture):
                raise RuntimeError(f"fixture '{fixture}' should not be overridden!")
# Base class fixtures
@pytest.fixture(scope="class")
def name(self, calendar_cls) -> abc.Iterator[str]:
"""Calendar name."""
yield calendar_cls.name
@pytest.fixture(scope="class")
def has_24h_session(self, name) -> abc.Iterator[bool]:
df = get_csv(name)
yield (df.market_close == df.market_open.shift(-1)).any()
@pytest.fixture(scope="class")
def default_side(self, has_24h_session) -> abc.Iterator[str]:
"""Default calendar side."""
if has_24h_session:
yield "left"
else:
yield "both"
@pytest.fixture(scope="class")
def sides(self, has_24h_session) -> abc.Iterator[list[str]]:
"""All valid sides options for calendar."""
if has_24h_session:
yield ["left", "right"]
else:
yield ["both", "left", "right", "neither"]
# Calendars and answers
@pytest.fixture(scope="class")
    def answers(self, name, sides) -> abc.Iterator[dict[str, Answers]]:
        """Dict of answers, key as side, value as corresponding answers."""
yield {side: Answers(name, side) for side in sides}
@pytest.fixture(scope="class")
def default_answers(self, answers, default_side) -> abc.Iterator[Answers]:
yield answers[default_side]
@pytest.fixture(scope="class")
def calendars(
self, calendar_cls, default_answers, sides
    ) -> abc.Iterator[dict[str, ExchangeCalendar]]:
        """Dict of calendars, key as side, value as corresponding calendar."""
start = default_answers.first_session
end = default_answers.last_session
yield {side: calendar_cls(start, end, side) for side in sides}
@pytest.fixture(scope="class")
def default_calendar(
self, calendars, default_side
) -> abc.Iterator[ExchangeCalendar]:
yield calendars[default_side]
@pytest.fixture(scope="class")
def calendars_with_answers(
self, calendars, answers, sides
) -> abc.Iterator[dict[str, tuple[ExchangeCalendar, Answers]]]:
"""Dict of calendars and answers, key as side."""
yield {side: (calendars[side], answers[side]) for side in sides}
@pytest.fixture(scope="class")
def default_calendar_with_answers(
self, calendars_with_answers, default_side
) -> abc.Iterator[tuple[ExchangeCalendar, Answers]]:
yield calendars_with_answers[default_side]
# General use fixtures.
@pytest.fixture(scope="class")
def one_minute(self) -> abc.Iterator[pd.Timedelta]:
yield pd.Timedelta(1, "T")
@pytest.fixture(scope="class")
    def today(self) -> abc.Iterator[pd.Timestamp]:
yield pd.Timestamp.now(tz="UTC").floor("D")
@pytest.fixture(scope="class", params=["next", "previous", "none"])
def all_directions(self, request) -> abc.Iterator[str]:
"""Parameterised fixture of direction to go if minute is not a trading minute"""
yield request.param
@pytest.fixture(scope="class")
    def valid_overrides(self) -> abc.Iterator[list[str]]:
        """Names of methods that can be overridden by a subclass."""
yield [
"name",
"bound_start",
"bound_end",
"_bound_start_error_msg",
"_bound_end_error_msg",
"default_start",
"default_end",
"tz",
"open_times",
"break_start_times",
"break_end_times",
"close_times",
"weekmask",
"open_offset",
"close_offset",
"regular_holidays",
"adhoc_holidays",
"special_opens",
"special_opens_adhoc",
"special_closes",
"special_closes_adhoc",
"special_weekmasks",
"special_offsets",
"special_offsets_adhoc",
]
@pytest.fixture(scope="class")
    def non_valid_overrides(self, valid_overrides) -> abc.Iterator[list[str]]:
        """Names of methods that cannot be overridden by a subclass."""
yield [
name
for name in dir(ExchangeCalendar)
if name not in valid_overrides
and not name.startswith("__")
and not name == "_abc_impl"
]
@pytest.fixture(scope="class")
def daylight_savings_dates(
self, default_calendar
) -> abc.Iterator[list[pd.Timestamp]]:
"""All dates in a specific year that mark the first day of a new
time regime.
        Yields empty list if timezone's UTC offset does not change.
Notes
-----
        NB Any test that employs this fixture assumes the accuracy of the
default calendar's `tz` property.
"""
cal = default_calendar
year = cal.last_session.year - 1
days = pd.date_range(str(year), str(year + 1), freq="D")
tzinfo = pytz.timezone(cal.tz.zone)
prev_offset = tzinfo.utcoffset(days[0])
dates = []
for day in days[1:]:
try:
offset = tzinfo.utcoffset(day)
except pytz.NonExistentTimeError:
offset = tzinfo.utcoffset(day + pd.Timedelta(1, "H"))
if offset != prev_offset:
dates.append(day)
if len(dates) == 2:
break
prev_offset = offset
yield dates
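    # NB A minimal sketch of the offset-change detection above, using a real
    # timezone purely as an example: the first day whose UTC offset differs
    # from the previous day's marks the start of a new time regime.
    #   >>> import pandas as pd, pytz
    #   >>> tz = pytz.timezone("Europe/London")
    #   >>> tz.utcoffset(pd.Timestamp("2021-03-27"))
    #   datetime.timedelta(0)
    #   >>> tz.utcoffset(pd.Timestamp("2021-03-29"))
    #   datetime.timedelta(seconds=3600)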
@pytest.fixture(scope="class")
def late_opens(
self, default_calendar_with_answers
) -> abc.Iterator[pd.DatetimeIndex]:
"""Calendar sessions with a late open.
Late opens evaluated as those that are later than the prevailing
open time as defined by `default_calendar.open_times`.
Notes
-----
        NB Any test that employs this fixture ASSUMES the accuracy of the
following calendar properties:
`open_times`
`tz`
"""
cal, ans = default_calendar_with_answers
d = dict(cal.open_times)
d[pd.Timestamp.min] = d.pop(None)
s = pd.Series(d).sort_index(ascending=False)
date_to = pd.Timestamp.max
dtis: list[pd.DatetimeIndex] = []
# For each period over which a distinct open time prevails...
for date_from, time_ in s.iteritems():
opens = ans.opens.tz_convert(None)[date_from:date_to] # index to tz-naive
sessions = opens.index
td = pd.Timedelta(hours=time_.hour, minutes=time_.minute)
# Evaluate session opens as if were all normal open time.
normal_opens = sessions + pd.Timedelta(cal.open_offset, "D") + td
normal_opens_utc = normal_opens.tz_localize(cal.tz).tz_convert("UTC")
# Append those sessions with opens (according to answers) later than
# what would be normal.
dtis.append(sessions[opens > normal_opens_utc])
if date_from != pd.Timestamp.min:
date_to = date_from - pd.Timedelta(1, "D")
late_opens = dtis[0].union_many(dtis[1:]).tz_localize("UTC")
yield late_opens
@pytest.fixture(scope="class")
def early_closes(
self, default_calendar_with_answers
    ) -> abc.Iterator[pd.DatetimeIndex]:
        """Calendar sessions with an early close.
Early closes evaluated as those that are earlier than the
prevailing close time as defined by `default_calendar.close_times`.
Notes
-----
        NB Any test that employs this fixture ASSUMES the accuracy of the
following calendar properties:
`close_times`
`tz`
"""
cal, ans = default_calendar_with_answers
d = dict(cal.close_times)
d[pd.Timestamp.min] = d.pop(None)
s = pd.Series(d).sort_index(ascending=False)
date_to = pd.Timestamp.max
dtis: list[pd.DatetimeIndex] = []
for date_from, time_ in s.iteritems():
closes = ans.closes.tz_convert(None)[date_from:date_to] # index to tz-naive
sessions = closes.index
td = pd.Timedelta(hours=time_.hour, minutes=time_.minute)
normal_closes = sessions + pd.Timedelta(cal.close_offset, "D") + td
normal_closes_utc = normal_closes.tz_localize(cal.tz).tz_convert("UTC")
dtis.append(sessions[closes < normal_closes_utc])
if date_from != pd.Timestamp.min:
date_to = date_from - pd.Timedelta(1, "D")
early_closes = dtis[0].union_many(dtis[1:]).tz_localize("UTC")
yield early_closes
# --- TESTS ---
# Tests for calendar definition and construction methods.
def test_base_integrity(self, calendar_cls, non_valid_overrides):
cls = calendar_cls
for name in non_valid_overrides:
assert getattr(cls, name) == getattr(ExchangeCalendar, name)
def test_calculated_against_csv(self, default_calendar_with_answers):
calendar, ans = default_calendar_with_answers
tm.assert_index_equal(calendar.schedule.index, ans.sessions)
def test_start_end(self, default_answers, calendar_cls):
ans = default_answers
sessions = ans.session_blocks["normal"]
start, end = sessions[0], sessions[-1]
cal = calendar_cls(start, end)
assert cal.first_session == start
assert cal.last_session == end
if len(ans.non_sessions) > 1:
# start and end as non-sessions
(start, end), sessions = ans.sessions_range_defined_by_non_sessions
cal = calendar_cls(start, end)
assert cal.first_session == sessions[0]
assert cal.last_session == sessions[-1]
def test_invalid_input(self, calendar_cls, sides, default_answers, name):
ans = default_answers
invalid_side = "both" if "both" not in sides else "invalid_side"
error_msg = f"`side` must be in {sides} although received as {invalid_side}."
with pytest.raises(ValueError, match=re.escape(error_msg)):
calendar_cls(side=invalid_side)
start = ans.sessions[1]
end_same_as_start = ans.sessions[1]
error_msg = (
"`start` must be earlier than `end` although `start` parsed as"
f" '{start}' and `end` as '{end_same_as_start}'."
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
calendar_cls(start=start, end=end_same_as_start)
end_before_start = ans.sessions[0]
error_msg = (
"`start` must be earlier than `end` although `start` parsed as"
f" '{start}' and `end` as '{end_before_start}'."
)
with pytest.raises(ValueError, match=re.escape(error_msg)):
calendar_cls(start=start, end=end_before_start)
if len(ans.non_sessions) > 1:
start, end = ans.non_sessions_range
error_msg = (
f"The requested ExchangeCalendar, {name.upper()}, cannot be created as"
f" there would be no sessions between the requested `start` ('{start}')"
f" and `end` ('{end}') dates."
)
with pytest.raises(NoSessionsError, match=re.escape(error_msg)):
calendar_cls(start=start, end=end)
def test_bound_start(self, calendar_cls, start_bound, today):
if start_bound is not None:
cal = calendar_cls(start_bound, today)
assert isinstance(cal, ExchangeCalendar)
start = start_bound - pd.DateOffset(days=1)
with pytest.raises(ValueError, match=re.escape(f"{start}")):
calendar_cls(start, today)
else:
# verify no bound imposed
cal = calendar_cls(pd.Timestamp("1902-01-01", tz="UTC"), today)
assert isinstance(cal, ExchangeCalendar)
def test_bound_end(self, calendar_cls, end_bound, today):
if end_bound is not None:
cal = calendar_cls(today, end_bound)
assert isinstance(cal, ExchangeCalendar)
end = end_bound + pd.DateOffset(days=1)
with pytest.raises(ValueError, match=re.escape(f"{end}")):
calendar_cls(today, end)
else:
# verify no bound imposed
cal = calendar_cls(today, pd.Timestamp("2050-01-01", tz="UTC"))
assert isinstance(cal, ExchangeCalendar)
def test_sanity_check_session_lengths(self, default_calendar, max_session_hours):
cal = default_calendar
cal_max_secs = (cal.market_closes_nanos - cal.market_opens_nanos).max()
assert cal_max_secs / 3600000000000 <= max_session_hours
def test_adhoc_holidays_specification(self, default_calendar):
"""adhoc holidays should be tz-naive (#33, #39)."""
dti = pd.DatetimeIndex(default_calendar.adhoc_holidays)
assert dti.tz is None
def test_daylight_savings(self, default_calendar, daylight_savings_dates):
# make sure there's no weirdness around calculating the next day's
# session's open time.
if not daylight_savings_dates:
pytest.skip()
cal = default_calendar
d = dict(cal.open_times)
d[pd.Timestamp.min] = d.pop(None)
open_times = pd.Series(d)
for date in daylight_savings_dates:
# where `next day` is first session of new daylight savings regime
next_day = cal.date_to_session_label(T(date), "next")
open_date = next_day + Timedelta(days=cal.open_offset)
the_open = cal.schedule.loc[next_day].market_open
localized_open = the_open.tz_localize(UTC).tz_convert(cal.tz)
assert open_date.year == localized_open.year
assert open_date.month == localized_open.month
assert open_date.day == localized_open.day
open_ix = open_times.index.searchsorted(date, side="right")
if open_ix == len(open_times):
open_ix -= 1
open_time = open_times.iloc[open_ix]
assert open_time.hour == localized_open.hour
assert open_time.minute == localized_open.minute
# Tests for properties covering all sessions.
def test_all_sessions(self, default_calendar_with_answers):
cal, ans = default_calendar_with_answers
ans_sessions = ans.sessions
cal_sessions = cal.all_sessions
tm.assert_index_equal(ans_sessions, cal_sessions)
def test_opens_closes_break_starts_ends(self, default_calendar_with_answers):
"""Test `opens`, `closes, `break_starts` and `break_ends` properties."""
cal, ans = default_calendar_with_answers
for prop in (
"opens",
"closes",
"break_starts",
"break_ends",
):
ans_series = getattr(ans, prop).dt.tz_convert(None)
cal_series = getattr(cal, prop)
tm.assert_series_equal(ans_series, cal_series, check_freq=False)
def test_minutes_properties(self, all_calendars_with_answers):
"""Test minute properties.
Tests following calendar properties:
all_first_minutes
all_last_minutes
all_last_am_minutes
all_first_pm_minutes
"""
cal, ans = all_calendars_with_answers
for prop in (
"first_minutes",
"last_minutes",
"last_am_minutes",
"first_pm_minutes",
):
ans_minutes = getattr(ans, prop)
cal_minutes = getattr(cal, "all_" + prop)
| tm.assert_series_equal(ans_minutes, cal_minutes, check_freq=False) | pandas.testing.assert_series_equal |
import copy
import io
import json
import os
import string
from collections import OrderedDict
from datetime import datetime
from unittest import TestCase
import numpy as np
import pandas as pd
import pytest
import pytz
from hypothesis import (
given,
settings,
)
from hypothesis.strategies import (
datetimes,
integers,
fixed_dictionaries,
floats,
just,
lists,
sampled_from,
text,
)
from pandas.testing import assert_frame_equal
from tempfile import NamedTemporaryFile
from oasislmf.utils.data import (
factorize_array,
factorize_ndarray,
fast_zip_arrays,
get_dataframe,
get_timestamp,
get_utctimestamp,
get_location_df,
)
from oasislmf.utils.defaults import (
get_loc_dtypes,
)
from oasislmf.utils.exceptions import OasisException
def arrays_are_identical(expected, result):
    try:
        np.testing.assert_array_equal(expected, result)
    except AssertionError:
        return False
    return True
class TestFactorizeArrays(TestCase):
@settings(max_examples=10)
@given(
num_chars=integers(min_value=2, max_value=len(string.ascii_lowercase + string.digits)),
str_len=integers(min_value=2, max_value=100),
num_strs=integers(min_value=10, max_value=100)
)
def test_factorize_1darray(self, num_chars, str_len, num_strs):
alphabet = np.random.choice(list(string.ascii_lowercase + string.digits), size=num_chars)
strings = [''.join([np.random.choice(alphabet) for i in range(str_len)]) for j in range(num_strs)]
expected_groups = list(OrderedDict({s: s for s in strings}))
expected_enum = np.array([expected_groups.index(s) + 1 for s in strings])
result_enum, result_groups = factorize_array(strings)
self.assertTrue(arrays_are_identical(expected_groups, result_groups))
self.assertTrue(arrays_are_identical(expected_enum, result_enum))
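    # NB A minimal sketch of the contract asserted above, with toy data: groups
    # keep first-appearance order and the enumeration is 1-based, so for
    # ['b', 'a', 'b', 'c'] the expected groups are ['b', 'a', 'c'] and the
    # expected enumeration is [1, 2, 1, 3], e.g.
    #   >>> enum, groups = factorize_array(['b', 'a', 'b', 'c'])
    #   >>> arrays_are_identical([1, 2, 1, 3], enum)
    #   True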
@settings(max_examples=1)
@given(
num_chars=integers(min_value=2, max_value=len(string.ascii_lowercase + string.digits)),
str_len=integers(min_value=2, max_value=100),
rows=integers(min_value=10, max_value=100),
cols=integers(min_value=10, max_value=100)
)
def test_factorize_ndarray__no_row_or_col_indices_provided__raises_oasis_exception(self, num_chars, str_len, rows, cols):
alphabet = np.random.choice(list(string.ascii_lowercase + string.digits), size=num_chars)
strings = [''.join([np.random.choice(alphabet) for i in range(str_len)]) for j in range(rows * cols)]
ndarr = np.random.choice(strings, (rows, cols))
with self.assertRaises(OasisException):
factorize_ndarray(ndarr)
@settings(max_examples=10, deadline=None)
@given(
num_chars=integers(min_value=2, max_value=len(string.ascii_lowercase + string.digits)),
str_len=integers(min_value=2, max_value=100),
rows=integers(min_value=10, max_value=100),
cols=integers(min_value=10, max_value=100),
num_row_idxs=integers(min_value=2, max_value=10)
)
def test_factorize_ndarray__by_row_idxs(self, num_chars, str_len, rows, cols, num_row_idxs):
alphabet = np.random.choice(list(string.ascii_lowercase + string.digits), size=num_chars)
strings = [''.join([np.random.choice(alphabet) for i in range(str_len)]) for j in range(rows * cols)]
ndarr = np.random.choice(strings, (rows, cols))
row_idxs = np.random.choice(range(rows), num_row_idxs, replace=False).tolist()
zipped = list(zip(*(ndarr[i, :] for i in row_idxs)))
groups = list(OrderedDict({x: x for x in zipped}))
expected_groups = np.empty(len(groups), dtype=object)
expected_groups[:] = groups
expected_enum = np.array([groups.index(x) + 1 for x in zipped])
result_enum, result_groups = factorize_ndarray(ndarr, row_idxs=row_idxs)
self.assertTrue(arrays_are_identical(expected_groups, result_groups))
self.assertTrue(arrays_are_identical(expected_enum, result_enum))
@settings(max_examples=10, deadline=None)
@given(
num_chars=integers(min_value=2, max_value=len(string.ascii_lowercase + string.digits)),
str_len=integers(min_value=2, max_value=100),
rows=integers(min_value=10, max_value=100),
cols=integers(min_value=10, max_value=100),
num_col_idxs=integers(min_value=2, max_value=10)
)
def test_factorize_ndarray__by_col_idxs(self, num_chars, str_len, rows, cols, num_col_idxs):
alphabet = np.random.choice(list(string.ascii_lowercase + string.digits), size=num_chars)
strings = [''.join([np.random.choice(alphabet) for i in range(str_len)]) for j in range(rows * cols)]
ndarr = np.random.choice(strings, (rows, cols))
col_idxs = np.random.choice(range(cols), num_col_idxs, replace=False).tolist()
zipped = list(zip(*(ndarr[:, i] for i in col_idxs)))
groups = list(OrderedDict({x: x for x in zipped}))
expected_groups = np.empty(len(groups), dtype=object)
expected_groups[:] = groups
expected_enum = np.array([groups.index(x) + 1 for x in zipped])
result_enum, result_groups = factorize_ndarray(ndarr, col_idxs=col_idxs)
self.assertTrue(arrays_are_identical(expected_groups, result_groups))
self.assertTrue(arrays_are_identical(expected_enum, result_enum))
class TestFastZipArrays(TestCase):
@settings(max_examples=10)
@given(
array_len=integers(min_value=10, max_value=100),
num_arrays=integers(2, 100)
)
def test_fast_zip_arrays(self, array_len, num_arrays):
arrays = np.random.randint(1, 10**6, (num_arrays, array_len))
li = list(zip(*arrays))
zipped = np.empty(len(li), dtype=object)
zipped[:] = li
result = fast_zip_arrays(*arrays)
self.assertTrue(arrays_are_identical(zipped, result))
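# NB Per the test above, `fast_zip_arrays` is expected to be equivalent to
# zipping the arrays element-wise into a 1-d object ndarray of tuples, i.e.
# the same structure the plain-Python construction gives:
#   >>> a, b = np.array([1, 2, 3]), np.array([4, 6, 8])
#   >>> expected = np.empty(3, dtype=object)
#   >>> expected[:] = list(zip(a, b))  # [(1, 4), (2, 6), (3, 8)]
#   >>> arrays_are_identical(expected, fast_zip_arrays(a, b))
#   True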
def dataframes_are_identical(df1, df2):
try:
assert_frame_equal(df1, df2)
except AssertionError:
return False
return True
class TestGetDataframe(TestCase):
def test_get_dataframe__no_src_fp_or_buf_or_data_provided__oasis_exception_is_raised(self):
with self.assertRaises(OasisException):
get_dataframe(src_fp=None, src_buf=None, src_data=None)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'int_col': integers(min_value=1, max_value=10),
'float_col': floats(min_value=0.0, max_value=10.0),
'bool_col': sampled_from([True, False]),
'null_col': just(np.nan)
}),
min_size=10,
max_size=10
)
)
def test_get_dataframe__from_csv_file__use_default_options(self, data):
fp = NamedTemporaryFile('w', delete=False)
try:
df = pd.DataFrame(data)
df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False)
fp.close()
expected = df.copy(deep=True)
result = get_dataframe(src_fp=fp.name)
self.assertTrue(dataframes_are_identical(result, expected))
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'STR_COL': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'int_col': integers(min_value=1, max_value=10),
'FloatCol': floats(min_value=0.0, max_value=10.0),
'boolCol': sampled_from([True, False]),
'null_col': just(np.nan)
}),
min_size=10,
max_size=10
)
)
def test_get_dataframe__from_csv_file_with_mixed_case_cols__use_default_options(self, data):
fp = NamedTemporaryFile('w', delete=False)
try:
df = pd.DataFrame(data)
df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False)
fp.close()
expected = df.copy(deep=True)
expected.columns = expected.columns.str.lower()
result = get_dataframe(src_fp=fp.name)
self.assertTrue(dataframes_are_identical(result, expected))
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'int_col': integers(min_value=1, max_value=10),
'float_col': floats(min_value=0.0, max_value=10.0),
'bool_col': sampled_from([True, False]),
'null_col': just(np.nan)
}),
min_size=10,
max_size=10
),
dtypes=fixed_dictionaries({
'int_col': sampled_from(['int32', 'int64']),
'float_col': sampled_from(['float32', 'float64'])
})
)
def test_get_dataframe__from_csv_file__set_col_dtypes_option_and_use_defaults_for_all_other_options(self, data, dtypes):
fp = NamedTemporaryFile('w', delete=False)
try:
df = pd.DataFrame(data)
for col, dtype in dtypes.items():
df[col] = df[col].astype(dtype)
df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False)
fp.close()
expected = pd.read_csv(fp.name, dtype=dtypes)
result = get_dataframe(src_fp=fp.name, col_dtypes=dtypes)
self.assertTrue(dataframes_are_identical(result, expected))
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'INT_COL': integers(min_value=1, max_value=10),
'FloatCol': floats(min_value=0.0, max_value=10.0),
'boolCol': sampled_from([True, False]),
'null_col': just(np.nan)
}),
min_size=10,
max_size=10
),
dtypes=fixed_dictionaries({
'INT_COL': sampled_from(['int32', 'int64']),
'FloatCol': sampled_from(['float32', 'float64'])
})
)
def test_get_dataframe__from_csv_file_with_mixed_case_cols__set_col_dtypes_option_and_use_defaults_for_all_other_options(self, data, dtypes):
fp = NamedTemporaryFile('w', delete=False)
try:
df = pd.DataFrame(data)
for col, dtype in dtypes.items():
df[col] = df[col].astype(dtype)
df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False)
fp.close()
expected = pd.read_csv(fp.name, dtype=dtypes)
expected.columns = expected.columns.str.lower()
result = get_dataframe(src_fp=fp.name, col_dtypes=dtypes)
self.assertTrue(dataframes_are_identical(result, expected))
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(empty_data_err_msg=text(min_size=1, max_size=10, alphabet=string.ascii_lowercase))
def test_get_dataframe__from_empty_csv_file__set_empty_data_err_msg_and_defaults_for_all_other_options__oasis_exception_is_raised_with_empty_data_err_msg(self, empty_data_err_msg):
fp = NamedTemporaryFile('w', delete=False)
try:
df = pd.DataFrame()
df.to_csv(path_or_buf=fp)
fp.close()
with self.assertRaises(OasisException):
try:
get_dataframe(src_fp=fp.name, empty_data_error_msg=empty_data_err_msg)
except OasisException as e:
self.assertEqual(str(e), empty_data_err_msg)
raise e
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'str_col': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'int_col': integers(min_value=1, max_value=10),
'float_col': floats(min_value=0.0, max_value=10.0),
'bool_col': sampled_from([True, False]),
'null_col': just(np.nan)
}),
min_size=10,
max_size=10
),
required=just(
np.random.choice(
['str_col', 'int_col', 'float_col', 'bool_col', 'null_col'],
np.random.choice(range(1, 6)),
replace=False
).tolist()
)
)
def test_get_dataframe__from_csv_file__set_required_cols_option_and_use_defaults_for_all_other_options(self, data, required):
fp = NamedTemporaryFile('w', delete=False)
try:
df = pd.DataFrame(data)
df.to_csv(path_or_buf=fp, columns=df.columns, encoding='utf-8', index=False)
fp.close()
expected = df.copy(deep=True)
result = get_dataframe(
src_fp=fp.name,
required_cols=required
)
self.assertTrue(dataframes_are_identical(result, expected))
finally:
os.remove(fp.name)
@settings(max_examples=10)
@given(
data=lists(
fixed_dictionaries({
'STR_COL': text(min_size=1, max_size=10, alphabet=string.ascii_lowercase),
'int_col': integers(min_value=1, max_value=10),
'FloatCol': floats(min_value=0.0, max_value=10.0),
'boolCol': sampled_from([True, False]),
'null_col': just(np.nan)
}),
min_size=10,
max_size=10
),
required=just(
np.random.choice(
['STR_COL', 'int_col', 'FloatCol', 'boolCol', 'null_col'],
np.random.choice(range(1, 6)),
replace=False
).tolist()
)
)
def test_get_dataframe__from_csv_file_with_mixed_case_cols__set_required_cols_option_and_use_defaults_for_all_other_options(self, data, required):
fp = NamedTemporaryFile('w', delete=False)
try:
            df = pd.DataFrame(data)
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
"""
cd /Users/brunoflaven/Documents/02_copy/_000_IA_bruno_light/_my_article_python-explorations/git_repo_python_explorations_nlp/article_1_keyword_extraction_nlp/
python 09_article_1_keyword_extraction_nlp.py
"""
## settings
path="/Users/brunoflaven/Documents/02_copy/_000_IA_bruno_light/_my_article_python-explorations/git_repo_python_explorations_nlp/article_1_keyword_extraction_nlp/"
data_file="/Users/brunoflaven/Documents/02_copy/_000_IA_bruno_light/_my_article_python-explorations/git_repo_python_explorations_nlp/article_1_keyword_extraction_nlp/userfeedback-data-small.tsv"
stopword_file="/Users/brunoflaven/Documents/02_copy/_000_IA_bruno_light/_my_article_python-explorations/git_repo_python_explorations_nlp/article_1_keyword_extraction_nlp/custom-stopwords.txt"
datacol="feedback"
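# datacol names the free-text column of the TSV file that is analysed below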
## 1. Load dataset and identify text fields to analyze
# Change current directory to desktop
# This isn't necessary for any of the analysis, but makes it easy to see and find file outputs
# Change this to whatever directory works for you if your desktop is a horror show
import os
# usr = os.getlogin()
# os.chdir('/Users/'+usr+'/Desktop')
os.chdir(path)
cwd = os.getcwd()
print('Working in ', cwd, '\n')
# List TSV files on desktop and select the file to analyze
# CSVs will also work, but TSV is better for free text entry (people use commas in comments)
files = [f for f in os.listdir('.') if os.path.isfile(f)]
print('---\nTSV files currently on your desktop: \n')
for f in files:
if '.tsv' in f:
print(f)
# data_file = input('\n---\nWhich file would like to analyze? \n\n')
# data_file="/Users/brunoflaven/Documents/02_copy/_000_IA_bruno_light/article_1_keyword-extraction-nlp/nlp-text-analysis-master/userfeedback-data.tsv"
#/Users/brunoflaven/Documents/02_copy/_000_IA_bruno_light/article_1_keyword-extraction-nlp/nlp-text-analysis-master/rfi-data.tsv
# /Users/brunoflaven/Documents/02_copy/_000_IA_bruno_light/article_1_keyword-extraction-nlp/nlp-text-analysis-master/userfeedback-data.tsv
# userfeedback-data
# Prefix output files with the TSV filename prefix; these will be saved to your desktop
file_prefix = data_file.split('.')
file_prefix = file_prefix[0]+'_'
print('\nFile exports will be prefixed with:', file_prefix)
# Load the dataset and view sample data and column headers
import pandas
dataset = pandas.read_csv(data_file, delimiter = '\t')
dataset.head()
# Select the content column to analyze
# datacol = input('\n---\nWhich column contains the text data you would like to analyze?\n\n')
# Select the column feedback
print ("\n\n --- / result 1 --- ")
# View 10 most common words prior to text pre-processing
freq = pandas.Series(' '.join(map(str, dataset[datacol])).split()).value_counts()[:10]
# freq
print ("\n\n --- / result 2 --- ")
# View 10 least common words prior to text pre-processing
freq1 = pandas.Series(' '.join(map(str, dataset[datacol])).split()).value_counts()[-10:]
# freq1
## 2. Create a list of stop words
#
## Import libraries for text preprocessing
import re
import nltk
# You only need to download these resources once. After you run this
# the first time--or if you know you already have these installed--
# you can comment these two lines out (with a #)
# nltk.download('stopwords')
# nltk.download('wordnet')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import RegexpTokenizer
from nltk.stem.wordnet import WordNetLemmatizer
# Create a list of stop words from nltk
stop_words = set(stopwords.words("english"))
print ("\n\n --- / result 3 --- ")
print(sorted(stop_words))
# Load a set of custom stop words from a text file (one stopword per line)
csw = set(line.strip() for line in open(stopword_file))
csw = [sw.lower() for sw in csw]
print ("\n\n --- / result 4 --- ")
print(sorted(csw))
# Combine custom stop words with stop_words list
stop_words = stop_words.union(csw)
print ("\n\n --- / result 5 --- ")
print(sorted(stop_words))
## 3. Pre-process the dataset to get a cleaned, normalized text corpus
# Pre-process dataset to get a cleaned and normalised text corpus
corpus = []
dataset['word_count'] = dataset[datacol].apply(lambda x: len(str(x).split(" ")))
ds_count = len(dataset.word_count)
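# The loop below cleans each document: punctuation, tags and digits are removed,
# text is lower-cased, tokens are lemmatised and stop words are dropped.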
for i in range(0, ds_count):
# Remove punctuation
text = re.sub('[^a-zA-Z]', ' ', str(dataset[datacol][i]))
# Convert to lowercase
text = text.lower()
    # Replace HTML-like tags with a placeholder token
    text = re.sub("</?.*?>", " <> ", text)
# Remove special characters and digits
text=re.sub("(\\d|\\W)+"," ",text)
# Convert to list from string
text = text.split()
    # Stemming: a PorterStemmer is instantiated here but never applied below;
    # only the WordNet lemmatiser is used on the tokens
    ps = PorterStemmer()
# Lemmatisation
lem = WordNetLemmatizer()
    text = [lem.lemmatize(word) for word in text if word not in stop_words]
text = " ".join(text)
corpus.append(text)
#View sample pre-processed corpus item
print ("\n\n --- / result 6 --- ")
corpus[10]
# Generate word cloud
from os import path
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
# %matplotlib inline
wordcloud = WordCloud(
background_color='white',
stopwords=stop_words,
max_words=100,
max_font_size=50,
random_state=42
).generate(str(corpus))
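# WordCloud.generate expects a single string, so the whole corpus list is passed
# through str() here rather than being joined document by document.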
print(wordcloud)
fig = plt.figure(1)
plt.imshow(wordcloud)
plt.axis('off')
# plt.show()
fig.savefig(file_prefix + "wordcloud_only.png", dpi=900)
# Tokenize the text and build a vocabulary of known words
from sklearn.feature_extraction.text import CountVectorizer
import re
cv=CountVectorizer(max_df=0.8,stop_words=stop_words, max_features=10000, ngram_range=(1,3))
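# max_df=0.8 ignores terms appearing in more than 80% of documents, max_features
# caps the vocabulary at 10,000 terms, and ngram_range=(1,3) keeps unigrams,
# bigrams and trigrams.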
X=cv.fit_transform(corpus)
# Show a sample of the learned vocabulary (the first 10 terms)
print(list(cv.vocabulary_.keys())[:10])
## 4. Extract most frequently occurring keywords and n-grams
# View the most frequently occurring keywords
def get_top_n_words(corpus, n=None):
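    """Return the n most frequent single words in the corpus as (word, count) pairs."""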
vec = CountVectorizer().fit(corpus)
bag_of_words = vec.transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [(word, sum_words[0, idx]) for word, idx in
vec.vocabulary_.items()]
words_freq =sorted(words_freq, key = lambda x: x[1],
reverse=True)
return words_freq[:n]
print ("\n\n --- / result 7 --- ")
# Convert most freq words to dataframe for plotting bar plot, save as CSV
top_words = get_top_n_words(corpus, n=20)
top_df = pandas.DataFrame(top_words)
top_df.columns=["Keyword", "Frequency"]
print(top_df)
top_df.to_csv(file_prefix + '_top_words.csv')
# Barplot of most freq words
import seaborn as sns
sns.set(rc={'figure.figsize':(13,8)})
g = sns.barplot(x="Keyword", y="Frequency", data=top_df, palette="Blues_d")
g.set_xticklabels(g.get_xticklabels(), rotation=45)
g.figure.savefig(file_prefix + "_keyword.png", bbox_inches = "tight")
print ("\n\n --- / result 8 --- ")
# Most frequently occurring bigrams
def get_top_n2_words(corpus, n=None):
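    """Return the n most frequent bigrams in the corpus as (bigram, count) pairs."""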
vec1 = CountVectorizer(ngram_range=(2,2),
max_features=2000).fit(corpus)
bag_of_words = vec1.transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [(word, sum_words[0, idx]) for word, idx in
vec1.vocabulary_.items()]
words_freq =sorted(words_freq, key = lambda x: x[1],
reverse=True)
return words_freq[:n]
# Convert most freq bigrams to dataframe for plotting bar plot, save as CSV
top2_words = get_top_n2_words(corpus, n=20)
top2_df = pandas.DataFrame(top2_words)
import time
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, learning_curve, ShuffleSplit
from sklearn import metrics
from sklearn.naive_bayes import GaussianNB, MultinomialNB
import paho.mqtt.client as mqtt
# df_raw_normal = pd.read_excel('dataRaw.xlsx', 'nomal')
# df_raw_normal['label'] = 0
# df_raw_moveLeftRight = pd.read_excel('dataRaw.xlsx', 'moveLeftRight')
# df_raw_moveLeftRight['label'] = 1
# df_raw_moveUpDown = pd.read_excel('dataRaw.xlsx', 'moveUpDown')
# df_raw_moveUpDown['label'] = 2
# df_raw_goOnAndBack = pd.read_excel('dataRaw.xlsx', 'GoOnAndBack')
# df_raw_goOnAndBack['label'] = 3
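# Load the recorded motion samples from CSV and stack them into one training
# frame below; the Excel-based loading above is kept commented out for reference.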
df_raw_0 = pd.read_csv('motionData/data_0.csv')
df_raw_1 = pd.read_csv('motionData/data_1.csv')
df_raw_1_1 = pd.read_csv('motionData/data_1_1.csv')
df_raw = pd.concat([df_raw_0, df_raw_1, df_raw_1_1])
# Setup
import pandas as pd
# Load All Files
### Get filenames from repo
# We first retrieve the filenames of all files listed in the repository:
import requests
user = "ard-data"
repo = "2020-rki-archive"
url = "https://api.github.com/repos/{}/{}/git/trees/master?recursive=1".format(user, repo)
r = requests.get(url)
res = r.json()
# We only keep the files in the folder data/2_parsed
files_parsed = [file["path"] for file in res["tree"] if file["path"].startswith("data/2_parsed/")]
# the first 5 files in the folder data/2_parsed
files_parsed[:5]
files_parsed[-1]
### Retrieve latest file from data/2_parsed
df = pd.read_json('https://github.com/ard-data/2020-rki-archive/raw/master/' + files_parsed[-1], compression='bz2')
df.head()
# Cleaning
# If the column 'DatenstandISO' is missing, we infer it from 'Datenstand'.
if 'DatenstandISO' not in df.columns:
df['DatenstandISO'] = pd.to_datetime(df.Datenstand.str.replace('Uhr', '')).astype(str)
# Aggregation on state level (Bundesländer)
# compute the sum for each date within each state
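# In the RKI data model, NeuerTodesfall >= 0 should select exactly the records
# whose deaths count towards the current publication's cumulative total
# (negative values mark retracted records or records without a death).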
df_agg = df[df.NeuerTodesfall >= 0].groupby(['DatenstandISO', 'Bundesland'])['AnzahlTodesfall'].sum().reset_index()
df_agg
### Add FIPS region codes
# We add the corresponding region codes for each state given by https://en.wikipedia.org/wiki/List_of_FIPS_region_codes_(G–I)#GM:_Germany.
state_names = ['Baden-Württemberg', 'Bayern', 'Bremen', 'Hamburg', 'Hessen', 'Niedersachsen', 'Nordrhein-Westfalen', 'Rheinland-Pfalz',
'Saarland', 'Schleswig-Holstein', 'Brandenburg', 'Mecklenburg-Vorpommern', 'Sachsen', 'Sachsen-Anhalt', 'Thüringen', 'Berlin']
gm = ['GM0' + str(i) for i in range(1, 10)] + ['GM' + str(i) for i in range(10, 17)]
fips_codes = pd.DataFrame({'Bundesland':state_names, 'location':gm})
# add fips codes to dataframe with aggregated data
df_agg = df_agg.merge(fips_codes, left_on='Bundesland', right_on='Bundesland')
### Change location_name to English names
fips_english = pd.read_csv('../../template/base_germany.csv')
df_agg = df_agg.merge(fips_english, left_on='location', right_on='V1')
### Rename columns and sort by date and location
df_agg = df_agg.rename(columns={'DatenstandISO': 'date', 'AnzahlTodesfall': 'value', 'V2':'location_name'})[
['date', 'location', 'location_name', 'value']].sort_values(['date', 'location']).reset_index(drop=True)
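# sum over all Bundesländer to obtain the country-wide cumulative total for Germany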
df_germany = df_agg.groupby('date')['value'].sum().reset_index()
df_germany['location'] = 'GM'
df_germany['location_name'] = 'Germany'
# add data for Germany to dataframe with states
df_cum = pd.concat([df_agg, df_germany]).sort_values(['date', 'location']).reset_index(drop=True)
# Load Current Dataframe
df_all = pd.read_csv('../../data-truth/RKI/truth_cum_deaths.csv')
# Add New Dataframe
df_cum = pd.concat([df_all, df_cum])
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over so nothing defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
        # case and flags provided together with a compiled regex are not
        # allowed and raise a ValueError
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL> some text <EMAIL>',
'<EMAIL> some text c@d.<EMAIL> and <EMAIL>',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# index should return the same result as the default index without name
# thus index.name doesn't affect to the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
        # 0xFF13: ３ Em 3
        values = ['A', '3', u'¼', u'★', u'፸', u'３', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
        unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'３', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
        values = ['A', np.nan, u'¼', u'★', np.nan, u'３', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None,
1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar='X')
expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
        # If fillchar is not a character, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.center(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.ljust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.rjust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.center(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.ljust(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.rjust(5, fillchar=1)
def test_zfill(self):
values = Series(['1', '22', 'aaa', '333', '45678'])
result = values.str.zfill(5)
expected = Series(['00001', '00022', '00aaa', '00333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(5) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.zfill(3)
expected = Series(['001', '022', 'aaa', '333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(3) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
values = Series(['1', np.nan, 'aaa', np.nan, '45678'])
result = values.str.zfill(5)
expected = Series(['00001', np.nan, '00aaa', np.nan, '45678'])
tm.assert_series_equal(result, expected)
def test_split(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.split('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.split('__')
tm.assert_series_equal(result, exp)
result = values.str.split('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.split('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.split('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.split('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.split('[,_]')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
def test_rsplit(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.rsplit('__')
tm.assert_series_equal(result, exp)
result = values.str.rsplit('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.rsplit('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.rsplit('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.rsplit('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.rsplit('[,_]')
exp = Series([[u('a,b_c')], [u('c_d,e')], NA, [u('f,g,h')]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_', n=1)
exp = Series([['a_b', 'c'], ['c_d', 'e'], NA, ['f_g', 'h']])
tm.assert_series_equal(result, exp)
def test_split_noargs(self):
# #1859
        s = Series(['Wes McKinney', 'Travis  Oliphant'])
result = s.str.split()
expected = ['Travis', 'Oliphant']
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
def test_split_maxsplit(self):
# re.split 0, str.split -1
s = Series(['bd asdf jfg', 'kjasdflqw asdfnfk'])
result = s.str.split(n=-1)
xp = s.str.split()
tm.assert_series_equal(result, xp)
result = s.str.split(n=0)
tm.assert_series_equal(result, xp)
xp = s.str.split('asdf')
result = s.str.split('asdf', n=0)
tm.assert_series_equal(result, xp)
result = s.str.split('asdf', n=-1)
tm.assert_series_equal(result, xp)
def test_split_no_pat_with_nonzero_n(self):
s = Series(['split once', 'split once too!'])
result = s.str.split(n=1)
expected = Series({0: ['split', 'once'], 1: ['split', 'once too!']})
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_unequal_splits', 'one_of_these_things_is_not'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'one'],
1: ['unequal', 'of'],
2: ['splits', 'these'],
3: [NA, 'things'],
4: [NA, 'is'],
5: [NA, 'not']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
with tm.assert_raises_regex(ValueError, "expand must be"):
s.str.split('_', expand="not_a_boolean")
def test_split_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.split('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_unequal_splits', 'one_of_these_things_is_not'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'unequal', 'splits', NA, NA, NA
), ('one', 'of', 'these', 'things',
'is', 'not')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with tm.assert_raises_regex(ValueError, "expand must be"):
idx.str.split('_', expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=2)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=1)
exp = DataFrame({0: ['some_equal', 'with_no'], 1: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.rsplit('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True, n=1)
exp = MultiIndex.from_tuples([('some_equal', 'splits'),
('with_no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_nan_expand(self):
# gh-18450
s = Series(["foo,bar,baz", NA])
result = s.str.split(",", expand=True)
exp = DataFrame([["foo", "bar", "baz"], [NA, NA, NA]])
tm.assert_frame_equal(result, exp)
# check that these are actually np.nan and not None
# TODO see GH 18463
# tm.assert_frame_equal does not differentiate
assert all(np.isnan(x) for x in result.iloc[1])
def test_split_with_name(self):
# GH 12617
# should preserve name
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.split(',')
exp = Series([['a', 'b'], ['c', 'd']], name='xxx')
tm.assert_series_equal(res, exp)
res = s.str.split(',', expand=True)
exp = DataFrame([['a', 'b'], ['c', 'd']])
tm.assert_frame_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.split(',')
exp = Index([['a', 'b'], ['c', 'd']], name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(',', expand=True)
exp = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')])
assert res.nlevels == 2
tm.assert_index_equal(res, exp)
def test_partition_series(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([('a', '_', 'b_c'), ('c', '_', 'd_e'), NA,
('f', '_', 'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('a_b', '_', 'c'), ('c_d', '_', 'e'), NA,
('f_g', '_', 'h')])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.partition('__', expand=False)
exp = Series([('a', '__', 'b__c'), ('c', '__', 'd__e'), NA,
('f', '__', 'g__h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('__', expand=False)
exp = Series([('a__b', '__', 'c'), ('c__d', '__', 'e'), NA,
('f__g', '__', 'h')])
tm.assert_series_equal(result, exp)
# None
values = Series(['a b c', 'c d e', NA, 'f g h'])
result = values.str.partition(expand=False)
exp = Series([('a', ' ', 'b c'), ('c', ' ', 'd e'), NA,
('f', ' ', 'g h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition(expand=False)
exp = Series([('a b', ' ', 'c'), ('c d', ' ', 'e'), NA,
('f g', ' ', 'h')])
tm.assert_series_equal(result, exp)
        # Not split
values = Series(['abc', 'cde', NA, 'fgh'])
result = values.str.partition('_', expand=False)
exp = Series([('abc', '', ''), ('cde', '', ''), NA, ('fgh', '', '')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('', '', 'abc'), ('', '', 'cde'), NA, ('', '', 'fgh')])
tm.assert_series_equal(result, exp)
# unicode
values = Series([u'a_b_c', u'c_d_e', NA, u'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([(u'a', u'_', u'b_c'), (u'c', u'_', u'd_e'),
NA, (u'f', u'_', u'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([(u'a_b', u'_', u'c'), (u'c_d', u'_', u'e'),
NA, (u'f_g', u'_', u'h')])
tm.assert_series_equal(result, exp)
# compare to standard lib
values = Series(['A_B_C', 'B_C_D', 'E_F_G', 'EFGHEF'])
result = values.str.partition('_', expand=False).tolist()
assert result == [v.partition('_') for v in values]
result = values.str.rpartition('_', expand=False).tolist()
assert result == [v.rpartition('_') for v in values]
def test_partition_index(self):
values = Index(['a_b_c', 'c_d_e', 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Index(np.array([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_',
'g_h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.rpartition('_', expand=False)
exp = Index(np.array([('a_b', '_', 'c'), ('c_d', '_', 'e'), (
'f_g', '_', 'h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.partition('_')
exp = Index([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_', 'g_h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
result = values.str.rpartition('_')
exp = Index([('a_b', '_', 'c'), ('c_d', '_', 'e'), ('f_g', '_', 'h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
def test_partition_to_dataframe(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_')
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_')
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=True)
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_', expand=True)
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
def test_partition_with_name(self):
# GH 12617
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.partition(',')
exp = DataFrame({0: ['a', 'c'], 1: [',', ','], 2: ['b', 'd']})
tm.assert_frame_equal(res, exp)
# should preserve name
res = s.str.partition(',', expand=False)
exp = Series([('a', ',', 'b'), ('c', ',', 'd')], name='xxx')
tm.assert_series_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.partition(',')
exp = MultiIndex.from_tuples([('a', ',', 'b'), ('c', ',', 'd')])
assert res.nlevels == 3
tm.assert_index_equal(res, exp)
# should preserve name
res = idx.str.partition(',', expand=False)
exp = Index(np.array([('a', ',', 'b'), ('c', ',', 'd')]), name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
def test_pipe_failures(self):
# #2119
s = Series(['A|B|C'])
result = s.str.split('|')
exp = Series([['A', 'B', 'C']])
tm.assert_series_equal(result, exp)
result = s.str.replace('|', ' ')
exp = Series(['A B C'])
tm.assert_series_equal(result, exp)
def test_slice(self):
values = Series(['aafootwo', 'aabartwo', NA, 'aabazqux'])
result = values.str.slice(2, 5)
exp = Series(['foo', 'bar', NA, 'baz'])
tm.assert_series_equal(result, exp)
for start, stop, step in [(0, 3, -1), (None, None, -1), (3, 10, 2),
(3, 0, -1)]:
try:
result = values.str.slice(start, stop, step)
expected = Series([s[start:stop:step] if not isna(s) else NA
for s in values])
tm.assert_series_equal(result, expected)
except:
print('failed on %s:%s:%s' % (start, stop, step))
raise
# mixed
mixed = Series(['aafootwo', NA, 'aabartwo', True, datetime.today(),
None, 1, 2.])
rs = Series(mixed).str.slice(2, 5)
xp = Series(['foo', NA, 'bar', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = | Series(mixed) | pandas.Series |
from django.test import TestCase
import pandas as pd
from .models import join_files
from pandas._testing import assert_frame_equal
class JoinFilesTestCase(TestCase):
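    """Tests for join_files: a list of CSV DataFrames should be merged on a shared key column (e.g. user_id)."""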
def test_two_files_can_be_joined(self):
csv1 = pd.DataFrame(data={'user_id': [1, 2], 'age': [43, 28]})
csv2 = pd.DataFrame(data={'user_id': [1, 2], 'gender': ['F', 'M']})
csv_files = [csv1, csv2]
joined_file = join_files(csv_files, 'user_id')
expected = pd.DataFrame(data={'user_id': [1, 2], 'age': [43, 28], 'gender': ['F', 'M']})
assert_frame_equal(joined_file, expected)
def test_three_files_can_be_joined(self):
csv1 = pd.DataFrame(data={'user_id': [1, 2], 'age': [43, 28]})
csv2 = pd.DataFrame(data={'user_id': [1, 2], 'gender': ['F', 'M']})
csv3 = | pd.DataFrame(data={'user_id': [1, 2], 'name': ['A', 'B']}) | pandas.DataFrame |
import os.path
from surprise import SVDpp
import pandas as pd
import numpy as np
from surprise import BaselineOnly
from surprise import NormalPredictor
from surprise import Dataset
from surprise.model_selection import cross_validate
from surprise.model_selection import KFold
from surprise import Reader
from surprise import accuracy
from surprise import SVD
from surprise.model_selection import GridSearchCV
import Deezerdata
# importing the train and test data as pandas dataframe
train_data = | pd.read_csv("../Data/train-prep.csv") | pandas.read_csv |
import os
from unittest import TestCase
# most of the features of this script are already tested indirectly when
# running vensim and xmile integration tests
_root = os.path.dirname(__file__)
class TestErrors(TestCase):
def test_canonical_file_not_found(self):
from pysd.tools.benchmarking import runner
with self.assertRaises(FileNotFoundError) as err:
runner(os.path.join(_root, "more-tests/not_existent.mdl"))
self.assertIn(
'Canonical output file not found.',
str(err.exception))
def test_non_valid_model(self):
from pysd.tools.benchmarking import runner
with self.assertRaises(ValueError) as err:
runner(os.path.join(
_root,
"more-tests/not_vensim/test_not_vensim.txt"))
self.assertIn(
'Modelfile should be *.mdl or *.xmile',
str(err.exception))
def test_non_valid_outputs(self):
from pysd.tools.benchmarking import load_outputs
with self.assertRaises(ValueError) as err:
load_outputs(
os.path.join(
_root,
"more-tests/not_vensim/test_not_vensim.txt"))
self.assertIn(
"Not able to read '",
str(err.exception))
self.assertIn(
"more-tests/not_vensim/test_not_vensim.txt'.",
str(err.exception))
def test_different_frames_error(self):
from pysd.tools.benchmarking import load_outputs, assert_frames_close
with self.assertRaises(AssertionError) as err:
assert_frames_close(
load_outputs(os.path.join(_root, "data/out_teacup.csv")),
load_outputs(
os.path.join(_root, "data/out_teacup_modified.csv")))
self.assertIn(
"Following columns are not close:\n\tTeacup Temperature",
str(err.exception))
self.assertNotIn(
"Column 'Teacup Temperature' is not close.",
str(err.exception))
self.assertNotIn(
"Actual values:\n\t",
str(err.exception))
self.assertNotIn(
"Expected values:\n\t",
str(err.exception))
with self.assertRaises(AssertionError) as err:
assert_frames_close(
load_outputs(os.path.join(_root, "data/out_teacup.csv")),
load_outputs(
os.path.join(_root, "data/out_teacup_modified.csv")),
verbose=True)
self.assertIn(
"Following columns are not close:\n\tTeacup Temperature",
str(err.exception))
self.assertIn(
"Column 'Teacup Temperature' is not close.",
str(err.exception))
self.assertIn(
"Actual values:\n\t",
str(err.exception))
self.assertIn(
"Expected values:\n\t",
str(err.exception))
def test_different_frames_warning(self):
from warnings import catch_warnings
from pysd.tools.benchmarking import load_outputs, assert_frames_close
with catch_warnings(record=True) as ws:
assert_frames_close(
load_outputs(os.path.join(_root, "data/out_teacup.csv")),
load_outputs(
os.path.join(_root, "data/out_teacup_modified.csv")),
assertion="warn")
# use only user warnings
wu = [w for w in ws if issubclass(w.category, UserWarning)]
self.assertEqual(len(wu), 1)
self.assertIn(
"Following columns are not close:\n\tTeacup Temperature",
str(wu[0].message))
self.assertNotIn(
"Column 'Teacup Temperature' is not close.",
str(wu[0].message))
self.assertNotIn(
"Actual values:\n\t",
str(wu[0].message))
self.assertNotIn(
"Expected values:\n\t",
str(wu[0].message))
with catch_warnings(record=True) as ws:
assert_frames_close(
load_outputs(os.path.join(_root, "data/out_teacup.csv")),
load_outputs(
os.path.join(_root, "data/out_teacup_modified.csv")),
assertion="warn", verbose=True)
# use only user warnings
wu = [w for w in ws if issubclass(w.category, UserWarning)]
self.assertEqual(len(wu), 1)
self.assertIn(
"Following columns are not close:\n\tTeacup Temperature",
str(wu[0].message))
self.assertIn(
"Column 'Teacup Temperature' is not close.",
str(wu[0].message))
self.assertIn(
"Actual values:\n\t",
str(wu[0].message))
self.assertIn(
"Expected values:\n\t",
str(wu[0].message))
def test_transposed_frame(self):
from pysd.tools.benchmarking import load_outputs, assert_frames_close
assert_frames_close(
load_outputs(os.path.join(_root, "data/out_teacup.csv")),
load_outputs(
os.path.join(_root, "data/out_teacup_transposed.csv"),
transpose=True))
def test_load_columns(self):
from pysd.tools.benchmarking import load_outputs
out0 = load_outputs(
os.path.join(_root, "data/out_teacup.csv"))
out1 = load_outputs(
os.path.join(_root, "data/out_teacup.csv"),
columns=["Room Temperature", "Teacup Temperature"])
out2 = load_outputs(
os.path.join(_root, "data/out_teacup_transposed.csv"),
transpose=True,
columns=["Heat Loss to Room"])
self.assertEqual(
set(out1.columns),
set(["Room Temperature", "Teacup Temperature"]))
self.assertEqual(
set(out2.columns),
set(["Heat Loss to Room"]))
self.assertTrue((out0.index == out1.index).all())
self.assertTrue((out0.index == out2.index).all())
def test_different_cols(self):
from warnings import catch_warnings
from pysd.tools.benchmarking import assert_frames_close
import pandas as pd
d1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4], 'd': [6, 7]})
d2 = | pd.DataFrame({'a': [1, 2]}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 12 11:56:58 2022
@author: lawashburn
"""
import os
import csv
import pandas as pd
import numpy as np
from datetime import datetime
now = datetime.now()
fragment_matches = pd.read_csv(r"C:\Users\lawashburn\Documents\Nhu_Prescursor_Matching\20220417_oldprecursor\output_directory\SG-CHH-Fraction-UnoptimizedMethod_NewLCgradient_1fragment_matches.csv")
by_ion_list = pd.read_csv(r"C:\Users\lawashburn\Documents\Nhu_Prescursor_Matching\20220417_oldprecursor\by_ions.csv")
final_dir = r"C:\Users\lawashburn\Documents\Nhu_Prescursor_Matching\20220417_oldprecursor\output_directory"
sample_name = 'SG-CHH-Fraction-UnoptimizedMethod_NewLCgradient_1'
fragment_charges = [1,2,3,4,5]
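# Accumulator lists: one value per matched fragment, appended for every charge state in the loop below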
scan_no = []
t_p = []
a_p = []
p_err = []
p_a_z = []
p_t_z = []
a_f_mz = []
a_f_z = []
f_intensity = []
resolution_archive = []
f_a_M = []
f_t_mz = []
f_t_M = []
f_err = []
species_archive = []
f_t_mz2 = []
f_t_z = []
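# For each candidate fragment charge, take that charge's theoretical b/y-ion m/z column
# and join it to the observed fragment matches on the exact theoretical m/z value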
for a in fragment_charges:
fragments_filtered = pd.DataFrame()
fragments_filtered['species'] = by_ion_list['peptide']
fragments_filtered[str(a)] = by_ion_list[str(a)]
fragments_merged = fragment_matches.merge(fragments_filtered,left_on='Fragment Theoretical m/z',right_on=[str(a)])
fragments_merged['Theoretical ion charge'] = a
scan = fragments_merged['Scan #'].values.tolist()
t_prec = fragments_merged['Theoretical Precursor'].values.tolist()
a_prec = fragments_merged['Actual Precursor'].values.tolist()
prec_err = fragments_merged['Precursor error (ppm)'].values.tolist()
a_prec_z = fragments_merged['Precursor Actual Charge'].values.tolist()
t_prec_z = fragments_merged['Precursor Theoretical Charge'].values.tolist()
a_frag_mz = fragments_merged['Actual Fragment m/z'].values.tolist()
a_frag_z = fragments_merged['Actual Fragment Charge'].values.tolist()
intensity = fragments_merged['Fragment Intensity'].values.tolist()
resolution = fragments_merged['MS2 Resolution'].values.tolist()
a_frag_M = fragments_merged['Fragment Actual M'].values.tolist()
t_frag_mz = fragments_merged['Fragment Theoretical m/z'].values.tolist()
t_frag_M = fragments_merged['Fragment Theoretical M'].values.tolist()
frag_err = fragments_merged['Fragment error (Da)'].values.tolist()
species = fragments_merged['species'].values.tolist()
t_frag_mz_2 = fragments_merged[str(a)].values.tolist()
t_z_z = fragments_merged['Theoretical ion charge'].values.tolist()
for b in scan:
scan_no.append(b)
for c in t_prec:
t_p.append(c)
for d in a_prec:
a_p.append(d)
for e in prec_err:
p_err.append(e)
for f in a_prec_z:
p_a_z.append(f)
for g in t_prec_z:
p_t_z.append(g)
for h in a_frag_mz:
a_f_mz.append(h)
for i in a_frag_z:
a_f_z.append(i)
for j in intensity:
f_intensity.append(j)
for k in resolution:
resolution_archive.append(k)
for l in a_frag_M:
f_a_M.append(l)
for m in t_frag_mz:
f_t_mz.append(m)
for n in t_frag_M:
f_t_M.append(n)
for o in frag_err:
f_err.append(o)
for p in species:
species_archive.append(p)
for q in t_frag_mz_2:
f_t_mz2.append(q)
for t in t_z_z:
f_t_z.append(t)
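# Rebuild a single match table, one row per fragment match, from the accumulator lists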
match_table = pd.DataFrame()
match_table['Species'] = species_archive
match_table['Scan #'] = scan_no
match_table['theoretical precursor'] = t_p
match_table['actual precursor'] = a_p
match_table['theoretical precursor charge'] = p_t_z
match_table['actual precursor charge'] = p_a_z
match_table['precursor error (ppm)'] = p_err
match_table['theoretical fragment m/z'] = f_t_mz
match_table['actual fragment m/z'] = a_f_mz
match_table['theoretical fragment charge'] = f_t_z
match_table['actual fragment charge'] = a_f_z
match_table['theoretical fragment M'] = f_t_M
match_table['actual fragment M'] = f_a_M
match_table['fragment error (Da)'] = f_err
match_table['fragment intensity'] = f_intensity
match_table['MS2 resolution'] = resolution_archive
match_table = match_table[match_table['theoretical fragment charge'] == match_table['actual fragment charge']]
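# Collect the unique peptide species that remain after requiring observed and theoretical fragment charges to agree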
species_reference = match_table['Species'].values.tolist()
species_filtered = []
for z in species_reference:
if z not in species_filtered:
species_filtered.append(z)
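# For each surviving species: drop duplicate matches, write them to a per-species CSV, and tally matches per scan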
for y in species_filtered:
scan_summary_no = []
scan_summary_instances = []
species_matches = match_table[match_table['Species'] == y]
species_matches_no_dups = species_matches.drop_duplicates(subset = ['Scan #','theoretical precursor','actual precursor','theoretical precursor charge','actual precursor charge',
'precursor error (ppm)','theoretical fragment m/z','actual fragment m/z','theoretical fragment charge',
'actual fragment charge','theoretical fragment M','actual fragment M','fragment error (Da)',
'fragment intensity','MS2 resolution'])
if len(species_matches_no_dups)>0:
file_name = sample_name + '_' + y + '_fragment_matches.csv'
file_out_path = final_dir + '\\' + file_name
with open(file_out_path,'w',newline='') as filec:
writerc = csv.writer(filec)
species_matches_no_dups.to_csv(filec,index=False)
scans_present = species_matches_no_dups['Scan #'].values.tolist()
scans_filtered = []
for u in scans_present:
if u not in scans_filtered:
scans_filtered.append(u)
for v in scans_filtered:
scan_count = scans_present.count(v)
scan_summary_no.append(v)
scan_summary_instances.append(scan_count)
scan_report = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
from datetime import datetime
from dateutil.relativedelta import relativedelta
class stock:
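    """Scrapes Taiwan stock data: three-major institutional-investor flows and daily OHLCV prices, keyed by ROC dates."""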
now = datetime.now()
def __init__(self, stock_code, from_month) -> None:
self.stock_code = stock_code
self.from_month = from_month
if self.now.month< from_month:
self.start = datetime(self.now.year-1, self.from_month, 1)
else:
self.start = datetime(self.now.year, self.from_month, 1)
def get_three_major(self):
"""
日期 外資 投信 自營商 單日合計
0 109/12/04 18731 77 -340 18468
1 109/12/03 -4885 13 -57 -4929
2 109/12/02 10469 106 -415 10160
3 109/12/01 587 211 397 1195
4 109/11/30 -33427 -42 1226 -32243
"""
end = self.now
time_format = "%Y-%m-%d"
start_string = self.start.strftime(time_format)
end_string = end.strftime(time_format)
        # Jih Sun Securities (日盛證卷) institutional-investor page
url = f'http://jsjustweb.jihsun.com.tw/z/zc/zcl/zcl.djhtm?a={self.stock_code}&d={end_string}&c={start_string}'
df_orignal = pd.read_html(url)
df_extracted = df_orignal[2][7:-1]
        # If no data is returned for this stock code, raise an error
if len(df_extracted.index) < 2:
raise Exception('No data retrun from this stock code')
        # Set the DataFrame columns, otherwise they would just be 1, 2, 3, ...
df_extracted.columns = df_orignal[2].iloc[6]
df_extracted.columns.name = ''
return df_extracted.reset_index(drop = True).iloc[:,:5]
def get_price_value(self):
"""
1 日期 開盤價 最高價 最低價 收盤價 成交量
0 109/06/30 313.50 314.00 311.00 313.00 48784
1 109/06/29 314.00 315.00 310.00 312.00 56455
2 109/06/24 319.00 320.00 316.00 317.50 54019
3 109/06/23 316.00 316.50 312.50 315.00 41183
4 109/06/22 314.50 316.50 312.00 312.00 37295
"""
data_list = []
query_time = self.start
while (query_time<self.now):
            # wearn.com (聚財網) monthly price history
url = f'https://stock.wearn.com/cdata.asp?Year={query_time.year-1911}&month={query_time.month:0>2d}&kind={self.stock_code}'
df_orignal = pd.read_html(url)
df_extracted = df_orignal[0][2:]
df_extracted.columns = df_orignal[0].iloc[1]
data_list.append(df_extracted)
query_time += relativedelta(months = +1)
data_final = pd.concat(data_list).drop_duplicates().reset_index(drop = True)
data_final.columns.name = ''
return data_final
def get_all_data(self, enable_color = True):
"""
日期 外資 投信 自營商 單日合計 開盤價 最高價 最低價 收盤價 成交量
0 109/12/04 18731 77 -340 18468 498.50 505.00 497.50 503.00 50920
1 109/12/03 -4885 13 -57 -4929 499.50 499.50 495.00 497.00 35557
2 109/12/02 10469 106 -415 10160 499.50 500.00 493.50 499.00 50422
3 109/12/01 587 211 397 1195 489.50 490.00 483.50 490.00 36915
"""
data1 = self.get_three_major()
data2 = self.get_price_value()
data = data1.merge(data2, left_on = '日期', right_on = '日期')
print('data: {}'.format(data) )
        print('data[\'日期\']: {}'.format(data['日期']))
        # Convert ROC-era dates such as 109/11/1 into Gregorian dates such as 2020/11/1
        #data['日期'] = data['日期'].apply(lambda x: f'{ int( (x :=x.split("/") )[0])+1911}/{x[1]}/{x[2] }')
        data['日期'] = data['日期'].apply(lambda x: f'{ int( ( x.split("/") )[0])+1911}/{x[1]}/{x[2] }')
        #print('data[\'日期\']: {}'.format(data['日期']) )
        # Type conversion --> cast the remaining columns to numeric
for i in data.columns:
data[i] = | pd.to_numeric(data[i], errors='ignore') | pandas.to_numeric |
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (make could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def test_ops(self):
td = Timedelta(10, unit='d')
self.assertEqual(-td, Timedelta(-10, unit='d'))
self.assertEqual(+td, Timedelta(10, unit='d'))
self.assertEqual(td - td, Timedelta(0, unit='ns'))
self.assertTrue((td - pd.NaT) is pd.NaT)
self.assertEqual(td + td, Timedelta(20, unit='d'))
self.assertTrue((td + pd.NaT) is pd.NaT)
self.assertEqual(td * 2, Timedelta(20, unit='d'))
self.assertTrue((td * pd.NaT) is pd.NaT)
self.assertEqual(td / 2, Timedelta(5, unit='d'))
self.assertEqual(abs(td), td)
self.assertEqual(abs(-td), td)
self.assertEqual(td / td, 1)
self.assertTrue((td / pd.NaT) is np.nan)
# invert
self.assertEqual(-td, Timedelta('-10d'))
self.assertEqual(td * -1, Timedelta('-10d'))
self.assertEqual(-1 * td, Timedelta('-10d'))
self.assertEqual(abs(-td), Timedelta('10d'))
# invalid
self.assertRaises(TypeError, lambda: Timedelta(11, unit='d') // 2)
# invalid multiply with another timedelta
self.assertRaises(TypeError, lambda: td * td)
# can't operate with integers
self.assertRaises(TypeError, lambda: td + 2)
self.assertRaises(TypeError, lambda: td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
self.assertEqual(Timedelta(241, unit='h'), td + pd.offsets.Hour(1))
self.assertEqual(Timedelta(241, unit='h'), pd.offsets.Hour(1) + td)
self.assertEqual(240, td / pd.offsets.Hour(1))
self.assertEqual(1 / 240.0, pd.offsets.Hour(1) / td)
self.assertEqual(Timedelta(239, unit='h'), td - pd.offsets.Hour(1))
self.assertEqual(Timedelta(-239, unit='h'), pd.offsets.Hour(1) - td)
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
self.assertRaises(TypeError, lambda: td + np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
self.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(-other + td, expected)
self.assertRaises(TypeError, lambda: td - np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td * np.array([2]), expected)
self.assert_numpy_array_equal(np.array([2]) * td, expected)
self.assertRaises(TypeError, lambda: td * other)
self.assertRaises(TypeError, lambda: other * td)
self.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
self.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
# regression test for GH8813
td = Timedelta('1 day')
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
def test_ops_series_object(self):
# GH 13043
s = pd.Series([pd.Timestamp('2015-01-01', tz='US/Eastern'),
pd.Timestamp('2015-01-01', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timestamp('2015-01-02', tz='US/Eastern'),
pd.Timestamp('2015-01-02', tz='Asia/Tokyo')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
tm.assert_series_equal(pd.Timedelta('1 days') + s, exp)
# object series & object series
s2 = pd.Series([pd.Timestamp('2015-01-03', tz='US/Eastern'),
pd.Timestamp('2015-01-05', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s2.dtype, object)
exp = pd.Series([pd.Timedelta('2 days'), pd.Timedelta('4 days')],
name='xxx')
tm.assert_series_equal(s2 - s, exp)
tm.assert_series_equal(s - s2, -exp)
s = pd.Series([pd.Timedelta('01:00:00'), pd.Timedelta('02:00:00')],
name='xxx', dtype=object)
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timedelta('01:30:00'), pd.Timedelta('02:30:00')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('00:30:00'), exp)
tm.assert_series_equal(pd.Timedelta('00:30:00') + s, exp)
def test_ops_notimplemented(self):
class Other:
pass
other = Other()
td = Timedelta('1 day')
self.assertTrue(td.__add__(other) is NotImplemented)
self.assertTrue(td.__sub__(other) is NotImplemented)
self.assertTrue(td.__truediv__(other) is NotImplemented)
self.assertTrue(td.__mul__(other) is NotImplemented)
self.assertTrue(td.__floordiv__(td) is NotImplemented)
def test_ops_error_str(self):
# GH 13624
tdi = TimedeltaIndex(['1 day', '2 days'])
for l, r in [(tdi, 'a'), ('a', tdi)]:
with tm.assertRaises(TypeError):
l + r
with tm.assertRaises(TypeError):
l > r
with tm.assertRaises(TypeError):
l == r
with | tm.assertRaises(TypeError) | pandas.util.testing.assertRaises |
import os
import numpy as np
import pandas as pd
from collections import defaultdict
from .io import save_data, load_data, exists_data, save_results
from . import RAW_DATA_DIR
DATASETS = ['password', 'keypad', 'fixed_text', 'free_text', 'mobile']
MOBILE_SENSORS = ['pressure', 'tool_major', 'x', 'x_acceleration', 'x_rotation', 'y', 'y_acceleration',
'y_rotation', 'z_acceleration', 'z_rotation']
SUMMARY_COLS = ['Users', 'Samples/user', 'Total events', 'Min events/user', 'Max events/user',
'Events/sample', 'Mean user freq', 'Mean user period', 'Global freq', 'Mean global period']
KEYGROUP = {
# 'backspace': 'backspace',
'space': 'space',
'shift': 'shift',
'period': 'period',
'comma': 'comma'
}
for k in list('<KEY>'):
KEYGROUP[k] = 'left_letter'
for k in list('<KEY>'):
KEYGROUP[k] = 'right_letter'
FEATURE_FUNS = defaultdict(lambda: lambda df, col: df[col])
FEATURE_FUNS.update({
# Timing features
'tau': lambda df, col: df['timepress'].diff().fillna(np.median(df['timepress'].diff().dropna())),
'duration': lambda df, col: df['timerelease'] - df['timepress'],
# Event type functions
'none': lambda df, col: np.ones(len(df)),
'keygroup': lambda df, col: df['keyname'].map(KEYGROUP).fillna('other'),
'position': lambda df, col: np.arange(len(df)),
})
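# Illustrative example (added for clarity, not part of the original module) of
# how the feature functions above behave on a tiny, made-up keystroke frame:
#
#     >>> demo = pd.DataFrame({'keyname': ['a', 'space', 'k'],
#     ...                      'timepress': [0, 120, 260],
#     ...                      'timerelease': [80, 200, 350]})
#     >>> FEATURE_FUNS['tau'](demo, 'tau').tolist()
#     [130.0, 120.0, 140.0]
#     >>> FEATURE_FUNS['duration'](demo, 'duration').tolist()
#     [80, 80, 90]
#
# 'tau' is the press-to-press latency (the leading NaN from diff() is filled
# with the median gap, here 130 ms); 'duration' is the key hold time; and
# 'keygroup' maps each keyname through KEYGROUP, falling back to 'other' for
# keys that are not listed.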
def preprocess_data(df, event_col, feature_cols):
def pp_fun(x, feature_cols=feature_cols):
x['event'] = FEATURE_FUNS[event_col](x, event_col)
for col in feature_cols:
x[col] = FEATURE_FUNS[col](x, col)
return x[['event'] + feature_cols]
if df.index.nlevels > 1:
level = np.arange(df.index.nlevels).tolist()
else:
level = 0
df = df.groupby(level=level).apply(pp_fun)
return df
def reduce_dataset(df, num_users=None,
min_samples=None, max_samples=None,
min_obs=None, max_obs=None):
'''
Reducing the size of a dataset is a common operation when a certain number
of observations, samples, or users is desired. This function limits each
of these by attempting to satisfy the constraints in the following order:
num observations
num samples
num users
'''
if max_obs:
df = df.groupby(level=[0, 1]).apply(lambda x: x[:max_obs]).reset_index(level=[2, 3], drop=True)
num_obs = df.groupby(level=[0, 1]).size()
if min_obs:
num_obs = num_obs[num_obs >= min_obs]
num_samples = num_obs.groupby(level=0).size()
if min_samples:
num_samples = num_samples[num_samples >= min_samples]
if num_users and num_users < len(num_samples):
users = np.random.permutation(num_samples.index.values)[:num_users]
else:
users = num_samples.index.values
num_obs = num_obs.loc[users.tolist()]
if max_samples:
num_obs = num_obs.groupby(level=0).apply(
lambda x: x.loc[np.random.permutation(np.sort(x.index.unique()))[:max_samples]]).reset_index(level=1,
drop=True)
df = df.loc[num_obs.index].sort_index()
return df
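# Hypothetical usage (added for illustration, not part of the original module):
# keep at most 50 users, each contributing 2-10 samples of 100-500 keystrokes.
# Because the constraints are applied in the documented order (observations,
# then samples, then users), undersized samples are discarded before users are
# counted and sampled.
#
#     df_small = reduce_dataset(df, num_users=50,
#                               min_samples=2, max_samples=10,
#                               min_obs=100, max_obs=500)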
def _filter(df, max_dups=4, max_pause=6e4):
# Drop sessions with many duplicate times
s = df.reset_index().groupby(['user', 'session', 'timepress']).size()
s = s[s > max_dups].reset_index(level=2, drop=True)
dropme = s.index.unique()
df = df.drop(dropme)
# Drop sessions containing pauses longer than max_pause (60 s by default)
s = df.groupby(level=[0, 1]).apply(lambda x: np.any(x['timepress'].diff() > max_pause))
s = s[s]
dropme = s.index.unique()
df = df.drop(dropme)
# Drop 0 durations
df = df[df['timerelease'] - df['timepress'] > 0]
# Separate duplicate key presses by at least 1 ms
while np.any(df.groupby(level=[0, 1]).apply(lambda x: x['timepress'].diff() == 0)):
def _inc_timepress_dups(x):
idx = x['timepress'].diff().fillna(1) == 0
x.loc[idx, 'timepress'] += 1
x.loc[idx, 'timerelease'] += 1
return x.reset_index()
df = df.groupby(level=[0, 1]).apply(_inc_timepress_dups).set_index(['user', 'session'])
df = df.groupby(level=[0, 1]).apply(lambda x: x.sort_values('timepress'))
return df
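# Note (added for illustration): the de-duplication loop above nudges
# simultaneous key presses apart by 1 ms per pass; e.g. press times
# [100, 100, 100] become [100, 101, 101] after one pass and [100, 101, 102]
# after the next, so every session ends up strictly increasing in 'timepress'.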
def _normalize(df):
def norm_session_times(x):
t0 = x.iloc[0]['timepress']
x['timepress'] -= t0
x['timerelease'] -= t0
return x
df = df.groupby(level=[0, 1]).apply(norm_session_times)
df = df.reset_index()
df['user'] = df['user'].map(dict(zip(df['user'].unique(), range(len(df['user'].unique())))))
def renumber_sessions(x):
x['session'] = x['session'].map(dict(zip(sorted(x['session'].unique()), range(len(x['session'].unique())))))
return x
df = df.groupby('user').apply(renumber_sessions).set_index(['user', 'session'])
df = df.sort_index()
return df
def preprocess_password(fname_in):
def process_row(idx_row):
idx, row = idx_row
timepress = 1000 * np.r_[0, row[4::3].astype(float).values].cumsum()
timerelease = timepress + 1000 * row[3::3].astype(float).values
keyname = list('.tie5Roanl') + ['enter']
return pd.DataFrame.from_items([
('user', [row['subject']] * 11),
('session', [row['sessionIndex'] * 100 + row['rep']] * 11),
('keyname', keyname),
('timepress', timepress),
('timerelease', timerelease)
])
df = pd.concat(map(process_row, pd.read_csv(fname_in).iterrows())).set_index(['user', 'session'])
df = _normalize(df)
save_data(df, 'password')
return
def preprocess_keypad(fname_in):
df = pd.read_csv(fname_in, index_col=[0, 1])
# Discard incorrect entries
keynames = ['numpad_%s' % s for s in '9141937761'] + ['enter']
df = df.groupby(level=[0, 1]).filter(lambda x: (len(x) == 11) and (x['keyname'] == keynames).all())
df = _normalize(df)
save_data(df, 'keypad')
return
def preprocess_fixed_text(fname1_in, fname2_in, num_samples=4, num_obs=100):
df1 = | pd.read_csv(fname1_in, index_col=[0, 1]) | pandas.read_csv |
# License: Apache-2.0
import databricks.koalas as ks
import pandas as pd
import numpy as np
import pytest
from pandas.testing import assert_frame_equal
from gators.imputers.numerics_imputer import NumericsImputer
from gators.imputers.int_imputer import IntImputer
from gators.imputers.float_imputer import FloatImputer
from gators.imputers.object_imputer import ObjectImputer
ks.set_option('compute.default_index_type', 'distributed-sequence')
@pytest.fixture()
def data():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', None], 'F': ['a', 'a', 's', np.nan]})
X_int_expected = pd.DataFrame(
{'A': [0., 1., 1., -9.], 'B': [3., 4., 4., -9.]})
X_float_expected = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 1.1], 'D': [2.1, 3.1, 4.1, 3.1]})
X_object_expected = pd.DataFrame(
{'E': ['q', 'w', 'w', 'MISSING'], 'F': ['a', 'a', 's', 'MISSING']})
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X_object)
X_dict = {
'int': X_int,
'float': X_float,
'object': X_object,
}
X_expected_dict = {
'int': X_int_expected,
'float': X_float_expected,
'object': X_object_expected,
}
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture()
def data_num():
X_int = pd.DataFrame(
{'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]},
dtype=np.float32)
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]},
dtype=np.float32)
X_int_expected = pd.DataFrame(
{'A': [0., 1., 1., -9.], 'B': [3., 4., 4., -9.]},
dtype=np.float32)
X_float_expected = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 1.1], 'D': [2.1, 3.1, 4.1, 3.1]},
dtype=np.float32)
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
X_dict = {
'int': X_int,
'float': X_float,
}
X_expected_dict = {
'int': X_int_expected,
'float': X_float_expected,
}
objs_dict = {
'int': obj_int,
'float': obj_float,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture()
def data_no_missing():
X_int = pd.DataFrame({'A': [0, 1, 1, 8], 'B': [3, 4, 4, 8]}, dtype=int)
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 9.], 'D': [2.1, 3.1, 4.1, 9.]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', 'x'], 'F': ['a', 'a', 's', 'x']})
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X_object)
X_dict = {
'int': X_int,
'float': X_float,
'object': X_object,
}
X_expected_dict = {
'int': X_int.copy(),
'float': X_float.copy(),
'object': X_object.copy(),
}
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture
def data_full():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = pd.concat([X_int, X_float, X_object], axis=1)
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -9.0, 1.1, 3.1, 'w', 'a']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_int = IntImputer(strategy='constant', value=-9).fit(X)
obj_float = FloatImputer(strategy='median').fit(X)
obj_object = ObjectImputer(strategy='most_frequent').fit(X)
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X, X_expected
@pytest.fixture()
def data_ks():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', None], 'F': ['a', 'a', 's', np.nan]})
X_int_expected = pd.DataFrame(
{'A': [0., 1., 1., -9.], 'B': [3., 4., 4., -9.]})
X_float_expected = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 1.1], 'D': [2.1, 3.1, 4.1, 3.1]})
X_object_expected = pd.DataFrame(
{'E': ['q', 'w', 'w', 'MISSING'], 'F': ['a', 'a', 's', 'MISSING']})
X_int_ks = ks.from_pandas(X_int)
X_float_ks = ks.from_pandas(X_float)
X_object_ks = ks.from_pandas(X_object)
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X_object)
X_dict = {
'int': X_int_ks,
'float': X_float_ks,
'object': X_object_ks,
}
X_expected_dict = {
'int': X_int_expected,
'float': X_float_expected,
'object': X_object_expected,
}
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture()
def data_num_ks():
X_int = ks.DataFrame(
{'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]},
dtype=np.float32)
X_float = ks.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]},
dtype=np.float32)
X_int_expected = pd.DataFrame(
{'A': [0., 1., 1., -9.], 'B': [3., 4., 4., -9.]},
dtype=np.float32)
X_float_expected = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 1.1], 'D': [2.1, 3.1, 4.1, 3.1]},
dtype=np.float32)
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
X_dict = {
'int': X_int,
'float': X_float,
}
X_expected_dict = {
'int': X_int_expected,
'float': X_float_expected,
}
objs_dict = {
'int': obj_int,
'float': obj_float,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture()
def data_no_missing_ks():
X_int = ks.DataFrame({'A': [0, 1, 1, 8], 'B': [3, 4, 4, 8]}, dtype=int)
X_float = ks.DataFrame(
{'C': [0.1, 1.1, 2.1, 9.], 'D': [2.1, 3.1, 4.1, 9.]})
X_object = ks.DataFrame(
{'E': ['q', 'w', 'w', 'x'], 'F': ['a', 'a', 's', 'x']})
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X_object)
X_dict = {
'int': X_int,
'float': X_float,
'object': X_object,
}
X_expected_dict = {
'int': X_int.to_pandas().copy(),
'float': X_float.to_pandas().copy(),
'object': X_object.to_pandas().copy(),
}
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture
def data_full_ks():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = ks.from_pandas(pd.concat([X_int, X_float, X_object], axis=1))
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -9.0, 1.1, 3.1, 'w', 'a']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_int = IntImputer(strategy='constant', value=-9).fit(X)
obj_float = FloatImputer(strategy='median').fit(X)
obj_object = ObjectImputer(strategy='most_frequent').fit(X)
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X, X_expected
def test_int_pd(data):
objs_dict, X_dict, X_expected_dict = data
assert_frame_equal(
objs_dict['int'].transform(X_dict['int']), X_expected_dict['int'],
)
def test_float_pd(data):
objs_dict, X_dict, X_expected_dict = data
assert_frame_equal(
objs_dict['float'].transform(
X_dict['float']), X_expected_dict['float'],
)
def test_object_pd(data):
objs_dict, X_dict, X_expected_dict = data
assert_frame_equal(
objs_dict['object'].transform(
X_dict['object']), X_expected_dict['object'],
)
@pytest.mark.koalas
def test_int_ks(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
assert_frame_equal(
objs_dict['int'].transform(X_dict['int']).to_pandas(),
X_expected_dict['int'],)
@pytest.mark.koalas
def test_float_ks(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
assert_frame_equal(
objs_dict['float'].transform(X_dict['float']).to_pandas(),
X_expected_dict['float'])
@pytest.mark.koalas
def test_object_ks(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
assert_frame_equal(
objs_dict['object'].transform(X_dict['object']).to_pandas(),
X_expected_dict['object'],
)
def test_int_pd_np(data):
objs_dict, X_dict, X_expected_dict = data
X_new_np = objs_dict['int'].transform_numpy(X_dict['int'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['int'].columns)
assert_frame_equal(X_new, X_expected_dict['int'])
def test_float_pd_np(data):
objs_dict, X_dict, X_expected_dict = data
X_new_np = objs_dict['float'].transform_numpy(X_dict['float'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['float'].columns)
assert_frame_equal(X_new, X_expected_dict['float'])
def test_object_pd_np(data):
objs_dict, X_dict, X_expected_dict = data
X_new_np = objs_dict['object'].transform_numpy(X_dict['object'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['object'].columns)
assert_frame_equal(X_new, X_expected_dict['object'])
@pytest.mark.koalas
def test_int_ks_np(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
X_new_np = objs_dict['int'].transform_numpy(X_dict['int'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['int'].columns)
assert_frame_equal(X_new, X_expected_dict['int'])
@pytest.mark.koalas
def test_float_ks_np(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
X_new_np = objs_dict['float'].transform_numpy(
X_dict['float'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['float'].columns)
assert_frame_equal(X_new, X_expected_dict['float'])
@pytest.mark.koalas
def test_object_ks_np(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
X_new_np = objs_dict['object'].transform_numpy(
X_dict['object'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['object'].columns)
assert_frame_equal(X_new, X_expected_dict['object'])
def test_num_int_pd(data_num):
objs_dict, X_dict, X_expected_dict = data_num
assert_frame_equal(
objs_dict['int'].transform(X_dict['int']), X_expected_dict['int'],
)
def test_num_float_pd(data_num):
objs_dict, X_dict, X_expected_dict = data_num
assert_frame_equal(
objs_dict['float'].transform(
X_dict['float']), X_expected_dict['float'],
)
@pytest.mark.koalas
def test_num_int_ks(data_num_ks):
objs_dict, X_dict, X_expected_dict = data_num_ks
assert_frame_equal(objs_dict['int'].transform(
X_dict['int'].to_pandas()), X_expected_dict['int'],
)
@pytest.mark.koalas
def test_num_float_ks(data_num_ks):
objs_dict, X_dict, X_expected_dict = data_num_ks
assert_frame_equal(objs_dict['float'].transform(
X_dict['float'].to_pandas()), X_expected_dict['float'],
)
def test_num_int_pd_np(data_num):
objs_dict, X_dict, X_expected_dict = data_num
X_new_np = objs_dict['int'].transform_numpy(X_dict['int'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['int'].columns)
assert_frame_equal(X_new, X_expected_dict['int'])
def test_num_float_pd_np(data_num):
objs_dict, X_dict, X_expected_dict = data_num
X_new_np = objs_dict['float'].transform_numpy(X_dict['float'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['float'].columns)
assert_frame_equal(X_new, X_expected_dict['float'])
@pytest.mark.koalas
def test_num_int_ks_np(data_num_ks):
objs_dict, X_dict, X_expected_dict = data_num_ks
X_new_np = objs_dict['int'].transform_numpy(X_dict['int'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['int'].columns)
assert_frame_equal(X_new, X_expected_dict['int'])
@pytest.mark.koalas
def test_num_float_ks_np(data_num_ks):
objs_dict, X_dict, X_expected_dict = data_num_ks
X_new_np = objs_dict['float'].transform_numpy(
X_dict['float'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['float'].columns)
assert_frame_equal(X_new, X_expected_dict['float'])
def test_no_missing_int_pd(data_no_missing):
objs_dict, X_dict, X_expected_dict = data_no_missing
assert_frame_equal(
objs_dict['int'].transform(X_dict['int']), X_expected_dict['int'],
)
def test_no_missing_float_pd(data_no_missing):
objs_dict, X_dict, X_expected_dict = data_no_missing
assert_frame_equal(
objs_dict['float'].transform(
X_dict['float']), X_expected_dict['float'],
)
def test_no_missing_object_pd(data_no_missing):
objs_dict, X_dict, X_expected_dict = data_no_missing
assert_frame_equal(
objs_dict['object'].transform(
X_dict['object']), X_expected_dict['object'],
)
@pytest.mark.koalas
def test_no_missing_int_ks(data_no_missing_ks):
objs_dict, X_dict, X_expected_dict = data_no_missing_ks
assert_frame_equal(objs_dict['int'].transform(
X_dict['int'].to_pandas()), X_expected_dict['int'],
)
@pytest.mark.koalas
def test_no_missing_float_ks(data_no_missing_ks):
objs_dict, X_dict, X_expected_dict = data_no_missing_ks
assert_frame_equal(objs_dict['float'].transform(
X_dict['float'].to_pandas()), X_expected_dict['float'],
)
@pytest.mark.koalas
def test_no_missing_object_ks(data_no_missing_ks):
objs_dict, X_dict, X_expected_dict = data_no_missing_ks
assert_frame_equal(objs_dict['object'].transform(
X_dict['object'].to_pandas()), X_expected_dict['object'],
)
def test_no_missing_int_pd_np(data_no_missing):
objs_dict, X_dict, X_expected_dict = data_no_missing
X_new_np = objs_dict['int'].transform_numpy(X_dict['int'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['int'].columns)
assert_frame_equal(X_new, X_expected_dict['int'])
def test_no_missing_float_pd_np(data_no_missing):
objs_dict, X_dict, X_expected_dict = data_no_missing
X_new_np = objs_dict['float'].transform_numpy(X_dict['float'].to_numpy())
X_new = | pd.DataFrame(X_new_np, columns=X_dict['float'].columns) | pandas.DataFrame |
import os
import pandas as pd
import numpy as np
from joblib import load
import MLPipeline
import AppConfig as app_config
import ml_pipeline.utils.Helper as helper
DATA_FLD_NAME = app_config.TSG_FLD_NAME
class TestSetPreprocessing:
def __init__(self, ml_pipeline: MLPipeline):
self.ml_pipeline = ml_pipeline
self.jlogger = self.ml_pipeline.jlogger
self.jlogger.info("Inside TestSetPreprocessing initialization")
if self.ml_pipeline.status == app_config.STEP6_STATUS:  # resume at this step
self.apply_on_all_fg()
def apply_on_all_fg(self):
# Padel
if self.ml_pipeline.config.fg_padelpy_flg:
self.fg_fld_name = app_config.FG_PADEL_FLD_NAME
self.preprocess_test_set()
if self.ml_pipeline.config.fg_mordered_flg:
# Mordred
self.fg_fld_name = app_config.FG_MORDRED_FLD_NAME
self.preprocess_test_set()
updated_status = app_config.STEP6_1_STATUS
job_oth_config_fp = self.ml_pipeline.job_data['job_oth_config_path']
helper.update_job_status(job_oth_config_fp, updated_status)
self.ml_pipeline.status = updated_status
self.jlogger.info("Generated test set preprocessing completed successfully")
def preprocess_test_set(self):
padel_raw_fld_path = os.path.join(
*[self.ml_pipeline.job_data['job_data_path'], DATA_FLD_NAME, self.fg_fld_name,
app_config.TSG_RAW_FLD_NAME])
padel_pp_fld_path = os.path.join(
*[self.ml_pipeline.job_data['job_data_path'], DATA_FLD_NAME, self.fg_fld_name,
app_config.TSG_PP_FLD_NAME])
os.makedirs(padel_pp_fld_path, exist_ok=True)
padel_pp_lime_fld_path = os.path.join(
*[self.ml_pipeline.job_data['job_data_path'], DATA_FLD_NAME, self.fg_fld_name,
app_config.TSG_PP_LIME_FLD_NAME])
os.makedirs(padel_pp_lime_fld_path, exist_ok=True)
padel_test_cmpnd_fld_path = os.path.join(
*[self.ml_pipeline.job_data['job_data_path'], DATA_FLD_NAME, self.fg_fld_name,
app_config.TSG_CMPND_FLD_NAME])
os.makedirs(padel_test_cmpnd_fld_path, exist_ok=True)
for file in os.listdir(padel_raw_fld_path):
if file.endswith(".csv"): # checking only csv files
self.jlogger.info("Starting preprocessing {}".format(file))
padel_fp = os.path.join(padel_raw_fld_path, file)
cnames, padel_pp_lime_df, padel_pp_fin_df = self.preprocess_generated_test_set(padel_fp)
cnames_df = pd.DataFrame(cnames, columns=["CNAME"])
test_cmpnd_fp = os.path.join(padel_test_cmpnd_fld_path, file)
cnames_df.to_csv(test_cmpnd_fp, index=False)
padel_pp_fp = os.path.join(padel_pp_fld_path, file)
padel_pp_fin_df.to_csv(padel_pp_fp, index=False)
padel_pp_lime_fp = os.path.join(padel_pp_lime_fld_path, file)
padel_pp_lime_df.to_csv(padel_pp_lime_fp, index=False)
def preprocess_generated_test_set(self, padel_fp):
df_test = pd.read_csv(padel_fp)
compound_names = df_test['CNAME']
self.jlogger.info("Before shape test {}".format(df_test.shape))
df_init_train, init_features = self.extract_initial_train_features()
df_init_test_fltrd = df_test[init_features]
df_test_pp = self.apply_other_preprocess(df_init_train, df_init_test_fltrd)
self.jlogger.info("After preprocessing shape test {}".format(df_test_pp.shape))
df_fin_train, fin_features = self.extract_final_train_features()
df_test_pp_final = df_test_pp[fin_features]
self.jlogger.info("After feature selection shape test {}".format(df_test_pp_final.shape))
test_final_np = self.apply_pca(df_test_pp_final)
self.jlogger.info("After feature extraction shape test {}".format(test_final_np.shape))
df_test_final = pd.DataFrame(test_final_np)
return compound_names, df_test_pp_final, df_test_final
def extract_initial_train_features(self):
if self.ml_pipeline.config.pp_mv_col_pruning_flg:
pp_train_path = os.path.join(
*[self.ml_pipeline.job_data['job_data_path'], app_config.PP_FLD_NAME, self.fg_fld_name,
app_config.PP_INIT_COL_PRUNED_FNAME])
else:
pp_train_path = os.path.join(
*[self.ml_pipeline.job_data['job_data_path'], app_config.PP_FLD_NAME, self.fg_fld_name,
app_config.PP_INIT_DATA_FNAME])
df = pd.read_csv(pp_train_path)
features = df.columns.to_list()
return df, features
def extract_final_train_features(self):
fin_features = []
df = None
if self.ml_pipeline.config.fs_boruta_flg:
boruta_train_path = os.path.join(
*[self.ml_pipeline.job_data['job_data_path'], app_config.FS_FLD_NAME, self.fg_fld_name,
app_config.FS_XTRAIN_FNAME])
df = | pd.read_csv(boruta_train_path) | pandas.read_csv |