| prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90) |
|---|---|---|
import os
import sys
import warnings
import typing
import time
import keras
import keras.preprocessing.image
import tensorflow as tf
import pandas as pd
import numpy as np
from object_detection_retinanet import layers
from object_detection_retinanet import losses
from object_detection_retinanet import models
from collections import OrderedDict
from d3m import container, utils
from d3m.container import DataFrame as d3m_DataFrame
from d3m.metadata import base as metadata_base, hyperparams, params
from d3m.primitive_interfaces.base import PrimitiveBase, CallResult
from object_detection_retinanet.callbacks import RedirectModel
from object_detection_retinanet.callbacks.eval import Evaluate
from object_detection_retinanet.utils.eval import evaluate
from object_detection_retinanet.models.retinanet import retinanet_bbox
from object_detection_retinanet.preprocessing.csv_generator import CSVGenerator
from object_detection_retinanet.utils.anchors import make_shapes_callback
from object_detection_retinanet.utils.model import freeze as freeze_model
from object_detection_retinanet.utils.gpu import setup_gpu
from object_detection_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image
Inputs = container.pandas.DataFrame
Outputs = container.pandas.DataFrame
class Hyperparams(hyperparams.Hyperparams):
backbone = hyperparams.Union(
OrderedDict({
'resnet50': hyperparams.Constant[str](
default = 'resnet50',
semantic_types = ['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
description = "Backbone architecture from resnet50 architecture (https://arxiv.org/abs/1512.03385)"
)
# 'resnet101': hyperparams.Constant[str](
# default = 'resnet101',
# semantic_types = ['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
# description = "Backbone architecture from resnet101 architecture (https://arxiv.org/abs/1512.03385)"
# ),
# 'resnet152': hyperparams.Constant[str](
# default = 'resnet152',
# semantic_types = ['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
# description = "Backbone architecture from resnet152 architecture (https://arxiv.org/abs/1512.03385)"
# )
}),
default = 'resnet50',
semantic_types = ['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
description = "Backbone architecture from which RetinaNet is built. All backbones " +
"require a weights file downloaded for use during runtime."
)
batch_size = hyperparams.Hyperparameter[int](
default = 1,
semantic_types = ['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
description = "Size of the batches as input to the model."
)
n_epochs = hyperparams.Hyperparameter[int](
default = 20,
semantic_types = ['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
description = "Number of epochs to train."
)
freeze_backbone = hyperparams.Hyperparameter[bool](
default = True,
semantic_types = ['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description = "Freeze training of backbone layers."
)
weights = hyperparams.Hyperparameter[bool](
default = True,
semantic_types = ['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description = "Load the model with pretrained weights specific to selected backbone."
)
learning_rate = hyperparams.Hyperparameter[float](
default = 1e-5,
semantic_types = ['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description = "Learning rate."
)
n_steps = hyperparams.Hyperparameter[int](
default = 50,
semantic_types = ['https://metadata.datadrivendiscovery.org/types/TuningParameter'],
description = "Number of steps/epoch."
)
output = hyperparams.Hyperparameter[bool](
default = False,
semantic_types = ['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description = "Output images and predicted bounding boxes after evaluation."
)
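# Illustrative sketch (not part of the original primitive): hyperparameters are usually
# built from their defaults and selectively overridden, assuming the standard d3m
# pattern; the override values below are arbitrary examples.
#   hp = Hyperparams.defaults().replace({'n_epochs': 5, 'batch_size': 2})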
class Params(params.Params):
pass
class ObjectDetectionRNPrimitive(PrimitiveBase[Inputs, Outputs, Params, Hyperparams]):
"""
Primitive that utilizes RetinaNet, a convolutional neural network (CNN), for object
detection. The methodology comes from "Focal Loss for Dense Object Detection" by
    Lin et al. 2017 (https://arxiv.org/abs/1708.02002). The implementation is based
    on the library found at https://github.com/fizyr/keras-retinanet.
    The primitive accepts a Dataset consisting of images and labels as input and returns
    a dataframe which includes the bounding boxes for each object in each image.
"""
metadata = metadata_base.PrimitiveMetadata(
{
'id': 'd921be1e-b158-4ab7-abb3-cb1b17f42639',
'version': '0.1.0',
'name': 'retina_net',
'python_path': 'd3m.primitives.object_detection.retinanet',
'keywords': ['object detection', 'convolutional neural network', 'digital image processing', 'RetinaNet'],
'source': {
'name': 'Distil',
'contact': 'mailto:<EMAIL>',
'uris': [
'https://github.com/NewKnowledge/object-detection-d3m-wrapper',
],
},
'installation': [
{
'type': 'PIP',
'package_uri': 'git+https://github.com/NewKnowledge/object-detection-d3m-wrapper.git@{git_commit}#egg=objectDetectionD3MWrapper'.format(
git_commit = utils.current_git_commit(os.path.dirname(__file__)),)
},
{
'type': "FILE",
'key': "resnet50",
'file_uri': "http://public.datadrivendiscovery.org/ResNet-50-model.keras.h5",
'file_digest': "0128cdfa3963288110422e4c1a57afe76aa0d760eb706cda4353ef1432c31b9c"
}
],
'algorithm_types': [metadata_base.PrimitiveAlgorithmType.RETINANET],
'primitive_family': metadata_base.PrimitiveFamily.OBJECT_DETECTION,
}
)
def __init__(self, *, hyperparams: Hyperparams, volumes: typing.Dict[str,str] = None) -> None:
super().__init__(hyperparams = hyperparams, volumes = volumes)
self.image_paths = None
self.annotations = None
self.base_dir = None
self.classes = None
self.backbone = None
self.y_true = None
self.workers = 1
self.multiprocessing = 1
self.max_queue_size = 10
def get_params(self) -> Params:
return self._params
def set_params(self, *, params: Params) -> None:
        self._params = params
def set_training_data(self, *, inputs: Inputs, outputs: Outputs) -> None:
"""
Sets the primitive's training data and preprocesses the files for RetinaNet format.
Parameters
----------
        inputs: d3m dataframe of shape (n_images, dimension) containing the d3m index, image name,
        and bounding box for each image.
Returns
-------
No returns. Function is called by pipeline at runtime.
"""
# Prepare annotation file
## Generate image paths
image_cols = inputs.metadata.get_columns_with_semantic_type('https://metadata.datadrivendiscovery.org/types/FileName')
self.base_dir = [inputs.metadata.query((metadata_base.ALL_ELEMENTS, t))['location_base_uris'][0].replace('file:///', '/') for t in image_cols]
self.image_paths = np.array([[os.path.join(self.base_dir, filename) for filename in inputs.iloc[:,col]] for self.base_dir, col in zip(self.base_dir, image_cols)]).flatten()
self.image_paths =
| pd.Series(self.image_paths) | pandas.Series |
"""Tests for the SQLite DatabaseManager `SQLiteDb`.
Tests all methods of the DatabaseManager because it is easy to test with SQLite.
"""
# =================================================
# Imports
# =================================================
# Standard Library
from pathlib import Path
# Third Party
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import pytest
from sqlalchemy.sql import text
import sqlalchemy
# Local
import pandemy
from .dependencies import PANDAS_VERSION
# =================================================
# Setup
# =================================================
class SQLiteSQLContainer(pandemy.SQLContainer):
r"""A correctly defined pandemy.SQLContainer subclass"""
my_query = 'SELECT * FROM MyTable;'
class SQLiteFakeSQLContainer:
r"""
SQLContainer class that does not inherit from `pandemy.SQLContainer`.
This class is not a valid input to the container parameter of
`pandemy.DatabaseManager`.
"""
my_query = 'SELECT * FROM MyTable;'
# =================================================
# Tests
# =================================================
class TestInitSQLiteDb:
r"""Test the initalization of the SQLite DatabaseManager `SQLiteDb`.
Fixtures
--------
sqlite_db_file : Path
Path to a SQLite database that exists on disk.
"""
def test_all_defaults(self):
r"""Create an instance of SQLiteDb that lives in memory with all default values."""
# Setup - None
# ===========================================================
# Exercise
# ===========================================================
db = pandemy.SQLiteDb()
# Verify
# ===========================================================
assert db.file == ':memory:'
assert db.must_exist is False
assert db.container is None
assert db.engine_config is None
assert db.conn_str == r'sqlite://'
assert isinstance(db.engine, sqlalchemy.engine.base.Engine)
# Clean up - None
# ===========================================================
def test_in_memory(self):
r"""Create an instance of SQLiteDb that lives in memory."""
# Setup - None
# ===========================================================
# Exercise
# ===========================================================
db = pandemy.SQLiteDb(file=':memory:')
# Verify
# ===========================================================
assert db.file == ':memory:'
assert db.must_exist is False
assert db.conn_str == r'sqlite://'
assert isinstance(db.engine, sqlalchemy.engine.base.Engine)
# Clean up - None
# ===========================================================
@pytest.mark.parametrize('file_as_str', [pytest.param(True, id='str'), pytest.param(False, id='Path')])
def test_file_must_exist(self, file_as_str, sqlite_db_file):
r"""Create an instance with a file supplied as a string and pathlib.Path object.
The default option `must_exist` is set to True.
The file exists on disk.
Parameters
----------
file_as_str : bool
True if the file should be supplied as a string and False for pathlib.Path.
"""
# Setup - None
# ===========================================================
# Exercise
# ===========================================================
if file_as_str:
db = pandemy.SQLiteDb(file=str(sqlite_db_file), must_exist=True)
else:
db = pandemy.SQLiteDb(file=sqlite_db_file, must_exist=True)
# Verify
# ===========================================================
assert db.file == sqlite_db_file
assert db.must_exist is True
assert db.conn_str == fr'sqlite:///{str(sqlite_db_file)}'
assert isinstance(db.engine, sqlalchemy.engine.base.Engine)
# Clean up - None
# ===========================================================
@pytest.mark.raises
@pytest.mark.parametrize('file', [pytest.param('does not exist', id='str'),
pytest.param(Path('does not exist'), id='Path')])
def test_on_file_must_exist_file_does_not_exist(self, file):
r"""Create an instance with a file supplied as a string and pathlib.Path object.
The default option `must_exist` is set to True.
The file does not exists on disk.
pandemy.DatabaseFileNotFoundError is expected to be raised.
Parameters
----------
file : str or Path
The file with the SQLite database.
"""
# Setup - None
# ===========================================================
# Exercise & Verify
# ===========================================================
with pytest.raises(pandemy.DatabaseFileNotFoundError):
pandemy.SQLiteDb(file=file, must_exist=True)
# Clean up - None
# ===========================================================
def test_on_file_with_SQLContainer(self):
r"""Create an instance with a SQLContainer class.
The option `must_exist` is set to False.
        The file does not exist on disk.
"""
# Setup
# ===========================================================
must_exist = False
file = 'mydb.db'
# Exercise
# ===========================================================
db = pandemy.SQLiteDb(file=file, must_exist=must_exist, container=SQLiteSQLContainer)
# Verify
# ===========================================================
assert db.file == Path(file)
assert db.must_exist is must_exist
assert db.container is SQLiteSQLContainer
# Clean up - None
# ===========================================================
# file, must_exist, container, engine_config, error_msg
input_test_bad_input = [
pytest.param(42, False, None, None, 'Received: 42', id='file=42'),
pytest.param('my_db.db', 'False', None, {'encoding': 'UTF-8'}, 'Received: False', id="must_exist='False'"),
pytest.param('my_db.db', False, [42], None, 'container must be a subclass of pandemy.SQLContainer',
id="container=[42]"),
pytest.param(Path('my_db.db'), False, SQLiteFakeSQLContainer, None,
'container must be a subclass of pandemy.SQLContainer', id="container=FakeSQLContainer"),
pytest.param('my_db.db', False, None, 42, 'engine_config must be a dict', id="engine_config=42"),
]
@pytest.mark.raises
@pytest.mark.parametrize('file, must_exist, container, engine_config, error_msg', input_test_bad_input)
def test_bad_input_parameters(self, file, must_exist, container, engine_config, error_msg):
r"""Test bad input parameters.
pandemy.InvalidInputError is expected to be raised.
Parameters
----------
file : str or Path, default ':memory:'
The file (with path) to the SQLite database.
The default creates an in memory database.
        must_exist : bool, default False
            If True, validate that `file` exists unless `file` = ':memory:'.
            If it does not exist pandemy.DatabaseFileNotFoundError is raised.
            If False the validation is omitted.
container : pandemy.SQLContainer or None, default None
A container of database statements that the SQLite DatabaseManager can use.
engine_config : dict or None
Additional keyword arguments passed to the SQLAlchemy create_engine function.
"""
# Setup - None
# ===========================================================
# Exercise & Verify
# ===========================================================
with pytest.raises(pandemy.InvalidInputError, match=error_msg):
pandemy.SQLiteDb(file=file, must_exist=must_exist, container=container, engine_config=engine_config)
# Clean up - None
# ===========================================================
@pytest.mark.raises
def test_invalid_parameter_to_create_engine(self):
r"""Test to supply an invalid parameter to the SQLAlchemy create_engine function.
pandemy.CreateEngineError is expected to be raised.
Also supply a keyword argument that is not used for anything.
It should not affect the initialization.
"""
# Setup
# ===========================================================
error_msg = 'invalid_param'
engine_config = {'invalid_param': True}
# Exercise & Verify
# ===========================================================
with pytest.raises(pandemy.CreateEngineError, match=error_msg):
pandemy.SQLiteDb(file='my_db.db', must_exist=False, container=None,
engine_config=engine_config, kwarg='kwarg')
# Clean up - None
# ===========================================================
class TestExecuteMethod:
r"""Test the `execute` method of the SQLite DatabaseManager `SQLiteDb`.
Fixtures
--------
sqlite_db : pandemy.SQLiteDb
An instance of the test database.
sqlite_db_empty : pandemy.SQLiteDb
An instance of the test database where all tables are empty.
df_owner : pd.DataFrame
The owner table of the test database.
"""
# The query for test_select_all_owners
select_all_owners = """SELECT OwnerId, OwnerName, BirthDate FROM Owner;"""
@pytest.mark.parametrize('query', [pytest.param(select_all_owners, id='query: str'),
pytest.param(text(select_all_owners), id='query: sqlalchemy TextClause')])
def test_select_all_owners(self, query, sqlite_db, df_owner):
r"""Test to execute a SELECT query.
Query all rows from the Owner table.
Parameters
----------
query : str or text
The SQL query to execute.
"""
# Setup
# ===========================================================
with sqlite_db.engine.connect() as conn:
# Exercise
# ===========================================================
result = sqlite_db.execute(sql=query, conn=conn)
# Verify
# ===========================================================
for idx, row in enumerate(result):
assert row.OwnerId == df_owner.index[idx]
assert row.OwnerName == df_owner.loc[row.OwnerId, 'OwnerName']
assert row.BirthDate == df_owner.loc[row.OwnerId, 'BirthDate'].strftime(r'%Y-%m-%d')
# Clean up - None
# ===========================================================
# The query for test_select_owner_by_id
select_owner_by_id = """SELECT OwnerId, OwnerName
FROM Owner
WHERE OwnerId = :id;
"""
# query, id, owner_exp
input_test_select_owner_by_id = [pytest.param(select_owner_by_id, 1,
id='query: str, id=1'),
pytest.param(text(select_owner_by_id), 2,
id='query: sqlalchemy TextClause, id=2')]
@pytest.mark.parametrize('query, owner_id', input_test_select_owner_by_id)
def test_select_owner_by_id(self, query, owner_id, sqlite_db, df_owner):
r"""Test to execute a SELECT query with a query parameter.
Parameters
----------
query : str or sqlalchemy.sql.elements.TextClause
The SQL query to execute.
owner_id : int
The parameter representing OwnerId in `query`.
"""
# Setup
# ===========================================================
with sqlite_db.engine.connect() as conn:
# Exercise
# ===========================================================
result = sqlite_db.execute(sql=query, conn=conn, params={'id': owner_id})
# Verify
# ===========================================================
for row in result:
assert row.OwnerId == owner_id
assert row.OwnerName == df_owner.loc[owner_id, 'OwnerName']
# Clean up - None
# ===========================================================
def test_select_owner_by_2_params(self, sqlite_db, df_owner):
r"""Test to execute a SELECT query with 2 query parameters."""
# Setup
# ===========================================================
query = text("""SELECT OwnerId, OwnerName, BirthDate
FROM Owner
WHERE OwnerName = :name OR
DATE(BirthDate) > DATE(:bdate)
ORDER BY OwnerName ASC;
""")
df_exp_result = df_owner.loc[[3, 1], :]
with sqlite_db.engine.connect() as conn:
# Exercise
# ===========================================================
result = sqlite_db.execute(sql=query, conn=conn, params={'name': 'John', 'bdate': '1941-12-07'})
# Verify
# ===========================================================
for idx, row in enumerate(result):
assert row.OwnerId == df_exp_result.index[idx]
assert row.OwnerName == df_exp_result.loc[row.OwnerId, 'OwnerName']
assert row.BirthDate == df_exp_result.loc[row.OwnerId, 'BirthDate'].strftime(r'%Y-%m-%d')
# Clean up - None
# ===========================================================
input_test_insert_owner = [
pytest.param([{'id': 1, 'name': '<NAME>', 'bdate': '2021-07-07'}], id='1 Owner'),
pytest.param([{'id': 1, 'name': '<NAME>', 'bdate': '2021-07-07'},
{'id': 2, 'name': '<NAME>', 'bdate': '1987-07-21'}], id='2 Owners'),
]
@pytest.mark.parametrize('params', input_test_insert_owner)
def test_insert_into_owner(self, params, sqlite_db_empty):
r"""Test to insert new owner(s) into the Owner table of the empty test database.
Parameters
----------
params : list of dict
The parameters to pass to the insert statement.
"""
# Setup
# ===========================================================
statement = text("""INSERT INTO Owner (OwnerId, OwnerName, BirthDate)
VALUES (:id, :name, :bdate);
""")
# The query to read back the inserted owners
query_exp = """SELECT OwnerId, OwnerName, BirthDate FROM Owner;"""
with sqlite_db_empty.engine.connect() as conn:
# Exercise
# ===========================================================
sqlite_db_empty.execute(sql=statement, conn=conn, params=params)
# Verify
# ===========================================================
result = sqlite_db_empty.execute(sql=query_exp, conn=conn)
for idx, row in enumerate(result):
assert row.OwnerId == params[idx]['id']
assert row.OwnerName == params[idx]['name']
assert row.BirthDate == params[idx]['bdate']
# Clean up - None
# ===========================================================
@pytest.mark.raises
def test_invalid_select_syntax(self, sqlite_db):
r"""Execute a SELECT query with invalid syntax.
No query parameters are supplied. It should raise pandemy.ExecuteStatementError.
"""
# Setup
# ===========================================================
query = 'SELE * FROM Owner'
with sqlite_db.engine.connect() as conn:
# Exercise & Verify
# ===========================================================
with pytest.raises(pandemy.ExecuteStatementError):
sqlite_db.execute(sql=query, conn=conn)
# Clean up - None
# ===========================================================
@pytest.mark.raises
def test_invalid_query_param(self, sqlite_db):
r"""
Execute a SELECT query with a parameter (:id) and the name of the supplied
parameter (:di) to the query does not match the parameter name in the query.
It should raise pandemy.ExecuteStatementError.
"""
# Setup
# ===========================================================
with sqlite_db.engine.connect() as conn:
# Exercise & Verify
# ===========================================================
with pytest.raises(pandemy.ExecuteStatementError):
sqlite_db.execute(sql=self.select_owner_by_id, conn=conn, params={'di': 1})
# Clean up - None
# ===========================================================
@pytest.mark.raises
def test_invalid_sql_param(self, sqlite_db):
r"""Supply and invalid type to the `sql` parameter.
It should raise pandemy.InvalidInputError.
"""
# Setup
# ===========================================================
with sqlite_db.engine.connect() as conn:
# Exercise & Verify
# ===========================================================
with pytest.raises(pandemy.InvalidInputError, match='list'):
sqlite_db.execute(sql=['Invalid query'], conn=conn, params={'di': 1})
# Clean up - None
# ===========================================================
class TestIsValidTableName:
r"""Test the `_is_valid_table_name` method of the SQLiteDb DatabaseManager `SQLiteDb`.
Fixtures
--------
sqlite_db_empty : pandemy.SQLiteDb
An instance of the test database where all tables are empty.
"""
@pytest.mark.parametrize('table', [pytest.param('Customer', id='Customer'),
pytest.param('1', id='1'),
pytest.param('', id='empty string'),
pytest.param('DELETE', id='DELETE'),
pytest.param('"DROP"', id='DROP'),
pytest.param('""DELETEFROMTABLE""', id='""DELETEFROMTABLE""')])
def test_is_valid_table_name_valid_table_names(self, table, sqlite_db_empty):
r"""Test that valid table names can pass the validation.
The `_is_valid_table_name method` checks that the table name consists
of a single word. If the table name is valid the method returns None
and no exception should be raised.
Parameters
----------
table : str
The name of the table.
"""
# Setup - None
# ===========================================================
# Exercise
# ===========================================================
result = sqlite_db_empty._is_valid_table_name(table=table)
# Verify
# ===========================================================
assert result is None
# Clean up - None
# ===========================================================
@pytest.mark.raises
@pytest.mark.parametrize('table, spaces', [pytest.param('Customer DELETE', '1',
id='2 words, 1 space'),
pytest.param(' Customer DELETE', '3',
id='2 words, 3 spaces'),
pytest.param('"DROP TABLE Customer"', '2',
id='3 words, 2 spaces'),
pytest.param(';""DELETE FROM TABLE Customer;"', '3',
id='4 words, 3 spaces')])
def test_is_valid_table_name_invalid_table_names(self, table, spaces, sqlite_db_empty):
r"""Test that invalid table names can be detected correctly.
        The `_is_valid_table_name` method checks that the table name consists
of a single word.
pandemy.InvalidTableNameError is expected to be raised
if the table name is invalid.
Parameters
----------
table : str
The name of the table.
spaces : str
The number of space characters in `table`.
"""
# Setup - None
# ===========================================================
# Exercise
# ===========================================================
with pytest.raises(pandemy.InvalidTableNameError) as exc_info:
sqlite_db_empty._is_valid_table_name(table=table)
# Verify
# ===========================================================
assert exc_info.type is pandemy.InvalidTableNameError
assert table in exc_info.value.args[0]
assert spaces in exc_info.value.args[0]
assert table == exc_info.value.data
# Clean up - None
# ===========================================================
@pytest.mark.raises
@pytest.mark.parametrize('table', [pytest.param(1, id='int'),
pytest.param(3.14, id='float'),
pytest.param([1, '1'], id='list'),
pytest.param({'table': 'name'}, id='dict')])
def test_is_valid_table_name_invalid_input(self, table, sqlite_db_empty):
r"""Test invalid input to the `table` parameter.
If `table` is not a string pandemy.InvalidInputError should be raised.
Parameters
----------
table : str
The name of the table.
"""
# Setup - None
# ===========================================================
# Exercise
# ===========================================================
with pytest.raises(pandemy.InvalidInputError) as exc_info:
sqlite_db_empty._is_valid_table_name(table=table)
# Verify
# ===========================================================
assert exc_info.type is pandemy.InvalidInputError
assert str(table) in exc_info.value.args[0]
assert table == exc_info.value.data
# Clean up - None
# ===========================================================
class TestDeleteAllRecordsFromTable:
r"""Test the `delete_all_records_from_table` method of the SQLiteDb DatabaseManager `SQLiteDb`.
Fixtures
--------
sqlite_db_empty : pandemy.SQLiteDb
An instance of the test database where all tables are empty.
df_customer : pd.DataFrame
The Customer table of the test database.
"""
def test_delete_all_records(self, sqlite_db_empty, df_customer):
r"""Delete all records from the table Customer in the test database."""
# Setup
# ===========================================================
query = """SELECT * FROM Customer;"""
df_exp_result = pd.DataFrame(columns=df_customer.columns)
df_exp_result.index.name = df_customer.index.name
with sqlite_db_empty.engine.begin() as conn:
# Write data to the empty table
df_customer.to_sql(name='Customer', con=conn, if_exists='append')
# Exercise
# ===========================================================
with sqlite_db_empty.engine.begin() as conn:
sqlite_db_empty.delete_all_records_from_table(table='Customer', conn=conn)
# Verify
# ===========================================================
with sqlite_db_empty.engine.begin() as conn:
df_result = pd.read_sql(sql=query, con=conn, index_col='CustomerId', parse_dates=['BirthDate'])
assert_frame_equal(df_result, df_exp_result, check_dtype=False, check_index_type=False)
@pytest.mark.raises
def test_delete_all_records_table_does_not_exist(self, sqlite_db_empty):
r"""Try to delete all records from the table Custom that does not exist in the database.
pandemy.DeleteFromTableError is expected to be raised.
"""
# Setup
# ===========================================================
table = 'Custom'
# Exercise
# ===========================================================
with pytest.raises(pandemy.DeleteFromTableError) as exc_info:
with sqlite_db_empty.engine.begin() as conn:
sqlite_db_empty.delete_all_records_from_table(table=table, conn=conn)
# Verify
# ===========================================================
assert exc_info.type is pandemy.DeleteFromTableError
assert table in exc_info.value.args[0]
assert table in exc_info.value.data[0]
# Clean up - None
# ===========================================================
@pytest.mark.raises
@pytest.mark.parametrize('table', [pytest.param('Customer DELETE', id='table name = 2 words'),
pytest.param('"DROP TABLE Customer"', id='table name = 3 words'),
pytest.param(';""DELETE FROM TABLE Customer;"', id='table name = 4 words')])
def test_delete_all_records_invalid_table_name(self, table, sqlite_db_empty):
r"""Try to delete all records from specified table when supplying and invalid table name.
pandemy.InvalidTableNameError is expected to be raised.
Parameters
----------
table: str
The name of the table to delete records from.
"""
# Setup - None
# ===========================================================
# Exercise
# ===========================================================
with pytest.raises(pandemy.InvalidTableNameError) as exc_info:
with sqlite_db_empty.engine.begin() as conn:
sqlite_db_empty.delete_all_records_from_table(table=table, conn=conn)
# Verify
# ===========================================================
assert exc_info.type is pandemy.InvalidTableNameError
assert table in exc_info.value.args[0]
assert table == exc_info.value.data
# Clean up - None
# ===========================================================
class TestSaveDfMethod:
r"""Test the `save_df` method of the SQLiteDb DatabaseManager `SQLiteDb`.
Fixtures
--------
sqlite_db : pandemy.SQLiteDb
An instance of the test database.
sqlite_db_empty : pandemy.SQLiteDb
An instance of the test database where all tables are empty.
df_customer : pd.DataFrame
The Customer table of the test database.
"""
@pytest.mark.parametrize('chunksize', [pytest.param(None, id='chunksize=None'),
pytest.param(2, id='chunksize=2')])
def test_save_to_existing_empty_table(self, chunksize, sqlite_db_empty, df_customer):
r"""Save a DataFrame to an exisitng empty table.
Parameters
----------
chunksize : int or None
The number of rows in each batch to be written at a time.
If None, all rows will be written at once.
"""
# Setup
# ===========================================================
query = """SELECT * FROM Customer;"""
# Exercise
# ===========================================================
with sqlite_db_empty.engine.begin() as conn:
sqlite_db_empty.save_df(df=df_customer, table='Customer', conn=conn,
if_exists='append', chunksize=chunksize)
# Verify
# ===========================================================
df_result = pd.read_sql(sql=query, con=conn, index_col='CustomerId', parse_dates=['BirthDate'])
assert_frame_equal(df_result, df_customer, check_dtype=False, check_index_type=False)
# Clean up - None
# ===========================================================
def test_save_to_new_table_with_schema(self, sqlite_db_empty, df_customer):
r"""Save a DataFrame to a new table in the database with a schema specified.
The table Customer already exists as an empty table in the database. By saving the DataFrame
to a temporary table (the temp schema) called Customer, while the parameter `if_exists` = 'fail',
no exception should be raised since the tables called Customer exist in different schemas.
SQLite supports the schemas 'temp', 'main' or the name of an attached database.
See Also
--------
https://sqlite.org/lang_createtable.html
"""
# Setup
# ===========================================================
schema = 'temp'
query = f"""SELECT * FROM {schema}.Customer;"""
# Exercise
# ===========================================================
with sqlite_db_empty.engine.begin() as conn:
sqlite_db_empty.save_df(df=df_customer, table='Customer', conn=conn,
schema=schema, if_exists='fail')
# Verify
# ===========================================================
df_result = pd.read_sql(sql=query, con=conn, index_col='CustomerId', parse_dates=['BirthDate'])
assert_frame_equal(df_result, df_customer, check_dtype=False, check_index_type=False)
# Clean up - None
# ===========================================================
def test_save_to_existing_non_empty_table_if_exists_replace(self, sqlite_db_empty, df_customer):
r"""Save a DataFrame to an exisitng non empty table.
The existing rows in the table are deleted before writing the DataFrame.
"""
# Setup
# ===========================================================
query = """SELECT * FROM Customer;"""
with sqlite_db_empty.engine.begin() as conn:
# Write data to the empty table
df_customer.to_sql(name='Customer', con=conn, if_exists='append')
# Exercise
# ===========================================================
sqlite_db_empty.save_df(df=df_customer, table='Customer', conn=conn, if_exists='replace')
# Verify
# ===========================================================
df_result = pd.read_sql(sql=query, con=conn, index_col='CustomerId', parse_dates=['BirthDate'])
assert_frame_equal(df_result, df_customer, check_dtype=False, check_index_type=False)
@pytest.mark.raises
def test_save_to_existing_table_if_exists_fail(self, sqlite_db_empty, df_customer):
r"""Save a DataFrame to an exisitng table when `if_exists` = 'fail'.
pandemy.TableExistsError is expected to be raised.
"""
# Setup
# ===========================================================
with sqlite_db_empty.engine.begin() as conn:
# Exercise & Verify
# ===========================================================
with pytest.raises(pandemy.TableExistsError, match='Table Customer already exists!'):
sqlite_db_empty.save_df(df=df_customer, table='Customer', conn=conn, if_exists='fail')
# Clean up - None
# ===========================================================
@pytest.mark.raises
def test_save_to_existing_non_empty_table_if_exists_append(self, sqlite_db_empty, df_customer):
r"""Save a DataFrame to an exisitng non empty table.
The rows of the DataFrame are already present in the database table
and inserting the rows will violate a UNIQUE constraint.
pandemy.SaveDataFrameError is expected to be raised.
"""
# Setup
# ===========================================================
with sqlite_db_empty.engine.begin() as conn:
# Write data to the empty table
df_customer.to_sql(name='Customer', con=conn, if_exists='append')
# Exercise & Verify
# ===========================================================
with pytest.raises(pandemy.SaveDataFrameError, match='Could not save DataFrame to table Customer'):
sqlite_db_empty.save_df(df=df_customer, table='Customer', conn=conn, if_exists='append')
# Clean up - None
# ===========================================================
def test_index_False(self, sqlite_db_empty, df_customer):
r"""Save a DataFrame to an exisitng empty table.
The index column of the DataFrame is not written to the table.
"""
# Setup
# ===========================================================
query = """SELECT * FROM Customer;"""
df_customer.reset_index(inplace=True) # Convert the index CustomerId to a regular column
with sqlite_db_empty.engine.begin() as conn:
# Exercise
# ===========================================================
sqlite_db_empty.save_df(df=df_customer, table='Customer', conn=conn, if_exists='append', index=False)
# Verify
# ===========================================================
df_result = pd.read_sql(sql=query, con=conn, parse_dates=['BirthDate'])
| assert_frame_equal(df_result, df_customer, check_dtype=False, check_index_type=False) | pandas.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
"""
@file:compete_pctr_yes_test.py
@time:2019/6/12 21:10
@author:Tangj
@software:Pycharm
@Desc
"""
import pandas as pd
import numpy as np
import time
tt = time.time()
fea = pd.DataFrame()
test_bid = pd.read_csv('../usingData/test/test_bid.csv')
request = pd.read_csv('../usingData/test/Request_list.csv')
log = pd.read_csv('../usingData/test/test_log.csv')
log = log.fillna(-100)
print(request.columns)
print(time.localtime(tt))
'''
What is computed here are still the mean values of pctr, ecpm, bid, etc. The pctr and related
statistics are carried over from the previous day; those previous-day values were computed in
advance and are the overall means taken from the log.
Find the corresponding audience: each ad has its own dataframe, and the log file is merged with
it, i.e. the competition queue of that ad. Take the first two entries of the competition queue
and average them; we should first check whether the values in the competition queue are already
sorted.
'''
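# A hedged sketch of the "average the first two entries of the competition queue" step
# described above. The helper name and the -100.0 fallback (mirroring the fillna(-100)
# applied to the log above) are illustrative assumptions, not taken from the original script.
def mean_of_first_two(values):
    # Average the first two values of an (assumed already sorted) competition queue.
    first_two = list(values)[:2]
    return float(np.mean(first_two)) if first_two else -100.0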
request_list = request['RequestList'].values
test_aid = request['ad_id'].values
print(len(request_list))
print(len(test_aid))
log_compete = log['competeAd'].values
log_reqid = log['RequestId'].values
log_posiId = log['PositionId'].values
request = []
for i, k in enumerate(log_reqid):
s = str(log_reqid[i]) + ',' + str(log_posiId[i])
request.append(s)
log_new = pd.DataFrame()
log_new['compete'] = log_compete
log_new['request'] = np.array(request)
uid_len = []
k = 0
test_com_pctr = []
test_quality_ecpm = []
test_total_ecpm = []
test_com_bid = []
test_new_aid = []
using_rate =
| pd.read_csv('../usingData/feature/total_ad_pctr.csv') | pandas.read_csv |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for module main.py. Fixtures come from the file conftest.py, located in the same
directory as this file.
"""
from __future__ import absolute_import, division, print_function
import os
import mock
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_equal
from pandas.core.indexes.range import RangeIndex
from pandas.util.testing import assert_frame_equal
from statsmodels.tsa.statespace.structural import (
UnobservedComponents, UnobservedComponentsResultsWrapper)
from causalimpact import CausalImpact
from causalimpact.misc import standardize
def test_default_causal_cto(rand_data, pre_int_period, post_int_period):
ci = CausalImpact(rand_data, pre_int_period, post_int_period)
assert_frame_equal(ci.data, rand_data)
assert ci.pre_period == pre_int_period
assert ci.post_period == post_int_period
pre_data = rand_data.loc[pre_int_period[0]: pre_int_period[1], :]
assert_frame_equal(ci.pre_data, pre_data)
post_data = rand_data.loc[post_int_period[0]: post_int_period[1], :]
assert_frame_equal(ci.post_data, post_data)
assert ci.alpha == 0.05
normed_pre_data, (mu, sig) = standardize(pre_data)
assert_frame_equal(ci.normed_pre_data, normed_pre_data)
normed_post_data = (post_data - mu) / sig
assert_frame_equal(ci.normed_post_data, normed_post_data)
assert ci.mu_sig == (mu[0], sig[0])
assert ci.model_args == {'standardize': True, 'nseasons': []}
assert isinstance(ci.model, UnobservedComponents)
assert_array_equal(ci.model.endog, normed_pre_data.iloc[:, 0].values.reshape(-1, 1))
assert_array_equal(ci.model.exog, normed_pre_data.iloc[:, 1:].values.reshape(
-1,
rand_data.shape[1] - 1
)
)
assert ci.model.endog_names == 'y'
assert ci.model.exog_names == ['x1', 'x2']
assert ci.model.k_endog == 1
assert ci.model.level
assert ci.model.trend_specification == 'local level'
assert isinstance(ci.trained_model, UnobservedComponentsResultsWrapper)
assert ci.trained_model.nobs == len(pre_data)
assert ci.inferences is not None
assert ci.p_value > 0 and ci.p_value < 1
assert ci.n_sims == 1000
def test_default_causal_cto_w_date(date_rand_data, pre_str_period, post_str_period):
ci = CausalImpact(date_rand_data, pre_str_period, post_str_period)
assert_frame_equal(ci.data, date_rand_data)
assert ci.pre_period == pre_str_period
assert ci.post_period == post_str_period
pre_data = date_rand_data.loc[pre_str_period[0]: pre_str_period[1], :]
assert_frame_equal(ci.pre_data, pre_data)
post_data = date_rand_data.loc[post_str_period[0]: post_str_period[1], :]
assert_frame_equal(ci.post_data, post_data)
assert ci.alpha == 0.05
normed_pre_data, (mu, sig) = standardize(pre_data)
assert_frame_equal(ci.normed_pre_data, normed_pre_data)
normed_post_data = (post_data - mu) / sig
assert_frame_equal(ci.normed_post_data, normed_post_data)
assert ci.mu_sig == (mu[0], sig[0])
assert ci.model_args == {'standardize': True, 'nseasons': []}
assert isinstance(ci.model, UnobservedComponents)
assert_array_equal(ci.model.endog, normed_pre_data.iloc[:, 0].values.reshape(-1, 1))
assert_array_equal(ci.model.exog, normed_pre_data.iloc[:, 1:].values.reshape(
-1,
date_rand_data.shape[1] - 1
)
)
assert ci.model.endog_names == 'y'
assert ci.model.exog_names == ['x1', 'x2']
assert ci.model.k_endog == 1
assert ci.model.level
assert ci.model.trend_specification == 'local level'
assert isinstance(ci.trained_model, UnobservedComponentsResultsWrapper)
assert ci.trained_model.nobs == len(pre_data)
assert ci.inferences is not None
assert ci.p_value > 0 and ci.p_value < 1
assert ci.n_sims == 1000
def test_default_causal_cto_no_exog(rand_data, pre_int_period, post_int_period):
rand_data = pd.DataFrame(rand_data.iloc[:, 0])
ci = CausalImpact(rand_data, pre_int_period, post_int_period)
assert_frame_equal(ci.data, rand_data)
assert ci.pre_period == pre_int_period
assert ci.post_period == post_int_period
pre_data = rand_data.loc[pre_int_period[0]: pre_int_period[1], :]
| assert_frame_equal(ci.pre_data, pre_data) | pandas.util.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
"""De-Stress Chatbot.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1nJOL3jGeZyfNRaxrWqK26mLz4VQd7xZo
# Functions
"""
def is_question(input_string):
    # Return True if the input contains a question mark.
    output = False
    for i in input_string:
        if i == '?':
            output = True
            break
    return output
def remove_punctuation(input_string):
out_string = ""
for i in input_string:
if i not in string.punctuation:
out_string += i
return out_string
def prepare_text(input_string):
temp_string = input_string.lower()
temp_string = remove_punctuation(temp_string)
out_list = temp_string.split()
return out_list
def respond_echo(input_string, number_of_echoes,spacer):
if input_string != None:
echo_output = (input_string + spacer) * number_of_echoes
else:
echo_output = None
return echo_output
def selector(input_list, check_list, return_list):
output = None
for i in input_list:
if i in check_list:
output = random.choice(return_list)
break
return output
def string_concatenator(string1, string2, separator):
output = string1 + separator + string2
return output
def list_to_string(input_list, separator):
output = input_list[0]
for i in input_list[1:]:
output = string_concatenator(output, i, separator)
return output
def end_chat(input_list):
if 'quit' in input_list:
output = True
else:
output = False
return output
def is_in_list(list_one, list_two):
"""Check if any element of list_one is in list_two."""
for element in list_one:
if element in list_two:
return True
return False
def find_in_list(list_one, list_two):
"""Find and return an element from list_one that is in list_two, or None otherwise."""
for element in list_one:
if element in list_two:
return element
return None
def is_points(input_string):
p = 0
h = []
for i in input_string:
o = i.count('.')
if o == 1:
p += 1
if p == 5:
break
h.append(i)
h.append('.')
return h
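# Quick illustrative examples of the helper functions above (shown as comments because
# the imports they rely on appear further down in this notebook):
#   prepare_text('Hello, how are you?')                 -> ['hello', 'how', 'are', 'you']
#   respond_echo('ok', number_of_echoes=3, spacer='!')  -> 'ok!ok!ok!'
#   selector(['hola'], ['hola', 'hello'], ['Hi!'])      -> 'Hi!'
#   list_to_string(['a', 'b', 'c'], '-')                -> 'a-b-c'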
"""# Librerias"""
#!pip install covid
from covid import Covid
import string
import random
import nltk
import pandas as pd
import numpy as np
import textwrap
import cv2
"""# Información"""
#!git clone https://github.com/ChatBotChallengeCdCMX/ChatBotForCovidDe-stress.git
Hombres = pd.read_csv('/content/ChatBotForCovidDe-stress/DataBases/nombreshombres .csv')
Mujeres = pd.read_csv('/content/ChatBotForCovidDe-stress/DataBases/nombresmujeres.csv')
Hombres = list(Hombres.iloc[:,0])
Mujeres = list(Mujeres.iloc[:,0])
Nombres = Hombres + Mujeres
Musica = pd.read_csv('/content/ChatBotForCovidDe-stress/DataBases/Music.csv')
Music = pd.DataFrame(Musica)
categorias_musica = list(pd.unique(Music['terms']))
Videos = pd.read_csv('/content/ChatBotForCovidDe-stress/DataBases/YTVideos.csv')
Videos =
| pd.DataFrame(Videos) | pandas.DataFrame |
""" Models. """
import numpy as np
import inspect
import tensorflow as tf
from scipy.stats import ttest_ind
from sklearn.neighbors import KernelDensity
from epi.error_formatters import format_type_err_msg
from epi.normalizing_flows import NormalizingFlow
from epi.util import (
gaussian_backward_mapping,
aug_lag_vars,
unbiased_aug_grad,
AugLagHPs,
array_str,
np_column_vec,
plot_square_mat,
get_hash,
set_dir_index,
get_dir_index,
dbg_check,
)
import matplotlib.pyplot as plt
from matplotlib import animation
import pandas as pd
import seaborn as sns
import pickle
import time
import os
REAL_NUMERIC_TYPES = (int, float)
class Parameter(object):
"""Univariate parameter of a model.
:param name: Parameter name.
:type name: str
:param D: Number of dimensions of parameter.
:type D: int
:param lb: Lower bound of variable, defaults to `np.NINF*np.ones(D)`.
:type lb: np.ndarray, optional
:param ub: Upper bound of variable, defaults to `np.PINF*np.ones(D)`.
:type ub: np.ndarray, optional
"""
def __init__(self, name, D, lb=None, ub=None):
"""Constructor method."""
self._set_name(name)
self._set_D(D)
self._set_bounds(lb, ub)
def _set_name(self, name):
if type(name) is not str:
raise TypeError(format_type_err_msg(self, "name", name, str))
self.name = name
def _set_D(self, D):
if type(D) is not int:
raise TypeError(format_type_err_msg(self, "D", D, int))
if D < 1:
raise ValueError("Dimension of parameter must be positive.")
self.D = D
def _set_bounds(self, lb, ub):
if lb is None:
lb = np.NINF * np.ones(self.D)
elif isinstance(lb, REAL_NUMERIC_TYPES):
lb = np.array([lb])
if ub is None:
ub = np.PINF * np.ones(self.D)
elif isinstance(ub, REAL_NUMERIC_TYPES):
ub = np.array([ub])
if type(lb) is not np.ndarray:
raise TypeError(format_type_err_msg(self, "lb", lb, np.ndarray))
if type(ub) is not np.ndarray:
raise TypeError(format_type_err_msg(self, "ub", ub, np.ndarray))
lb_shape = lb.shape
if len(lb_shape) != 1:
raise ValueError("Lower bound lb must be vector.")
if lb_shape[0] != self.D:
raise ValueError("Lower bound lb does not have dimension D = %d." % self.D)
ub_shape = ub.shape
if len(ub_shape) != 1:
raise ValueError("Upper bound ub must be vector.")
if ub_shape[0] != self.D:
raise ValueError("Upper bound ub does not have dimension D = %d." % self.D)
for i in range(self.D):
if lb[i] > ub[i]:
raise ValueError(
"Parameter %s lower bound is greater than upper bound." % self.name
)
elif lb[i] == ub[i]:
raise ValueError(
"Parameter %s lower bound is equal to upper bound." % self.name
)
self.lb = lb
self.ub = ub
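# Illustrative construction of Parameter objects (a sketch with arbitrary names and
# bounds, not taken from the original source):
#   a = Parameter("a", 1, lb=0.0)                        # scalar bounds are allowed when D == 1
#   w = Parameter("w", 2, lb=np.zeros(2), ub=np.ones(2))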
class Model(object):
"""Model to run emergent property inference on. To run EPI on a model:
#. Initialize an :obj:`epi.models.Model` with a list of :obj:`epi.models.Parameter`.
#. Use :obj:`epi.models.Model.set_eps` to set the emergent property statistics of the model.
#. Run emergent property inference for mean parameter :math:`\\mu` using :obj:`epi.models.Model.epi`.
:param name: Name of model.
:type name: str
:param parameters: List of :obj:`epi.models.Parameter`.
:type parameters: list
"""
def __init__(self, name, parameters):
self._set_name(name)
self._set_parameters(parameters)
self.eps = None
self.M_test = 200
self.M_norm = 200
def _set_name(self, name):
if type(name) is not str:
raise TypeError(format_type_err_msg(self, "name", name, str))
self.name = name
def _set_parameters(self, parameters):
if type(parameters) is not list:
            raise TypeError(format_type_err_msg(self, "parameters", parameters, list))
for parameter in parameters:
if not parameter.__class__.__name__ == "Parameter":
raise TypeError(
format_type_err_msg(self, "parameter", parameter, Parameter)
)
if not self.parameter_check(parameters, verbose=True):
raise ValueError("Invalid parameter list.")
self.parameters = parameters
self.D = sum([param.D for param in parameters])
def set_eps(self, eps):
"""Set the emergent property statistic calculation for this model.
The arguments of eps should be batch vectors of univariate parameter
        tensors following the naming convention in :obj:`self.parameters`.
:param eps: Emergent property statistics function.
:type eps: function
"""
fullargspec = inspect.getfullargspec(eps)
args = fullargspec.args
_parameters = []
Ds = []
for arg in args:
found = False
for param in self.parameters:
if param.name == arg:
found = True
_parameters.append(param)
Ds.append(param.D)
self.parameters.remove(param)
break
if not found:
raise ValueError(
"Function eps has argument %s not in model parameter list." % arg
)
self.parameters = _parameters
def _eps(z):
ind = 0
zs = []
for D in Ds:
zs.append(z[:, ind : (ind + D)])
ind += D
return eps(*zs)
self.eps = _eps
self.eps.__name__ = eps.__name__
# Measure the eps dimensionality to populate self.m.
z = tf.ones((1, self.D))
T_z = self.eps(z)
T_z_shape = T_z.shape
if len(T_z_shape) != 2:
raise ValueError("Method eps must return tf.Tensor of dimension (N, D).")
self.m = T_z_shape[1]
return None
def _get_bounds(self,):
lb = np.zeros((self.D,))
ub = np.zeros((self.D,))
ind = 0
for param in self.parameters:
lb[ind : (ind + param.D)] = param.lb
ub[ind : (ind + param.D)] = param.ub
ind += param.D
return (lb, ub)
def epi(
self,
mu,
arch_type="coupling",
num_stages=3,
num_layers=2,
num_units=50,
elemwise_fn="affine",
batch_norm=False,
bn_momentum=0.0,
post_affine=True,
random_seed=1,
init_type=None, # "iso_gauss",
init_params=None, # {"loc": 0.0, "scale": 1.0},
K=10,
num_iters=1000,
N=500,
lr=1e-3,
c0=1.0,
gamma=0.25,
beta=4.0,
alpha=0.05,
nu=1.0,
stop_early=False,
log_rate=50,
verbose=False,
save_movie_data=False,
):
"""Runs emergent property inference for this model with mean parameter :math:`\\mu`.
:param mu: Mean parameter of the emergent property.
:type mu: np.ndarray
:param arch_type: :math:`\\in` :obj:`['autoregressive', 'coupling']`, defaults to :obj:`'coupling'`.
:type arch_type: str, optional
:param num_stages: Number of coupling or autoregressive stages, defaults to 3.
:type num_stages: int, optional
:param num_layers: Number of neural network layer per conditional, defaults to 2.
:type num_layers: int, optional
        :param num_units: Number of units per layer, defaults to 50 (if None, min(max(2D, 15), 100) is used).
        :type num_units: int, optional
        :param elemwise_fn: Inter-stage bijector :math:`\\in` :obj:`['affine', 'spline']`, defaults to 'affine'.
        :type elemwise_fn: str, optional
        :param batch_norm: Use batch normalization between stages, defaults to False.
        :type batch_norm: bool, optional
        :param bn_momentum: Batch normalization momentum parameter, defaults to 0.0.
        :type bn_momentum: float, optional
        :param post_affine: Shift and scale following main transform, defaults to True.
:type post_affine: bool, optional
:param random_seed: Random seed of architecture parameters, defaults to 1.
:type random_seed: int, optional
:param init_type: :math:`\\in` :obj:`['gaussian', 'abc']`.
:type init_type: str, optional
:param init_params: Parameters according to :obj:`init_type`.
:type init_params: dict, optional
:param K: Number of augmented Lagrangian iterations, defaults to 10.
:type K: int, float, optional
:param num_iters: Number of optimization iterations, defaults to 1000.
:type num_iters: int, optional
:param N: Number of batch samples per iteration, defaults to 500.
:type N: int, optional
:param lr: Adam optimizer learning rate, defaults to 1e-3.
:type lr: float, optional
:param c0: Initial augmented Lagrangian coefficient, defaults to 1.0.
:type c0: float, optional
:param gamma: Augmented lagrangian hyperparameter, defaults to 0.25.
:type gamma: float, optional
:param beta: Augmented lagrangian hyperparameter, defaults to 4.0.
:type beta: float, optional
:param alpha: P-value threshold for convergence testing, defaults to 0.05.
:type alpha: float, optional
        :param nu: Fraction of N for convergence testing, defaults to 1.0.
:type nu: float, optional
:param stop_early: Exit if converged, defaults to False.
:type stop_early: bool, optional
        :param log_rate: Record optimization data every `log_rate` iterations, defaults to 50.
:type log_rate: int, optional
:param verbose: Print optimization information, defaults to False.
:type verbose: bool, optional
:param save_movie_data: Save data for making optimization movie, defaults to False.
:type save_movie_data: bool, optional
:returns: q_theta, opt_df, save_path, failed
:rtype: epi.models.Distribution, pandas.DataFrame, str, bool
"""
if num_units is None:
num_units = min(max(2 * self.D, 15), 100)
nf = NormalizingFlow(
arch_type=arch_type,
D=self.D,
num_stages=num_stages,
num_layers=num_layers,
num_units=num_units,
elemwise_fn=elemwise_fn,
batch_norm=batch_norm,
bn_momentum=bn_momentum,
post_affine=post_affine,
bounds=self._get_bounds(),
random_seed=random_seed,
)
# Hyperparameter object
aug_lag_hps = AugLagHPs(N, lr, c0, gamma, beta)
# Initialize architecture to gaussian.
print("Initializing %s architecture." % nf.to_string(), flush=True)
if init_type is None or init_type == "gaussian":
if init_params is None:
mu_init = np.zeros((self.D))
Sigma = np.zeros((self.D, self.D))
for i in range(self.D):
if np.isneginf(nf.lb[i]) and np.isposinf(nf.ub[i]):
mu_init[i] = 0.0
Sigma[i, i] = 1.0
elif np.isneginf(nf.lb[i]):
mu_init[i] = nf.ub[i] - 2.0
Sigma[i, i] = 1.0
elif np.isposinf(nf.ub[i]):
mu_init[i] = nf.lb[i] + 2.0
Sigma[i, i] = 1.0
else:
mu_init[i] = (nf.lb[i] + nf.ub[i]) / 2.0
Sigma[i, i] = np.square((nf.ub[i] - nf.lb[i]) / 4)
init_params = {"mu": mu_init, "Sigma": Sigma}
elif init_type == "abc":
if "num_keep" in init_params.keys():
num_keep = init_params["num_keep"]
else:
num_keep = 200
if "means" in init_params.keys():
means = init_params["means"]
else:
means = mu[: len(mu) // 2]
if "stds" in init_params.keys():
stds = init_params["stds"]
else:
stds = np.sqrt(mu[len(mu) // 2 :])
hash_str = get_hash([nf.lb, nf.ub])
abc_dir = os.path.join("data", "abc")
abc_fname = os.path.join(
abc_dir,
"M=%d_p=%.2f_std=%.3f_%s_abc.npz"
% (num_keep, means[0], stds[0], hash_str),
)
if os.path.exists(abc_fname):
print("Loading prev ABC.")
npzfile = np.load(abc_fname)
init_params = {"mu": npzfile["mu"], "Sigma": npzfile["Sigma"]}
else:
print("Running ABC!")
def accept_inds(T_x, means, stds):
acc = np.array(
[
np.logical_and(
means[i] - 2 * stds[i] < T_x[:, i],
T_x[:, i] < means[i] + 2 * stds[i],
)
for i in range(len(means))
]
)
return np.logical_and.reduce(acc, axis=0)
num_found = 0
z_abc = None
T_x_abc = None
while num_found < num_keep:
_z = np.zeros((N, self.D), dtype=np.float32)
for j in range(self.D):
_z[:, j] = np.random.uniform(
self.parameters[j].lb, self.parameters[j].ub, (N,)
)
_T_x = self.eps(_z).numpy()
inds = accept_inds(_T_x, means, stds)
_z = _z[inds, :]
_T_x = _T_x[inds, :]
num_found += _z.shape[0]
if z_abc is None:
z_abc = _z
T_x_abc = _T_x
else:
z_abc = np.concatenate((z_abc, _z), axis=0)
T_x_abc = np.concatenate((T_x_abc, _T_x), axis=0)
print("ABC for init: %d/%d\r" % (num_found, num_keep), end="")
mu_init = np.mean(z_abc, axis=0)
Sigma = np.eye(self.D)
if not os.path.exists(abc_dir):
os.mkdir(abc_dir)
np.savez(abc_fname, mu=mu_init, Sigma=Sigma)
init_params = {"mu": mu_init, "Sigma": Sigma}
nf.initialize(init_params["mu"], init_params["Sigma"], N=N, verbose=True)
# Checkpoint the initialization.
optimizer = tf.keras.optimizers.Adam(lr)
ckpt = tf.train.Checkpoint(optimizer=optimizer, model=nf)
ckpt_dir, exists = self.get_epi_path(init_params, nf, mu, aug_lag_hps)
if exists:
print("Loading cached epi at %s." % ckpt_dir)
q_theta = self._get_epi_dist(-1, init_params, nf, mu, aug_lag_hps)
opt_df = pd.read_csv(os.path.join(ckpt_dir, "opt_data.csv"), index_col=0)
failed = (opt_df["cost"].isna()).sum() > 0
return q_theta, opt_df, ckpt_dir, failed
manager = tf.train.CheckpointManager(ckpt, directory=ckpt_dir, max_to_keep=None)
manager.save(checkpoint_number=0)
print("Saving EPI models to %s." % ckpt_dir, flush=True)
@tf.function
def train_step(eta, c):
with tf.GradientTape(persistent=True) as tape:
z, log_q_z = nf(N)
params = nf.trainable_variables
tape.watch(params)
H, R, R1s, R2 = aug_lag_vars(z, log_q_z, self.eps, mu, N)
neg_H = -H
lagrange_dot = tf.reduce_sum(tf.multiply(eta, R))
aug_l2 = c / 2.0 * tf.reduce_sum(tf.square(R))
cost = neg_H + lagrange_dot + aug_l2
H_grad = tape.gradient(neg_H, params)
lagrange_grad = tape.gradient(lagrange_dot, params)
aug_grad = unbiased_aug_grad(R1s, R2, params, tape)
gradients = [
g1 + g2 + c * g3 for g1, g2, g3 in zip(H_grad, lagrange_grad, aug_grad)
]
MAX_NORM = 1e10
gradients = [tf.clip_by_norm(g, MAX_NORM) for g in gradients]
optimizer.apply_gradients(zip(gradients, params))
return cost, H, R, z, log_q_z
N_test = int(nu * N)
# Initialize augmented Lagrangian parameters eta and c.
eta, c = np.zeros((self.m,), np.float32), c0
etas, cs = np.zeros((K, self.m)), np.zeros((K,))
# Initialize optimization data frame.
z, log_q_z = nf(N)
H_0, R_0, _, _ = aug_lag_vars(z, log_q_z, self.eps, mu, N)
cost_0 = -H_0 + np.dot(eta, R_0) + np.sum(np.square(R_0))
R_keys = ["R%d" % (i + 1) for i in range(self.m)]
opt_it_dfs = [
self._opt_it_df(
0, 0, H_0.numpy(), cost_0.numpy(), R_0.numpy(), log_rate, R_keys
)
]
# Record samples for movie.
if save_movie_data:
N_save = 200
zs = [z.numpy()[:N_save, :]]
log_q_zs = [log_q_z.numpy()[:N_save]]
# Measure initial R norm distribution.
mu_colvec = np_column_vec(mu).astype(np.float32).T
norms = get_R_norm_dist(nf, self.eps, mu_colvec, self.M_norm, N)
# EPI optimization
print(format_opt_msg(0, 0, cost_0, H_0, R_0, 0.0), flush=True)
failed = False
time_per_it = np.nan
epoch_times = []
for k in range(1, K + 1):
epoch_start = time.time()
            etas[k - 1], cs[k - 1] = eta, c
for i in range(1, num_iters + 1):
time1 = time.time()
cost, H, R, z, log_q_z = train_step(eta, c)
time2 = time.time()
if i % log_rate == 0:
time_per_it = time2 - time1
if verbose:
print(format_opt_msg(k, i, cost, H, R, time_per_it), flush=True)
it = (k - 1) * num_iters + i
opt_it_dfs.append(
self._opt_it_df(
k, it, H.numpy(), cost.numpy(), R.numpy(), log_rate, R_keys
)
)
if save_movie_data:
zs.append(z.numpy()[:N_save, :])
log_q_zs.append(log_q_z.numpy()[:N_save])
if np.isnan(cost):
failed = True
if verbose:
print(format_opt_msg(k, i, cost, H, R, time_per_it), flush=True)
it = (k - 1) * num_iters + i
opt_it_dfs.append(
self._opt_it_df(
k, it, H.numpy(), cost.numpy(), R.numpy(), log_rate, R_keys
)
)
print("NaN in EPI optimization. Exiting.")
break
if not verbose:
print(format_opt_msg(k, i, cost, H, R, time_per_it), flush=True)
# Save epi optimization data following aug lag iteration k.
opt_it_df = pd.concat(opt_it_dfs)
manager.save(checkpoint_number=k)
if failed:
converged = False
else:
R_means = get_R_mean_dist(nf, self.eps, mu_colvec, self.M_test, N_test)
converged = self.test_convergence(R_means.numpy(), alpha)
last_ind = opt_it_df["iteration"] == k * num_iters
opt_it_df.loc[last_ind, "converged"] = converged
self._save_epi_opt(ckpt_dir, opt_it_df, cs, etas)
opt_it_dfs = [opt_it_df]
end_opt = False
if k < K:
if np.isnan(cost):
end_opt = True
# Check for convergence if early stopping.
elif stop_early and converged:
print("Stopping early because converged!", flush=True)
end_opt = True
else:
# Update eta and c
eta = eta + c * R
norms_k = get_R_norm_dist(nf, self.eps, mu_colvec, self.M_norm, N)
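# Increase the penalty coefficient c (by a factor beta) stochastically: the
# one-sided t-test below compares the new constraint norms against gamma times
# last epoch's norms, and c grows with probability 1 - p/2 when t > 0, i.e.
# when the norms have not shrunk enough.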
t, p = ttest_ind(
norms_k.numpy(), gamma * norms.numpy(), equal_var=False
)
u = np.random.rand(1)
if u < 1 - p / 2.0 and t > 0.0:
c = beta * c
norms = norms_k
time_per_it = time2 - time1
epoch_end = time.time()
epoch_times.append(epoch_end - epoch_start)
if save_movie_data:
np.savez(
os.path.join(ckpt_dir, "movie_data.npz"),
zs=np.array(zs),
log_q_zs=np.array(log_q_zs),
time_per_it=time_per_it,
epoch_times=np.array(epoch_times),
iterations=np.arange(0, k * num_iters + 1, log_rate),
)
else:
np.savez(
os.path.join(ckpt_dir, "timing.npz"),
epoch_times=epoch_times,
time_per_it=time_per_it,
)
if end_opt:
break
# Save hyperparameters.
self.aug_lag_hps = aug_lag_hps
# Return optimized distribution.
q_theta = Distribution(nf, self.parameters)
# q_theta.set_batch_norm_trainable(False)
return q_theta, opt_it_dfs[0], ckpt_dir, failed
def get_epi_df(self):
base_path = os.path.join("data", "epi", self.name)
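# Walk the nested result directories (initialization / architecture /
# emergent property / augmented Lagrangian hyperparameters) and collect each
# opt_data.csv, annotated with its directory indices, into a single dataframe.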
next_listdir = [os.path.join(base_path, f) for f in os.listdir(base_path)]
init_paths = [f for f in next_listdir if os.path.isdir(f)]
dfs = []
for init_path in init_paths:
init = get_dir_index(os.path.join(init_path, "init.pkl"))
if init is None:
continue
next_listdir = [os.path.join(init_path, f) for f in os.listdir(init_path)]
arch_paths = [f for f in next_listdir if os.path.isdir(f)]
for arch_path in arch_paths:
arch = get_dir_index(os.path.join(arch_path, "arch.pkl"))
if arch is None:
continue
next_listdir = [
os.path.join(arch_path, f) for f in os.listdir(arch_path)
]
ep_paths = [f for f in next_listdir if os.path.isdir(f)]
for ep_path in ep_paths:
ep = get_dir_index(os.path.join(ep_path, "ep.pkl"))
if ep is None:
continue
next_listdir = [
os.path.join(ep_path, f) for f in os.listdir(ep_path)
]
AL_hp_paths = [f for f in next_listdir if os.path.isdir(f)]
for AL_hp_path in AL_hp_paths:
AL_hps = get_dir_index(os.path.join(AL_hp_path, "AL_hps.pkl"))
if AL_hps is None:
continue
opt_data_file = os.path.join(AL_hp_path, "opt_data.csv")
if os.path.exists(opt_data_file):
df = pd.read_csv(opt_data_file)
df["path"] = AL_hp_path
df["init"] = df.shape[0] * [init]
df["arch"] = df.shape[0] * [arch]
df["EP"] = df.shape[0] * [ep]
df["AL_hps"] = df.shape[0] * [AL_hps]
dfs.append(df)
return pd.concat(dfs)
def epi_opt_movie(self, path):
"""Generate video of EPI optimization.
:param path: Path to folder with optimization data.
:type path: str
"""
D = self.D
palette = sns.color_palette()
fontsize = 22
z_filename = os.path.join(path, "movie_data.npz")
opt_data_filename = os.path.join(path, "opt_data.csv")
# Load zs for optimization.
if os.path.exists(z_filename):
z_file = np.load(z_filename)
else:
raise IOError("File %s does not exist." % z_filename)
if os.path.exists(opt_data_filename):
opt_data_df =
|
pd.read_csv(opt_data_filename)
|
pandas.read_csv
|
# ─── LIBRARIES ──────────────────────────────────────────────────────────────────
import pandas as pd
import os, os.path
# ─── IMPORTS ────────────────────────────────────────────────────────────────────
from variables import output_path, fieldnames, input_path, books_to_scrape
from file_generation import filter_asin_from_url_list, read_input_file
# ─── PROGRAM ────────────────────────────────────────────────────────────────────
def read_initial_values():
print('Running initial validation')
url_list = read_input_file(input_path, books_to_scrape)
asin_list = filter_asin_from_url_list(url_list)
## Filter repeated values from the input
input_df =
|
pd.DataFrame(asin_list, columns=['ASIN'])
|
pandas.DataFrame
|
import numpy as np
from glob import glob
import pandas as pd
import scipy.stats as st
import pickle
import csv
import sys
from francis.universe.transient_universe import TransientUniverse, SteadyUniverse
from francis.universe.transient_universe import *
from francis import utils
f_path = utils.get_francis_path()
eff_area_path = f_path + 'icecube_misc/effective_areas_alerts/'
# Commented paths point to original file locations
bg_trials = '/data/user/apizzuto/fast_response_skylab/alert_event_followup/analysis_trials/bg/'
signal_trials = '/data/user/apizzuto/fast_response_skylab/alert_event_followup/analysis_trials/fits/'
# bg_trials = '/data/ana/analyses/NuSources/2021_v2_alert_stacking_FRA/analysis_trials/bg/'
# signal_trials = '/data/ana/analyses/NuSources/2021_v2_alert_stacking_FRA/analysis_trials/fits/'
class UniverseAnalysis():
r'''Given cosmological parameters, calculate the expected TS distribution
from triggering short timescale analyses on alert events'''
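# Minimal usage sketch (the argument values below are illustrative assumptions,
# not defaults taken from this module):
#   ua = UniverseAnalysis(lumi='SC', evol='MD2014SFR', density=1e-9,
#                         diffuse_flux_norm=1.44e-18, diffuse_flux_ind=2.28,
#                         deltaT=1000.)
#   ua.make_alerts_dataframe()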
def __init__(self, lumi, evol, density, diffuse_flux_norm, diffuse_flux_ind,
**kwargs):
self.lumi = lumi
self.evol = evol
self.density = density
self.diffuse_flux_norm = diffuse_flux_norm
self.diffuse_flux_ind = diffuse_flux_ind
self.deltaT = kwargs.pop('deltaT', None)
self.sigma = kwargs.pop('sigma', 1.0)
self.transient = True if self.deltaT is not None else False
if self.deltaT is not None:
kwargs['timescale'] = self.deltaT
self.seed = kwargs.pop('seed', 1234)
if self.transient:
self.universe = TransientUniverse(self.lumi, self.evol, self.density,
self.diffuse_flux_norm, self.diffuse_flux_ind, seed=self.seed, sigma=self.sigma,
**kwargs)
else:
self.universe = SteadyUniverse(self.lumi, self.evol, self.density,
self.diffuse_flux_norm, self.diffuse_flux_ind, seed=self.seed, sigma=self.sigma,
**kwargs)
self.smear = kwargs.pop('smeared', True)
self.smear_str = 'smeared/' if self.smear else 'norm_prob/'
self.verbose = kwargs.pop('verbose', False)
self.rng = np.random.RandomState(self.seed)
self.initialize_universe()
def print_analysis_info(self):
r'''Print a banner with information about the simulated source population
and analysis configuration once the analysis is running'''
analysis_name = 'Alert event interpretation'
int_str = '*'*80
int_str += '\n*' + ' '*78 + '*\n'
int_str += '*' + ' '*((78-len(analysis_name))//2) + analysis_name + ' '*((78-len(analysis_name))//2 + len(analysis_name)%2) + '*'
int_str += '\n*' + ' '*78 + '*\n'
int_str += '*'*80 + '\n'
int_str += ' '*5 + 'Density: {:.1e}'.format(self.density)
int_str += ' '*7 + 'Luminosity: {}'.format(self.lumi) + '\n'
int_str += ' '*5 + 'Evolution: {}'.format(self.evol)
time_str = 'Steady' if not self.transient else '{:.1e} s'.format(self.deltaT)
int_str += ' '*5 + 'Timescale: {}'.format(time_str) + '\n'
int_str += ' '*5 + 'Diffuse gamma: {:.1f}'.format(self.diffuse_flux_ind)
int_str += ' '*6 + 'Smearing: {}'.format(self.smear)
int_str += '\n\n'
print(int_str)
#@profile
def initialize_universe(self):
"""Simulate sources with the given cosmological parameters,
also find the alert events as well as the additional injected
events
"""
if self.verbose:
print("Simulating universe with specified cosmological parameters")
self.universe.create_universe()
self.universe.find_alerts()
self.universe.find_alert_skymaps()
self.universe.additional_signal_events()
#@profile
def make_alerts_dataframe(self):
"""
Reformat the results from the simulation into a dataframe
"""
alerts = {'signalness': [], 'declination': [], 'background': [],
'skymap_ind': [], 'stream': [], 'skymap_dec': [],
'extra_evs': []}
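# bg_alerts[k] appears to hold (count, signalness, declination, skymap_dec,
# skymap_ind) per alert stream k; only streams with at least one background
# alert contribute rows.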
for k in self.universe.bg_alerts.keys():
if self.universe.bg_alerts[k][0] > 0:
alerts['signalness'].extend(self.universe.bg_alerts[k][1])
alerts['declination'].extend(self.universe.bg_alerts[k][2])
alerts['background'].extend([True]*self.universe.bg_alerts[k][0])
alerts['skymap_ind'].extend(self.universe.bg_alerts[k][4])
alerts['skymap_dec'].extend(self.universe.bg_alerts[k][3])
alerts['stream'].extend([k]*self.universe.bg_alerts[k][0])
alerts['extra_evs'].extend([0]*self.universe.bg_alerts[k][0])
for k in self.universe.sig_alerts.keys():
for jj in range(len(self.universe.sig_alerts[k])):
if self.universe.sig_alerts[k][jj][0] == 0:
continue
else:
alerts['signalness'].append(self.universe.sig_alerts[k][jj][1][0])
alerts['declination'].append(np.radians(self.universe.sources['dec'][jj]))
alerts['background'].append(False)
alerts['skymap_ind'].append(self.universe.skymaps[k][jj][1])
alerts['skymap_dec'].append(self.universe.skymaps[k][jj][0])
alerts['stream'].append(k)
alerts['extra_evs'].append(self.universe.extra_events[k][jj])
alerts =
|
pd.DataFrame(alerts)
|
pandas.DataFrame
|
from typing import Tuple
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from etna.datasets import generate_ar_df
from etna.datasets.tsdataset import TSDataset
from etna.transforms import DateFlagsTransform
from etna.transforms import TimeSeriesImputerTransform
@pytest.fixture()
def tsdf_with_exog(random_seed) -> TSDataset:
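# Builds a two-segment ("Moscow", "Omsk") dataset in etna's wide format
# (MultiIndex columns: segment / feature) together with a longer exogenous
# series covering 600 periods.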
df_1 = pd.DataFrame.from_dict({"timestamp": pd.date_range("2021-02-01", "2021-07-01", freq="1d")})
df_2 = pd.DataFrame.from_dict({"timestamp": pd.date_range("2021-02-01", "2021-07-01", freq="1d")})
df_1["segment"] = "Moscow"
df_1["target"] = [x ** 2 + np.random.uniform(-2, 2) for x in list(range(len(df_1)))]
df_2["segment"] = "Omsk"
df_2["target"] = [x ** 0.5 + np.random.uniform(-2, 2) for x in list(range(len(df_2)))]
classic_df = pd.concat([df_1, df_2], ignore_index=True)
df = classic_df.pivot(index="timestamp", columns="segment")
df = df.reorder_levels([1, 0], axis=1)
df = df.sort_index(axis=1)
df.columns.names = ["segment", "feature"]
exog = generate_ar_df(start_time="2021-01-01", periods=600, n_segments=2)
exog = exog.pivot(index="timestamp", columns="segment")
exog = exog.reorder_levels([1, 0], axis=1)
exog = exog.sort_index(axis=1)
exog.columns.names = ["segment", "feature"]
exog.columns = pd.MultiIndex.from_arrays([["Moscow", "Omsk"], ["exog", "exog"]])
ts = TSDataset(df=df, df_exog=exog, freq="1D")
return ts
@pytest.fixture()
def df_and_regressors() -> Tuple[pd.DataFrame, pd.DataFrame]:
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df_1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df_2 = pd.DataFrame({"timestamp": timestamp[5:], "target": 12, "segment": "2"})
df = pd.concat([df_1, df_2], ignore_index=True)
df = TSDataset.to_dataset(df)
timestamp = pd.date_range("2020-12-01", "2021-02-11")
df_1 = pd.DataFrame({"timestamp": timestamp, "regressor_1": 1, "regressor_2": 2, "segment": "1"})
df_2 = pd.DataFrame({"timestamp": timestamp[5:], "regressor_1": 3, "regressor_2": 4, "segment": "2"})
df_exog = pd.concat([df_1, df_2], ignore_index=True)
df_exog = TSDataset.to_dataset(df_exog)
return df, df_exog
@pytest.fixture()
def ts_future(example_reg_tsds):
future = example_reg_tsds.make_future(10)
return future
def test_check_endings_error_raise():
"""Check that _check_endings method raises exception if some segments end with nan."""
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df2 = pd.DataFrame({"timestamp": timestamp[:-5], "target": 12, "segment": "2"})
df = pd.concat([df1, df2], ignore_index=True)
df = TSDataset.to_dataset(df)
ts = TSDataset(df=df, freq="D")
with pytest.raises(ValueError):
ts._check_endings()
def test_check_endings_error_pass():
"""Check that _check_endings method passes if there is no nans at the end of all segments."""
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df2 = pd.DataFrame({"timestamp": timestamp, "target": 12, "segment": "2"})
df = pd.concat([df1, df2], ignore_index=True)
df = TSDataset.to_dataset(df)
ts = TSDataset(df=df, freq="D")
ts._check_endings()
def test_categorical_after_call_to_pandas():
classic_df = generate_ar_df(periods=30, start_time="2021-06-01", n_segments=2)
classic_df["categorical_column"] = [0] * 30 + [1] * 30
classic_df["categorical_column"] = classic_df["categorical_column"].astype("category")
df = TSDataset.to_dataset(classic_df[["timestamp", "segment", "target"]])
exog = TSDataset.to_dataset(classic_df[["timestamp", "segment", "categorical_column"]])
ts = TSDataset(df, "D", exog)
flatten_df = ts.to_pandas(flatten=True)
assert flatten_df["categorical_column"].dtype == "category"
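# In the cases below, borders left as None are expected to be inferred from the
# fixture's own time range ("2021-02-01" .. "2021-07-01" in tsdf_with_exog).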
@pytest.mark.parametrize(
"borders, true_borders",
(
(
("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01"),
("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01"),
),
(
("2021-02-03", "2021-06-20", "2021-06-22", "2021-07-01"),
("2021-02-03", "2021-06-20", "2021-06-22", "2021-07-01"),
),
(
("2021-02-01", "2021-06-20", "2021-06-21", "2021-06-28"),
("2021-02-01", "2021-06-20", "2021-06-21", "2021-06-28"),
),
(
("2021-02-01", "2021-06-20", "2021-06-23", "2021-07-01"),
("2021-02-01", "2021-06-20", "2021-06-23", "2021-07-01"),
),
((None, "2021-06-20", "2021-06-23", "2021-06-28"), ("2021-02-01", "2021-06-20", "2021-06-23", "2021-06-28")),
(("2021-02-03", "2021-06-20", "2021-06-23", None), ("2021-02-03", "2021-06-20", "2021-06-23", "2021-07-01")),
((None, "2021-06-20", "2021-06-23", None), ("2021-02-01", "2021-06-20", "2021-06-23", "2021-07-01")),
((None, "2021-06-20", None, None), ("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01")),
((None, None, "2021-06-21", None), ("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01")),
),
)
def test_train_test_split(borders, true_borders, tsdf_with_exog):
train_start, train_end, test_start, test_end = borders
train_start_true, train_end_true, test_start_true, test_end_true = true_borders
train, test = tsdf_with_exog.train_test_split(
train_start=train_start, train_end=train_end, test_start=test_start, test_end=test_end
)
assert isinstance(train, TSDataset)
assert isinstance(test, TSDataset)
assert (train.df == tsdf_with_exog.df[train_start_true:train_end_true]).all().all()
assert (train.df_exog == tsdf_with_exog.df_exog).all().all()
assert (test.df == tsdf_with_exog.df[test_start_true:test_end_true]).all().all()
assert (test.df_exog == tsdf_with_exog.df_exog).all().all()
@pytest.mark.parametrize(
"test_size, true_borders",
(
(11, ("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01")),
(9, ("2021-02-01", "2021-06-22", "2021-06-23", "2021-07-01")),
(1, ("2021-02-01", "2021-06-30", "2021-07-01", "2021-07-01")),
),
)
def test_train_test_split_with_test_size(test_size, true_borders, tsdf_with_exog):
train_start_true, train_end_true, test_start_true, test_end_true = true_borders
train, test = tsdf_with_exog.train_test_split(test_size=test_size)
assert isinstance(train, TSDataset)
assert isinstance(test, TSDataset)
assert (train.df == tsdf_with_exog.df[train_start_true:train_end_true]).all().all()
assert (train.df_exog == tsdf_with_exog.df_exog).all().all()
assert (test.df == tsdf_with_exog.df[test_start_true:test_end_true]).all().all()
assert (test.df_exog == tsdf_with_exog.df_exog).all().all()
@pytest.mark.parametrize(
"test_size, borders, true_borders",
(
(
10,
("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01"),
("2021-02-01", "2021-06-20", "2021-06-21", "2021-07-01"),
),
(
15,
("2021-02-03", "2021-06-20", "2021-06-22", "2021-07-01"),
("2021-02-03", "2021-06-20", "2021-06-22", "2021-07-01"),
),
(11, ("2021-02-02", None, None, "2021-06-28"), ("2021-02-02", "2021-06-17", "2021-06-18", "2021-06-28")),
(
4,
("2021-02-03", "2021-06-20", None, "2021-07-01"),
("2021-02-03", "2021-06-20", "2021-06-28", "2021-07-01"),
),
(
4,
("2021-02-03", "2021-06-20", None, None),
("2021-02-03", "2021-06-20", "2021-06-21", "2021-06-24"),
),
),
)
def test_train_test_split_both(test_size, borders, true_borders, tsdf_with_exog):
train_start, train_end, test_start, test_end = borders
train_start_true, train_end_true, test_start_true, test_end_true = true_borders
train, test = tsdf_with_exog.train_test_split(
train_start=train_start, train_end=train_end, test_start=test_start, test_end=test_end, test_size=test_size
)
assert isinstance(train, TSDataset)
assert isinstance(test, TSDataset)
assert (train.df == tsdf_with_exog.df[train_start_true:train_end_true]).all().all()
assert (train.df_exog == tsdf_with_exog.df_exog).all().all()
assert (test.df == tsdf_with_exog.df[test_start_true:test_end_true]).all().all()
assert (test.df_exog == tsdf_with_exog.df_exog).all().all()
@pytest.mark.parametrize(
"borders, match",
(
(("2021-01-01", "2021-06-20", "2021-06-21", "2021-07-01"), "Min timestamp in df is"),
(("2021-02-01", "2021-06-20", "2021-06-21", "2021-08-01"), "Max timestamp in df is"),
),
)
def test_train_test_split_warning(borders, match, tsdf_with_exog):
train_start, train_end, test_start, test_end = borders
with pytest.warns(UserWarning, match=match):
tsdf_with_exog.train_test_split(
train_start=train_start, train_end=train_end, test_start=test_start, test_end=test_end
)
@pytest.mark.parametrize(
"test_size, borders, match",
(
(
10,
("2021-02-01", None, "2021-06-21", "2021-07-01"),
"test_size, test_start and test_end cannot be applied at the same time. test_size will be ignored",
),
),
)
def test_train_test_split_warning2(test_size, borders, match, tsdf_with_exog):
train_start, train_end, test_start, test_end = borders
with pytest.warns(UserWarning, match=match):
tsdf_with_exog.train_test_split(
train_start=train_start, train_end=train_end, test_start=test_start, test_end=test_end, test_size=test_size
)
@pytest.mark.parametrize(
"test_size, borders, match",
(
(
None,
("2021-02-03", None, None, "2021-07-01"),
"At least one of train_end, test_start or test_size should be defined",
),
(
17,
("2021-02-01", "2021-06-20", None, "2021-07-01"),
"The beginning of the test goes before the end of the train",
),
(
17,
("2021-02-01", "2021-06-20", "2021-06-26", None),
"test_size is 17, but only 6 available with your test_start",
),
),
)
def test_train_test_split_failed(test_size, borders, match, tsdf_with_exog):
train_start, train_end, test_start, test_end = borders
with pytest.raises(ValueError, match=match):
tsdf_with_exog.train_test_split(
train_start=train_start, train_end=train_end, test_start=test_start, test_end=test_end, test_size=test_size
)
def test_dataset_datetime_conversion():
classic_df = generate_ar_df(periods=30, start_time="2021-06-01", n_segments=2)
classic_df["timestamp"] = classic_df["timestamp"].astype(str)
df = TSDataset.to_dataset(classic_df[["timestamp", "segment", "target"]])
# todo: deal with pandas datetime format
assert df.index.dtype == "datetime64[ns]"
def test_dataset_datetime_conversion_during_init():
classic_df = generate_ar_df(periods=30, start_time="2021-06-01", n_segments=2)
classic_df["categorical_column"] = [0] * 30 + [1] * 30
classic_df["categorical_column"] = classic_df["categorical_column"].astype("category")
df = TSDataset.to_dataset(classic_df[["timestamp", "segment", "target"]])
exog = TSDataset.to_dataset(classic_df[["timestamp", "segment", "categorical_column"]])
df.index = df.index.astype(str)
exog.index = df.index.astype(str)
ts = TSDataset(df, "D", exog)
assert ts.df.index.dtype == "datetime64[ns]"
def test_make_future_raise_error_on_diff_endings(ts_diff_endings):
with pytest.raises(ValueError, match="All segments should end at the same timestamp"):
ts_diff_endings.make_future(10)
def test_make_future_with_imputer(ts_diff_endings, ts_future):
imputer = TimeSeriesImputerTransform(in_column="target")
ts_diff_endings.fit_transform([imputer])
future = ts_diff_endings.make_future(10)
assert_frame_equal(future.df, ts_future.df)
def test_make_future():
timestamp = pd.date_range("2020-01-01", periods=100, freq="D")
df1 = pd.DataFrame({"timestamp": timestamp, "target": 1, "segment": "segment_1"})
df2 = pd.DataFrame({"timestamp": timestamp, "target": 2, "segment": "segment_2"})
df = pd.concat([df1, df2], ignore_index=False)
ts = TSDataset(TSDataset.to_dataset(df), freq="D")
ts_future = ts.make_future(10)
assert np.all(ts_future.index == pd.date_range(ts.index.max() + pd.Timedelta("1D"), periods=10, freq="D"))
assert set(ts_future.columns.get_level_values("feature")) == {"target"}
def test_make_future_small_horizon():
timestamp = np.arange(np.datetime64("2021-01-01"), np.datetime64("2021-02-01"))
target1 = [np.sin(i) for i in range(len(timestamp))]
target2 = [np.cos(i) for i in range(len(timestamp))]
df1 = pd.DataFrame({"timestamp": timestamp, "target": target1, "segment": "1"})
df2 = pd.DataFrame({"timestamp": timestamp, "target": target2, "segment": "2"})
df = pd.concat([df1, df2], ignore_index=True)
df = TSDataset.to_dataset(df)
ts = TSDataset(df, freq="D")
train = TSDataset(ts[: ts.index[10], :, :], freq="D")
with pytest.warns(UserWarning, match="TSDataset freq can't be inferred"):
assert len(train.make_future(1).df) == 1
def test_make_future_with_exog():
timestamp = pd.date_range("2020-01-01", periods=100, freq="D")
df1 = pd.DataFrame({"timestamp": timestamp, "target": 1, "segment": "segment_1"})
df2 = pd.DataFrame({"timestamp": timestamp, "target": 2, "segment": "segment_2"})
df = pd.concat([df1, df2], ignore_index=False)
exog = df.copy()
exog.columns = ["timestamp", "exog", "segment"]
ts = TSDataset(df=TSDataset.to_dataset(df), df_exog=TSDataset.to_dataset(exog), freq="D")
ts_future = ts.make_future(10)
assert np.all(ts_future.index == pd.date_range(ts.index.max() + pd.Timedelta("1D"), periods=10, freq="D"))
assert set(ts_future.columns.get_level_values("feature")) == {"target", "exog"}
def test_make_future_with_regressors(df_and_regressors):
df, df_exog = df_and_regressors
ts = TSDataset(df=df, df_exog=df_exog, freq="D")
ts_future = ts.make_future(10)
assert np.all(ts_future.index == pd.date_range(ts.index.max() + pd.Timedelta("1D"), periods=10, freq="D"))
assert set(ts_future.columns.get_level_values("feature")) == {"target", "regressor_1", "regressor_2"}
@pytest.mark.parametrize("exog_starts_later,exog_ends_earlier", ((True, False), (False, True), (True, True)))
def test_dataset_check_exog_raise_error(exog_starts_later: bool, exog_ends_earlier: bool):
start_time = "2021-01-10" if exog_starts_later else "2021-01-01"
end_time = "2021-01-20" if exog_ends_earlier else "2021-02-01"
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df2 = pd.DataFrame({"timestamp": timestamp[5:], "target": 12, "segment": "2"})
df = pd.concat([df1, df2], ignore_index=True)
df = TSDataset.to_dataset(df)
timestamp = pd.date_range(start_time, end_time)
df1 = pd.DataFrame({"timestamp": timestamp, "regressor_aaa": 1, "segment": "1"})
df2 = pd.DataFrame({"timestamp": timestamp[5:], "regressor_aaa": 2, "segment": "2"})
dfexog = pd.concat([df1, df2], ignore_index=True)
dfexog = TSDataset.to_dataset(dfexog)
with pytest.raises(ValueError):
TSDataset._check_regressors(df=df, df_exog=dfexog)
def test_dataset_check_exog_pass(df_and_regressors):
df, df_exog = df_and_regressors
_ = TSDataset._check_regressors(df=df, df_exog=df_exog)
def test_warn_not_enough_exog(df_and_regressors):
"""Check that warning is thrown if regressors don't have enough values."""
df, df_exog = df_and_regressors
ts = TSDataset(df=df, df_exog=df_exog, freq="D")
with pytest.warns(UserWarning, match="Some regressors don't have enough values"):
ts.make_future(ts.df_exog.shape[0] + 100)
def test_getitem_only_date(tsdf_with_exog):
df_date_only = tsdf_with_exog["2021-02-01"]
assert df_date_only.name == pd.Timestamp("2021-02-01")
pd.testing.assert_series_equal(tsdf_with_exog.df.loc["2021-02-01"], df_date_only)
def test_getitem_slice_date(tsdf_with_exog):
df_slice = tsdf_with_exog["2021-02-01":"2021-02-03"]
expected_index = pd.DatetimeIndex(pd.date_range("2021-02-01", "2021-02-03"), name="timestamp")
pd.testing.assert_index_equal(df_slice.index, expected_index)
pd.testing.assert_frame_equal(tsdf_with_exog.df.loc["2021-02-01":"2021-02-03"], df_slice)
def test_getitem_second_ellipsis(tsdf_with_exog):
df_slice = tsdf_with_exog["2021-02-01":"2021-02-03", ...]
expected_index = pd.DatetimeIndex(pd.date_range("2021-02-01", "2021-02-03"), name="timestamp")
pd.testing.assert_index_equal(df_slice.index, expected_index)
pd.testing.assert_frame_equal(tsdf_with_exog.df.loc["2021-02-01":"2021-02-03"], df_slice)
def test_getitem_first_ellipsis(tsdf_with_exog):
df_slice = tsdf_with_exog[..., "target"]
df_expected = tsdf_with_exog.df.loc[:, [["Moscow", "target"], ["Omsk", "target"]]]
pd.testing.assert_frame_equal(df_expected, df_slice)
def test_getitem_all_indexes(tsdf_with_exog):
df_slice = tsdf_with_exog[:, :, :]
df_expected = tsdf_with_exog.df
pd.testing.assert_frame_equal(df_expected, df_slice)
def test_finding_regressors(df_and_regressors):
"""Check that ts.regressors property works correctly."""
df, df_exog = df_and_regressors
ts = TSDataset(df=df, df_exog=df_exog, freq="D")
assert sorted(ts.regressors) == ["regressor_1", "regressor_2"]
def test_head_default(tsdf_with_exog):
assert np.all(tsdf_with_exog.head() == tsdf_with_exog.df.head())
def test_tail_default(tsdf_with_exog):
assert np.all(tsdf_with_exog.tail() == tsdf_with_exog.df.tail())
def test_updating_regressors_fit_transform(df_and_regressors):
"""Check that ts.regressors is updated after making ts.fit_transform()."""
df, df_exog = df_and_regressors
ts = TSDataset(df=df, df_exog=df_exog, freq="D")
date_flags_transform = DateFlagsTransform(
day_number_in_week=True,
day_number_in_month=False,
week_number_in_month=False,
week_number_in_year=False,
month_number_in_year=False,
year_number=False,
is_weekend=True,
out_column="regressor_dateflag",
)
initial_regressors = set(ts.regressors)
ts.fit_transform(transforms=[date_flags_transform])
final_regressors = set(ts.regressors)
expected_columns = {"regressor_dateflag_day_number_in_week", "regressor_dateflag_is_weekend"}
assert initial_regressors.issubset(final_regressors)
assert final_regressors.difference(initial_regressors) == expected_columns
def test_right_format_sorting():
"""Need to check if to_dataset method does not mess up with data and column names,
sorting it with no respect to each other
"""
df = pd.DataFrame({"timestamp": pd.date_range("2020-01-01", periods=100)})
df["segment"] = "segment_1"
# need names and values in inverse fashion
df["reg_2"] = 1
df["reg_1"] = 2
tsd = TSDataset(TSDataset.to_dataset(df), freq="D")
inv_df = tsd.to_pandas(flatten=True)
pd.testing.assert_series_equal(df["reg_1"], inv_df["reg_1"])
pd.testing.assert_series_equal(df["reg_2"], inv_df["reg_2"])
def test_to_flatten(example_df):
"""Check that TSDataset.to_flatten works correctly."""
sorted_columns = sorted(example_df.columns)
expected_df = example_df[sorted_columns]
obtained_df = TSDataset.to_flatten(TSDataset.to_dataset(example_df))
assert sorted_columns == sorted(obtained_df.columns)
assert (expected_df.values == obtained_df[sorted_columns].values).all()
def test_transform_raise_warning_on_diff_endings(ts_diff_endings):
with pytest.warns(Warning, match="Segments contains NaNs in the last timestamps."):
ts_diff_endings.transform([])
def test_fit_transform_raise_warning_on_diff_endings(ts_diff_endings):
with pytest.warns(Warning, match="Segments contains NaNs in the last timestamps."):
ts_diff_endings.fit_transform([])
def test_gather_common_data(df_and_regressors):
"""Check that TSDataset._gather_common_data correctly finds common data for info/describe methods."""
df, df_exog = df_and_regressors
ts = TSDataset(df=df, df_exog=df_exog, freq="D")
common_data = ts._gather_common_data()
assert common_data["num_segments"] == 2
assert common_data["num_exogs"] == 2
assert common_data["num_regressors"] == 2
assert common_data["freq"] == "D"
def test_gather_segments_data(df_and_regressors):
"""Check that TSDataset._gather_segments_data correctly finds segment data for info/describe methods."""
df, df_exog = df_and_regressors
# add NaN in the middle
df.iloc[-5, 0] = np.NaN
# add NaNs at the end
df.iloc[-3:, 1] = np.NaN
ts = TSDataset(df=df, df_exog=df_exog, freq="D")
segments = ts.segments
segments_dict = ts._gather_segments_data(segments)
segment_df = pd.DataFrame(segments_dict, index=segments)
assert np.all(segment_df.index == ts.segments)
assert segment_df.loc["1", "start_timestamp"] == pd.Timestamp("2021-01-01")
assert segment_df.loc["2", "start_timestamp"] == pd.Timestamp("2021-01-06")
assert segment_df.loc["1", "end_timestamp"] ==
|
pd.Timestamp("2021-02-01")
|
pandas.Timestamp
|
# -*- coding: utf-8 -*-
#
# @author <NAME>
# @date 1 Feb 2019
import numpy as np
import pandas as pd
import itertools
from sklearn import svm
from data_handling import *
from data_stats import *
# Data paths
proj_dir = '/Users/nikhil/code/git_repos/compare-surf-tools/'
data_dir = proj_dir + 'data/'
demograph_file = 'ABIDE_Phenotype.csv'
ants_file = 'ABIDE_ants_thickness_data.csv'
fs53_file = 'ABIDE_fs5.3_thickness.csv'
fs51_file = 'cortical_fs5.1_measuresenigma_thickavg.csv'
fs60_lh_file = 'aparc_lh_thickness_table.txt'
fs60_rh_file = 'aparc_rh_thickness_table.txt'
# Global Vars
subject_ID_col = 'SubjID'
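# Load each pipeline's cortical thickness table (ANTs, FreeSurfer 5.1/5.3/6.0)
# and standardize it to a common subject-ID and column layout before comparison.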
# test_1: stdize data
test_name = 'test_1: stdize data'
print('\n ------------- Running {} -------------'.format(test_name))
# Demographics and Dx
demograph = pd.read_csv(data_dir + demograph_file)
demograph = demograph.rename(columns={'Subject_ID':subject_ID_col})
# ANTs
ants_data = pd.read_csv(data_dir + ants_file, header=2)
print('shape of ants data {}'.format(ants_data.shape))
ants_data_std = standardize_ants_data(ants_data, subject_ID_col)
print('shape of stdized ants data {}'.format(ants_data_std.shape))
print(list(ants_data_std.columns)[:5])
print('')
# FS
fs53_data = pd.read_csv(data_dir + fs53_file)
print('shape of fs53 data {}'.format(fs53_data.shape))
fs53_data_std = standardize_fs_data(fs53_data, subject_ID_col)
print('shape of stdized fs53 data {}'.format(fs53_data_std.shape))
print(list(fs53_data_std.columns[:5]))
print('')
fs51_data = pd.read_csv(data_dir + fs51_file)
print('shape of fs51 data {}'.format(fs51_data.shape))
fs51_data_std = standardize_fs_data(fs51_data, subject_ID_col)
print('shape of stdized fs51 data {}'.format(fs51_data_std.shape))
print(list(fs51_data_std.columns[:5]))
print('')
fs60_lh_data =
|
pd.read_csv(data_dir + fs60_lh_file, delim_whitespace=True)
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
import unittest
import platform
import pandas as pd
import numpy as np
import pyarrow.parquet as pq
import hpat
from hpat.tests.test_utils import (
count_array_REPs, count_parfor_REPs, count_array_OneDs, get_start_end)
from hpat.tests.gen_test_data import ParquetGenerator
from numba import types
from numba.config import IS_32BITS
from numba.errors import TypingError
_cov_corr_series = [(pd.Series(x), pd.Series(y)) for x, y in [
(
[np.nan, -2., 3., 9.1],
[np.nan, -2., 3., 5.0],
),
# TODO(quasilyte): more intricate data for complex-typed series.
# Some arguments make assert_almost_equal fail.
# Functions that yield mismatching results:
# _column_corr_impl and _column_cov_impl.
(
[complex(-2., 1.0), complex(3.0, 1.0)],
[complex(-3., 1.0), complex(2.0, 1.0)],
),
(
[complex(-2.0, 1.0), complex(3.0, 1.0)],
[1.0, -2.0],
),
(
[1.0, -4.5],
[complex(-4.5, 1.0), complex(3.0, 1.0)],
),
]]
min_float64 = np.finfo('float64').min
max_float64 = np.finfo('float64').max
test_global_input_data_float64 = [
[1., np.nan, -1., 0., min_float64, max_float64],
[np.nan, np.inf, np.NINF, np.NZERO]
]
min_int64 = np.iinfo('int64').min
max_int64 = np.iinfo('int64').max
max_uint64 = np.iinfo('uint64').max
test_global_input_data_integer64 = [
[1, -1, 0],
[min_int64, max_int64],
[max_uint64]
]
test_global_input_data_numeric = test_global_input_data_integer64 + test_global_input_data_float64
test_global_input_data_unicode_kind4 = [
'ascii',
'12345',
'1234567890',
'¡Y tú quién te crees?',
'🐍⚡',
'大处着眼,小处着手。',
]
test_global_input_data_unicode_kind1 = [
'ascii',
'12345',
'1234567890',
]
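# Helpers that build small test functions from source text, so one template can
# be reused across many binary operators and Series methods.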
def _make_func_from_text(func_text, func_name='test_impl'):
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars[func_name]
return test_impl
def _make_func_use_binop1(operator):
func_text = "def test_impl(A, B):\n"
func_text += " return A {} B\n".format(operator)
return _make_func_from_text(func_text)
def _make_func_use_binop2(operator):
func_text = "def test_impl(A, B):\n"
func_text += " A {} B\n".format(operator)
func_text += " return A\n"
return _make_func_from_text(func_text)
def _make_func_use_method_arg1(method):
func_text = "def test_impl(A, B):\n"
func_text += " return A.{}(B)\n".format(method)
return _make_func_from_text(func_text)
GLOBAL_VAL = 2
class TestSeries(unittest.TestCase):
def test_create1(self):
def test_impl():
df = pd.DataFrame({'A': [1, 2, 3]})
return (df.A == 1).sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_unicode(self):
def test_impl():
S = pd.Series([
['abc', 'defg', 'ijk'],
['lmn', 'opq', 'rstuvwxyz']
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_integer(self):
def test_impl():
S = pd.Series([
[123, 456, -789],
[-112233, 445566, 778899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_float(self):
def test_impl():
S = pd.Series([
[1.23, -4.56, 7.89],
[11.2233, 44.5566, -778.899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
def test_create2(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n)})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
def test_create_series1(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index1(self):
# create and box an indexed Series
def test_impl():
A = pd.Series([1, 2, 3], ['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index2(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index3(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name='A')
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index4(self):
def test_impl(name):
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name=name)
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func('A'), test_impl('A'))
def test_create_str(self):
def test_impl():
df = pd.DataFrame({'A': ['a', 'b', 'c']})
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
def test_pass_df1(self):
def test_impl(df):
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_df_str(self):
def test_impl(df):
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_series1(self):
# TODO: check to make sure it is series type
def test_impl(A):
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series2(self):
# test creating dataframe from passed series
def test_impl(A):
df = pd.DataFrame({'A': A})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_str(self):
def test_impl(A):
return (A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_index1(self):
def test_impl(A):
return A
hpat_func = hpat.jit(test_impl)
S = pd.Series([3, 5, 6], ['a', 'b', 'c'], name='A')
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_size(self):
def test_impl(S):
return S.size
hpat_func = hpat.jit(test_impl)
n = 11
for S, expected in [
(pd.Series(), 0),
(pd.Series([]), 0),
(pd.Series(np.arange(n)), n),
(pd.Series([np.nan, 1, 2]), 3),
(pd.Series(['1', '2', '3']), 3),
]:
with self.subTest(S=S, expected=expected):
self.assertEqual(hpat_func(S), expected)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_attr2(self):
def test_impl(A):
return A.copy().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr3(self):
def test_impl(A):
return A.min()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_series_attr4(self):
def test_impl(A):
return A.cumsum().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_argsort1(self):
def test_impl(A):
return A.argsort()
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.random.ranf(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_attr6(self):
def test_impl(A):
return A.take([2, 3]).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr7(self):
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_getattr_ndim(self):
'''Verifies getting Series attribute ndim is supported'''
def test_impl(S):
return S.ndim
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_getattr_T(self):
'''Verifies getting Series attribute T is supported'''
def test_impl(S):
return S.T
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_str1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_copy_int1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1, 2, 3])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_deep(self):
def test_impl(A, deep):
return A.copy(deep=deep)
hpat_func = hpat.jit(test_impl)
for S in [
pd.Series([1, 2]),
pd.Series([1, 2], index=["a", "b"]),
]:
with self.subTest(S=S):
for deep in (True, False):
with self.subTest(deep=deep):
actual = hpat_func(S, deep)
expected = test_impl(S, deep)
pd.testing.assert_series_equal(actual, expected)
self.assertEqual(actual.values is S.values, expected.values is S.values)
self.assertEqual(actual.values is S.values, not deep)
# Shallow copy of index is not supported yet
if deep:
self.assertEqual(actual.index is S.index, expected.index is S.index)
self.assertEqual(actual.index is S.index, not deep)
def test_series_astype_int_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series without changing them
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
handles string series without changing them
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series without changing them
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['d', 'e', 'f'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series without changing them
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[1, 2, 3])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: requires str(datetime64) support in Numba')
def test_series_astype_dt_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts datetime series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series([pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03')
])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('AssertionError: Series are different'
'[left]: [0.000000, 1.000000, 2.000000, 3.000000, ...'
'[right]: [0.0, 1.0, 2.0, 3.0, ...'
'TODO: needs alignment to NumPy on Numba side')
def test_series_astype_float_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts float series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int32_to_int64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series with dtype=int32 to series with dtype=int64
'''
def test_impl(A):
return A.astype(np.int64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n), dtype=np.int32)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts integer series to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_float_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support string literal as dtype arg')
def test_series_astype_literal_dtype1(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype('int32')
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to int')
def test_series_astype_str_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of integers
'''
import numba
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series([str(x) for x in np.arange(n) - n // 2])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to float')
def test_series_astype_str_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['3.24', '1E+05', '-1', '-1.3E-01', 'nan', 'inf'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series without changing them
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['a', 'b', 'c'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series without changing them
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[2, 3, 5])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_np_call_on_series1(self):
def test_impl(A):
return np.min(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values(self):
def test_impl(A):
return A.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values1(self):
def test_impl(A):
return (A == 2).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_shape1(self):
def test_impl(A):
return A.shape
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_static_setitem_series1(self):
def test_impl(A):
A[0] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_setitem_series1(self):
def test_impl(A, i):
A[i] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A.copy(), 0), test_impl(df.A.copy(), 0))
def test_setitem_series2(self):
def test_impl(A, i):
A[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, 0)
test_impl(A2, 0)
pd.testing.assert_series_equal(A1, A2)
@unittest.skip("enable after remove dead in hiframes is removed")
def test_setitem_series3(self):
def test_impl(A, i):
S = pd.Series(A)
S[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)
A1 = A.copy()
A2 = A
hpat_func(A1, 0)
test_impl(A2, 0)
np.testing.assert_array_equal(A1, A2)
def test_setitem_series_bool1(self):
def test_impl(A):
A[A > 3] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1)
test_impl(A2)
pd.testing.assert_series_equal(A1, A2)
def test_setitem_series_bool2(self):
def test_impl(A, B):
A[A > 3] = B[A > 3]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, df.B)
test_impl(A2, df.B)
pd.testing.assert_series_equal(A1, A2)
def test_static_getitem_series1(self):
def test_impl(A):
return A[0]
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
self.assertEqual(hpat_func(A), test_impl(A))
def test_getitem_series1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_getitem_series_str1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'bb', 'cc']})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_iat1(self):
def test_impl(A):
return A.iat[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iat2(self):
def test_impl(A):
A.iat[3] = 1
return A
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_iloc1(self):
def test_impl(A):
return A.iloc[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iloc2(self):
def test_impl(A):
return A.iloc[3:8]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(
hpat_func(S), test_impl(S).reset_index(drop=True))
def test_series_op1(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op2(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
df = pd.DataFrame({'A': np.arange(1, n, dtype=np.int64)})
else:
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op3(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op4(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op5(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', 'Series values are different (20.0 %)'
'[left]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, 3486784401, 10000000000]'
'[right]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, -808182895, 1410065408]')
def test_series_op5_integer_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
operand_series = pd.Series(np.arange(1, n, dtype=np.int64))
else:
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op5_float_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op6(self):
def test_impl(A):
return -A
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_op7(self):
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
def test_series_op8(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'ne', 'eq')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', "Attribute dtype are different: int64, int32")
def test_series_op8_integer_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op8_float_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_inplace_binop_array(self):
def test_impl(A, B):
A += B
return A
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)**2.0  # TODO: use 2 to test int casting
B = pd.Series(np.ones(n))
np.testing.assert_array_equal(hpat_func(A.copy(), B), test_impl(A, B))
def test_series_fusion1(self):
def test_impl(A, B):
return A + B + 1
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 1)
def test_series_fusion2(self):
# make sure getting data var avoids incorrect single def assumption
def test_impl(A, B):
S = B + 2
if A[0] == 0:
S = A + 1
return S + B
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 3)
def test_series_len(self):
def test_impl(A, i):
return len(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_box(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_box2(self):
def test_impl():
A = pd.Series(['1', '2', '3'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_list_str_unbox1(self):
def test_impl(A):
return A.iloc[0]
hpat_func = hpat.jit(test_impl)
S = pd.Series([['aa', 'b'], ['ccc'], []])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
# call twice to test potential refcount errors
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_np_typ_call_replace(self):
# calltype replacement is tricky for np.typ() calls since the variable
# type can't provide the calltype
def test_impl(i):
return np.int32(i)
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(1), test_impl(1))
def test_series_ufunc1(self):
def test_impl(A, i):
return np.isinf(A).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A, 1), test_impl(df.A, 1))
def test_list_convert(self):
def test_impl():
df = pd.DataFrame({'one': np.array([-1, np.nan, 2.5]),
'two': ['foo', 'bar', 'baz'],
'three': [True, False, True]})
return df.one.values, df.two.values, df.three.values
hpat_func = hpat.jit(test_impl)
one, two, three = hpat_func()
self.assertTrue(isinstance(one, np.ndarray))
self.assertTrue(isinstance(two, np.ndarray))
self.assertTrue(isinstance(three, np.ndarray))
@unittest.skip("needs empty_like typing fix in npydecl.py")
def test_series_empty_like(self):
def test_impl(A):
return np.empty_like(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertTrue(isinstance(hpat_func(df.A), np.ndarray))
def test_series_fillna1(self):
def test_impl(A):
return A.fillna(5.0)
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
# test inplace fillna for named numeric series (obtained from DataFrame)
def test_series_fillna_inplace1(self):
def test_impl(A):
A.fillna(5.0, inplace=True)
return A
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str1(self):
def test_impl(A):
return A.fillna("dd")
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'b', None, 'ccc']})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str_inplace1(self):
def test_impl(A):
A.fillna("dd", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
# TODO: handle string array reflection
# hpat_func(S1)
# test_impl(S2)
# np.testing.assert_array_equal(S1, S2)
def test_series_fillna_str_inplace_empty1(self):
def test_impl(A):
A.fillna("", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_str(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=['a', 'b', 'c', 'd'])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_int(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=[2, 3, 4, 5])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis1(self):
'''Verifies Series.dropna() implementation handles 'index' as axis argument'''
def test_impl(S):
return S.dropna(axis='index')
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis2(self):
'''Verifies Series.dropna() implementation handles 0 as axis argument'''
def test_impl(S):
return S.dropna(axis=0)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis3(self):
'''Verifies Series.dropna() implementation handles correct non-literal axis argument'''
def test_impl(S, axis):
return S.dropna(axis=axis)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
axis_values = [0, 'index']
for value in axis_values:
pd.testing.assert_series_equal(hpat_func(S1, value), test_impl(S2, value))
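# NOTE: test_global_input_data_float64/_integer64/_numeric/_unicode_kind4 used below are
# module-level sample datasets defined elsewhere in this test module; they presumably
# contain float data with NaN/inf, integer data, a mix of numeric data, and 4-byte-kind
# unicode strings, respectively (an assumption based on their names and usage).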
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index1(self):
'''Verifies Series.dropna() implementation for float series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
for data in test_global_input_data_float64:
S1 = pd.Series(data)
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index2(self):
'''Verifies Series.dropna() implementation for float series with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index1(self):
'''Verifies Series.dropna() implementation for series of strings with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index2(self):
'''Verifies Series.dropna() implementation for series of strings with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index3(self):
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], index=[1, 2, 5, 7, 10])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_float_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for float series with default index and inplace argument True'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_float_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original float series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_str_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for series of strings
with default index and inplace argument True
'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_str_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original string series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
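# NOTE: the *_parallel/_dist tests below follow a common pattern: get_start_end(n)
# presumably returns the [start, end) chunk owned by the current MPI rank, so the jitted
# function receives only the local slice while the reference implementation runs on the
# full series; count_array_REPs()/count_parfor_REPs() check that nothing fell back to
# replicated execution and count_array_OneDs() that some arrays were distributed (1D).
# This description of the helpers is an assumption inferred from how the tests use them.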
def test_series_dropna_str_parallel1(self):
'''Verifies Series.dropna() distributed work for series of strings with default index'''
def test_impl(A):
B = A.dropna()
return (B == 'gg').sum()
hpat_func = hpat.jit(distributed=['A'])(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc', 'dd', 'gg'])
start, end = get_start_end(len(S1))
# TODO: gatherv
self.assertEqual(hpat_func(S1[start:end]), test_impl(S1))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertTrue(count_array_OneDs() > 0)
@unittest.skip('AssertionError: Series are different\n'
'Series length are different\n'
'[left]: 3, Int64Index([0, 1, 2], dtype=\'int64\')\n'
'[right]: 2, Int64Index([1, 2], dtype=\'int64\')')
def test_series_dropna_dt_no_index1(self):
'''Verifies Series.dropna() implementation for datetime series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([pd.NaT, pd.Timestamp('1970-12-01'), pd.Timestamp('2012-07-25')])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
def test_series_dropna_bool_no_index1(self):
'''Verifies Series.dropna() implementation for bool series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([True, False, False, True])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_int_no_index1(self):
'''Verifies Series.dropna() implementation for integer series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
n = 11
S1 = pd.Series(np.arange(n, dtype=np.int64))
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('numba.errors.TypingError - fix needed\n'
'Failed in hpat mode pipeline'
'(step: convert to distributed)\n'
'Invalid use of Function(<built-in function len>)'
'with argument(s) of type(s): (none)\n')
def test_series_rename1(self):
def test_impl(A):
return A.rename('B')
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A), test_impl(df.A))
def test_series_sum_default(self):
def test_impl(S):
return S.sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1., 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_sum_nan(self):
def test_impl(S):
return S.sum()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
# all NA case should produce 0
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Old style Series.sum() does not support parameters")
def test_series_sum_skipna_false(self):
def test_impl(S):
return S.sum(skipna=False)
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(np.isnan(hpat_func(S)), np.isnan(test_impl(S)))
@unittest.skipIf(not hpat.config.config_pipeline_hpat_default,
"Series.sum() operator + is not implemented yet for Numba")
def test_series_sum2(self):
def test_impl(S):
return (S + S).sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_prod(self):
def test_impl(S, skipna):
return S.prod(skipna=skipna)
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for data in data_samples:
S = pd.Series(data)
for skipna_var in [True, False]:
actual = hpat_func(S, skipna=skipna_var)
expected = test_impl(S, skipna=skipna_var)
if np.isnan(actual) or np.isnan(expected):
# cannot compare NaN to NaN directly
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
def test_series_prod_skipna_default(self):
def test_impl(S):
return S.prod()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2, 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_count1(self):
def test_impl(S):
return S.count()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series(['aa', 'bb', np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_mean(self):
def test_impl(S):
return S.mean()
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for data in data_samples:
with self.subTest(data=data):
S = pd.Series(data)
actual = hpat_func(S)
expected = test_impl(S)
if np.isnan(actual) or np.isnan(expected):
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.mean() any parameters unsupported")
def test_series_mean_skipna(self):
def test_impl(S, skipna):
return S.mean(skipna=skipna)
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for skipna in [True, False]:
for data in data_samples:
S = pd.Series(data)
actual = hpat_func(S, skipna)
expected = test_impl(S, skipna)
if np.isnan(actual) or np.isnan(expected):
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
def test_series_var1(self):
def test_impl(S):
return S.var()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_min(self):
def test_impl(S):
return S.min()
hpat_func = hpat.jit(test_impl)
# TODO type_min/type_max
for input_data in [[np.nan, 2., np.nan, 3., np.inf, 1, -1000],
[8, 31, 1123, -1024],
[2., 3., 1, -1000, np.inf]]:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.min() any parameters unsupported")
def test_series_min_param(self):
def test_impl(S, param_skipna):
return S.min(skipna=param_skipna)
hpat_func = hpat.jit(test_impl)
for input_data, param_skipna in [([np.nan, 2., np.nan, 3., 1, -1000, np.inf], True),
([2., 3., 1, np.inf, -1000], False)]:
S = pd.Series(input_data)
result_ref = test_impl(S, param_skipna)
result = hpat_func(S, param_skipna)
self.assertEqual(result, result_ref)
def test_series_max(self):
def test_impl(S):
return S.max()
hpat_func = hpat.jit(test_impl)
# TODO type_min/type_max
for input_data in [[np.nan, 2., np.nan, 3., np.inf, 1, -1000],
[8, 31, 1123, -1024],
[2., 3., 1, -1000, np.inf]]:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.max() any parameters unsupported")
def test_series_max_param(self):
def test_impl(S, param_skipna):
return S.max(skipna=param_skipna)
hpat_func = hpat.jit(test_impl)
for input_data, param_skipna in [([np.nan, 2., np.nan, 3., 1, -1000, np.inf], True),
([2., 3., 1, np.inf, -1000], False)]:
S = pd.Series(input_data)
result_ref = test_impl(S, param_skipna)
result = hpat_func(S, param_skipna)
self.assertEqual(result, result_ref)
def test_series_value_counts(self):
def test_impl(S):
return S.value_counts()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['AA', 'BB', 'C', 'AA', 'C', 'AA'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_dist_input1(self):
'''Verifies distribution of a Series without index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n))
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_series_dist_input2(self):
'''Verify distribution of a Series with integer index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n), 1 + np.arange(n))
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip("Passed if run single")
def test_series_dist_input3(self):
'''Verify distribution of a Series with string index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n), ['abc{}'.format(id) for id in range(n)])
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_series_tuple_input1(self):
def test_impl(s_tup):
return s_tup[0].max()
hpat_func = hpat.jit(test_impl)
n = 111
S = pd.Series(np.arange(n))
S2 = pd.Series(np.arange(n) + 1.0)
s_tup = (S, 1, S2)
self.assertEqual(hpat_func(s_tup), test_impl(s_tup))
@unittest.skip("pending handling of build_tuple in dist pass")
def test_series_tuple_input_dist1(self):
def test_impl(s_tup):
return s_tup[0].max()
hpat_func = hpat.jit(locals={'s_tup:input': 'distributed'})(test_impl)
n = 111
S = pd.Series(np.arange(n))
S2 = pd.Series(np.arange(n) + 1.0)
start, end = get_start_end(n)
s_tup = (S, 1, S2)
h_s_tup = (S[start:end], 1, S2[start:end])
self.assertEqual(hpat_func(h_s_tup), test_impl(s_tup))
def test_series_rolling1(self):
def test_impl(S):
return S.rolling(3).sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_concat1(self):
def test_impl(S1, S2):
return pd.concat([S1, S2]).values
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6., 7.])
np.testing.assert_array_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_map1(self):
def test_impl(S):
return S.map(lambda a: 2 * a)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_map_global1(self):
def test_impl(S):
return S.map(lambda a: a + GLOBAL_VAL)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_map_tup1(self):
def test_impl(S):
return S.map(lambda a: (a, 2 * a))
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_map_tup_map1(self):
def test_impl(S):
A = S.map(lambda a: (a, 2 * a))
return A.map(lambda a: a[1])
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_combine(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5.])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_float3264(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([np.float64(1), np.float64(2),
np.float64(3), np.float64(4), np.float64(5)])
S2 = pd.Series([np.float32(1), np.float32(2),
np.float32(3), np.float32(4), np.float32(5)])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_assert1(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1, 2, 3])
S2 = pd.Series([6., 21., 3., 5.])
with self.assertRaises(AssertionError):
hpat_func(S1, S2)
def test_series_combine_assert2(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([6., 21., 3., 5.])
S2 = pd.Series([1, 2, 3])
with self.assertRaises(AssertionError):
hpat_func(S1, S2)
def test_series_combine_integer(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b, 16)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1, 2, 3, 4, 5])
S2 = pd.Series([6, 21, 3, 5])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_different_types(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([6.1, 21.2, 3.3, 5.4, 6.7])
S2 = pd.Series([1, 2, 3, 4, 5])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_integer_samelen(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1, 2, 3, 4, 5])
S2 = pd.Series([6, 21, 17, -5, 4])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_samelen(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5., 0.0])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_value(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b, 1237.56)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5.])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_value_samelen(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b, 1237.56)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5., 0.0])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_apply1(self):
def test_impl(S):
return S.apply(lambda a: 2 * a)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_abs1(self):
def test_impl(S):
return S.abs()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, -2., 3., 0.5E-01, 0xFF, 0o7, 0b101])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_cov1(self):
def test_impl(S1, S2):
return S1.cov(S2)
hpat_func = hpat.jit(test_impl)
for pair in _cov_corr_series:
S1, S2 = pair
np.testing.assert_almost_equal(
hpat_func(S1, S2), test_impl(S1, S2),
err_msg='S1={}\nS2={}'.format(S1, S2))
def test_series_corr1(self):
def test_impl(S1, S2):
return S1.corr(S2)
hpat_func = hpat.jit(test_impl)
for pair in _cov_corr_series:
S1, S2 = pair
np.testing.assert_almost_equal(
hpat_func(S1, S2), test_impl(S1, S2),
err_msg='S1={}\nS2={}'.format(S1, S2))
def test_series_str_len1(self):
def test_impl(S):
return S.str.len()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'abc', 'c', 'cccd'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
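# NOTE: _make_func_from_text(func_text) presumably compiles the generated source with
# exec() and returns the resulting test_impl; an assumed equivalent sketch:
#     def _make_func_from_text(func_text):
#         loc_vars = {}
#         exec(func_text, globals(), loc_vars)
#         return loc_vars['test_impl']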
def test_series_str2str(self):
str2str_methods = ('capitalize', 'lower', 'lstrip', 'rstrip',
'strip', 'swapcase', 'title', 'upper')
for method in str2str_methods:
func_text = "def test_impl(S):\n"
func_text += " return S.str.{}()\n".format(method)
test_impl = _make_func_from_text(func_text)
hpat_func = hpat.jit(test_impl)
S = pd.Series([' \tbbCD\t ', 'ABC', ' mCDm\t', 'abc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_append1(self):
def test_impl(S, other):
return S.append(other).values
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([-2., 3., 9.1])
S2 = pd.Series([-2., 5.0])
# Test single series
np.testing.assert_array_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_append2(self):
def test_impl(S1, S2, S3):
return S1.append([S2, S3]).values
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([-2., 3., 9.1])
S2 = pd.Series([-2., 5.0])
S3 = pd.Series([1.0])
# Test appending a list of series
np.testing.assert_array_equal(hpat_func(S1, S2, S3),
test_impl(S1, S2, S3))
def test_series_isin_list1(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
values = [1, 2, 5, 7, 8]
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_list2(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
values = [1., 2., 5., 7., 8.]
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_list3(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['a', 'b', 'q', 'w', 'c', 'd', 'e', 'r'])
values = ['a', 'q', 'c', 'd', 'e']
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_set1(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
values = {1, 2, 5, 7, 8}
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_set2(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
values = {1., 2., 5., 7., 8.}
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
@unittest.skip('TODO: requires hashable unicode strings in Numba')
def test_series_isin_set3(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['a', 'b', 'c', 'd', 'e'] * 2)
values = {'b', 'c', 'e'}
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isna1(self):
def test_impl(S):
return S.isna()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3., np.inf])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_isnull1(self):
def test_impl(S):
return S.isnull()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_isnull_full(self):
def test_impl(series):
return series.isnull()
hpat_func = hpat.jit(test_impl)
for data in test_global_input_data_numeric + [test_global_input_data_unicode_kind4]:
series = pd.Series(data * 3)
ref_result = test_impl(series)
jit_result = hpat_func(series)
pd.testing.assert_series_equal(ref_result, jit_result)
def test_series_notna1(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_notna_noidx_float(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_float64:
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_notna_noidx_int(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_integer64:
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_notna_noidx_num(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_numeric:
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
def test_series_notna_noidx_str(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
input_data = test_global_input_data_unicode_kind4
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
def test_series_str_notna(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', None, 'c', 'cccd'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_str_isna1(self):
def test_impl(S):
return S.isna()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', None, 'c', 'cccd'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('AssertionError: Series are different')
def test_series_dt_isna1(self):
def test_impl(S):
return S.isna()
hpat_func = hpat.jit(test_impl)
S = pd.Series([pd.NaT, pd.Timestamp('1970-12-01'), pd.Timestamp('2012-07-25')])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_nlargest1(self):
def test_impl(S):
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nlargest_default1(self):
def test_impl(S):
return S.nlargest()
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nlargest_nan1(self):
def test_impl(S):
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, np.nan, 3.0, 2.0, np.nan, 4.0])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
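# NOTE: ParquetGenerator.gen_kde_pq() presumably writes a sample 'kde.parquet' file with
# a float 'points' column; the parallel tests below read it back via pyarrow (pq) so the
# data can be loaded in a distributable way (an assumption based on how it is used here).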
def test_series_nlargest_parallel1(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func().values, test_impl().values)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_nlargest_index_str(self):
def test_impl(S):
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
S = pd.Series([73, 21, 10005, 5, 1], index=['a', 'b', 'c', 'd', 'e'])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_nlargest_index_int(self):
def test_impl(S):
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
S = pd.Series([73, 21, 10005, 5, 1], index=[2, 3, 4, 5, 6])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nsmallest1(self):
def test_impl(S):
return S.nsmallest(4)
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nsmallest_default1(self):
def test_impl(S):
return S.nsmallest()
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nsmallest_nan1(self):
def test_impl(S):
return S.nsmallest(4)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, np.nan, 3.0, 2.0, np.nan, 4.0])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nsmallest_parallel1(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.nsmallest(4)
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func().values, test_impl().values)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_nsmallest_index_str(self):
def test_impl(S):
return S.nsmallest(3)
hpat_func = hpat.jit(test_impl)
S = pd.Series([41, 32, 33, 4, 5], index=['a', 'b', 'c', 'd', 'e'])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_nsmallest_index_int(self):
def test_impl(S):
return S.nsmallest(3)
hpat_func = hpat.jit(test_impl)
S = pd.Series([41, 32, 33, 4, 5], index=[1, 2, 3, 4, 5])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_head1(self):
def test_impl(S):
return S.head(4)
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_head_default1(self):
'''Verifies default head method for non-distributed pass of Series with no index'''
def test_impl(S):
return S.head()
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_head_index1(self):
'''Verifies head method for Series with integer index created inside jitted function'''
def test_impl():
S = pd.Series([6, 9, 2, 3, 6, 4, 5], [8, 1, 6, 0, 9, 1, 3])
return S.head(3)
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_head_index2(self):
'''Verifies head method for Series with string index created inside jitted function'''
def test_impl():
S = pd.Series([6, 9, 2, 3, 6, 4, 5], ['a', 'ab', 'abc', 'c', 'f', 'hh', ''])
return S.head(3)
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_head_index3(self):
'''Verifies head method for non-distributed pass of Series with integer index'''
def test_impl(S):
return S.head(3)
hpat_func = hpat.jit(test_impl)
S = pd.Series([6, 9, 2, 3, 6, 4, 5], [8, 1, 6, 0, 9, 1, 3])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip("Passed if run single")
def test_series_head_index4(self):
'''Verifies head method for non-distributed pass of Series with string index'''
def test_impl(S):
return S.head(3)
hpat_func = hpat.jit(test_impl)
S = pd.Series([6, 9, 2, 4, 6, 4, 5], ['a', 'ab', 'abc', 'c', 'f', 'hh', ''])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_head_parallel1(self):
'''Verifies head method for distributed Series with string data and no index'''
def test_impl(S):
return S.head(7)
hpat_func = hpat.jit(distributed={'S'})(test_impl)
# need to test different lengths, as head's size is fixed and the implementation
# depends on the relation of the per-processor data size to the output data size
for n in range(1, 5):
S = pd.Series(['a', 'ab', 'abc', 'c', 'f', 'hh', ''] * n)
start, end = get_start_end(len(S))
pd.testing.assert_series_equal(hpat_func(S[start:end]), test_impl(S))
self.assertTrue(count_array_OneDs() > 0)
def test_series_head_index_parallel1(self):
'''Verifies head method for distributed Series with integer index'''
def test_impl(S):
return S.head(3)
hpat_func = hpat.jit(distributed={'S'})(test_impl)
S = pd.Series([6, 9, 2, 3, 6, 4, 5], [8, 1, 6, 0, 9, 1, 3])
start, end = get_start_end(len(S))
pd.testing.assert_series_equal(hpat_func(S[start:end]), test_impl(S))
self.assertTrue(count_array_OneDs() > 0)
@unittest.skip("Passed if run single")
def test_series_head_index_parallel2(self):
'''Verifies head method for distributed Series with string index'''
def test_impl(S):
return S.head(3)
hpat_func = hpat.jit(distributed={'S'})(test_impl)
S = pd.Series([6, 9, 2, 3, 6, 4, 5], ['a', 'ab', 'abc', 'c', 'f', 'hh', ''])
start, end = get_start_end(len(S))
pd.testing.assert_series_equal(hpat_func(S[start:end]), test_impl(S))
self.assertTrue(count_array_OneDs() > 0)
def test_series_head_noidx_float(self):
def test_impl(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_float64:
S = pd.Series(input_data)
for n in [-1, 0, 2, 3]:
result_ref = test_impl(S, n)
result_jit = hpat_func(S, n)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_head_noidx_int(self):
def test_impl(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_integer64:
S = pd.Series(input_data)
for n in [-1, 0, 2, 3]:
result_ref = test_impl(S, n)
result_jit = hpat_func(S, n)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_head_noidx_num(self):
def test_impl(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_numeric:
S = pd.Series(input_data)
for n in [-1, 0, 2, 3]:
result_ref = test_impl(S, n)
result_jit = hpat_func(S, n)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Old implementation not work with n negative and data str")
def test_series_head_noidx_str(self):
def test_impl(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
input_data = test_global_input_data_unicode_kind4
S = pd.Series(input_data)
for n in [-1, 0, 2, 3]:
result_ref = test_impl(S, n)
result_jit = hpat_func(S, n)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Broke another three tests")
def test_series_head_idx(self):
def test_impl(S):
return S.head()
def test_impl_param(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
['as', 'b', 'abb', 'sss', 'ytr65', '', 'qw', 'a', 'b'],
[6, 6, 2, 1, 3, np.inf, np.nan, np.nan, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
for index_data in data_test:
S = pd.Series(input_data, index_data)
result_ref = test_impl(S)
result = hpat_func(S)
pd.testing.assert_series_equal(result, result_ref)
hpat_func_param1 = hpat.jit(test_impl_param)
for param1 in [1, 3, 7]:
result_param1_ref = test_impl_param(S, param1)
result_param1 = hpat_func_param1(S, param1)
pd.testing.assert_series_equal(result_param1, result_param1_ref)
def test_series_median1(self):
'''Verifies median implementation for float and integer series of random data'''
def test_impl(S):
return S.median()
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series(np.random.ranf(m))
self.assertEqual(hpat_func(S), test_impl(S))
# odd size
m = 101
S = pd.Series(np.random.randint(-30, 30, m))
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series(np.random.ranf(m))
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
"BUG: old-style median implementation doesn't filter NaNs")
def test_series_median_skipna_default1(self):
'''Verifies median implementation with default skipna=True argument on a series with NA values'''
def test_impl(S):
return S.median()
hpat_func = hpat.jit(test_impl)
S = pd.Series([2., 3., 5., np.nan, 5., 6., 7.])
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
"Skipna argument is not supported in old-style")
def test_series_median_skipna_false1(self):
'''Verifies median implementation with skipna=False on a series with NA values'''
def test_impl(S):
return S.median(skipna=False)
hpat_func = hpat.jit(test_impl)
# np.inf is not NaN, so verify that a correct number is returned
S1 = pd.Series([2., 3., 5., np.inf, 5., 6., 7.])
self.assertEqual(hpat_func(S1), test_impl(S1))
# TODO: both return values are 'nan', but HPAT's is not np.nan, hence checking with
# assertIs() doesn't work - check if it's Numba related
S2 = pd.Series([2., 3., 5., np.nan, 5., 6., 7.])
self.assertEqual(np.isnan(hpat_func(S2)), np.isnan(test_impl(S2)))
def test_series_median_parallel1(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.median()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertTrue(count_array_OneDs() > 0)
def test_series_argsort_parallel(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.argsort().values
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_idxmin1(self):
def test_impl(A):
return A.idxmin()
hpat_func = hpat.jit(test_impl)
n = 11
np.random.seed(0)
S = pd.Series(np.random.ranf(n))
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_idxmin_str(self):
def test_impl(S):
return S.idxmin()
hpat_func = hpat.jit(test_impl)
S = pd.Series([8, 6, 34, np.nan], ['a', 'ab', 'abc', 'c'])
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skip("Skipna is not implemented")
def test_series_idxmin_str_idx(self):
def test_impl(S):
return S.idxmin(skipna=False)
hpat_func = hpat.jit(test_impl)
S = pd.Series([8, 6, 34, np.nan], ['a', 'ab', 'abc', 'c'])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_idxmin_no(self):
def test_impl(S):
return S.idxmin()
hpat_func = hpat.jit(test_impl)
S = pd.Series([8, 6, 34, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_idxmin_int(self):
def test_impl(S):
return S.idxmin()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1, 2, 3], [4, 45, 14])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_idxmin_noidx(self):
def test_impl(S):
return S.idxmin()
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
[6, 6, 2, 1, 3, np.inf, np.nan, np.nan, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
def test_series_idxmin_idx(self):
def test_impl(S):
return S.idxmin()
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
[6, 6, 2, 1, 3, -np.inf, np.nan, np.inf, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
for index_data in data_test:
S = pd.Series(input_data, index_data)
result_ref = test_impl(S)
result = hpat_func(S)
if np.isnan(result) or np.isnan(result_ref):
self.assertEqual(np.isnan(result), np.isnan(result_ref))
else:
self.assertEqual(result, result_ref)
def test_series_idxmax1(self):
def test_impl(A):
return A.idxmax()
hpat_func = hpat.jit(test_impl)
n = 11
np.random.seed(0)
S = pd.Series(np.random.ranf(n))
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
@unittest.skip("Skipna is not implemented")
def test_series_idxmax_str_idx(self):
def test_impl(S):
return S.idxmax(skipna=False)
hpat_func = hpat.jit(test_impl)
S = pd.Series([8, 6, 34, np.nan], ['a', 'ab', 'abc', 'c'])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_idxmax_noidx(self):
def test_impl(S):
return S.idxmax()
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
[6, 6, 2, 1, 3, np.inf, np.nan, np.inf, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
def test_series_idxmax_idx(self):
def test_impl(S):
return S.idxmax()
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
[6, 6, 2, 1, 3, np.nan, np.nan, np.nan, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
for index_data in data_test:
S = pd.Series(input_data, index_data)
result_ref = test_impl(S)
result = hpat_func(S)
if np.isnan(result) or np.isnan(result_ref):
self.assertEqual(np.isnan(result), np.isnan(result_ref))
else:
self.assertEqual(result, result_ref)
def test_series_sort_values1(self):
def test_impl(A):
return A.sort_values()
hpat_func = hpat.jit(test_impl)
n = 11
np.random.seed(0)
S = pd.Series(np.random.ranf(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_sort_values_index1(self):
def test_impl(A, B):
S = pd.Series(A, B)
return S.sort_values()
hpat_func = hpat.jit(test_impl)
n = 11
np.random.seed(0)
# TODO: support passing Series with Index
# S = pd.Series(np.random.ranf(n), np.random.randint(0, 100, n))
A = np.random.ranf(n)
B = np.random.ranf(n)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
def test_series_sort_values_parallel1(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.sort_values()
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_shift(self):
def pyfunc():
series = pd.Series([1.0, np.nan, -1.0, 0.0, 5e-324])
return series.shift()
cfunc = hpat.jit(pyfunc)
pd.testing.assert_series_equal(cfunc(), pyfunc())
def test_series_shift_unboxing(self):
def pyfunc(series):
return series.shift()
cfunc = hpat.jit(pyfunc)
for data in test_global_input_data_float64:
series = pd.Series(data)
pd.testing.assert_series_equal(cfunc(series), pyfunc(series))
def test_series_shift_full(self):
def pyfunc(series, periods, freq, axis, fill_value):
return series.shift(periods=periods, freq=freq, axis=axis, fill_value=fill_value)
cfunc = hpat.jit(pyfunc)
freq = None
axis = 0
for data in test_global_input_data_float64:
series = pd.Series(data)
for periods in [-2, 0, 3]:
for fill_value in [9.1, np.nan, -3.3, None]:
jit_result = cfunc(series, periods, freq, axis, fill_value)
ref_result = pyfunc(series, periods, freq, axis, fill_value)
pd.testing.assert_series_equal(jit_result, ref_result)
def test_series_shift_str(self):
def pyfunc(series):
return series.shift()
cfunc = hpat.jit(pyfunc)
series = pd.Series(test_global_input_data_unicode_kind4)
with self.assertRaises(TypingError) as raises:
cfunc(series)
msg = 'Method shift(). The object must be a number. Given self.data.dtype: {}'
self.assertIn(msg.format(types.unicode_type), str(raises.exception))
def test_series_shift_fill_str(self):
def pyfunc(series, fill_value):
return series.shift(fill_value=fill_value)
cfunc = hpat.jit(pyfunc)
series = pd.Series(test_global_input_data_float64[0])
with self.assertRaises(TypingError) as raises:
cfunc(series, fill_value='unicode')
msg = 'Method shift(). The object must be a number. Given fill_value: {}'
self.assertIn(msg.format(types.unicode_type), str(raises.exception))
def test_series_shift_unsupported_params(self):
def pyfunc(series, freq, axis):
return series.shift(freq=freq, axis=axis)
cfunc = hpat.jit(pyfunc)
series = pd.Series(test_global_input_data_float64[0])
with self.assertRaises(TypingError) as raises:
cfunc(series, freq='12H', axis=0)
msg = 'Method shift(). Unsupported parameters. Given freq: {}'
self.assertIn(msg.format(types.unicode_type), str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc(series, freq=None, axis=1)
msg = 'Method shift(). Unsupported parameters. Given axis != 0'
self.assertIn(msg, str(raises.exception))
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_shift_index_str(self):
def test_impl(S):
return S.shift()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3., 5., np.nan, 6., 7.], index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_shift_index_int(self):
def test_impl(S):
return S.shift()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3., 5., np.nan, 6., 7.], index=[1, 2, 3, 4, 5, 6, 7])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_index1(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'])
return A.index
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_index2(self):
def test_impl():
A = pd.Series([1, 2, 3], index=[0, 1, 2])
return A.index
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_index3(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A.index
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_take_index_default(self):
def pyfunc():
series = pd.Series([1.0, 13.0, 9.0, -1.0, 7.0])
indices = [1, 3]
return series.take(indices)
cfunc = hpat.jit(pyfunc)
ref_result = pyfunc()
result = cfunc()
pd.testing.assert_series_equal(ref_result, result)
def test_series_take_index_default_unboxing(self):
def pyfunc(series, indices):
return series.take(indices)
cfunc = hpat.jit(pyfunc)
series = pd.Series([1.0, 13.0, 9.0, -1.0, 7.0])
indices = [1, 3]
ref_result = pyfunc(series, indices)
result = cfunc(series, indices)
pd.testing.assert_series_equal(ref_result, result)
def test_series_take_index_int(self):
def pyfunc():
series = pd.Series([1.0, 13.0, 9.0, -1.0, 7.0], index=[3, 0, 4, 2, 1])
indices = [1, 3]
return series.take(indices)
cfunc = hpat.jit(pyfunc)
ref_result = pyfunc()
result = cfunc()
pd.testing.assert_series_equal(ref_result, result)
def test_series_take_index_int_unboxing(self):
def pyfunc(series, indices):
return series.take(indices)
cfunc = hpat.jit(pyfunc)
series = pd.Series([1.0, 13.0, 9.0, -1.0, 7.0], index=[3, 0, 4, 2, 1])
indices = [1, 3]
ref_result = pyfunc(series, indices)
result = cfunc(series, indices)
pd.testing.assert_series_equal(ref_result, result)
def test_series_take_index_str(self):
def pyfunc():
series = pd.Series([1.0, 13.0, 9.0, -1.0, 7.0], index=['test', 'series', 'take', 'str', 'index'])
indices = [1, 3]
return series.take(indices)
cfunc = hpat.jit(pyfunc)
ref_result = pyfunc()
result = cfunc()
pd.testing.assert_series_equal(ref_result, result)
def test_series_take_index_str_unboxing(self):
def pyfunc(series, indices):
return series.take(indices)
cfunc = hpat.jit(pyfunc)
series = pd.Series([1.0, 13.0, 9.0, -1.0, 7.0], index=['test', 'series', 'take', 'str', 'index'])
indices = [1, 3]
ref_result = pyfunc(series, indices)
result = cfunc(series, indices)
pd.testing.assert_series_equal(ref_result, result)
def test_series_iterator_int(self):
def test_impl(A):
return [i for i in A]
A = pd.Series([3, 2, 1, 5, 4])
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_series_iterator_float(self):
def test_impl(A):
return [i for i in A]
A = pd.Series([0.3, 0.2222, 0.1756, 0.005, 0.4])
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_series_iterator_boolean(self):
def test_impl(A):
return [i for i in A]
A = pd.Series([True, False])
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_series_iterator_string(self):
def test_impl(A):
return [i for i in A]
A = pd.Series(['a', 'ab', 'abc', '', 'dddd'])
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_series_iterator_one_value(self):
def test_impl(A):
return [i for i in A]
A = pd.Series([5])
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
@unittest.skip("Fails when NUMA_PES>=2 due to unimplemented sync of such construction after distribution")
def test_series_iterator_no_param(self):
def test_impl():
A = pd.Series([3, 2, 1, 5, 4])
return [i for i in A]
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_iterator_empty(self):
def test_impl(A):
return [i for i in A]
A = pd.Series([np.int64(x) for x in range(0)])
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_series_default_index(self):
def test_impl():
A = pd.Series([3, 2, 1, 5, 4])
return A.index
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
@unittest.skip("Implement drop_duplicates for Series")
def test_series_drop_duplicates(self):
def test_impl():
A = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'])
return A.drop_duplicates()
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_quantile(self):
def test_impl():
A = pd.Series([1, 2.5, .5, 3, 5])
return A.quantile()
hpat_func = hpat.jit(test_impl)
np.testing.assert_equal(hpat_func(), test_impl())
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.quantile() parameter as a list unsupported")
def test_series_quantile_q_vector(self):
def test_series_quantile_q_vector_impl(S, param1):
return S.quantile(param1)
S = pd.Series(np.random.ranf(100))
hpat_func = hpat.jit(test_series_quantile_q_vector_impl)
param1 = [0.0, 0.25, 0.5, 0.75, 1.0]
result_ref = test_series_quantile_q_vector_impl(S, param1)
result = hpat_func(S, param1)
np.testing.assert_equal(result, result_ref)
@unittest.skip("Implement unique without sorting like in pandas")
def test_unique(self):
def test_impl(S):
return S.unique()
hpat_func = hpat.jit(test_impl)
S = pd.Series([2, 1, 3, 3])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_unique_sorted(self):
def test_impl(S):
return S.unique()
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
S[2] = 0
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_unique_str(self):
def test_impl():
data = pd.Series(['aa', 'aa', 'b', 'b', 'cccc', 'dd', 'ddd', 'dd'])
return data.unique()
hpat_func = hpat.jit(test_impl)
# since the order of the elements is different - check only the count of elements
ref_result = test_impl().size
result = hpat_func().size
np.testing.assert_array_equal(ref_result, result)
def test_series_groupby_count(self):
def test_impl():
A = pd.Series([13, 11, 21, 13, 13, 51, 42, 21])
grouped = A.groupby(A, sort=False)
return grouped.count()
hpat_func = hpat.jit(test_impl)
ref_result = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, ref_result)
@unittest.skip("getiter for this type is not implemented yet")
def test_series_groupby_iterator_int(self):
def test_impl():
A = pd.Series([13, 11, 21, 13, 13, 51, 42, 21])
grouped = A.groupby(A)
return [i for i in grouped]
hpat_func = hpat.jit(test_impl)
ref_result = test_impl()
result = hpat_func()
np.testing.assert_array_equal(result, ref_result)
def test_series_std(self):
def pyfunc():
series = pd.Series([1.0, np.nan, -1.0, 0.0, 5e-324])
return series.std()
cfunc = hpat.jit(pyfunc)
ref_result = pyfunc()
result = cfunc()
np.testing.assert_equal(ref_result, result)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'Series.std() parameters "skipna" and "ddof" unsupported')
def test_series_std_unboxing(self):
def pyfunc(series, skipna, ddof):
return series.std(skipna=skipna, ddof=ddof)
cfunc = hpat.jit(pyfunc)
for data in test_global_input_data_numeric + [[]]:
series = pd.Series(data)
for ddof in [0, 1]:
for skipna in [True, False]:
ref_result = pyfunc(series, skipna=skipna, ddof=ddof)
result = cfunc(series, skipna=skipna, ddof=ddof)
np.testing.assert_equal(ref_result, result)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'Series.std() strings as input data unsupported')
def test_series_std_str(self):
def pyfunc(series):
return series.std()
cfunc = hpat.jit(pyfunc)
series = pd.Series(test_global_input_data_unicode_kind4)
with self.assertRaises(TypingError) as raises:
cfunc(series)
msg = 'Method std(). The object must be a number. Given self.data.dtype: {}'
self.assertIn(msg.format(types.unicode_type), str(raises.exception))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'Series.std() parameters "axis", "level", "numeric_only" unsupported')
def test_series_std_unsupported_params(self):
def pyfunc(series, axis, level, numeric_only):
return series.std(axis=axis, level=level, numeric_only=numeric_only)
cfunc = hpat.jit(pyfunc)
series = pd.Series(test_global_input_data_float64[0])
msg = 'Method std(). Unsupported parameters. Given {}: {}'
with self.assertRaises(TypingError) as raises:
cfunc(series, axis=1, level=None, numeric_only=None)
self.assertIn(msg.format('axis', 'int'), str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc(series, axis=None, level=1, numeric_only=None)
self.assertIn(msg.format('level', 'int'), str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc(series, axis=None, level=None, numeric_only=True)
self.assertIn(msg.format('numeric_only', 'bool'), str(raises.exception))
def test_series_nunique(self):
def test_series_nunique_impl(S):
return S.nunique()
def test_series_nunique_param1_impl(S, dropna):
return S.nunique(dropna)
hpat_func = hpat.jit(test_series_nunique_impl)
the_same_string = "the same string"
test_input_data = []
data_simple = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
['aa', 'aa', 'b', 'b', 'cccc', 'dd', 'ddd', 'dd'],
['aa', 'copy aa', the_same_string, 'b', 'b', 'cccc', the_same_string, 'dd', 'ddd', 'dd', 'copy aa', 'copy aa'],
[]
]
data_extra = [[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
# unsupported ['aa', np.nan, 'b', 'b', 'cccc', np.nan, 'ddd', 'dd'],
# unsupported [np.nan, 'copy aa', the_same_string, 'b', 'b', 'cccc', the_same_string, 'dd', 'ddd', 'dd', 'copy aa', 'copy aa'],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
if hpat.config.config_pipeline_hpat_default:
"""
HPAT pipeline Series.nunique() does not support numpy.nan
"""
test_input_data = data_simple
else:
test_input_data = data_simple + data_extra
for input_data in test_input_data:
S = pd.Series(input_data)
result_ref = test_series_nunique_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
if not hpat.config.config_pipeline_hpat_default:
"""
HPAT pipeline does not support the dropna parameter of Series.nunique()
"""
hpat_func_param1 = hpat.jit(test_series_nunique_param1_impl)
for param1 in [True, False]:
result_param1_ref = test_series_nunique_param1_impl(S, param1)
result_param1 = hpat_func_param1(S, param1)
self.assertEqual(result_param1, result_param1_ref)
def test_series_var(self):
def pyfunc():
series = pd.Series([1.0, np.nan, -1.0, 0.0, 5e-324])
return series.var()
cfunc = hpat.jit(pyfunc)
np.testing.assert_equal(pyfunc(), cfunc())
def test_series_var_unboxing(self):
def pyfunc(series):
return series.var()
cfunc = hpat.jit(pyfunc)
for data in test_global_input_data_numeric + [[]]:
series = pd.Series(data)
np.testing.assert_equal(pyfunc(series), cfunc(series))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'Series.var() parameters "ddof" and "skipna" unsupported')
def test_series_var_full(self):
def pyfunc(series, skipna, ddof):
return series.var(skipna=skipna, ddof=ddof)
cfunc = hpat.jit(pyfunc)
for data in test_global_input_data_numeric + [[]]:
series = pd.Series(data)
for ddof in [0, 1]:
for skipna in [True, False]:
ref_result = pyfunc(series, skipna=skipna, ddof=ddof)
result = cfunc(series, skipna=skipna, ddof=ddof)
np.testing.assert_equal(ref_result, result)
def test_series_var_str(self):
def pyfunc(series):
return series.var()
cfunc = hpat.jit(pyfunc)
series = pd.Series(test_global_input_data_unicode_kind4)
with self.assertRaises(TypingError) as raises:
cfunc(series)
msg = 'Method var(). The object must be a number. Given self.data.dtype: {}'
self.assertIn(msg.format(types.unicode_type), str(raises.exception))
def test_series_var_unsupported_params(self):
def pyfunc(series, axis, level, numeric_only):
return series.var(axis=axis, level=level, numeric_only=numeric_only)
cfunc = hpat.jit(pyfunc)
series = pd.Series(test_global_input_data_float64[0])
msg = 'Method var(). Unsupported parameters. Given {}: {}'
with self.assertRaises(TypingError) as raises:
cfunc(series, axis=1, level=None, numeric_only=None)
self.assertIn(msg.format('axis', 'int'), str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc(series, axis=None, level=1, numeric_only=None)
self.assertIn(msg.format('level', 'int'), str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc(series, axis=None, level=None, numeric_only=True)
self.assertIn(msg.format('numeric_only', 'bool'), str(raises.exception))
def test_series_count(self):
def test_series_count_impl(S):
return S.count()
hpat_func = hpat.jit(test_series_count_impl)
the_same_string = "the same string"
test_input_data = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
['aa', 'aa', 'b', 'b', 'cccc', 'dd', 'ddd', 'dd'],
['aa', 'copy aa', the_same_string, 'b', 'b', 'cccc', the_same_string, 'dd', 'ddd', 'dd',
'copy aa', 'copy aa'],
[],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf]
]
for input_data in test_input_data:
S =
|
pd.Series(input_data)
|
pandas.Series
|
import os
import unittest
import numpy as np
import pandas as pd
import biclust_comp.processing.impc as impc
class TestPooledVariance(unittest.TestCase):
def test_pooled_variance(self):
group1 = [10, 12, 13, 15, 9]
group2 = [2, 3, 1]
group3 = [1, 1, 1, 1, 1, 1, 1, 2, 3, 2, 3, 2, 3, 2, 3]
# Calculated using https://www.easycalculation.com/statistics/pooled-standard-deviation.php
expected_pooled_variance = (1.326650165133974)**2
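# Sanity check on the expected value above (the standard formula): pooled
# variance is the degrees-of-freedom weighted mean of the group variances,
# sum((n_i - 1) * var_i) / sum(n_i - 1), with ddof=1 sample variances taken
# over group1, group2 and group3.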
counts = np.concatenate([group1, group2, group3])
alpha = 62
beta = 13.1312
ids = [f"ID{i}" for i in range(len(counts))]
counts_df = pd.DataFrame({'variable1': counts,
'variable2': alpha * counts,
'variable3': beta * counts,
'variable4': -1 * counts},
index=ids)
group_labels = np.repeat(['group1', 'group2', 'group3'],
[len(group1), len(group2), len(group3)])
sample_info =
|
pd.DataFrame({'ID': ids, 'group': group_labels})
|
pandas.DataFrame
|
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_float, is_float_dtype, is_scalar
from pandas.core.arrays import IntegerArray, integer_array
from pandas.tests.extension.base import BaseOpsUtil
class TestArithmeticOps(BaseOpsUtil):
def _check_divmod_op(self, s, op, other, exc=None):
super()._check_divmod_op(s, op, other, None)
def _check_op(self, s, op_name, other, exc=None):
op = self.get_op_from_name(op_name)
result = op(s, other)
# compute expected
mask = s.isna()
# if s is a DataFrame, squeeze to a Series
# for comparison
if isinstance(s, pd.DataFrame):
result = result.squeeze()
s = s.squeeze()
mask = mask.squeeze()
# other array is an Integer
if isinstance(other, IntegerArray):
omask = getattr(other, "mask", None)
mask = getattr(other, "data", other)
if omask is not None:
mask |= omask
# 1 ** na is 1 (not na), so those positions need to be unmasked
if op_name == "__pow__":
mask = np.where(~s.isna() & (s == 1), False, mask)
elif op_name == "__rpow__":
other_is_one = other == 1
if isinstance(other_is_one, pd.Series):
other_is_one = other_is_one.fillna(False)
mask = np.where(other_is_one, False, mask)
# float result type or float op
if (
is_float_dtype(other)
or is_float(other)
or op_name in ["__rtruediv__", "__truediv__", "__rdiv__", "__div__"]
):
rs = s.astype("float")
expected = op(rs, other)
self._check_op_float(result, expected, mask, s, op_name, other)
# integer result type
else:
rs = pd.Series(s.values._data, name=s.name)
expected = op(rs, other)
self._check_op_integer(result, expected, mask, s, op_name, other)
def _check_op_float(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in float dtypes
expected[mask] = np.nan
if "floordiv" in op_name:
# Series op sets 1//0 to np.inf, which IntegerArray does not do (yet)
mask2 = np.isinf(expected) & np.isnan(result)
expected[mask2] = np.nan
tm.assert_series_equal(result, expected)
def _check_op_integer(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in integer dtypes
# to compare properly, we convert the expected
# to float, mask to nans and convert infs
# if we have uints then we process as uints
# then convert to float
# and we ultimately want to create a IntArray
# for comparisons
fill_value = 0
# mod/rmod turn floating 0 into NaN while
# integer works as expected (no nan)
if op_name in ["__mod__", "__rmod__"]:
if is_scalar(other):
if other == 0:
expected[s.values == 0] = 0
else:
expected = expected.fillna(0)
else:
expected[
(s.values == 0).fillna(False)
& ((expected == 0).fillna(False) | expected.isna())
] = 0
try:
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
except ValueError:
expected = expected.astype(float)
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
expected[mask] = pd.NA
# assert that the expected astype is ok
# (skip for unsigned as they have wrap around)
if not s.dtype.is_unsigned_integer:
original = pd.Series(original)
# we need to fill with 0's to emulate what an astype('int') does
# (truncation) for certain ops
if op_name in ["__rtruediv__", "__rdiv__"]:
mask |= original.isna()
original = original.fillna(0).astype("int")
original = original.astype("float")
original[mask] = np.nan
tm.assert_series_equal(original, expected.astype("float"))
# assert our expected result
tm.assert_series_equal(result, expected)
def test_arith_integer_array(self, data, all_arithmetic_operators):
# we operate with a rhs of an integer array
op = all_arithmetic_operators
s = pd.Series(data)
rhs = pd.Series([1] * len(data), dtype=data.dtype)
rhs.iloc[-1] = np.nan
self._check_op(s, op, rhs)
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# scalar
op = all_arithmetic_operators
s = pd.Series(data)
self._check_op(s, op, 1, exc=TypeError)
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
op = all_arithmetic_operators
df = pd.DataFrame({"A": data})
self._check_op(df, op, 1, exc=TypeError)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
# ndarray & other series
op = all_arithmetic_operators
s = pd.Series(data)
other = np.ones(len(s), dtype=s.dtype.type)
self._check_op(s, op, other, exc=TypeError)
def test_arith_coerce_scalar(self, data, all_arithmetic_operators):
op = all_arithmetic_operators
s = pd.Series(data)
other = 0.01
self._check_op(s, op, other)
@pytest.mark.parametrize("other", [1.0, np.array(1.0)])
def test_arithmetic_conversion(self, all_arithmetic_operators, other):
# if we have a float operand we should have a float result
# if that is equal to an integer
op = self.get_op_from_name(all_arithmetic_operators)
s = pd.Series([1, 2, 3], dtype="Int64")
result = op(s, other)
assert result.dtype is np.dtype("float")
def test_arith_len_mismatch(self, all_arithmetic_operators):
# operating with a list-like with non-matching length raises
op = self.get_op_from_name(all_arithmetic_operators)
other = np.array([1.0])
s = pd.Series([1, 2, 3], dtype="Int64")
with pytest.raises(ValueError, match="Lengths must match"):
op(s, other)
@pytest.mark.parametrize("other", [0, 0.5])
def test_arith_zero_dim_ndarray(self, other):
arr = integer_array([1, None, 2])
result = arr + np.array(other)
expected = arr + other
tm.assert_equal(result, expected)
def test_error(self, data, all_arithmetic_operators):
# invalid ops
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
opa = getattr(data, op)
# invalid scalars
msg = (
r"(:?can only perform ops with numeric values)"
r"|(:?IntegerArray cannot perform the operation mod)"
)
with pytest.raises(TypeError, match=msg):
ops("foo")
with pytest.raises(TypeError, match=msg):
ops(pd.Timestamp("20180101"))
# invalid array-likes
with pytest.raises(TypeError, match=msg):
ops(pd.Series("foo", index=s.index))
if op != "__rpow__":
# TODO(extension)
# rpow with a datetimelike coerces the integer array incorrectly
msg = (
"can only perform ops with numeric values|"
"cannot perform .* with this index type: DatetimeArray|"
"Addition/subtraction of integers and integer-arrays "
"with DatetimeArray is no longer supported. *"
)
with pytest.raises(TypeError, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
# 2d
result = opa(pd.DataFrame({"A": s}))
assert result is NotImplemented
# msg = r"can only perform ops with 1-d structures"
msg = "Lengths must match"
with pytest.raises(ValueError, match=msg):
opa(np.arange(len(s)).reshape(-1, len(s)))
@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)])
def test_divide_by_zero(self, zero, negative):
# https://github.com/pandas-dev/pandas/issues/27398
a = pd.array([0, 1, -1, None], dtype="Int64")
result = a / zero
expected = np.array([np.nan, np.inf, -np.inf, np.nan])
if negative:
expected *= -1
tm.assert_numpy_array_equal(result, expected)
def test_pow_scalar(self):
a =
|
pd.array([-1, 0, 1, None, 2], dtype="Int64")
|
pandas.array
|
'''
At the time of writing this code I'm learning BeautifulSoup, so a lot of comments are just to help me understand what bs4 functions are doing.
'''
from urllib.request import urlopen
from bs4 import BeautifulSoup
import time
import pandas as pd
import re
def scrape_first_table(url,
headers_limit=2,
headers_index=0,
row_start=1,
headers_start=0):
'''
Takes in a url string and soup slicing parameters, returns a df of the first table on the page.
'''
soup = url_to_soup(url)
headers = get_table_headers(soup, headers_limit, headers_index)
rows_data = get_row_data(soup, row_start)
return pd.DataFrame(rows_data, columns = headers[headers_start:])
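# Example usage (a sketch with a hypothetical URL; assumes url_to_soup,
# get_table_headers and get_row_data are defined earlier in this module):
# first_table_df = scrape_first_table('https://example.com/stats.html')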
def scrape_mvp_vote_results_by_year(years):
'''Takes in an iterable of years as ints, returns a pandas df of MVP voting results for the year ending in the given year.
E.g: passing range(2020,2017,-1) returns results from 2020 descending to 2018
Args:
years ([int]):list of years
Returns:
pandas.Dataframe : dataframe of voting results
'''
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Tests for abagen.samples module
"""
import numpy as np
import pandas as pd
import pytest
from abagen import samples
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# generate fake data (based largely on real data) so we know what to expect #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
@pytest.fixture(scope='module')
def ontology():
""" Fake ontology dataframe
"""
sid = [4251, 4260, 4322, 4323, 9422]
hemi = ['L', 'R', 'L', 'R', np.nan]
acronym = ['S', 'S', 'Cl', 'Cl', 'CC']
path = [
'/4005/4006/4007/4008/4219/4249/12896/4251/',
'/4005/4006/4007/4008/4219/4249/12896/4260/',
'/4005/4006/4007/4275/4321/4322/',
'/4005/4006/4007/4275/4321/4323/',
'/4005/9352/9418/9422/',
]
name = [
'subiculum, left',
'subiculum, right',
'claustrum, left',
'claustrum, right',
'central canal',
]
return pd.DataFrame(dict(id=sid, hemisphere=hemi, name=name,
acronym=acronym, structure_id_path=path))
@pytest.fixture(scope='module')
def mm_annotation():
""" Fake annotation dataframe with some samples mislabelled
"""
mni_x = [-10, -20, 30, 40, 0]
sid = [4251, 4323, 4323, 4251, 9422]
sacr = ['S', 'Cl', 'Cl', 'S', 'CC']
sname = [
'subiculum, left',
'claustrum, right',
'claustrum, right',
'subiculum, left',
'central canal'
]
ind = pd.Series(range(len(sid)), name='sample_id')
return pd.DataFrame(dict(mni_x=mni_x, structure_id=sid,
structure_acronym=sacr, structure_name=sname),
index=ind)
@pytest.fixture(scope='module')
def annotation(mm_annotation):
""" Fake annotation dataframe
"""
out = mm_annotation.loc[[0, 2, 4]].reset_index(drop=True)
out.index.name = 'sample_id'
return out
@pytest.fixture(scope='module')
def microarray():
""" Fake microarray dataframe
"""
data = np.arange(9).reshape(3, 3)
cols = pd.Series(range(3), name='sample_id')
ind = pd.Series([1058685, 1058684, 1058683], name='probe_id')
return pd.DataFrame(data, columns=cols, index=ind)
@pytest.fixture(scope='module')
def pacall(microarray):
""" Fake PACall dataframe
"""
data = [[0, 0, 1], [0, 1, 1], [1, 1, 1]]
return pd.DataFrame(data, columns=microarray.columns,
index=microarray.index)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# test all the functions on our generated fake data so we know what to expect #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def test_update_mni_coords():
# xyz coordinates are getting replaced so who cares about the original
# but ids are important and need to be real!
x = y = z = [-10, 20, 30, 40]
ids = [594, 2985, 1058, 1145]
annotation = pd.DataFrame(dict(mni_x=x, mni_y=y, mni_z=z, well_id=ids))
out = samples.update_mni_coords(annotation)
# confirm that no samples were lost / reordered during the update process
# and that we still have all our columns
assert np.all(out['well_id'] == annotation['well_id'])
assert np.all(out.columns == annotation.columns)
# but DO confirm that _something_ changes about the dataframes (i.e., our
# bogus coordinates should be different)
with pytest.raises(AssertionError):
pd.testing.assert_frame_equal(out, annotation)
assert np.all(out['mni_x'] != annotation['mni_x'])
assert np.all(out['mni_y'] != annotation['mni_y'])
assert np.all(out['mni_z'] != annotation['mni_z'])
# if we provide invalid well_ids we should receive an error!
annotation['well_id'] = [594, 2985, 1058, 99999999999]
with pytest.raises(KeyError):
samples.update_mni_coords(annotation)
@pytest.mark.parametrize('path, expected', [
('/4005/4006/4007/4275/4276/4277/4278/12899/4286/', 'subcortex'),
('/4005/4006/4007/4275/4327/4341/4342/4344/', 'subcortex'),
('/4005/4006/4007/4008/4084/4103/4111/4112/4113/', 'cortex'),
('/4005/4006/4833/4696/4697/12930/12931/12933/4751/', 'cerebellum'),
('/4005/4006/9512/9676/9677/9680/9681/', 'brainstem'),
('/4005/4006/4833/9131/9132/9133/9492/', 'brainstem'),
('/4005/9218/9298/12959/265505622/', 'white matter'),
('/4005/9218/9219/9220/9227/', 'white matter'),
('/4005/9352/9418/9419/9708/', 'other'),
('/4005/9352/9353/9400/9402/', 'other'),
('/4005/4006/4833', None),
('thisisnotapath', None), # TODO: should this error?
])
def test__get_struct(path, expected):
out = samples._get_struct(path)
assert out == expected if expected is not None else out is None
def test_drop_mismatch_samples(mm_annotation, ontology):
# here's what we expect (i.e., indices 1 & 3 are dropped and the structure
# for the remaining samples is correctly extracted from the paths)
expected = pd.DataFrame(dict(hemisphere=['L', 'R', np.nan],
mni_x=[-10, 30, 0],
structure_acronym=['S', 'Cl', 'CC'],
structure=['subcortex', 'subcortex', 'other'],
structure_id=[4251, 4323, 9422],
structure_name=['subiculum, left',
'claustrum, right',
'central canal']),
index=[0, 2, 4])
# do we get what we expect? (ignore ordering of columns / index)
out = samples.drop_mismatch_samples(mm_annotation, ontology)
pd.testing.assert_frame_equal(out, expected, check_like=True)
@pytest.mark.xfail
def test__assign_sample():
assert False
@pytest.mark.xfail
def test__check_label():
assert False
@pytest.mark.xfail
def test_label_samples():
assert False
def test_mirror_samples(microarray, pacall, annotation, ontology):
sids = pd.Series(range(5), name='sample_id')
# we're changing quite a bit of stuff in the annotation dataframe
aexp = pd.DataFrame(dict(mni_x=[-10, 30, 0, 10, -30],
structure_acronym=['S', 'Cl', 'CC', 'S', 'Cl'],
structure_id=[4251, 4323, 9422, 4260, 4322],
structure_name=['subiculum, left',
'claustrum, right',
'central canal',
'subiculum, right',
'claustrum, left']),
index=sids)
# and far less stuff in the microarray/pacall dataframes
mexp = pd.DataFrame(microarray.loc[:, [0, 1, 2, 0, 1]].values,
columns=sids, index=microarray.index)
pexp = pd.DataFrame(pacall.loc[:, [0, 1, 2, 0, 1]].values,
columns=sids, index=pacall.index)
# but let's confirm all the outputs are as-expected
m, p, a = samples.mirror_samples(microarray, pacall, annotation, ontology)
pd.testing.assert_frame_equal(a[0], aexp, check_like=True)
pd.testing.assert_frame_equal(m[0], mexp)
pd.testing.assert_frame_equal(p[0], pexp)
m, p, a = samples._mirror_samples(microarray, pacall, annotation, ontology)
pd.testing.assert_frame_equal(a, aexp, check_like=True)
pd.testing.assert_frame_equal(m, mexp)
|
pd.testing.assert_frame_equal(p, pexp)
|
pandas.testing.assert_frame_equal
|
#%%
import numpy as np
import pandas as pd
import cvxpy as cvx
import matplotlib.pyplot as plt
# Global variables (sizes):
PV_ARRAY_SIZE_KW = 420 # kWAC rating of the PV array
DIESEL_GEN_SIZE_KW = 1000 # kWAC rating of the diesel generator
# Diesel fuel consumption coefficients from https://ieeexplore.ieee.org/document/8494571
DIESEL_FUEL_CONS_A = 0.246 # Liters per kWh
DIESEL_FUEL_CONS_B = 0.08415 # Liters per kW (rating)
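# A sketch of the usual linear fuel-curve approximation these coefficients
# suggest (an assumption about how they are applied later in this script):
# fuel_liters_per_hour ~= DIESEL_FUEL_CONS_A * P_out_kW + DIESEL_FUEL_CONS_B * DIESEL_GEN_SIZE_KW
# i.e. a term proportional to delivered power plus a fixed term proportional to the rating.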
#%% Obtain aggregate load
# NOTE: Must run resi_data.py, building_data.py to obtain the following CSV files!
residf = pd.read_csv('resi_load.csv', index_col=0)
bldgdf =
|
pd.read_csv('bldg_load.csv', index_col=0)
|
pandas.read_csv
|
print(snakemake.input[0])
import pandas as pd
import numpy as np
import os
from collections import Counter
## function to count the types of taxa assigned at each taxonomic level of every ASV
def f(x):
vars=x[~x.isnull()]
if(len(vars)== 0):
return None
res=Counter(vars).most_common()
return "|".join(["{}:{}".format(el[0],el[1])for el in res])
# Reads in all three taxonomic assigments from all three databases
rdp=pd.read_table(snakemake.input[0])
silva=
|
pd.read_table(snakemake.input[1])
|
pandas.read_table
|
import os
import cv2
import random
import numpy as np
import pandas as pd
from PIL import Image
import matplotlib.pyplot as plt
import imageio
import imgaug.augmenters as iaa
from imgaug.augmentables import Keypoint, KeypointsOnImage
import torch
import torchvision
from animal_data_loader import AnimalDatasetCombined, ToTensor
import thinplate as tps
def L2_dist(v1, v2):
v1 = np.array(v1)
v2 = np.array(v2)
return np.sqrt(np.abs(np.sum((v2 - v1) ** 2)))
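# e.g. L2_dist([0, 0], [3, 4]) == 5.0, the Euclidean distance between two keypoint vectors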
def calc_dist(avg_vector, animal_list):
result = []
for i in animal_list:
fname = i[0]
keypoints = i[1]
dist = L2_dist(avg_vector, keypoints)
temp = ((fname, keypoints), dist)
result.append(temp)
return result
def ClusterIndicesNumpy(clustNum, labels_array): # numpy
return np.where(labels_array == clustNum)[0]
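# e.g. ClusterIndicesNumpy(1, np.array([0, 1, 1, 2])) returns array([1, 2]),
# the positions of all samples assigned to cluster 1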
def save_images(
fname_list,
images_path="../data/cropped_images/",
annot_path="../data/updated_df.csv",
save_dir="./keypoints/",
animal_class=None,
train=True,
):
if not os.path.isdir(save_dir):
os.mkdir(save_dir)
trainset = AnimalDatasetCombined(
images_path,
annot_path,
fname_list,
input_size=(512, 512),
output_size=(128, 128),
transforms=torchvision.transforms.Compose([ToTensor()]),
train=train,
)
train_loader = torch.utils.data.DataLoader(trainset, batch_size=1, shuffle=False)
for i, (inps, labels, label_masks, _) in enumerate(train_loader):
for j in range(labels.shape[0]):
final_img = np.zeros((128, 128))
input_img = inps[j].cpu().numpy().transpose((1, 2, 0))
input_img = Image.fromarray(input_img.astype(np.uint8))
input_img.save(
os.path.join(
save_dir, "image" + str(animal_class) + "_{}_{}.png".format(i, j)
)
)
for k in range(labels.shape[1]):
images = labels.cpu().numpy()[j][k]
final_img += images
# plt.imsave(
# os.path.join(
# save_dir,
# "temp" + str(animal_class) + "_{}_{}_{}.png".format(i, j, k),
# ),
# images,
# cmap="gray",
# )
plt.imsave(
os.path.join(
save_dir, "temp" + str(animal_class) + "_{}_{}.png".format(i, j)
),
final_img,
cmap="gray",
)
def get_keypoints(fname, csv_file="../data/updated_df.csv"):
keypoint_names = [
"L_Eye",
"R_Eye",
"Nose",
"L_EarBase",
"R_EarBase",
"L_F_Elbow",
"L_F_Paw",
"R_F_Paw",
"R_F_Elbow",
"L_B_Paw",
"R_B_Paw",
"L_B_Elbow",
"R_B_Elbow",
"L_F_Knee",
"R_F_Knee",
"L_B_Knee",
"R_B_Knee",
]
keypoints = []
vis = []
annot_df =
|
pd.read_csv(csv_file)
|
pandas.read_csv
|
from spyre import server
import sys
import pandas as pd
PROJECT_ID = 'spyre-example'
stations = {
'Paris': '071490',
'New York': '725033',
'Seattle': '727930',
'LA': '722950',
'Hanoi': '488193',
'Delhi': '421810',
'Moscow': '276120',
'Tehran': '407540',
'Shanghai': '583620',
}
station_options = [
{'label': k, 'value': v}
for k, v in stations.items()
]
station_options[0]['checked'] = True
station_options[1]['checked'] = True
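# Each entry is a dict such as {'label': 'Paris', 'value': '071490'}; the two
# entries flagged above also carry 'checked': True so they start selected
# (exactly which two depends on dict iteration order).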
class WeatherHistoryApp(server.App):
title = "Historical Weather"
inputs = [
{
'type': 'checkboxgroup',
'label': 'Cities',
'options': station_options,
'key': 'stations',
'action_id': 'weatherplot'
}, {
'type': 'radiobuttons',
'label': 'type',
'options': [
{'label': 'Temperature', 'value': 'mean_temp', 'checked': True},
{'label': 'Precipitation', 'value': 'total_precipitation'}
],
'key': 'type',
'action_id': 'weatherplot'
}, {
'type': 'slider',
'label': 'Number of Years to Include',
'min': 1,
'max': 10,
'value': 2,
'key': 'nyears',
'action_id': 'weatherplot'
}, {
'type': 'radiobuttons',
'label': 'Group by',
'options': [
{'label': 'day', 'value': 'day', 'checked': True},
# {'label': 'month', 'value': 'month'},
{'label': 'year', 'value': 'year'}
],
'key': 'groupby',
'action_id': 'weatherplot'
},
]
outputs = [{
"type": "plot",
"id": "weatherplot"
}]
def get_data(self, type, station_ids, n_years):
query = """
SELECT station_number, year, month, day, {type} as value, rain, snow
FROM `publicdata.samples.gsod`
WHERE station_number IN ({stns})
AND year < 2010
AND year >= {minyr}
""".format(
type=type,
stns=','.join(station_ids),
minyr=2010 - n_years
)
df = pd.read_gbq(query, project_id=PROJECT_ID, dialect='standard')
df['date'] =
|
pd.to_datetime(df[['year', 'month', 'day']])
|
pandas.to_datetime
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 20 16:28:59 2020
@author: <NAME>
"""
import pandas as pd
import numpy as np
import sys
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
from pairsnp import calculate_snp_matrix, calculate_distance_matrix
input_path=sys.argv[1]
output_path=sys.argv[2]
metadata_path=sys.argv[3]
outbreak_only=sys.argv[4]
outbreaks=sys.argv[5]
n_threshold=sys.argv[6]
remove_n=sys.argv[7]
class cmd():
def __init__(self, input_path, output_path, metadata_path, outbreak_only, outbreaks, n_threshold, remove_n):
self.input_path = input_path
self.output_path = output_path
self.metadata_path = metadata_path
self.outbreak_only = outbreak_only
self.outbreaks = outbreaks
self.n_threshold = n_threshold
self.remove_n = remove_n
self.fasta = ''
self.indexes = []
self.ids = []
def import_data(self, input_path):
data = pd.read_csv(str(input_path),
delimiter='\n', header=None, names=['fasta'])
self.fasta=data
return data
def find_indexes(self, fasta_file):
array = fasta_file['fasta'].str.find('>')
indexes=[x for x in range(len(array)) if array[x] == 0]
self.indexes = indexes
def reformat(self, fasta_data, index, IDs):
sample_dict = {'Samples':[]}
for y in range(len(index)):
if y == (len(index) -1):
real = fasta_data.iloc[index[y]+1: len(fasta_data)]
else:
real = fasta_data.iloc[index[y]+1: index[y+1]]
real_1 = real['fasta'].str.cat(sep='\n')
real_1 = real_1.replace('\n',"")
sample_dict['Samples'].append({str(IDs[y]):real_1})
self.fasta = sample_dict
def sample_n_filter(self, data, ids, n_threshold):
new_dict = {}
app = []
for num, x in enumerate(ids):
n_count=[data['Samples'][num][str(x)].lower().count('n')]
if n_count[0] <= int(n_threshold):
app.append(num)
new_ids = [ids[i] for i in app]
new_dict['Samples'] = [data['Samples'][i] for i in app]
else:
print('Excluding ' + x + ' for ambiguous sequences')
self.fasta = new_dict
self.ids = new_ids
def filtering(self, sample_data, sample_id):
test_dict={}
for length in range(len(sample_data['Samples'])):
test_dict.setdefault(len(sample_data['Samples'][length][sample_id[length]]),[]).append(sample_id[length])
lis = list(map(len, test_dict.values()))
if len(lis) > 1:
num = [i for i, x in enumerate(lis) if x !=max(lis)]
for li in range(len(num)):
for p in range(len(test_dict[list(test_dict.keys())[num[li]]])):
pos = [i for i, x in enumerate(sample_id) if x == test_dict[list(test_dict.keys())[num[li]]][p]]
print("Excluding sample ID from analysis: " + sample_id[pos[0]] + " due to differences in sample length")
del sample_data['Samples'][pos[0]], sample_id[pos[0]]
self.fasta = sample_data
self.ids = sample_id
def find_snps(self, samples, ids, remove_n):
genome = pd.DataFrame({'genome':ids})
for x in range(len(samples[0].fasta)):
base=[samples[y].fasta[x] for y in range(len(samples))]
summ = pd.Series(base)
if not all([base in samples[0].bases for base in base]) and remove_n:
summ=pd.Series(data=None)
if len(summ.value_counts()) >= 2:
genome[str(x+1)] = base
self.snps = genome
def append_haplotype(self, snps):
bases = []
frame = snps.drop('genome', axis=1)
for per in range(len(frame)):
haplo =""
for snp in frame.iloc[per]:
haplo += snp
bases.append(haplo)
snps.insert(1, 'haplotype', bases)
self.snps = snps
def haplotype_number(self, data):
haplo = data['haplotype']
set_hap = set(haplo)
dict_hap = {}
for y, x in enumerate(set_hap):
dict_hap.setdefault(x,[]).append('h.' + str(y+1))
order =[]
for y in range(len(haplo)):
for x in dict_hap:
if x in haplo[y]:
order.append(dict_hap[x][0])
data.insert(2, 'haplotype_number', order)
self.snps = data
def write_csv(self, output_path, outbreak_data, ids, outbreak_ids, snps, outbreak_only):
if outbreak_only == True:
csv_id = [str(x) + '_' + 'Outbreak_' +
str(outbreak_data['number'].iloc[outbreak_ids.index(x)])
+ str(outbreak_data['p/s'].iloc[outbreak_ids.index(x)]) for x in ids
if x in outbreak_ids]
snps_out = pd.DataFrame.copy(snps, deep=True)
snps_out['genome']=csv_id
snps_out.to_csv(str(output_path) + 'outbreak_only.csv', index=False)
print('Written SNP file to ' + str(output_path) + 'outbreak_only.csv')
else:
snps.to_csv(str(output_path) + '.csv', index=False)
print('Written SNP file to ' + str(output_path))
#Sample class
class sample():
def __init__(self, ids):
self.ids = ids
self.fasta = ""
self.n_threshold = ""
self.bases=['A','T','C','G','a','t','c','g']
class outbreak(cmd):
def __init__(self, input_path, output_path, metadata_path, outbreak_only, outbreaks,n_threshold, remove_n):
super().__init__(input_path, output_path, metadata_path, outbreak_only,outbreaks, n_threshold,remove_n)
self.ids = ""
self.data=""
self.new_meta=""
self.used_data=""
def import_data(self, metadata_path):
outbreak_data = pd.read_csv(str(metadata_path),
delimiter='\t', header=None,
names=['sample', 'number', 'p/s'], index_col=None)
print('Outbreak_data is loaded')
self.data = outbreak_data
self.ids = outbreak_data['sample']
def remove_duplicates(self, data, ids):
dup = ids.value_counts()[ids.value_counts()>1]
pos = []
noted_set=[]
for num, x in enumerate(ids):
if x in list(dup.index):
noted_set.append(x)
if noted_set.count(x) >= 2:
pos.append(num)
print('Outbreak sample duplicates removed')
data.drop(pos, inplace=True)
self.data = data
self.ids = list(data['sample'])
# Only using metadata for which there are samples
def meta_outbreaks_used(self, data, ids, filtered_ids):
counts = [counter for counter, x in enumerate(ids) if x in filtered_ids]
self.used_data = data.iloc[counts]
#Selecting metadata based on outbreak argument
def outbreak_process(self, outbreak_data, outbreaks):
if outbreaks == '-a':
new_meta=outbreak_data
print('Using all outbreak samples..')
elif outbreaks == 'None':
new_meta = pd.DataFrame(data=None,
columns=['sample', 'number', 'p/s'])
print('Using no outbreak samples')
else:
print('Using ' + outbreaks + ' outbreak samples')
outbreaks=[int(s) for s in outbreaks.split(',')]
empty=[]
pos=[]
for num in outbreaks:
empty.append([i for i, x in enumerate(outbreak_data['number']) if x ==num])
for num in range(len(empty)):
pos +=empty[num]
new_meta = outbreak_data.iloc[pos]
self.used_data = new_meta
self.used_ids = list(new_meta['sample'])
class results():
def __init__(self, output_path):
self.output_path = output_path
self.trait_list = []
def traits(self, outbreak_data):
t_list = [(str(outbreak_data['number'].iloc[x]) +
str(outbreak_data['p/s'].iloc[x]))
for x in range(len(outbreak_data))]
self.trait_list = sorted(set(t_list))
def create_array(self, outbreak_data, filtered_ids, traitlabels):
sample_ids = list(outbreak_data['sample'])
one_array = pd.DataFrame(data=None)
trait_list=[]
array_list=[]
for sample in filtered_ids:
if sample in sample_ids:
ind = sample_ids.index(sample)
test_dict={outbreak_data['sample'].iloc[ind]:
[str(outbreak_data['number'].iloc[ind]) +
str(outbreak_data['p/s'].iloc[ind])]}
trait_list.append(test_dict)
one = traitlabels.index(list(test_dict.values())[0][0])
arr = np.zeros( (1, len(traitlabels)+2), dtype=int)[0]
arr[one] = 1
array_list.append(np.array2string(arr,separator=',')[1:-1])
else:
trait_list.append({str(sample):['None']})
arr = np.zeros( (1, len(traitlabels)+2), dtype=int)[0]
arr[-2] = 1
array_list.append(np.array2string(arr,separator=',')[1:-1])
one_array['genome']=filtered_ids
one_array['array']=array_list
self.array = one_array
self.labels = trait_list
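# e.g. with traitlabels ['1P', '1S'], a sample from outbreak 1 labelled 'P'
# yields the string '1,0,0,0' (the last two slots are the 'Other' and
# 'Reference' columns written out in create_nexus), while a sample with no
# outbreak metadata yields '0,0,1,0'.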
def create_nexus(self, outbreak_data, output_path, filtered_ids, snps_hap,
outbreak_only, trait_labels, array):
#Traits labels
label_set = sorted(set(trait_labels))
###START####
textlist = ['#NEXUS',
'\n',
'\n',
'BEGIN TAXA;\n',
'DIMENSIONS NTAX=', str(len(filtered_ids)), ';\n\n',
'TAXLABELS\n']
textlist2 = [';\n\nEND;\n\n',
'BEGIN CHARACTERS;\n',
'DIMENSIONS NCHAR='+str(len(snps_hap['haplotype'].iloc[0])) +';''\n',
'FORMAT DATATYPE=DNA MISSING=N GAP=- ;\nMATRIX\n']
textlist3 = [';\nEND;\n\nBegin Traits;\n',
'Dimensions NTraits='+ str(len(label_set)+2) +';\n'
'Format labels=yes missing=? separator=Comma;\n',
'TraitLabels ']
print('Writing haplotypes to NEXUS file..')
if outbreak_only == False:
test_file = open(str(output_path) +".nex", 'w')
else:
test_file = open(str(output_path) +".outbreak_nex", 'w')
test_file.writelines(textlist)
for element in filtered_ids:
print(element, file = test_file)
test_file.writelines(textlist2)
#Haplotype creation
for p in range(len(snps_hap[['genome', 'haplotype']])):
test_file.writelines(snps_hap[['genome', 'haplotype']].iloc[p][0])
test_file.write('\t')
test_file.writelines(snps_hap[['genome', 'haplotype']].iloc[p][1])
test_file.write('\n')
#Traits creation
test_file.writelines(textlist3)
# Trait labels - caveat: the patient/staff letter must be a single character
for x in label_set:
number=x[:-1]
letter=x[-1]
if letter == 'S':
let = 'staff'
else:
let = 'patient'
test_file.write('Outbreak_' + number + '_' + let +' ')
test_file.write('Other Reference;\nMatrix\n')
for p in range(len(array)):
test_file.writelines(array['genome'].iloc[p])
test_file.write('\t')
test_file.writelines(array['array'].iloc[p])
test_file.write('\n')
test_file.write(';\nEND;')
test_file.close()
print(str(output_path) +'.nex is complete')
def pseudosequence(self, output_path, snps_hap, outbreak_only):
print('Producing pseudosequences..')
if outbreak_only == False:
pseudo_fna = open(output_path +'.pseudo.fna', 'w')
else:
pseudo_fna = open(output_path +'.outbreak_pseudo.fna', 'w')
for p in range(len(snps_hap[['genome', 'haplotype']])):
pseudo_fna.write('>')
pseudo_fna.writelines(snps_hap[['genome', 'haplotype']].iloc[p][0])
pseudo_fna.write('\n')
pseudo_fna.writelines(snps_hap[['genome', 'haplotype']].iloc[p][1])
pseudo_fna.write('\n')
pseudo_fna.close()
print(str(output_path) + '.pseudo.fna is complete')
def make_graph(self, output_path, trait_list, outbreak_only):
if outbreak_only == False:
suffix = '.pseudo.fna'
else:
suffix = '.outbreak_pseudo.fna'
seq_input = str(output_path + suffix)
#Calculate initial distance matrix
sparse_matrix, consensus, seq_names = calculate_snp_matrix(seq_input)
dist_matrix = calculate_distance_matrix(sparse_matrix, consensus, "dist", False)
#Determining any duplicates within the samples - same haplotypes
lis=[]
for y in range(len(dist_matrix)):
a = [i for i, x in enumerate(dist_matrix) if (x == dist_matrix[y]).all()]
if len(a) > 1:
lis.append(a)
identical_snps = list({tuple(i) for i in lis})
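# e.g. if rows 0, 2 and 5 of dist_matrix are identical, the group appears here
# once as (0, 2, 5); those duplicate haplotypes are collapsed into a single node below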
#Calculate the possible samples we could have
possible_nodes = list(range(len(dist_matrix)))
for y in range(len(identical_snps)):
for x in range(1,len(identical_snps[y])):
possible_nodes.remove(identical_snps[y][x])
# Variable node size: enlarge the node that duplicate haplotypes were merged into
app=[]
node_size = {node:1 for node in possible_nodes}
for x in identical_snps:
app.append(x[1:])
node_size[x[0]] = len(x)
node_size_list = []
for node in node_size.values():
size = node*300
node_size_list.append(size)
app_list = [x for y in range(len(app)) for x in app[y]]
all_in_one = np.delete(dist_matrix, app_list, axis=0)
all_in_two = np.delete(all_in_one, app_list, axis=1)
ait = nx.from_numpy_matrix(all_in_two)
#Labelling
pre_labels={num:seq_names[num] for num in range(len(seq_names))}
label_dict={num:pre_labels[num] for num in possible_nodes}
#networkx relabelling node
relabel_dict={}
for x in range(len(ait.nodes())):
relabel_dict[list(ait.nodes())[x]]=list(label_dict.values())[x]
nx.relabel_nodes(ait, relabel_dict, False)
#Edge labels
edgelabels = nx.get_edge_attributes(nx.minimum_spanning_tree(ait), 'weight')
rounded_edgelabels = {list(edgelabels.keys())[weight]:round(list(edgelabels.values())[weight]) for weight in range(len(edgelabels))}
#Adding colour
new_dict = {}
for x in range(len(trait_list)):
new_dict[list(trait_list[x].keys())[0]] = list(trait_list[x].values())[0][0]
colours_categories = pd.Series(new_dict, index=list(new_dict.keys()))
colours_cats=
|
pd.Categorical(colours_categories)
|
pandas.Categorical
|
import numpy as np
import pytest
import pandas._libs.index as _index
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas._testing as tm
class TestMultiIndexBasic:
def test_multiindex_perf_warn(self):
df = DataFrame(
{
"jim": [0, 0, 1, 1],
"joe": ["x", "x", "z", "y"],
"jolie": np.random.rand(4),
}
).set_index(["jim", "joe"])
with tm.assert_produces_warning(PerformanceWarning):
df.loc[(1, "z")]
df = df.iloc[[2, 1, 3, 0]]
with tm.assert_produces_warning(PerformanceWarning):
df.loc[(0,)]
def test_indexing_over_hashtable_size_cutoff(self):
n = 10000
old_cutoff = _index._SIZE_CUTOFF
_index._SIZE_CUTOFF = 20000
s = Series(np.arange(n), MultiIndex.from_arrays((["a"] * n, np.arange(n))))
# hai it works!
assert s[("a", 5)] == 5
assert s[("a", 6)] == 6
assert s[("a", 7)] == 7
_index._SIZE_CUTOFF = old_cutoff
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame(
{
"a": ["R1", "R2", np.nan, "R4"],
"b": ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20],
}
)
result = df.set_index(["a", "b"], drop=False)
expected = DataFrame(
{
"a": ["R1", "R2", np.nan, "R4"],
"b": ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20],
},
index=[
Index(["R1", "R2", np.nan, "R4"], name="a"),
Index(["C1", "C2", "C3", "C4"], name="b"),
],
)
tm.assert_frame_equal(result, expected)
def test_nested_tuples_duplicates(self):
# GH#30892
dti = pd.to_datetime(["20190101", "20190101", "20190102"])
idx = Index(["a", "a", "c"])
mi = pd.MultiIndex.from_arrays([dti, idx], names=["index1", "index2"])
df = DataFrame({"c1": [1, 2, 3], "c2": [np.nan, np.nan, np.nan]}, index=mi)
expected = DataFrame({"c1": df["c1"], "c2": [1.0, 1.0, np.nan]}, index=mi)
df2 = df.copy(deep=True)
df2.loc[(dti[0], "a"), "c2"] = 1.0
tm.assert_frame_equal(df2, expected)
df3 = df.copy(deep=True)
df3.loc[[(dti[0], "a")], "c2"] = 1.0
|
tm.assert_frame_equal(df3, expected)
|
pandas._testing.assert_frame_equal
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
        # mat: 2-D matrix with shape (2, 3) to use as input. `empty` makes sized
        # objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
        # fully masked input becomes all-NaN, so frame == frame is False everywhere
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
        # collections.Sequence-like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
            def __len__(self):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [pd.Series(i, index=['b', 'a', 'c'], name=str(i))
for i in range(3)]
result = pd.DataFrame(series)
expected = pd.DataFrame({'b': [0, 1, 2],
'a': [0, 1, 2],
'c': [0, 1, 2]},
columns=['b', 'a', 'c'],
index=['0', '1', '2'])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with pytest.raises(ValueError, match='arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
assert result.index.is_monotonic
# ordering ambiguous, raise exception
with pytest.raises(ValueError, match='ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list('ab'))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
result = DataFrame(tuples, columns=['y', 'z'])
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_from_dict_columns_parameter(self):
# GH 18529
# Test new columns parameter for from_dict that was added to make
# from_items(..., orient='index', columns=[...]) easier to replicate
result = DataFrame.from_dict(OrderedDict([('A', [1, 2]),
('B', [4, 5])]),
orient='index', columns=['one', 'two'])
expected = DataFrame([[1, 2], [4, 5]], index=['A', 'B'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
msg = "cannot use columns parameter with orient='columns'"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
orient='columns', columns=['one', 'two'])
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
columns=['one', 'two'])
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
tm.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name='x')
df = DataFrame(s)
expected = DataFrame(dict(x=s))
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
pytest.raises(ValueError, DataFrame, s, columns=[1, 2])
# #2234
a = Series([], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
# series with name and w/o
s1 = Series(arr, name='x')
df = DataFrame([s1, arr]).T
expected = DataFrame({'x': s1, 'Unnamed 0': arr},
columns=['x', 'Unnamed 0'])
tm.assert_frame_equal(df, expected)
# this is a bit non-intuitive here; the series collapse down to arrays
df = DataFrame([arr, s1]).T
expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
def test_constructor_Series_named_and_columns(self):
# GH 9232 validation
s0 = Series(range(5), name=0)
s1 = Series(range(5), name=1)
# matching name and column gives standard frame
tm.assert_frame_equal(pd.DataFrame(s0, columns=[0]),
s0.to_frame())
tm.assert_frame_equal(pd.DataFrame(s1, columns=[1]),
s1.to_frame())
# non-matching produces empty frame
assert pd.DataFrame(s0, columns=[1]).empty
assert pd.DataFrame(s1, columns=[0]).empty
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
# no name
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
other_index = Index(['a', 'b'])
df1 = DataFrame(s1, index=other_index)
exp1 = DataFrame(s1.reindex(other_index))
assert df1.columns[0] == 'x'
tm.assert_frame_equal(df1, exp1)
df2 = DataFrame(s2, index=other_index)
exp2 = DataFrame(s2.reindex(other_index))
assert df2.columns[0] == 0
tm.assert_index_equal(df2.index, other_index)
tm.assert_frame_equal(df2, exp2)
def test_constructor_manager_resize(self):
index = list(self.frame.index[:5])
columns = list(self.frame.columns[:3])
result = DataFrame(self.frame._data, index=index,
columns=columns)
tm.assert_index_equal(result.index, Index(index))
tm.assert_index_equal(result.columns, Index(columns))
def test_constructor_from_items(self):
items = [(c, self.frame[c]) for c in self.frame.columns]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
            recons = DataFrame.from_items(items)  # api: pandas.DataFrame.from_items
#!/usr/bin/env python
debug = True
if debug: print('debug = True')
computaGenero = True
if debug: print('computaGenero = ' + str(computaGenero))
computaCursos = True
if debug: print('computaCursos = ' + str(computaCursos))
computaRecorrencia = True # Also covers retention and attrition
if debug: print('computaRecorrencia = ' + str(computaRecorrencia))
entrada = True
if debug: print('entrada = ' + str(entrada))
incremental = False # Only meaningful when entrada is True. Keeps whatever is already in the output
if debug: print('incremental = ' + str(incremental))
graficos = True
if debug: print('graficos = ' + str(graficos))
if debug: print('')
import json
import pandas as pd
import os
from pathlib import Path
import re
import errno
from enum import Enum
from collections import namedtuple
import numpy as np
if computaGenero:
from genderize import Genderize
genderize = Genderize()
if graficos:
import matplotlib.pyplot as plt
#plt.close('all')
if computaRecorrencia:
from unidecode import unidecode
from operator import itemgetter
from similarity.jarowinkler import JaroWinkler
jw = JaroWinkler()
if graficos:
import itertools
import calendar
# Columns that are not part of the input must have a blank Expressão value
# Columns with an Expressão value have their names replaced by their Descrição
Coluna = namedtuple('Coluna', ['Descrição', 'Expressão'])
class Colunas(Enum):
@property
def Descrição(self):
        '''Column name.'''
return self.value[0].Descrição
@property
def Expressão(self):
        '''Column regex.'''
return self.value[0].Expressão
    # Careful not to forget the comma at the end of each line
Nome = Coluna('Nome', r'NOME'),
RG = Coluna('RG', r'Documento de Identidade|^R\.?G\.?$'),
CPF = Coluna('CPF', r'CPF'),
Curso = Coluna('Curso', r'CURSO'),
ID = Coluna('ID', None),
Ação = Coluna('Ação', None),
Evasão = Coluna('Evasão', None),
Evasora = Coluna('Evasora', None),
Gênero = Coluna('Gênero', None),
Porcentagem = Coluna('Porcentagem', None),
Retenção = Coluna('Retenção', None),
Retentora = Coluna('Retentora', None),
Quantidade = Coluna('Quantidade', None),
Válidos = Coluna('Qtde. voluntários válidos', None),
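# Illustrative note (a sketch, not part of the original script): given a hypothetical CSV header
# such as 'NOME COMPLETO', re.search(Colunas.Nome.Expressão, 'NOME COMPLETO', re.I) matches, so
# incluiAcao() below would rename that column to Colunas.Nome.Descrição ('Nome'). Members whose
# Expressão is None (ID, Ação, Gênero, ...) are computed columns and are never matched against
# the input header.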
try:
with open("../Saida/generos.json") as json_file:
generos = json.load(json_file)
if debug: print('Lendo Saida/generos.json')
except FileNotFoundError:
if debug: print('Saida/generos.json não encontrado')
    # Names can be added here (or to the Saida/generos.json file) if Genderize does not find them
generos = {
'ALDREI': 'm',
'EDIPO': 'm',
'FABRICIO': 'm',
'HYTALO': 'm',
'JOLINDO': 'm',
'KAWE': 'm',
'MASSARU': 'm',
'OTAVIO': 'm',
'VINICIUS': 'm',
'CARINE': 'f',
'CASSIA': 'f',
'FLAVIA': 'f',
'FRANCYELE': 'f',
'GABRIELLA': 'f',
'HELOISA': 'f',
'IHANNA': 'f',
'JENYFFER': 'f',
'JESSICA': 'f',
'JULIA': 'f',
'LAIS': 'f',
'LETICIA': 'f',
'LIGIA': 'f',
'MAITHE': 'f',
'MARIANGELA': 'f',
'MARINEIA': 'f',
'MONICA': 'f',
'NAIADY': 'f',
'NATHALIA': 'f',
'NATHALLI': 'f',
'STHEFANIE': 'f',
'TAIZA': 'f',
'TAMILES': 'f',
'TAIS': 'f',
'TASSIANY': 'f',
'TATIANY': 'f',
'THASSIA': 'f',
'VERONICA': 'f',
}
# Expressions can be added here to ignore volunteer names
# Ignoring a name means skipping gender, recurrence, retention and attrition analysis for it
nomesExcluidos = [re.compile(expressao, re.I) for expressao in [
r'confirmou',
]]
# The order in which courses appear in the dictionary matters, since the lookup follows that order
# Example: "educação física" must appear before "física"
cursos = dict((curso, re.compile(expressao, re.I)) for curso, expressao in {
'Engenharia elétrica': r'el[eé]trica',
'Psicologia': r'psico',
'Comunicação social: jornalismo': r'jornal',
'Medicina': r'medicina|fmrp',
'Mestrando': r'mestrado|mestrando',
'Ciência da computação': r'ci[êe]ncias?\s+da\s+computa[cç][aã]o|bcc',
'Engenharia mecânica': r'mec[aâ]nica',
'Engenharia de produção': r'produ[cç][aã]o',
'Engenharia civil': r'civil',
'Economia Empresarial e Controladoria': r'ecec',
'Não universitário': r't[eé]cnic[oa]|n[aã]o\s+cursante|completo|etec|trabalho|profissional|convidad[ao]|extern[ao]|palestra|volunt[aá]ri[ao]|nenhum|socorrista|cursinho|vestibula|nutricionista|enfermeira|formad[oa]|consultora|decoradora|estudante|fiscal|terapeuta|banc[aá]ria|psic[oó]log[ao]|assessora|empres[áa]ri[ao]|noite|professor|desempregad[ao]|mãe|graduad[ao]',
'Meteorologia': r'meteoro',
'Educação física': r'(educa[çc][ãa]o|ed\.?)\s+f[íi]sica',
'Física': r'f[ií]sica',
'Doutorando': r'doutorado',
'Ciências biológicas': r'biologia|biol[oó]gicas|^bio',
'Química': r'qu[íi]mica',
'Administração': r'adm',
'Música': r'^m[úu]sica',
'Matemática aplicada a negócios': r'^man|neg[óo]cio',
'Engenharia química': r'engenharia\s+qu[íi]mica|eng\s+qu[íi]mica',
'Fisioterapia': r'fisio',
'Ciências contábeis': r'cont',
'Economia': r'econo',
'Pedagogia': r'^pedago',
'Biblioteconomia e Ciência da Informação': r'^BCI',
'Universitário: curso não informado': r'^unaerp|cultura|ufpa|ffclrp|^unesp|^integral\s+manh[aã]|^fea',
'Pós graduando': r'p[óo]s\s+gradua[çc][ãa]o',
'Agronomia': r'agro',
'Análise e desenvolvimento de sistemas': r'an[áa]lise',
'Arquitetura': r'arq',
'Artes visuais': r'artes',
'Biotecnologia': r'^biotecnologia',
'Ciências biomédicas': r'ci[eê]ncias\s+biom[eé]dicas',
'Comunicação social: radialismo': r'rtv|radialismo|r[aá]dio\s+e\s+tv',
'Dança, grafiti e teatro': r'teatro',
'Design': r'design',
'Direito': r'^direito',
'Ecologia': r'^ecologia',
'Enfermagem': r'enfermagem|eerp',
'Engenharia ambiental': r'amb',
'Engenharia de biossistemas': r'biossistemas',
'Engenharia da computação': r'engenharia\s+d[ae]\s+computa[cç][aã]o',
'Engenharia florestal': r'florestal',
'Farmácia': r'^farm[áa]cia|fcfrp',
'Filosofia': r'^filo',
'Fonoaudiologia': r'^fono',
'Genética': r'gen[ée]tica',
'Informática biomédica': r'inform[áa]tica\s+biom[eé]dica|^ibm',
'Letras': r'^letras',
'Marketing': r'marketing|mkt',
'Nutrição e metabolismo': r'nutri[çc][ãa]o',
'Medicina veterinária': r'veterin[áa]ria',
'Teologia': r'^teologia',
'Terapia ocupacional': r'ocupacional|t.o',
'Odontologia': r'^odonto|forp',
'Publicidade e propaganda': r'publicidade|pp',
'Recursos humanos': r'recursos\s+humanos|rh',
'Relações públicas': r'rela[cç][oõ]es\s+p[uú]blicas|rp',
'Serviço social': r'social',
'Sistemas de informação': r'sistemas|^b?si$',
}.items())
listaCursos = [curso for curso, _ in cursos.items()]
loteGeneros = {}
dfs = {}
desc = pd.DataFrame()
pessoas = pd.DataFrame(columns = [Colunas.ID.Descrição, Colunas.Nome.Descrição, Colunas.RG.Descrição, Colunas.CPF.Descrição, Colunas.Quantidade.Descrição, Colunas.Retentora.Descrição, Colunas.Evasora.Descrição, Colunas.Curso.Descrição, Colunas.Gênero.Descrição])
lastID = 0
def createDir(path):
    '''Creates the directory for the given path, if it does not exist.'''
directory = os.path.dirname(path)
if not os.path.exists(directory):
if debug: print('Criando diretório ' + directory)
try:
os.makedirs(directory, exist_ok = True)
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
def output(dataframe, path):
    '''Saves the dataframe as csv at the given path.'''
filename = "../Saida/" + path + "/output.csv"
createDir(filename)
if debug: print('Escrevendo ' + path + '/output.csv')
dataframe.to_csv(filename, index = False, float_format = '%.f')
def incluiAcao(path):
    '''
    Loads an action (event) from a csv file.
    No analysis is performed and no files are generated.
    The global variables `dfs`, `pessoas` and `loteGeneros` are populated.
    '''
ids = []
if debug: print('Lendo ' + path + ".csv")
df = pd.read_csv("../Dados/" + path + ".csv", true_values = ['Sim'], false_values = ['Não'])
if debug: print('Removendo colunas desnecessárias')
df = df.loc[:, df.columns.str.contains('|'.join([coluna.Expressão for coluna in Colunas if coluna.Expressão]), case = False)]
    # Rename columns to a homogeneous form, following the `Descrição` property
if debug: print('Renomeando colunas')
def columnIterator():
        '''Yields only the columns that match one of the expressions in `Colunas`.'''
for coluna in Colunas:
if coluna.Expressão:
for col in df.columns:
if re.search(coluna.Expressão, col, re.I):
yield (col, coluna.Descrição)
break
df.rename(columns = dict(columnIterator()), inplace = True)
if debug: print('Limpando valores')
    df.replace(r'\t', ' ', regex = True, inplace = True) # Replace tabs with spaces
    df.replace(r'\s{2,}', ' ', regex = True, inplace = True) # Collapse runs of spaces
    df.replace(r'^\s+|\s+$', '', regex = True, inplace = True) # Leading and trailing trimming
    df.replace(r'^$', None, regex = True, inplace = True) # Turn empty strings into None
if debug: print('Removendo linhas totalmente em branco')
df.dropna(axis = 'index', how = 'all', inplace = True)
    # After dropping unwanted rows and columns, rebuild the index
if debug: print('Refazendo índices')
df.reset_index(drop = True, inplace = True)
if debug: print('')
for i in df.index:
temNome = False
if Colunas.Nome.Descrição in df:
value = df.at[i, Colunas.Nome.Descrição]
if pd.isnull(value):
if debug: print('Sem nome')
elif any([reg.search(value) for reg in nomesExcluidos]):
df.at[i, Colunas.Nome.Descrição] = None
if debug: print('Sem nome')
else:
                # Strip pipes from the name, since '|' is used as the separator in the recurrence analysis
value = re.sub(r'\|', '', value)
if value == '':
df.at[i, Colunas.Nome.Descrição] = None
if debug: print('Sem nome')
else:
temNome = True
nome = df.at[i, Colunas.Nome.Descrição] = value
if debug: print(value)
elif debug: print('Sem nome')
def validaDocumento(coluna):
            '''Validates a CPF or RG, keeping only digits (numbers in any position and `x` or `X` in the last position).'''
if coluna in df:
value = df.at[i, coluna]
if pd.notnull(value):
try:
                        int(value) # If it is already an int, there are no special characters...
                        if debug: print(coluna + ': ' + str(value))
                    except ValueError:
                        newValue = re.sub(r'[^0-9xX]|[xX].', '', value) # Strip special characters from the document number (keeps digits and a trailing x/X)
df.at[i, coluna] = None if newValue == '' else newValue
if debug: print(coluna + ': ' + value + ' -> ' + newValue)
validaDocumento(Colunas.RG.Descrição)
validaDocumento(Colunas.CPF.Descrição)
        # Recurrence analysis
def analiseRecorrencia(*args):
            '''Looks for recurrence by matching on the given columns.'''
def analiseCurso():
                '''Immediately assigns the course according to the expressions defined at the top of the file. Raises an exception if no expression matches.'''
nome = df.at[i, Colunas.Curso.Descrição]
if pd.isnull(nome):
if debug: print('Curso não preenchido')
else:
try:
curso = next(curso for curso, reg in cursos.items() if reg.search(nome))
if debug: print('Curso: ' + nome + ' -> ' + curso)
return curso
except StopIteration:
raise Exception('Curso desconhecido: ' + nome)
def analiseGenero(ID):
                '''
                If the gender is already in the local dictionary, it is assigned immediately.
                Otherwise, the name is added to the batch to be looked up in `computaGeneros` after all inclusions are done.
                '''
primeiroNome = nome.split()[0]
nomeSemAcento = unidecode(primeiroNome.upper())
genero = generos.get(nomeSemAcento)
if genero:
pessoas.loc[pessoas[Colunas.ID.Descrição] == ID, Colunas.Gênero.Descrição] = genero
if debug: print(primeiroNome + ' -> ' + genero)
else:
                    # Add the name to the batch to be looked up in `computaGeneros`
if nomeSemAcento in loteGeneros:
if not ID in loteGeneros[nomeSemAcento]:
loteGeneros[nomeSemAcento].append(ID)
else:
loteGeneros[nomeSemAcento] = [ID]
def buscaColuna(coluna):
                '''Looks for recurrence by matching on the given column.'''
if coluna in df:
key = df.at[i, coluna]
if pd.notnull(key):
if computaRecorrencia:
nonlocal nome
                            upperName = unidecode(nome.upper()) # Similarity ignores accents (a = á) and case (a = A)
def analiseNomes(pessoa):
                                '''Returns the similarity score (0-1) and the name of the best match (highest similarity) found.'''
nomes = pessoa.split('|')
values = [jw.similarity(unidecode(nome.upper()), upperName) for nome in nomes]
index, value = max(enumerate(values), key = itemgetter(1))
return value, nomes[index]
if coluna == Colunas.Nome.Descrição:
similaridades = pessoas[coluna].map(analiseNomes)
                            matches = pd.Series([similaridade[0] for similaridade in similaridades]) > .96 # Minimum acceptable similarity: 96%
else:
matches = pessoas[coluna] == key
if matches.sum() > 1:
                            # If this happens with a name, it may be worth raising the minimum acceptable similarity
                            # If this happens with a document number, it is probably a bug
raise Exception('Mais de um registro de ' + coluna + ' "' + key + '" encontrado')
if matches.any():
ID = pessoas.loc[matches, Colunas.ID.Descrição].iloc[0]
if coluna == Colunas.Nome.Descrição:
similaridade = max(similaridades, key = itemgetter(0))
elif computaRecorrencia:
similaridade = analiseNomes(pessoas.loc[matches, Colunas.Nome.Descrição].iloc[0])
else:
similaridade = [1]
if similaridade[0] < 1:
                                # If the same person signs up under different names, all of them are kept
pessoas.loc[matches & (pessoas[Colunas.Nome.Descrição] == ''), Colunas.Nome.Descrição] = nome
pessoas.loc[matches & (pessoas[Colunas.Nome.Descrição] != ''), Colunas.Nome.Descrição] += '|' + nome
                            # If a column diverges across records of the same person, the first value found is kept and the rest is ignored
# Curso
if computaCursos and Colunas.Curso.Descrição in df and pd.isnull(pessoas.loc[matches, Colunas.Curso.Descrição].iloc[0]):
curso = analiseCurso()
if curso:
pessoas.loc[matches, Colunas.Curso.Descrição] = curso
# Gênero
if computaGenero and Colunas.Nome.Descrição in df and pd.isnull(pessoas.loc[matches, Colunas.Gênero.Descrição].iloc[0]):
analiseGenero(ID)
if debug:
print('Recorrência encontrada pelo ' + coluna + f' ({key})')
if coluna == Colunas.Nome.Descrição:
print(f'Similaridade: {similaridade[0] * 100:.0f}% (' + similaridade[1] + ')')
print(f'ID: {ID:.0f}')
pessoas.loc[matches, Colunas.Quantidade.Descrição] += 1
pessoas.loc[matches, Colunas.Evasora.Descrição] = path
return ID
for arg in args:
if arg is not None:
ID = buscaColuna(arg)
if ID: return ID
global lastID
lastID += 1
if debug: print(f'Recorrência não encontrada. ID atribuído: {lastID:.0f}')
pessoas.loc[pessoas.shape[0]] = {
Colunas.ID.Descrição: lastID,
Colunas.RG.Descrição: df.at[i, Colunas.RG.Descrição] if Colunas.RG.Descrição in df else None,
Colunas.CPF.Descrição: df.at[i, Colunas.CPF.Descrição] if Colunas.CPF.Descrição in df else None,
Colunas.Nome.Descrição: df.at[i, Colunas.Nome.Descrição] if temNome else '',
Colunas.Quantidade.Descrição: 1,
Colunas.Retentora.Descrição: path,
Colunas.Evasora.Descrição: path,
Colunas.Curso.Descrição: analiseCurso() if computaCursos and Colunas.Curso.Descrição in df and pd.notnull(df.at[i, Colunas.Curso.Descrição]) else None,
Colunas.Gênero.Descrição: None,
}
if computaGenero and Colunas.Nome.Descrição in df and pd.notnull(df.at[i, Colunas.Nome.Descrição]):
analiseGenero(lastID)
return lastID
ID = analiseRecorrencia(Colunas.RG.Descrição, Colunas.CPF.Descrição, Colunas.Nome.Descrição if temNome and computaRecorrencia else None)
df.at[i, Colunas.ID.Descrição] = ID
ids.append(ID)
df.at[i, Colunas.Curso.Descrição] = None
if debug: print('')
if Colunas.Curso.Descrição in df:
df[Colunas.Curso.Descrição] = df[Colunas.Curso.Descrição].apply(lambda value: str(value) if pd.notnull(value) else None)
dfs[path] = df
if debug: print('')
return ids
def chunks(l, n):
    '''Yields successive chunks of `n` items from list `l`, e.g. chunks([1, 2, 3, 4, 5], 2) yields [1, 2], [3, 4], [5].'''
    for i in range(0, len(l), n):
        yield l[i:i + n]
def computaGeneros():
    '''
    Looks up genders in batches of 10 names taken from `loteGeneros`, adding them to the gender dictionary and assigning the gender to each person.
    The most recurrent names are looked up first. Once a person with multiple names has a gender found, their remaining names are skipped.
    If a name ends up skipped by every person that refers to it, it is no longer looked up.
    '''
def iteradorLote():
for nome, IDs in loteGeneros.items():
matches = pessoas[Colunas.ID.Descrição].isin(IDs)
if any(pd.isnull(pessoas.loc[matches, Colunas.Gênero.Descrição])):
yield nome, matches, IDs.__len__()
for lote in chunks(sorted(iteradorLote(), key = lambda item: item[2], reverse = True), 10):
retorno = genderize.get([nome for nome, _, _ in lote])
#country_id = 'br', language_id = 'pt'
for genero, (nome, matches, _) in zip([genero[0] if genero else 'n' for genero in [genero['gender'] for genero in retorno]], lote):
pessoas.loc[matches, Colunas.Gênero.Descrição] = generos[nome] = genero
if debug: print(nome + ' -> ' + genero)
arquivo = "../Saida/generos.json"
createDir(arquivo)
with open(arquivo, 'w') as outfile:
json.dump(generos, outfile, sort_keys = True, indent = 2)
if debug:
print('Salvando Saida/generos.json')
print('')
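# Illustrative note (a sketch, not part of the original script): the genderize package returns one
# dict per name, and computaGeneros() keeps only the first letter of its 'gender' field. For example,
# a response entry shaped like {'name': 'MARIA', 'gender': 'female', ...} (shape assumed here) would
# be stored as generos['MARIA'] = 'f', while a missing gender (None) is stored as 'n'.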
def analisaAcao(path, ids):
    '''
    Performs the gender and course analysis of the action, using the `DataFrame` stored in `dfs` for the given path.
    Optionally, `ids` restricts the analysis to specific people (useful for incremental input).
    '''
if debug: print('Analisando ' + path)
df = dfs[path]
algum = not incremental or not os.path.exists("../Saida/" + path + "/output.csv")
if computaCursos or computaGenero:
for i in df.index:
pessoa = pessoas[pessoas[Colunas.ID.Descrição] == df.at[i, Colunas.ID.Descrição]]
ID = pessoa[Colunas.ID.Descrição].item()
if ids and not ID in ids: continue
if debug: print(ID)
# Curso
if computaCursos and (not Colunas.Curso.Descrição in df or pd.isnull(df.at[i, Colunas.Curso.Descrição])):
value = pessoa[Colunas.Curso.Descrição].item()
                if pd.isnull(value):  # api: pandas.isnull
"""
scenario_plots.py
Create a list of plots for comparing multiple scenarios.
"""
import os
import matplotlib.pyplot as plt
import pandas as pd
import cea.inputlocator
def plot_scenarios(scenario_folders, output_file):
"""
List each scenario in the folder `scenario_root` and plot demand and lca (operations, embodied) data.
:param scenario_folders: A list of scenario folders.
:param output_file: The filename (pdf) to save the results as.
:return: (None)
"""
from matplotlib.backends.backend_pdf import PdfPages
locators = [cea.inputlocator.InputLocator(scenario) for scenario in scenario_folders]
scenario_names = [os.path.basename(locator.scenario) for locator in locators]
pdf = PdfPages(output_file)
try:
create_page_demand(locators, pdf, scenario_names)
create_page_lca_embodied(locators, pdf, scenario_names)
create_page_lca_operation(locators, pdf, scenario_names)
finally:
pdf.close()
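# Example usage (an illustrative sketch; the scenario paths below are hypothetical):
#   plot_scenarios(['/projects/baseline', '/projects/retrofit'], 'scenario_comparison.pdf')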
def create_page_lca_operation(locators, pdf, scenario_names):
"""
Create Page Three: LCA Operation
:param locators: list of InputLocators, one for each scenario
:param pdf: the PdfFile to write the page to
:param scenario_names: list of scenario names
:return: None
"""
try:
fig, axes = plt.subplots(nrows=2, figsize=(8.27, 11.69))
plt.suptitle('LCA Operation')
plot_lca_operation(axes[0], locators, scenario_names, column='O_nre_pen_MJm2',
title='Non-Renewable Primary Energy', unit='MJ/m2')
plot_lca_operation(axes[1], locators, scenario_names, column='O_ghg_kgm2', title='Greenhouse Gas', unit='kg/m2')
fig.subplots_adjust(hspace=0.5)
pdf.savefig()
finally:
plt.close()
def create_page_lca_embodied(locators, pdf, scenario_names):
"""
Create Page Two: LCA Embodied
:param locators: list of InputLocators, one for each scenario
:param pdf: the PdfFile to write the page to
:param scenario_names: list of scenario names
:return: None
"""
try:
fig, axes = plt.subplots(nrows=2, figsize=(8.27, 11.69))
plt.suptitle('LCA Embodied')
plot_lca_embodied(axes[0], locators, scenario_names, column='E_nre_pen_MJm2',
title='Non-Renewable Primary Energy', unit='MJ/m2')
plot_lca_embodied(axes[1], locators, scenario_names, column='E_ghg_kgm2', title='Greenhouse Gas', unit='kg/m2')
fig.subplots_adjust(hspace=0.5)
pdf.savefig()
finally:
plt.close()
def create_page_demand(locators, pdf, scenario_names):
"""
Create Page one: Demand
:param locators: list of InputLocators, one for each scenario
:param pdf: the PdfFile to write the page to
:param scenario_names: list of scenario names
:return: None
"""
try:
fig, axes = plt.subplots(nrows=3, figsize=(8.27, 11.69))
plt.suptitle('Demand')
plot_demand(axes[0], locators, scenario_names, column='E_sys_MWhyr', title='Ef')
plot_demand(axes[1], locators, scenario_names, column='Qhs_sys_MWhyr', title='QH')
plot_demand(axes[2], locators, scenario_names, column='Qcs_sys_MWhyr', title='QC')
fig.subplots_adjust(hspace=0.5)
pdf.savefig()
finally:
plt.close()
def plot_demand(ax, locators, scenario_names, column, title):
df = pd.DataFrame()
afs = pd.DataFrame()
for i, scenario in enumerate(scenario_names):
scenario_data = pd.read_csv(locators[i].get_total_demand()).set_index('Name')
df[scenario] = scenario_data[column] * 1000 / scenario_data['GFA_m2']
afs[scenario] = scenario_data['GFA_m2']
ax2 = ax.twinx()
df.boxplot(ax=ax, sym='', return_type='axes')
ax.set_title(title)
ax.set_ylabel('Per Building [KWh/m2]')
    # GFA-weighted average intensity per scenario [kWh/m2], drawn as diamonds on the twin axis
    y = pd.DataFrame({scenario: df[scenario] * afs[scenario] / afs[scenario].sum()
                      for scenario in scenario_names}).sum().ravel()
x = ax.get_xticks()
axylim = ax.get_ylim()
bottom = axylim[0] * 0.9
top = axylim[1] * 1.1
ax.set_ylim(bottom=bottom, top=top)
ax2.set_ylim(bottom=bottom, top=top)
plt.scatter(x, y, marker='D', color='g')
ax2.set_ylabel('Per Scenario [KWh/m2]')
def plot_lca_embodied(ax, locators, scenario_names, column, title, unit):
    df = pd.DataFrame()  # api: pandas.DataFrame
"""Parallelized, single-point launch script to run DSR or GP on a set of benchmarks."""
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
import os
import sys
import json
import time
from datetime import datetime
import multiprocessing
from functools import partial
from pkg_resources import resource_filename
import zlib
import click
import numpy as np
import pandas as pd
from sympy.parsing.sympy_parser import parse_expr
from sympy import srepr
from dsr import DeepSymbolicOptimizer
from dsr.program import Program
from dsr.task.regression.dataset import BenchmarkDataset
from dsr.baselines import gpsr
def train_dsr(name_and_seed, config):
"""Trains DSR and returns dict of reward, expression, and traversal"""
# Override the benchmark name and output file
name, seed = name_and_seed
config["task"]["name"] = name
config["training"]["output_file"] = "dsr_{}_{}.csv".format(name, seed)
# Try importing TensorFlow (with suppressed warnings), Controller, and learn
# When parallelizing across tasks, these will already be imported, hence try/except
try:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from dsr.controller import Controller
from dsr.train import learn
except ModuleNotFoundError: # Specific subclass of ImportError for when module is not found, probably needs to be excepted first
print("One or more libraries not found")
raise ModuleNotFoundError
except ImportError:
# Have we already imported tf? If so, this is the error we want to dodge.
if 'tf' in globals():
pass
else:
raise ImportError
# Train the model
model = DeepSymbolicOptimizer(config)
start = time.time()
result = {"name" : name, "seed" : seed} # Name and seed are listed first
result.update(model.train(seed=seed))
result["t"] = time.time() - start
result.pop("program")
return result
def train_gp(name_and_seed, logdir, config_task, config_gp):
"""Trains GP and returns dict of reward, expression, and program"""
name, seed = name_and_seed
config_gp["seed"] = seed + zlib.adler32(name.encode("utf-8"))
start = time.time()
# Load the dataset
config_dataset = config_task["dataset"]
config_dataset["name"] = name
dataset = BenchmarkDataset(**config_dataset)
# Fit the GP
gp = gpsr.GP(dataset=dataset, **config_gp)
p, logbook = gp.train()
# Retrieve results
r = base_r = p.fitness.values[0]
str_p = str(p)
nmse_test = gp.nmse_test(p)[0]
nmse_test_noiseless = gp.nmse_test_noiseless(p)[0]
success = gp.success(p)
# Many failure cases right now for converting to SymPy expression
try:
expression = repr(parse_expr(str_p.replace("X", "x").replace("add", "Add").replace("mul", "Mul")))
except:
expression = "N/A"
# Save run details
drop = ["gen", "nevals"]
df_fitness = pd.DataFrame(logbook.chapters["fitness"]).drop(drop, axis=1)
df_fitness = df_fitness.rename({"avg" : "fit_avg", "min" : "fit_min"}, axis=1)
df_fitness["fit_best"] = df_fitness["fit_min"].cummin()
df_len = pd.DataFrame(logbook.chapters["size"]).drop(drop, axis=1)
df_len = df_len.rename({"avg" : "l_avg"}, axis=1)
df = pd.concat([df_fitness, df_len], axis=1, sort=False)
df.to_csv(os.path.join(logdir, "gp_{}_{}.csv".format(name, seed)), index=False)
result = {
"name" : name,
"seed" : seed,
"r" : r,
"base_r" : base_r,
"nmse_test" : nmse_test,
"nmse_test_noiseless" : nmse_test_noiseless,
"success" : success,
"expression" : expression,
"traversal" : str_p,
"t" : time.time() - start
}
return result
@click.command()
@click.argument('config_template', default="config.json")
@click.option('--method', default="dsr", type=click.Choice(["dsr", "gp"]), help="Symbolic regression method")
@click.option('--mc', default=1, type=int, help="Number of Monte Carlo trials for each benchmark")
@click.option('--output_filename', default=None, help="Filename to write results")
@click.option('--n_cores_task', '--n', default=1, help="Number of cores to spread out across tasks")
@click.option('--seed_shift', default=0, type=int, help="Integer to add to each seed (i.e. to combine multiple runs)")
@click.option('--b', multiple=True, type=str, help="Name of benchmark or benchmark prefix")
def main(config_template, method, mc, output_filename, n_cores_task, seed_shift, b):
"""Runs DSR or GP on multiple benchmarks using multiprocessing."""
# Load the config file
with open(config_template, encoding='utf-8') as f:
config = json.load(f)
# Required configs
config_task = config["task"] # Task specification parameters
config_training = config["training"] # Training hyperparameters
# Optional configs
config_controller = config.get("controller") # Controller hyperparameters
config_language_model_prior = config.get("language_model_prior") # Language model hyperparameters
config_gp = config.get("gp") # GP hyperparameters
# Create output directories
if output_filename is None:
output_filename = "benchmark_{}.csv".format(method)
config_training["logdir"] = os.path.join(
config_training["logdir"],
"log_{}".format(datetime.now().strftime("%Y-%m-%d-%H%M%S")))
logdir = config_training["logdir"]
if "dataset" in config_task and "backup" in config_task["dataset"] and config_task["dataset"]["backup"]:
config_task["dataset"]["logdir"] = logdir
os.makedirs(logdir, exist_ok=True)
output_filename = os.path.join(logdir, output_filename)
# Use benchmark name from config if not specified as command-line arg
if len(b) == 0:
if isinstance(config_task["name"], str):
b = (config_task["name"],)
elif isinstance(config_task["name"], list):
b = tuple(config_task["name"])
# Shortcut to run all Nguyen benchmarks
benchmarks = list(b)
if "Nguyen" in benchmarks:
benchmarks.remove("Nguyen")
benchmarks += ["Nguyen-{}".format(i+1) for i in range(12)]
# Generate benchmark-seed pairs for each MC. When passed to the TF RNG,
# seeds will be added to checksums on the benchmark names
unique_benchmarks = benchmarks.copy()
benchmarks *= mc
seeds = (np.arange(mc) + seed_shift).repeat(len(unique_benchmarks)).tolist()
names_and_seeds = list(zip(benchmarks, seeds))
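    # Illustrative note (not part of the original script): with benchmarks=['Nguyen-1', 'Nguyen-2'],
    # mc=2 and seed_shift=0, names_and_seeds becomes
    # [('Nguyen-1', 0), ('Nguyen-2', 0), ('Nguyen-1', 1), ('Nguyen-2', 1)].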
# Edit n_cores_task and/or n_cores_batch
if n_cores_task == -1:
n_cores_task = multiprocessing.cpu_count()
if n_cores_task > len(benchmarks):
print("Setting 'n_cores_task' to {} for batch because there are only {} benchmarks.".format(len(benchmarks), len(benchmarks)))
n_cores_task = len(benchmarks)
if method == "dsr":
if config_training["verbose"] and n_cores_task > 1:
print("Setting 'verbose' to False for parallelized run.")
config_training["verbose"] = False
if config_training["n_cores_batch"] != 1 and n_cores_task > 1:
print("Setting 'n_cores_batch' to 1 to avoid nested child processes.")
config_training["n_cores_batch"] = 1
print("Running {} for n={} on benchmarks {}".format(method, mc, unique_benchmarks))
# Write terminal command and config.json into log directory
cmd_filename = os.path.join(logdir, "cmd.out")
with open(cmd_filename, 'w') as f:
print(" ".join(sys.argv), file=f)
config_filename = os.path.join(logdir, "config.json")
with open(config_filename, 'w') as f:
json.dump(config, f, indent=4)
# Define the work
if method == "dsr":
work = partial(train_dsr, config=config)
elif method == "gp":
work = partial(train_gp, logdir=logdir, config_task=config_task, config_gp=config_gp)
# Farm out the work
write_header = True
if n_cores_task > 1:
pool = multiprocessing.Pool(n_cores_task)
for result in pool.imap_unordered(work, names_and_seeds):
            pd.DataFrame(result, index=[0])  # api: pandas.DataFrame
# -*- coding: utf-8 -*-
"""
Covid-19 in São Paulo
Generates charts for tracking the Covid-19 pandemic
in the city and state of São Paulo.
@author: https://github.com/DaviSRodrigues
"""
from datetime import datetime, timedelta
from io import StringIO
import locale
import math
from tableauscraper import TableauScraper
import traceback
import unicodedata
import pandas as pd
import plotly.graph_objects as go
import plotly.io as pio
from plotly.subplots import make_subplots
import requests
def main():
locale.setlocale(locale.LC_ALL, 'pt_BR.UTF-8')
print('Carregando dados...')
hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total = carrega_dados_cidade()
dados_munic, dados_estado, isolamento, leitos_estaduais, internacoes, doencas, dados_raciais, dados_vacinacao, doses_aplicadas, doses_recebidas, dados_imunizantes, atualizacao_imunizantes = carrega_dados_estado()
print('\nLimpando e enriquecendo dos dados...')
dados_cidade, dados_munic, hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total, dados_estado, isolamento, leitos_estaduais, internacoes, doencas, dados_raciais, dados_vacinacao, dados_imunizantes = pre_processamento(hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total, dados_estado, isolamento, leitos_estaduais, internacoes, doencas, dados_raciais, dados_vacinacao, doses_aplicadas, doses_recebidas, dados_munic, dados_imunizantes, atualizacao_imunizantes)
evolucao_cidade, evolucao_estado = gera_dados_evolucao_pandemia(dados_munic, dados_estado, isolamento, dados_vacinacao, internacoes)
evolucao_cidade, evolucao_estado = gera_dados_semana(evolucao_cidade, evolucao_estado, leitos_estaduais, isolamento, internacoes)
print('\nGerando gráficos e tabelas...')
gera_graficos(dados_munic, dados_cidade, hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total, dados_estado, isolamento, leitos_estaduais, evolucao_cidade, evolucao_estado, internacoes, doencas, dados_raciais, dados_vacinacao, dados_imunizantes)
print('\nAtualizando serviceWorker.js...')
atualiza_service_worker(dados_estado)
print('\nFim')
def carrega_dados_cidade():
hospitais_campanha = pd.read_csv('dados/hospitais_campanha_sp.csv', sep=',')
leitos_municipais = pd.read_csv('dados/leitos_municipais.csv', sep=',')
leitos_municipais_privados = pd.read_csv('dados/leitos_municipais_privados.csv', sep=',')
leitos_municipais_total = pd.read_csv('dados/leitos_municipais_total.csv', sep=',')
return hospitais_campanha, leitos_municipais, leitos_municipais_privados, leitos_municipais_total
def carrega_dados_estado():
hoje = data_processamento
ano = hoje.strftime('%Y')
mes = hoje.strftime('%m')
data = hoje.strftime('%Y%m%d')
try:
print('\tAtualizando dados dos municípios...')
URL = 'https://raw.githubusercontent.com/seade-R/dados-covid-sp/master/data/dados_covid_sp.csv'
dados_munic = pd.read_csv(URL, sep=';', decimal=',')
opcoes_zip = dict(method='zip', archive_name='dados_munic.csv')
dados_munic.to_csv('dados/dados_munic.zip', sep=';', decimal=',', index=False, compression=opcoes_zip)
except Exception as e:
traceback.print_exception(type(e), e, e.__traceback__)
print('\tErro ao buscar dados_covid_sp.csv do GitHub: lendo arquivo local.\n')
dados_munic = pd.read_csv('dados/dados_munic.zip', sep=';', decimal=',')
try:
print('\tAtualizando dados estaduais...')
URL = 'https://raw.githubusercontent.com/seade-R/dados-covid-sp/master/data/sp.csv'
dados_estado = pd.read_csv(URL, sep=';')
dados_estado.to_csv('dados/dados_estado_sp.csv', sep=';')
except Exception as e:
traceback.print_exception(type(e), e, e.__traceback__)
print('\tErro ao buscar dados_estado_sp.csv do GitHub: lendo arquivo local.\n')
dados_estado = pd.read_csv('dados/dados_estado_sp.csv', sep=';', decimal=',', encoding='latin-1', index_col=0)
try:
print('\tCarregando dados de isolamento social...')
isolamento = pd.read_csv('dados/isolamento_social.csv', sep=',')
except Exception as e:
print(f'\tErro ao buscar isolamento_social.csv\n\t{e}')
try:
print('\tAtualizando dados de internações...')
URL = ('https://raw.githubusercontent.com/seade-R/dados-covid-sp/master/data/plano_sp_leitos_internacoes.csv')
internacoes = pd.read_csv(URL, sep=';', decimal=',', thousands='.')
internacoes.to_csv('dados/internacoes.csv', sep=';', decimal=',')
except Exception as e:
try:
print(f'\tErro ao buscar internacoes.csv do GitHub: lendo arquivo da Seade.\n\t{e}')
URL = (f'http://www.seade.gov.br/wp-content/uploads/{ano}/{mes}/Leitos-e-Internacoes.csv')
internacoes = pd.read_csv(URL, sep=';', encoding='latin-1', decimal=',', thousands='.', engine='python',
skipfooter=2)
except Exception as e:
print(f'\tErro ao buscar internacoes.csv da Seade: lendo arquivo local.\n\t{e}')
internacoes = pd.read_csv('dados/internacoes.csv', sep=';', decimal=',', thousands='.', index_col=0)
try:
print('\tAtualizando dados de doenças preexistentes...')
URL = ('https://raw.githubusercontent.com/seade-R/dados-covid-sp/master/data/casos_obitos_doencas_preexistentes.csv.zip')
doencas = pd.read_csv(URL, sep=';')
if len(doencas.asma.unique()) == 3:
opcoes_zip = dict(method='zip', archive_name='doencas_preexistentes.csv')
doencas.to_csv('dados/doencas_preexistentes.zip', sep=';', compression=opcoes_zip)
else:
global processa_doencas
processa_doencas = False
raise Exception('O arquivo de doeças preexistentes não possui registros SIM/NÃO/IGNORADO para todas as doenças.')
except Exception as e:
try:
print(f'\tErro ao buscar doencas_preexistentes.csv do GitHub: lendo arquivo local.\n\t{e}')
doencas = pd.read_csv('dados/doencas_preexistentes.zip', sep=';', index_col=0)
except Exception as e:
print(f'\tErro ao buscar doencas_preexistentes.csv localmente: lendo arquivo da Seade.\n\t{e}')
URL = f'http://www.seade.gov.br/wp-content/uploads/{ano}/{mes}/casos_obitos_doencas_preexistentes.csv'
            doencas = pd.read_csv(URL, sep=';', encoding='latin-1')  # api: pandas.read_csv
#Instructions
#------------
#In this challenge, you are tasked with creating a Python script for analyzing the financial records of your company.
#You will be given two sets of revenue data (budget_data_1.csv and budget_data_2.csv).
#Each dataset is composed of two columns: Date and Revenue. (Thankfully, your company has rather lax standards
#for accounting so the records are simple.)
#Your task is to create a Python script that analyzes the records to calculate each of the following:
##The total number of months included in the dataset
##The total amount of revenue gained over the entire period
#The average change in revenue between months over the entire period
#The greatest increase in revenue (date and amount) over the entire period
#The greatest decrease in revenue (date and amount) over the entire period
#As an example, your analysis should look similar to the one below:
#Financial Analysis
#----------------------------
#Total Months: 25
#Total Revenue: $1241412
#Average Revenue Change: $216825
#Greatest Increase in Revenue: Sep-16 ($815531)
#Greatest Decrease in Revenue: Aug-12 ($-652794)
#Your final script should both print the analysis to the terminal
#and export a text file with the results.
#-----------------------------------------------------------------------------
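# A minimal alternative sketch (not the author's script below): the instructions above ask for the
# greatest increase/decrease, i.e. the largest month-over-month change rather than the largest
# single Revenue value, so this version works on the diff of the Revenue column and can be reused
# for both budget files. The relative path in the commented call is hypothetical.
import pandas as pd

def analyze_budget(csv_path, label):
    budget = pd.read_csv(csv_path)                 # expects Date and Revenue columns
    change = budget["Revenue"].diff().dropna()     # month-over-month change
    return "\n".join([
        f"Financial Analysis - {label}",
        "----------------------------",
        f"Total Months: {budget['Date'].count()}",
        f"Total Revenue: ${budget['Revenue'].sum()}",
        f"Average Revenue Change: ${round(change.mean(), 2)}",
        f"Greatest Increase in Revenue: {budget['Date'][change.idxmax()]} (${int(change.max())})",
        f"Greatest Decrease in Revenue: {budget['Date'][change.idxmin()]} (${int(change.min())})",
    ])

# print(analyze_budget("raw_data/budget_data_1.csv", "Dataset #1"))  # hypothetical path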
import numpy as np
import pandas as pd
#import os
#import csv
file = "C:/Users/nab226/Desktop/NUCHI201801DATA4-Class-Repository-DATA/MWS/Homework/03-Python/Instructions/PyBank/raw_data/budget_data_1.csv"
df = pd.read_csv(file)
#df.head()
#The total number of months included in the dataset
n_months = df.Date.count()
#n_months
print("Financial Analysis - Dataset #1")
print("----------------------------")
print("Total Months: "+str(n_months))
#The total amount of revenue gained over the entire period
total_rev = df["Revenue"].sum()
#total_rev
print("Total Revenue: $"+str(total_rev))
#The average change in revenue between months over the entire period
rev = np.array(df["Revenue"])
#rev
n_rev = rev.size
#n_rev
delta_rev = np.diff(rev)
#delta_rev
n_delta_rev = delta_rev.size
#n_delta_rev
avg_delta_rev = (delta_rev.sum())/(n_delta_rev)
avg_delta_rev = np.round(avg_delta_rev,2)
print("Average Revenue Change: $"+str(avg_delta_rev))
#The greatest increase in revenue (date and amount) over the entire period
max_rev = df["Revenue"].max()
#max_rev
min_rev = df["Revenue"].min()
#min_rev
dict_df = pd.Series(df.Revenue.values,index=df.Date).to_dict()
#dict_df
for Date, Revenue in dict_df.items():
if Revenue == max_rev:
print("Greatest Increase in Revenue: "+str(Date)+" ($"+str(Revenue)+")")
for Date, Revenue in dict_df.items():
if Revenue == min_rev:
print("Greatest Decrease in Revenue: "+str(Date)+" ($"+str(Revenue)+")")
print(" ")
with open("pybank_output1.txt", "w") as text_file:
print("Financial Analysis - Dataset #1", file=text_file)
print("----------------------------", file=text_file)
print("Total Months: "+str(n_months), file=text_file)
print("Total Revenue: $"+str(total_rev), file=text_file)
print("Average Revenue Change: $"+str(avg_delta_rev), file=text_file)
for Date, Revenue in dict_df.items():
if Revenue == max_rev:
print("Greatest Increase in Revenue: "+str(Date)+" ($"+str(Revenue)+")", file=text_file)
for Date, Revenue in dict_df.items():
if Revenue == min_rev:
print("Greatest Decrease in Revenue: "+str(Date)+" ($"+str(Revenue)+")", file=text_file)
#end of script for dataset #1
#re-run script for dataset #2
#-----------------------------------------------------------------------------
import numpy as np
import pandas as pd
#import os
#import csv
file = "C:/Users/nab226/Desktop/NUCHI201801DATA4-Class-Repository-DATA/MWS/Homework/03-Python/Instructions/PyBank/raw_data/budget_data_2.csv"
df = pd.read_csv(file)
#df.head()
#The total number of months included in the dataset
n_months = df.Date.count()
#n_months
print("Financial Analysis - Dataset #2")
print("----------------------------")
print("Total Months: "+str(n_months))
#The total amount of revenue gained over the entire period
total_rev = df["Revenue"].sum()
#total_rev
print("Total Revenue: $"+str(total_rev))
#The average change in revenue between months over the entire period
rev = np.array(df["Revenue"])
#rev
n_rev = rev.size
#n_rev
delta_rev = np.diff(rev)
#delta_rev
n_delta_rev = delta_rev.size
#n_delta_rev
avg_delta_rev = (delta_rev.sum())/(n_delta_rev)
avg_delta_rev = np.round(avg_delta_rev,2)
print("Average Revenue Change: $"+str(avg_delta_rev))
#The greatest increase in revenue (date and amount) over the entire period
max_rev = df["Revenue"].max()
#max_rev
min_rev = df["Revenue"].min()
#min_rev
dict_df = pd.Series(df.Revenue.values,index=df.Date)  # api: pandas.Series
import datetime
from sklearn.metrics import mean_squared_error, mean_absolute_error, f1_score
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from .dataset import DataSet
from .metergroup import MeterGroup
from .disaggregate import (
CombinatorialOptimisation,
Mean,
FHMM,
Zero,
DAE,
Seq2Point,
Seq2Seq,
DSC,
Disaggregator,
) # , AFHMM,AFHMM_SAC
class API:
"""
    The API is designed for rapid experimentation with NILM algorithms.
"""
def __init__(self, params):
"""
Initializes the API with default parameters
"""
self.power = {}
self.sample_period = 1
self.appliances = []
self.methods = {}
self.chunk_size = None
self.method_dict = {
"CO": {},
"FHMM": {},
"Hart85": {},
"DAE": {},
"Mean": {},
"Zero": {},
"WindowGRU": {},
"Seq2Point": {},
"RNN": {},
"Seq2Seq": {},
"DSC": {},
"AFHMM": {},
"AFHMM_SAC": {},
}
self.pre_trained = False
self.metrics = []
self.train_datasets_dict = {}
self.test_datasets_dict = {}
self.artificial_aggregate = False
self.train_submeters = []
self.train_mains = pd.DataFrame()
self.test_submeters = []
self.test_mains = pd.DataFrame()
self.gt_overall = {}
self.pred_overall = {}
self.classifiers = []
self.DROP_ALL_NANS = True
self.mae = pd.DataFrame()
self.rmse = pd.DataFrame()
self.experiment(params)
def initialise(self, params):
"""
Instantiates the API with the specified Parameters
"""
for elems in params["params"]["power"]:
self.power = params["params"]["power"]
self.sample_period = params["sample_rate"]
for elems in params["appliances"]:
self.appliances.append(elems)
        self.pre_trained = params.get("pre_trained", self.pre_trained)
self.train_datasets_dict = params["train"]["datasets"]
self.test_datasets_dict = params["test"]["datasets"]
self.metrics = params["test"]["metrics"]
self.methods = params["methods"]
self.artificial_aggregate = params.get(
"artificial_aggregate", self.artificial_aggregate
)
self.chunk_size = params.get("chunk_size", self.chunk_size)
def experiment(self, params):
"""
Calls the Experiments with the specified parameters
"""
self.params = params
self.initialise(params)
if params["chunk_size"]:
# This is for training and Testing in Chunks
self.load_datasets_chunks()
else:
# This is to load all the data from all buildings and use it for training and testing. This might not be possible to execute on computers with low specs
self.load_datasets()
def load_datasets_chunks(self):
"""
This function loads the data from buildings and datasets with the specified chunk size and trains on each of them.
After the training process is over, it tests on the specified testing set whilst loading it in chunks.
"""
# First, we initialize all the models
self.store_classifier_instances()
d = self.train_datasets_dict
for model_name, clf in self.classifiers:
# If the model is a neural net, it has an attribute n_epochs, Ex: DAE, Seq2Point
if hasattr(clf, "n_epochs"):
epochs = clf.n_epochs
# If it doesn't have the attribute n_epochs, this is executed. Ex: Mean, Zero
else:
epochs = 1
# If the model has the filename specified for loading the pretrained model, then we don't need to load training data
if clf.load_model_path:
print(clf.MODEL_NAME, " is loading the pretrained model")
continue
for q in range(epochs):
for dataset in d:
print("Loading data for ", dataset, " dataset")
for building in d[dataset]["buildings"]:
train = DataSet(d[dataset]["path"])
print("Loading building ... ", building)
train.set_window(
start=d[dataset]["buildings"][building]["start_time"],
end=d[dataset]["buildings"][building]["end_time"],
)
mains_iterator = (
train.buildings[building]
.elec.mains()
.load(
chunksize=self.chunk_size,
physical_quantity="power",
ac_type=self.power["mains"],
sample_period=self.sample_period,
)
)
print(self.appliances)
appliance_iterators = [
train.buildings[building]
.elec.select_using_appliances(type=app_name)
.load(
chunksize=self.chunk_size,
physical_quantity="power",
ac_type=self.power["appliance"],
sample_period=self.sample_period,
)
for app_name in self.appliances
]
print(train.buildings[building].elec.mains())
for chunk_num, chunk in enumerate(
train.buildings[building]
.elec.mains()
.load(
chunksize=self.chunk_size,
physical_quantity="power",
ac_type=self.power["mains"],
sample_period=self.sample_period,
)
):
                            # Dummy loop at the outer level, just to iterate until the end of the chunked mains data
print("starting enumeration..........")
train_df = next(mains_iterator)
appliance_readings = []
for i in appliance_iterators:
try:
appliance_df = next(i)
except StopIteration:
                                    appliance_df = pd.DataFrame()
appliance_readings.append(appliance_df)
if self.DROP_ALL_NANS:
train_df, appliance_readings = self.dropna(
train_df, appliance_readings
)
if self.artificial_aggregate:
print("Creating an Artificial Aggregate")
train_df = pd.DataFrame(
np.zeros(appliance_readings[0].shape),
index=appliance_readings[0].index,
columns=appliance_readings[0].columns,
)
for app_reading in appliance_readings:
train_df += app_reading
train_appliances = []
for cnt, i in enumerate(appliance_readings):
train_appliances.append((self.appliances[cnt], [i]))
self.train_mains = [train_df]
self.train_submeters = train_appliances
clf.partial_fit(self.train_mains, self.train_submeters)
print("...............Finished the Training Process ...................")
print("...............Started the Testing Process ...................")
d = self.test_datasets_dict
for dataset in d:
print("Loading data for ", dataset, " dataset")
for building in d[dataset]["buildings"]:
test = DataSet(d[dataset]["path"])
test.set_window(
start=d[dataset]["buildings"][building]["start_time"],
end=d[dataset]["buildings"][building]["end_time"],
)
mains_iterator = (
test.buildings[building]
.elec.mains()
.load(
chunksize=self.chunk_size,
physical_quantity="power",
ac_type=self.power["mains"],
sample_period=self.sample_period,
)
)
appliance_iterators = [
test.buildings[building]
.elec.select_using_appliances(type=app_name)
.load(
chunksize=self.chunk_size,
physical_quantity="power",
ac_type=self.power["appliance"],
sample_period=self.sample_period,
)
for app_name in self.appliances
]
for chunk_num, chunk in enumerate(
test.buildings[building]
.elec.mains()
.load(
chunksize=self.chunk_size,
physical_quantity="power",
ac_type=self.power["mains"],
sample_period=self.sample_period,
)
):
test_df = next(mains_iterator)
appliance_readings = []
for i in appliance_iterators:
try:
appliance_df = next(i)
except StopIteration:
appliance_df = pd.DataFrame()
appliance_readings.append(appliance_df)
if self.DROP_ALL_NANS:
test_df, appliance_readings = self.dropna(
test_df, appliance_readings
)
if self.artificial_aggregate:
print("Creating an Artificial Aggregate")
test_df = pd.DataFrame(
np.zeros(appliance_readings[0].shape),
index=appliance_readings[0].index,
columns=appliance_readings[0].columns,
)
for app_reading in appliance_readings:
test_df += app_reading
test_appliances = []
for cnt, i in enumerate(appliance_readings):
test_appliances.append((self.appliances[cnt], [i]))
self.test_mains = [test_df]
self.test_submeters = test_appliances
print(
"Results for Dataset {dataset} Building {building} Chunk {chunk_num}".format(
dataset=dataset, building=building, chunk_num=chunk_num
)
)
self.call_predict(self.classifiers)
def dropna(self, mains_df, appliance_dfs):
"""
        Drops the missing values in the mains and appliance readings and returns consistent data by computing the intersection
"""
print("Dropping missing values")
# The below steps are for making sure that data is consistent by doing intersection across appliances
mains_df = mains_df.dropna()
for i in range(len(appliance_dfs)):
appliance_dfs[i] = appliance_dfs[i].dropna()
ix = mains_df.index
for app_df in appliance_dfs:
ix = ix.intersection(app_df.index)
mains_df = mains_df.loc[ix]
new_appliances_list = []
for app_df in appliance_dfs:
new_appliances_list.append(app_df.loc[ix])
return mains_df, new_appliances_list
def load_datasets(self):
# This function has a few issues, which should be addressed soon
self.store_classifier_instances()
d = self.train_datasets_dict
print("............... Loading Data for training ...................")
# store the train_main readings for all buildings
for dataset in d:
print("Loading data for ", dataset, " dataset")
train = DataSet(d[dataset]["path"])
for building in d[dataset]["buildings"]:
print("Loading building ... ", building)
train.set_window(
start=d[dataset]["buildings"][building]["start_time"],
end=d[dataset]["buildings"][building]["end_time"],
)
self.train_mains = self.train_mains.append(
next(
train.buildings[building]
.elec.mains()
.load(
physical_quantity="power",
ac_type=self.power["mains"],
sample_period=self.sample_period,
)
)
)
# store train submeters reading
train_buildings = pd.DataFrame()
for appliance in self.appliances:
train_df = pd.DataFrame()
print("For appliance .. ", appliance)
for dataset in d:
print("Loading data for ", dataset, " dataset")
train = DataSet(d[dataset]["path"])
for building in d[dataset]["buildings"]:
print("Loading building ... ", building)
# store data for submeters
train.set_window(
start=d[dataset]["buildings"][building]["start_time"],
end=d[dataset]["buildings"][building]["end_time"],
)
train_df = train_df.append(
next(
train.buildings[building]
.elec.submeters()
.select_using_appliances(type=appliance)
.load(
physical_quantity="power",
ac_type=self.power["appliance"],
sample_period=self.sample_period,
)
)
)
self.train_submeters.append((appliance, [train_df]))
# create instance of the training methods
# train models
# store data for mains
self.train_mains = [self.train_mains]
self.call_partial_fit()
d = self.test_datasets_dict
# store the test_main readings for all buildings
for dataset in d:
print("Loading data for ", dataset, " dataset")
test = DataSet(d[dataset]["path"])
for building in d[dataset]["buildings"]:
test.set_window(
start=d[dataset]["buildings"][building]["start_time"],
end=d[dataset]["buildings"][building]["end_time"],
)
self.test_mains = next(
test.buildings[building]
.elec.mains()
.load(
physical_quantity="power",
ac_type=self.power["mains"],
sample_period=self.sample_period,
)
)
self.test_submeters = []
for appliance in self.appliances:
test_df = next(
(
test.buildings[building]
.elec.submeters()
.select_using_appliances(type=appliance)
.load(
physical_quantity="power",
ac_type=self.power["appliance"],
sample_period=self.sample_period,
)
)
)
self.test_submeters.append((appliance, [test_df]))
self.test_mains = [self.test_mains]
self.call_predict(self.classifiers)
def store_classifier_instances(self):
"""
        This function is responsible for initializing the models with the specified model parameters
"""
method_dict = {}
for i in self.method_dict:
if i in self.methods:
self.method_dict[i].update(self.methods[i])
method_dict = {
"CO": CombinatorialOptimisation(self.method_dict["CO"]),
"FHMM": FHMM(self.method_dict["FHMM"]),
"DAE": DAE(self.method_dict["DAE"]),
"Mean": Mean(self.method_dict["Mean"]),
"Zero": Zero(self.method_dict["Zero"]),
"Seq2Seq": Seq2Seq(self.method_dict["Seq2Seq"]),
"Seq2Point": Seq2Point(self.method_dict["Seq2Point"]),
"DSC": DSC(self.method_dict["DSC"]),
# 'AFHMM':AFHMM(self.method_dict['AFHMM']),
# 'AFHMM_SAC':AFHMM_SAC(self.method_dict['AFHMM_SAC'])
#'RNN':RNN(self.method_dict['RNN'])
}
for name in self.methods:
if name in method_dict:
clf = method_dict[name]
self.classifiers.append((name, clf))
else:
                print(
                    "\n\nThe method {model_name} specified does not exist. \n\n".format(
                        model_name=name
                    )
                )
def call_predict(self, classifiers):
"""
        This function computes predictions on self.test_mains using all the trained models and then compares the learned models using the specified metrics
"""
pred_overall = {}
gt_overall = {}
for name, clf in classifiers:
gt_overall, pred_overall[name] = self.predict(
clf,
self.test_mains,
self.test_submeters,
self.sample_period,
"Europe/London",
)
self.gt_overall = gt_overall
self.pred_overall = pred_overall
for i in gt_overall.columns:
plt.figure()
plt.plot(gt_overall[i], label="truth")
for clf in pred_overall:
plt.plot(pred_overall[clf][i], label=clf)
plt.title(i)
plt.legend()
if gt_overall.size == 0:
print("No samples found in ground truth")
return None
for metric in self.metrics:
if metric == "f1-score":
f1_score = {}
for clf_name, clf in classifiers:
f1_score[clf_name] = self.compute_f1_score(
gt_overall, pred_overall[clf_name]
)
f1_score = pd.DataFrame(f1_score)
print("............ ", metric, " ..............")
print(f1_score)
elif metric == "rmse":
rmse = {}
for clf_name, clf in classifiers:
rmse[clf_name] = self.compute_rmse(
gt_overall, pred_overall[clf_name]
)
rmse = pd.DataFrame(rmse)
self.rmse = rmse
print("............ ", metric, " ..............")
print(rmse)
elif metric == "mae":
mae = {}
for clf_name, clf in classifiers:
mae[clf_name] = self.compute_mae(gt_overall, pred_overall[clf_name])
mae = pd.DataFrame(mae)
self.mae = mae
print("............ ", metric, " ..............")
print(mae)
elif metric == "rel_error":
rel_error = {}
for clf_name, clf in classifiers:
rel_error[clf_name] = self.compute_rel_error(
gt_overall, pred_overall[clf_name]
)
rel_error = pd.DataFrame(rel_error)
print("............ ", metric, " ..............")
print(rel_error)
else:
print(
"The requested metric {metric} does not exist.".format(
metric=metric
)
)
def predict(self, clf, test_elec, test_submeters, sample_period, timezone):
"""
Generates predictions on the test dataset using the specified classifier.
"""
# "ac_type" varies according to the dataset used.
# Make sure to use the correct ac_type before using the default parameters in this code.
pred_list = clf.disaggregate_chunk(test_elec)
# It might not have time stamps sometimes due to neural nets
# It has the readings for all the appliances
concat_pred_df = pd.concat(pred_list, axis=0)
gt = {}
for meter, data in test_submeters:
concatenated_df_app = pd.concat(data, axis=1)
index = concatenated_df_app.index
gt[meter] = pd.Series(concatenated_df_app.values.flatten(), index=index)
gt_overall = pd.DataFrame(gt, dtype="float32")
pred = {}
for app_name in concat_pred_df.columns:
app_series_values = concat_pred_df[app_name].values.flatten()
# Neural nets do extra padding sometimes, to fit, so get rid of extra predictions
app_series_values = app_series_values[: len(gt_overall[app_name])]
# print (len(gt_overall[app_name]),len(app_series_values))
pred[app_name] = pd.Series(app_series_values, index=gt_overall.index)
pred_overall = pd.DataFrame(pred, dtype="float32")
# gt[i] = pd.DataFrame({k:v.squeeze() for k,v in iteritems(gt[i]) if len(v)}, index=next(iter(gt[i].values())).index).dropna()
# If everything can fit in memory
# gt_overall = pd.concat(gt)
# gt_overall.index = gt_overall.index.droplevel()
# #pred_overall = pd.concat(pred)
# pred_overall.index = pred_overall.index.droplevel()
# Having the same order of columns
# gt_overall = gt_overall[pred_overall.columns]
# #Intersection of index
# gt_index_utc = gt_overall.index.tz_convert("UTC")
# pred_index_utc = pred_overall.index.tz_convert("UTC")
# common_index_utc = gt_index_utc.intersection(pred_index_utc)
# common_index_local = common_index_utc.tz_convert(timezone)
# gt_overall = gt_overall.loc[common_index_local]
# pred_overall = pred_overall.loc[common_index_local]
# appliance_labels = [m for m in gt_overall.columns.values]
# gt_overall.columns = appliance_labels
# pred_overall.columns = appliance_labels
return gt_overall, pred_overall
# metrics
def compute_mae(self, gt, pred):
"""
Computes the Mean Absolute Error between Ground truth and Prediction
"""
mae = {}
for appliance in gt.columns:
mae[appliance] = mean_absolute_error(gt[appliance], pred[appliance])
return pd.Series(mae)
def compute_rmse(self, gt, pred):
"""
Computes the Root Mean Squared Error between Ground truth and Prediction
"""
rms_error = {}
for appliance in gt.columns:
rms_error[appliance] = np.sqrt(
mean_squared_error(gt[appliance], pred[appliance])
)
# print (gt['sockets'])
# print (pred[])
return pd.Series(rms_error)
def compute_f1_score(self, gt, pred):
"""
Computes the F1 Score between Ground truth and Prediction
"""
f1 = {}
gttemp = {}
predtemp = {}
for appliance in gt.columns:
gttemp[appliance] = np.array(gt[appliance])
gttemp[appliance] = np.where(gttemp[appliance] < 10, 0, 1)
predtemp[appliance] = np.array(pred[appliance])
predtemp[appliance] = np.where(predtemp[appliance] < 10, 0, 1)
f1[appliance] = f1_score(gttemp[appliance], predtemp[appliance])
return pd.Series(f1)
def compute_rel_error(self, gt, pred):
"""
Computes the Relative Error between Ground truth and Prediction
"""
rel_error = {}
for appliance in gt.columns:
rel_error[appliance] = np.sum(
np.sum(abs(gt[appliance] - pred[appliance])) / len(gt[appliance])
)
        return pd.Series(rel_error)  # api: pandas.Series
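# A hedged usage sketch for the API class above (not part of the original module). The keys mirror
# what initialise()/experiment() read; the dataset path, building number, time windows and appliance
# names below are placeholders, and the path must point to a valid NILMTK HDF5 store.
example_params = {
    "params": {"power": {"mains": ["apparent"], "appliance": ["active"]}},
    "sample_rate": 60,
    "appliances": ["fridge"],
    "pre_trained": False,
    "methods": {"Mean": {}, "CO": {}},
    "chunk_size": 2048,
    "artificial_aggregate": False,
    "train": {
        "datasets": {
            "REDD": {
                "path": "redd.h5",  # hypothetical path
                "buildings": {1: {"start_time": "2011-04-18", "end_time": "2011-04-30"}},
            }
        }
    },
    "test": {
        "datasets": {
            "REDD": {
                "path": "redd.h5",  # hypothetical path
                "buildings": {1: {"start_time": "2011-05-01", "end_time": "2011-05-07"}},
            }
        },
        "metrics": ["mae", "rmse"],
    },
}
# api_run = API(example_params)  # trains in chunks, then reports MAE/RMSE per appliance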
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import log_loss
from catboost import CatBoostClassifier
import joblib
train_df = pd.read_csv('/content/drive/My Drive/DSN Challenge - Insurance Prediction/train_data.csv')  # api: pandas.read_csv
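# The imports above point at a train/validation split followed by a CatBoost fit scored with
# log loss. A minimal baseline sketch under assumptions: the 'target' column name is hypothetical
# and the real competition columns may differ.
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss
from catboost import CatBoostClassifier

def quick_catboost_baseline(df, target_col="target"):
    X = df.drop(columns=[target_col])
    y = df[target_col]
    cat_cols = X.select_dtypes(include="object").columns.tolist()  # CatBoost consumes raw categoricals
    X_tr, X_val, y_tr, y_val = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)
    model = CatBoostClassifier(iterations=300, learning_rate=0.1, verbose=0)
    model.fit(X_tr, y_tr, cat_features=cat_cols, eval_set=(X_val, y_val))
    return log_loss(y_val, model.predict_proba(X_val)[:, 1])

# val_logloss = quick_catboost_baseline(train_df)  # assumes train_df holds a 'target' column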
import math
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import itertools
class DataLoader():
"""A class for loading and transforming data for the lstm model"""
def __init__(self, filename, split, cols):
        dataframe = pd.read_csv(filename)  # api: pandas.read_csv
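# The DataLoader constructor above is cut off at the pd.read_csv call. Below is a hedged sketch of
# how the (split, cols) arguments might be used: a chronological split by the `split` fraction,
# keeping only `cols`. This is an assumption about the truncated code, not recovered from the file.
import pandas as pd

def split_frame(dataframe, split, cols):
    i_split = int(len(dataframe) * split)
    values = dataframe[cols].values
    return values[:i_split], values[i_split:]  # (train rows, test rows) as numpy arrays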
from base.base_data_loader import BaseDataLoader
import pandas as pd
import numpy as np
import tokenizers
class RobertaDataLoader(BaseDataLoader):
def __init__(self, config):
super(RobertaDataLoader, self).__init__(config)
self.X_train, self.y_train = self.load_data(self.config.data.data_path.train)
self.X_train, self.y_train = self.preprocess_data(self.X_train,self.y_train)
self.X_test, self.y_test = self.load_data(self.config.data.data_path.test)
self.X_test, self.y_test_token = self.preprocess_data(self.X_test,self.y_test)
def get_tokenizer(self):
tokenizer = tokenizers.ByteLevelBPETokenizer(
vocab_file= self.config.data.roberta.path + self.config.data.roberta.vocab,
merges_file= self.config.data.roberta.path + self.config.data.roberta.merges,
lowercase= self.config.data.roberta.lowercase,
add_prefix_space= self.config.data.roberta.add_prefix_space
)
return tokenizer
def get_train_data(self):
return self.X_train, self.y_train
def get_test_data(self):
return self.X_test, self.y_test
def load_data(self, path):
df = pd.read_csv(path)
return df[['text','sentiment']], df['selected_text']
def preprocess_data(self, X, y):
        df = pd.concat([X, y], axis=1)  # api: pandas.concat
import praw
import pandas as pd
import numpy as np
import requests
from praw.models import MoreComments
import datetime as dt
from psaw import PushshiftAPI
from tqdm import tqdm
import pickle
import dill
from datetime import datetime,timedelta
import tqdm.notebook as tq
TIMEOUT_AFTER_COMMENT_IN_SECS = .350
club_reddit_abbr = ['ACMilan', 'atletico', 'Barca', 'borussiadortmund', 'chelseafc', 'fcbayern', 'FCInterMilan',
'Gunners',
'Juve', 'LiverpoolFC', 'MCFC', 'psg', 'realmadrid', 'reddevils', 'roma', 'coys']
premier_other_reddit_abbr = ['lcfc', 'Hammers','LeedsUnited', 'Everton','avfc','NUFC','WWFC','crystalpalace',
'SaintsFC','BrightonHoveAlbion','Burnley','fulhamfc','WBAfootball','SheffieldUnited']
premier_reddit_abbr = ['lcfc', 'Hammers','LeedsUnited', 'Everton','avfc','NUFC','WWFC','crystalpalace',
'SaintsFC','BrightonHoveAlbion','Burnley','fulhamfc','WBAfootball','SheffieldUnited',
'chelseafc','Gunners','LiverpoolFC','MCFC','reddevils','coys']
premier_team_full_name = ['Leicester City','West Ham United', 'Leeds United', 'Everton', 'Aston Villa', 'Newcastle United','Wolverhampton Wanderers',
'Crystal Palace','Southampton','Brighton','Burnley','Fulham','West Bromwich Albion','Sheffield United',
'Chelsea','Arsenal','Liverpool','Manchester City','Manchester United','Tottenham Spurs']
TOT_abbr = ['coys','PremierLeague','soccer','football','ThreeLions']
all_abbr = ['lcfc', 'Hammers','LeedsUnited', 'Everton','avfc','NUFC','WWFC','crystalpalace',
'SaintsFC','BrightonHoveAlbion','Burnley','fulhamfc','WBAfootball','SheffieldUnited',
'chelseafc','Gunners','LiverpoolFC','MCFC','reddevils','coys','PremierLeague','soccer','football','ThreeLions']
club_abbr = ['MIL', 'AMD', 'BAR', 'DOR', 'CHE', 'MUN', 'INT', 'ARS',
'JUV', 'LIV', 'MNC', 'PSG', 'RMD', 'MNU', 'ROM', 'TOT']
premier_other_abbr = ['LEI','WHU','LEE','EVE','AST','NEW','WOL','PAL',
'SOT','BHA','BUR','FUL','WBA','SHW']
premier_abbr = ['LEI','WHU','LEE','EVE','AST','NEW','WOL','PAL',
'SOT','BHA','BUR','FUL','WBA','SHW','CHE','ARS',
'LIV', 'MNC','MNU','TOT']
target_player = ['Kane', 'Sterling', 'Walker', 'Stones', 'Pickford', 'Lingard']
target_player_1 = ['Kane','Sterling','Stones']
player_club = ['coys','MCFC','MCFC']
player_group1 = ['Sterling','Bruyne','Stones','Mahrez', 'Kane', 'Rashford', 'Lingard']
# reddit api initialization
def api_initialization():
auth = requests.auth.HTTPBasicAuth('bCnE1U61Wqixgs2wy28POg', 'vEY7k3_j7o3PZZvP-tEt6DnhWr1x5A')
data = {'grant_type': 'password',
'username': 'Delta_Wang11',
'password': '<PASSWORD>'
}
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/93.0.4577.82 Safari/537.36'}
res = requests.post('https://www.reddit.com/api/v1/access_token',
auth=auth, data=data, headers=headers)
TOKEN = res.json()['access_token']
headers = {**headers, **{'Authorization': f"bearer {TOKEN}"}}
requests.get('https://oauth.reddit.com/api/v1/me', headers=headers)
return headers
def time_initialization():
start_time_list = []
end_time_list = []
year = [2020, 2020, 2020, 2020, 2021, 2021, 2021, 2021, 2021, 2021, 2021, 2021]
month = [9, 10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8]
end_day = [30, 31, 30, 31, 31, 28, 31, 30, 31, 30, 31, 31]
for i in range(12):
start_time_list.append(int(dt.datetime(year[i], month[i], 1).timestamp()))
end_time_list.append(int(dt.datetime(year[i], month[i], end_day[i]).timestamp()))
return start_time_list, end_time_list
def time_generation_year(y):
start_time_list = []
end_time_list = []
end_day_lunar = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
end_day = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
for year in range(2021-y,2022):
if year==2016 or year==2020:
for m in range(12):
start_time_list.append(int(dt.datetime(year, m+1, 1).timestamp()))
end_time_list.append(int(dt.datetime(year, m+1, end_day_lunar[m]).timestamp()))
else:
for m in range(12):
start_time_list.append(int(dt.datetime(year, m + 1, 1).timestamp()))
end_time_list.append(int(dt.datetime(year, m + 1, end_day[m]).timestamp()))
return start_time_list,end_time_list
def list_flatten(target):
return [item for sublist in target for item in sublist]
def transform_df(filename):
with open(filename, 'rb') as file:
posts = dill.load(file)
posts = list_flatten(posts)
posts_info = [post[-1] for post in posts]
df = pd.DataFrame()
for post in posts_info:
df = df.append({
'author': post['author'],
'id': post['id'],
'created_utc': post['created_utc'],
'subreddit': post['subreddit'],
'url': post['url'],
'title': post['title'],
'num_comments': post['num_comments'],
'score': post['score'],
'upvote_ratio': post['upvote_ratio'],
'self_text': post['selftext'] if 'selftext' in post else ''
}, ignore_index=True)
return df
def parse_dates(df):
dates = df['created_utc'].astype(int)
real_dates = []
for date in dates:
real_dates.append(datetime.fromtimestamp(date)-timedelta(hours=8))
df['time'] = real_dates
df['time_str'] = df['time'].astype("str")
return df
def praw_init():
return praw.Reddit(client_id='bCnE1U61Wqixgs2wy28POg', client_secret='<KEY>',
user_agent='Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36')
def extract_comments_by_praw(reddit, filename, columns):
comments = pd.DataFrame(columns = columns)
#reddit = praw_init()
data = pd.read_csv(filename)
data_comments_50 = data.loc[data['num_comments']>=50]
if data_comments_50.shape[0]>=100:
for i in tqdm(range(len(data_comments_50))):
uid = data_comments_50.iloc[i]['id']
subreddit = data_comments_50.iloc[i]['subreddit']
time = data_comments_50.iloc[i]['time']
title = data_comments_50.iloc[i]['title']
submission = reddit.submission(id=uid)
submission.comments.replace_more(limit=None)
for comment in submission.comments.list():
comments = comments.append({'comments': comment.body, 'post_id': uid, 'subreddit': subreddit,
'time': time, 'title': title},
ignore_index=True)
#if TIMEOUT_AFTER_COMMENT_IN_SECS > 0:
#time.sleep(TIMEOUT_AFTER_COMMENT_IN_SECS)
'''
for top_level_comment in tqdm(submission.comments):
if isinstance(top_level_comment, MoreComments):
for cid in top_level_comment.children:
comments = comments.append({'comments': reddit.comment(cid).body, 'post_id': uid, 'subreddit': subreddit,
'time': time, 'title': title},
ignore_index=True)
else:
comments = comments.append({'comments': top_level_comment.body, 'post_id': uid, 'subreddit': subreddit,
'time': time, 'title': title},
ignore_index=True)
'''
return comments
def extract_comments_by_psaw(filename, columns):
api = PushshiftAPI()
comments = pd.DataFrame(columns=columns)
data = pd.read_csv(filename)
data_comments_50 = data.loc[data['num_comments'] >= 50]
if data_comments_50.shape[0] >= 100:
for i in tqdm(range(len(data_comments_50))):
uid = data_comments_50.iloc[i]['id']
subreddit = data_comments_50.iloc[i]['subreddit']
time = data_comments_50.iloc[i]['date']
title = data_comments_50.iloc[i]['title']
comment_ids = api._get_submission_comment_ids(uid)
for cid in tqdm(comment_ids):
gen = api.search_comments(ids=cid)
comment = next(gen).body
comments = comments.append(
{'comments': comment, 'post_id': uid, 'subreddit': subreddit,
'time': time, 'title': title},
ignore_index=True)
return comments
def extract_comments_by_api(filename,columns):
comments = pd.DataFrame(columns=columns)
headers = api_initialization()
posts = pd.read_csv(filename)
posts_comments = posts.loc[posts['num_comments'] >= 50]
subreddit = posts['subreddit'][0]
if posts_comments.shape[0] >= 100:
iter = 0
for uid in tq.tqdm(posts_comments['id']):
if iter%200 == 0:
headers = api_initialization()
iter += 1
try:
url = f"https://oauth.reddit.com/r/{subreddit}/comments/{uid}/"
res = requests.get(url, headers=headers)
comment_list = res.json()[1]
comments = get_comment(comment_list,comments,uid)
except Exception as e:
comments.to_csv(f'../data/comments/temp{iter}.csv',index=False)
pass
continue
return comments
def extract_gameday_comments(filename,matchfilename,columns):
comments = pd.DataFrame(columns=columns)
headers = api_initialization()
posts = pd.read_csv(filename)
posts['datetime'] = pd.to_datetime(posts.time)
    match = pd.read_csv(matchfilename)  # api: pandas.read_csv
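# A hedged usage sketch for the helpers above (file names are hypothetical). The posts CSV must
# contain the id/subreddit/time/title/num_comments columns that extract_comments_by_praw indexes.
if __name__ == "__main__":
    reddit = praw_init()
    comment_cols = ['comments', 'post_id', 'subreddit', 'time', 'title']
    comments_df = extract_comments_by_praw(reddit, 'data/posts_coys.csv', comment_cols)
    comments_df.to_csv('data/comments_coys.csv', index=False)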
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
tm.assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(["1.0", "2.0", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]), dtype="int64")
expected = pd.Series([1, 2, 3], dtype="int64")
tm.assert_series_equal(result, expected)
# GH12574
cat = Series(pd.Categorical([1, 2, 3]), dtype="category")
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype="category")
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
# test basic creation / coercion of categoricals
s = Series(factor, name="A")
assert s.dtype == "category"
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({"A": factor})
result = df["A"]
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({"A": s})
result = df["A"]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({"A": s, "B": s, "C": 1})
result1 = df["A"]
result2 = df["B"]
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == "B"
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(
["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)
)
assert is_categorical_dtype(result.dtype) is True
tm.assert_index_equal(result.cat.categories, pd.Index(["a", "b", "c"]))
assert result.cat.ordered
result = pd.Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))
assert is_categorical_dtype(result.dtype)
tm.assert_index_equal(result.cat.categories, pd.Index(["b", "a"]))
assert result.cat.ordered is False
# GH 19565 - Check broadcasting of scalar with Categorical dtype
result = Series(
"a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
expected = Series(
["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
tm.assert_series_equal(result, expected)
def test_constructor_categorical_string(self):
# GH 26336: the string 'category' maintains existing CategoricalDtype
cdt = CategoricalDtype(categories=list("dabc"), ordered=True)
expected = Series(list("abcabc"), dtype=cdt)
# Series(Categorical, dtype='category') keeps existing dtype
cat = Categorical(list("abcabc"), dtype=cdt)
result = Series(cat, dtype="category")
tm.assert_series_equal(result, expected)
# Series(Series[Categorical], dtype='category') keeps existing dtype
result = Series(result, dtype="category")
tm.assert_series_equal(result, expected)
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat, copy=True)
assert s.cat is not cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat)
assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_unordered_compare_equal(self):
left = pd.Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"]))
right = pd.Series(pd.Categorical(["a", "b", np.nan], categories=["a", "b"]))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0.0, np.nan, 2.0], index=index)
tm.assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=int)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=float)
tm.assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0, np.nan, 2], index=index, dtype=float)
tm.assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=bool)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=object)
tm.assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([True, np.nan, False], index=index, dtype=object)
tm.assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype="M8[ns]")
result = Series(data)
expected = Series([iNaT, iNaT, iNaT], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), iNaT, datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
data = ma.masked_all((3,), dtype=float).harden_mask()
result = pd.Series(data)
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range("20090415", "20090519", freq="B")
data = {k: 1 for k in rng}
result = Series(data, index=rng)
assert result.index is rng
def test_constructor_default_index(self):
s = Series([0, 1, 2])
tm.assert_index_equal(s.index, pd.Index(np.arange(3)))
@pytest.mark.parametrize(
"input",
[
[1, 2, 3],
(1, 2, 3),
list(range(3)),
pd.Categorical(["a", "b", "a"]),
(i for i in range(3)),
map(lambda x: x, range(3)),
],
)
def test_constructor_index_mismatch(self, input):
# GH 19342
# test that construction of a Series with an index of different length
# raises an error
msg = "Length of passed values is 3, index implies 4"
with pytest.raises(ValueError, match=msg):
Series(input, index=np.arange(4))
def test_constructor_numpy_scalar(self):
# GH 19342
# construction with a numpy scalar
# should not raise
result = Series(np.array(100), index=np.arange(4), dtype="int64")
expected = Series(100, index=np.arange(4), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_broadcast_list(self):
# GH 19342
# construction with single-element container and index
# should raise
msg = "Length of passed values is 1, index implies 3"
with pytest.raises(ValueError, match=msg):
Series(["foo"], index=["a", "b", "c"])
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
assert isinstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1.0, 1.0, 8.0]), dtype="i8")
assert s.dtype == np.dtype("i8")
s = Series(np.array([1.0, 1.0, np.nan]), copy=True, dtype="i8")
assert s.dtype == np.dtype("f8")
def test_constructor_copy(self):
# GH15125
# test dtype parameter has no side effects on copy=True
for data in [[1.0], np.array([1.0])]:
x = Series(data)
y = pd.Series(x, copy=True, dtype=float)
# copy=True maintains original data in Series
tm.assert_series_equal(x, y)
# changes to origin of copy does not affect the copy
x[0] = 2.0
assert not x.equals(y)
assert x[0] == 2.0
assert y[0] == 1.0
@pytest.mark.parametrize(
"index",
[
pd.date_range("20170101", periods=3, tz="US/Eastern"),
pd.date_range("20170101", periods=3),
pd.timedelta_range("1 day", periods=3),
pd.period_range("2012Q1", periods=3, freq="Q"),
pd.Index(list("abc")),
pd.Int64Index([1, 2, 3]),
pd.RangeIndex(0, 3),
],
ids=lambda x: type(x).__name__,
)
def test_constructor_limit_copies(self, index):
# GH 17449
# limit copies of input
s = pd.Series(index)
# we make 1 copy; this is just a smoke test here
assert s._mgr.blocks[0].values is not index
def test_constructor_pass_none(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(None, index=range(5))
assert s.dtype == np.float64
s = Series(None, index=range(5), dtype=object)
assert s.dtype == np.object_
# GH 7431
# inference on the index
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(index=np.array([None]))
expected = Series(index=Index([None]))
tm.assert_series_equal(s, expected)
def test_constructor_pass_nan_nat(self):
# GH 13467
exp = Series([np.nan, np.nan], dtype=np.float64)
assert exp.dtype == np.float64
tm.assert_series_equal(Series([np.nan, np.nan]), exp)
tm.assert_series_equal(Series(np.array([np.nan, np.nan])), exp)
exp = Series([pd.NaT, pd.NaT])
assert exp.dtype == "datetime64[ns]"
tm.assert_series_equal(Series([pd.NaT, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, pd.NaT])), exp)
tm.assert_series_equal(Series([pd.NaT, np.nan]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, np.nan])), exp)
tm.assert_series_equal(Series([np.nan, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([np.nan, pd.NaT])), exp)
def test_constructor_cast(self):
msg = "could not convert string to float"
with pytest.raises(ValueError, match=msg):
Series(["a", "b", "c"], dtype=float)
def test_constructor_unsigned_dtype_overflow(self, uint_dtype):
# see gh-15832
msg = "Trying to coerce negative values to unsigned integers"
with pytest.raises(OverflowError, match=msg):
Series([-1], dtype=uint_dtype)
def test_constructor_coerce_float_fail(self, any_int_dtype):
# see gh-15832
msg = "Trying to coerce float values to integers"
with pytest.raises(ValueError, match=msg):
Series([1, 2, 3.5], dtype=any_int_dtype)
def test_constructor_coerce_float_valid(self, float_dtype):
s = Series([1, 2, 3.5], dtype=float_dtype)
expected = Series([1, 2, 3.5]).astype(float_dtype)
tm.assert_series_equal(s, expected)
def test_constructor_dtype_no_cast(self):
# see gh-1572
s = Series([1, 2, 3])
s2 = Series(s, dtype=np.int64)
s2[1] = 5
assert s[1] == 5
def test_constructor_datelike_coercion(self):
# GH 9477
        # incorrectly inferring on datetimelike looking when object dtype is
# specified
s = Series([Timestamp("20130101"), "NOV"], dtype=object)
assert s.iloc[0] == Timestamp("20130101")
assert s.iloc[1] == "NOV"
assert s.dtype == object
# the dtype was being reset on the slicing and re-inferred to datetime
        # even though the blocks are mixed
belly = "216 3T19".split()
wing1 = "2T15 4H19".split()
wing2 = "416 4T20".split()
mat = pd.to_datetime("2016-01-22 2019-09-07".split())
df = pd.DataFrame({"wing1": wing1, "wing2": wing2, "mat": mat}, index=belly)
result = df.loc["3T19"]
assert result.dtype == object
result = df.loc["216"]
assert result.dtype == object
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [
np.array([None, None, None, None, datetime.now(), None]),
np.array([None, None, datetime.now(), None]),
]:
result = Series(arr)
assert result.dtype == "M8[ns]"
def test_constructor_dtype_datetime64(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
assert isna(s).all()
# in theory this should be all nulls, but since
        # we are not specifying a dtype, it is ambiguous
s = Series(iNaT, index=range(5))
assert not isna(s).all()
s = Series(np.nan, dtype="M8[ns]", index=range(5))
assert isna(s).all()
s = Series([datetime(2001, 1, 2, 0, 0), iNaT], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
s = Series([datetime(2001, 1, 2, 0, 0), np.nan], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
# GH3416
dates = [
np.datetime64(datetime(2013, 1, 1)),
np.datetime64(datetime(2013, 1, 2)),
np.datetime64(datetime(2013, 1, 3)),
]
s = Series(dates)
assert s.dtype == "M8[ns]"
s.iloc[0] = np.nan
assert s.dtype == "M8[ns]"
# GH3414 related
expected = Series(
[datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3)],
dtype="datetime64[ns]",
)
result = Series(Series(dates).astype(np.int64) / 1000000, dtype="M8[ms]")
tm.assert_series_equal(result, expected)
result = Series(dates, dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
expected = Series(
[pd.NaT, datetime(2013, 1, 2), datetime(2013, 1, 3)], dtype="datetime64[ns]"
)
result = Series([np.nan] + dates[1:], dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
dts = Series(dates, dtype="datetime64[ns]")
# valid astype
dts.astype("int64")
# invalid casting
msg = r"cannot astype a datetimelike from \[datetime64\[ns\]\] to \[int32\]"
with pytest.raises(TypeError, match=msg):
dts.astype("int32")
# ints are ok
# we test with np.int64 to get similar results on
# windows / 32-bit platforms
result = Series(dts, dtype=np.int64)
expected = Series(dts.astype(np.int64))
tm.assert_series_equal(result, expected)
        # invalid dates can be held as object
result = Series([datetime(2, 1, 1)])
assert result[0] == datetime(2, 1, 1, 0, 0)
result = Series([datetime(3000, 1, 1)])
assert result[0] == datetime(3000, 1, 1, 0, 0)
# don't mix types
result = Series([Timestamp("20130101"), 1], index=["a", "b"])
assert result["a"] == Timestamp("20130101")
assert result["b"] == 1
# GH6529
# coerce datetime64 non-ns properly
dates = date_range("01-Jan-2015", "01-Dec-2015", freq="M")
values2 = dates.view(np.ndarray).astype("datetime64[ns]")
expected = Series(values2, index=dates)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, dates)
tm.assert_series_equal(result, expected)
# GH 13876
# coerce to non-ns to object properly
expected = Series(values2, index=dates, dtype=object)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, index=dates, dtype=object)
tm.assert_series_equal(result, expected)
# leave datetime.date alone
dates2 = np.array([d.date() for d in dates.to_pydatetime()], dtype=object)
series1 = Series(dates2, dates)
tm.assert_numpy_array_equal(series1.values, dates2)
assert series1.dtype == object
# these will correctly infer a datetime
s = Series([None, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([np.nan, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, None, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, np.nan, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range("20130101", periods=3)
assert Series(dr).iloc[0].tz is None
dr = date_range("20130101", periods=3, tz="UTC")
assert str(Series(dr).iloc[0].tz) == "UTC"
dr = date_range("20130101", periods=3, tz="US/Eastern")
assert str(Series(dr).iloc[0].tz) == "US/Eastern"
# non-convertible
s = Series([1479596223000, -1479590, pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a NaT it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a nan it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan])
assert s.dtype == "object"
assert s[2] is np.nan
assert "NaN" in str(s)
def test_constructor_with_datetime_tz(self):
# 8260
# support datetime64 with tz
dr = date_range("20130101", periods=3, tz="US/Eastern")
s = Series(dr)
assert s.dtype.name == "datetime64[ns, US/Eastern]"
assert s.dtype == "datetime64[ns, US/Eastern]"
assert is_datetime64tz_dtype(s.dtype)
assert "datetime64[ns, US/Eastern]" in str(s)
# export
result = s.values
assert isinstance(result, np.ndarray)
assert result.dtype == "datetime64[ns]"
exp = pd.DatetimeIndex(result)
exp = exp.tz_localize("UTC").tz_convert(tz=s.dt.tz)
tm.assert_index_equal(dr, exp)
# indexing
result = s.iloc[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[Series([True, True, False], index=s.index)]
tm.assert_series_equal(result, s[0:2])
result = s.iloc[0:1]
tm.assert_series_equal(result, Series(dr[0:1]))
# concat
result = pd.concat([s.iloc[0:1], s.iloc[1:]])
tm.assert_series_equal(result, s)
# short str
assert "datetime64[ns, US/Eastern]" in str(s)
# formatting with NaT
result = s.shift()
assert "datetime64[ns, US/Eastern]" in str(result)
assert "NaT" in str(result)
# long str
t = Series(date_range("20130101", periods=1000, tz="US/Eastern"))
assert "datetime64[ns, US/Eastern]" in str(t)
result = pd.DatetimeIndex(s, freq="infer")
tm.assert_index_equal(result, dr)
# inference
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
]
)
assert s.dtype == "datetime64[ns, US/Pacific]"
assert lib.infer_dtype(s, skipna=True) == "datetime64"
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Eastern"),
]
)
assert s.dtype == "object"
assert lib.infer_dtype(s, skipna=True) == "datetime"
# with all NaT
s = Series(pd.NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]")
expected = Series(pd.DatetimeIndex(["NaT", "NaT"], tz="US/Eastern"))
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize("arr_dtype", [np.int64, np.float64])
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_construction_to_datetimelike_unit(self, arr_dtype, dtype, unit):
# tests all units
# gh-19223
dtype = f"{dtype}[{unit}]"
arr = np.array([1, 2, 3], dtype=arr_dtype)
s = Series(arr)
result = s.astype(dtype)
expected = Series(arr.astype(dtype))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("arg", ["2013-01-01 00:00:00", pd.NaT, np.nan, None])
def test_constructor_with_naive_string_and_datetimetz_dtype(self, arg):
# GH 17415: With naive string
result = Series([arg], dtype="datetime64[ns, CET]")
expected = Series(pd.Timestamp(arg)).dt.tz_localize("CET")
tm.assert_series_equal(result, expected)
def test_constructor_datetime64_bigendian(self):
# GH#30976
ms = np.datetime64(1, "ms")
arr = np.array([np.datetime64(1, "ms")], dtype=">M8[ms]")
result = Series(arr)
expected = Series([Timestamp(ms)])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("interval_constructor", [IntervalIndex, IntervalArray])
def test_construction_interval(self, interval_constructor):
# construction from interval & array of intervals
intervals = interval_constructor.from_breaks(np.arange(3), closed="right")
result = Series(intervals)
assert result.dtype == "interval[int64]"
tm.assert_index_equal(Index(result.values), Index(intervals))
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_infer_interval(self, data_constructor):
# GH 23563: consistent closed results in interval dtype
data = [pd.Interval(0, 1), pd.Interval(0, 2), None]
result = pd.Series(data_constructor(data))
expected = pd.Series(IntervalArray(data))
assert result.dtype == "interval[float64]"
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_interval_mixed_closed(self, data_constructor):
# GH 23563: mixed closed results in object dtype (not interval dtype)
data = [pd.Interval(0, 1, closed="both"), pd.Interval(0, 2, closed="neither")]
result = Series(data_constructor(data))
assert result.dtype == object
assert result.tolist() == data
def test_construction_consistency(self):
# make sure that we are not re-localizing upon construction
# GH 14928
s = Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))
result = Series(s, dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.dt.tz_convert("UTC"), dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.values, dtype=s.dtype)
tm.assert_series_equal(result, s)
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_infer_period(self, data_constructor):
data = [pd.Period("2000", "D"), pd.Period("2001", "D"), None]
result = pd.Series(data_constructor(data))
expected = pd.Series(period_array(data))
tm.assert_series_equal(result, expected)
assert result.dtype == "Period[D]"
def test_constructor_period_incompatible_frequency(self):
data = [pd.Period("2000", "D"), pd.Period("2001", "A")]
result = pd.Series(data)
assert result.dtype == object
assert result.tolist() == data
def test_constructor_periodindex(self):
# GH7932
# converting a PeriodIndex when put in a Series
pi = period_range("20130101", periods=5, freq="D")
s = Series(pi)
assert s.dtype == "Period[D]"
expected = Series(pi.astype(object))
tm.assert_series_equal(s, expected)
def test_constructor_dict(self):
d = {"a": 0.0, "b": 1.0, "c": 2.0}
result = Series(d, index=["b", "c", "d", "a"])
expected = Series([1, 2, np.nan, 0], index=["b", "c", "d", "a"])
tm.assert_series_equal(result, expected)
pidx = tm.makePeriodIndex(100)
d = {pidx[0]: 0, pidx[1]: 1}
result = Series(d, index=pidx)
expected = Series(np.nan, pidx, dtype=np.float64)
expected.iloc[0] = 0
expected.iloc[1] = 1
tm.assert_series_equal(result, expected)
def test_constructor_dict_list_value_explicit_dtype(self):
# GH 18625
d = {"a": [[2], [3], [4]]}
result = Series(d, index=["a"], dtype="object")
expected = Series(d, index=["a"])
tm.assert_series_equal(result, expected)
def test_constructor_dict_order(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6, else
# order by value
d = {"b": 1, "a": 0, "c": 2}
result = Series(d)
expected = Series([1, 0, 2], index=list("bac"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data,dtype",
[
(Period("2020-01"), PeriodDtype("M")),
(Interval(left=0, right=5), IntervalDtype("int64")),
(
Timestamp("2011-01-01", tz="US/Eastern"),
DatetimeTZDtype(tz="US/Eastern"),
),
],
)
def test_constructor_dict_extension(self, data, dtype):
d = {"a": data}
result = Series(d, index=["a"])
expected = Series(data, index=["a"], dtype=dtype)
assert result.dtype == dtype
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float("nan")])
def test_constructor_dict_nan_key(self, value):
# GH 18480
d = {1: "a", value: "b", float("nan"): "c", 4: "d"}
result = Series(d).sort_values()
expected = Series(["a", "b", "c", "d"], index=[1, value, np.nan, 4])
tm.assert_series_equal(result, expected)
# MultiIndex:
d = {(1, 1): "a", (2, np.nan): "b", (3, value): "c"}
result = Series(d).sort_values()
expected = Series(
["a", "b", "c"], index=Index([(1, 1), (2, np.nan), (3, value)])
)
tm.assert_series_equal(result, expected)
def test_constructor_dict_datetime64_index(self):
# GH 9456
dates_as_str = ["1984-02-19", "1988-11-06", "1989-12-03", "1990-03-15"]
values = [42544017.198965244, 1234565, 40512335.181958228, -1]
def create_data(constructor):
return dict(zip((constructor(x) for x in dates_as_str), values))
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, "%Y-%m-%d"))
data_Timestamp = create_data(Timestamp)
expected = Series(values, (Timestamp(x) for x in dates_as_str))
result_datetime64 = Series(data_datetime64)
result_datetime = Series(data_datetime)
result_Timestamp = Series(data_Timestamp)
tm.assert_series_equal(result_datetime64, expected)
tm.assert_series_equal(result_datetime, expected)
tm.assert_series_equal(result_Timestamp, expected)
def test_constructor_dict_tuple_indexer(self):
# GH 12948
data = {(1, 1, None): -1.0}
result = Series(data)
expected = Series(
-1.0, index=MultiIndex(levels=[[1], [1], [np.nan]], codes=[[0], [0], [-1]])
)
tm.assert_series_equal(result, expected)
def test_constructor_mapping(self, non_dict_mapping_subclass):
# GH 29788
ndm = non_dict_mapping_subclass({3: "three"})
result = Series(ndm)
expected = Series(["three"], index=[3])
tm.assert_series_equal(result, expected)
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
assert list(s) == data
# Core functions
#
# this file contains reusable core functions like filtering on university
# and adding year and month name info
# these are functions which are generally used in every product
# roadmap: I want to push all functions from loose function
# to functions combined in classgroups
from nlp_functions import remove_punctuation
from nlp_functions import get_abstract_if_any
from nlp_functions import comma_space_fix
#from static import PATH_START, PATH_START_PERSONAL
#from static import PATH_START_SERVER , PATH_START_PERSONAL_SERVER
#from static import UNPAYWALL_EMAIL
#from static import PATH_STATIC_RESPONSES
#from static import PATH_STATIC_RESPONSES_ALTMETRIC
#from static import PATH_STATIC_RESPONSES_SCOPUS_ABS
#from static import MAX_NUM_WORKERS # not used everywhere so care
import pandas as pd
import calendar
import numpy as np
import requests
from pybliometrics.scopus import ScopusSearch
from pybliometrics.scopus import AbstractRetrieval
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import ProcessPoolExecutor
from functools import partial
### from functools import wraps
import time
from datetime import datetime # new
from datetime import timedelta
import re
import mysql.connector
from mysql.connector import Error
from altmetric import Altmetric
import pickle
import functools
from unittest.mock import Mock
from requests.models import Response
#import sys
from nlp_functions import faculty_finder
from pybliometrics.scopus import config
from pybliometrics.scopus.exception import Scopus429Error
import static
def overloaded_abstract_retrieval(identifier, view='FULL', refresh=True, id_type='eid'):
"""
The only thing this extra layer does is swap api-keys on error 429
Any multi-threading etc is done elsewhere (and may need its own testing as always)
"""
try:
res = AbstractRetrieval(identifier=identifier, view=view, refresh=refresh, id_type=id_type)
time.sleep(0.05)
except Scopus429Error:
# Use the last item of _keys, drop it and assign it as
# current API key
# update: keep swapping until it works
still_error = True
while still_error:
if len(static.SCOPUS_KEYS) > 0:
config["Authentication"]["APIKey"] = static.SCOPUS_KEYS.pop()
try:
time.sleep(1) # only when key has changed so 1s is fine
res = AbstractRetrieval(identifier=identifier, view=view, refresh=refresh, id_type=id_type)
still_error = False
except Scopus429Error: # NO! only for 429
print('error, key pop will happen at top of while top')
except:
print('non429 error')
still_error = False
res = None # ?
else:
still_error = False
res = None # ?
return res
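# Usage sketch (hypothetical identifier): a real call needs valid scopus API keys in the
# pybliometrics config and in static.SCOPUS_KEYS; the EID below is made up for illustration.
def _example_overloaded_abstract_retrieval():
    ab = overloaded_abstract_retrieval('2-s2.0-85000000000', view='FULL', id_type='eid')  # hypothetical EID
    return None if ab is None else ab.title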
def make_doi_list_from_csv(source_path, output_path, do_return=True):
# this function returns a list of DOIs from a source scopus frontend file
# in: source_path: a full path ending with .csv which contains a csv which has a column 'DOI'
# output_path: a full path ending with .csv which will be where the result is returned as csv
# out: a csv is generated and saved, and is returned as dataframe as well
#
df = pd.read_csv(source_path)
df[~df.DOI.isnull()].DOI.to_csv(output_path, header=False)
if do_return:
return df[~df.DOI.isnull()].DOI
else:
return None
def filter_on_uni(df_in, affiliation_column, cur_uni, affiliation_dict_basic):
"""" returns the dataframe filtered on the chosen university
in: df with column 'Scopus affiliation IDs' with list of affiliation ids in scopus style
cur_uni: a university name appearing in the dictionary affiliation_dict_basic
affiliation_dict_basic: a dictionary with keys unis and values affiliation ids
out: df filtered over rows
"""
# now the return has all info per university
# ! scival may change their delimiters here, so please check once in a while if it works as intended
# put an extra check here to be safe
return df_in[df_in.apply(lambda x: not (set(x[affiliation_column].split('| '))
.isdisjoint(set(affiliation_dict_basic[cur_uni]))), axis=1)]
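# Usage sketch (made-up column values and affiliation ids): filter_on_uni keeps only the rows
# whose pipe-separated affiliation ids overlap with the ids registered for the chosen university.
def _example_filter_on_uni():
    df_example = pd.DataFrame({'Scopus affiliation IDs': ['60008734| 60029124', '60012345']})
    affiliation_dict_basic = {'VU': ['60008734']}  # hypothetical mapping of university -> affiliation ids
    return filter_on_uni(df_example, 'Scopus affiliation IDs', 'VU', affiliation_dict_basic)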
def add_year_and_month_old(df_in, date_col):
"""" adds two columns to a dataframe: a year and a month
in: df_in: dataframe with special column (read below)
date_col: name of column which has data information, formatted as [start]YYYY[any 1 char]MM[anything][end]
column must not have Nones or nans for example
out: dataframe with extra columns for year and month
"""
df_in['year'] = df_in[date_col].apply(lambda x: x[0:4])
df_in['month'] = df_in[date_col].apply(lambda x: x[5:7])
df_in['month_since_2018'] = df_in.month.astype('int') + (df_in.year.astype('int')-2018)*12
df_in['month_name'] = df_in.month.astype('int').apply(lambda x: calendar.month_name[x])
return df_in
def add_year_and_month(df_in, date_col):
"""" adds two columns to a dataframe: a year and a month
in: df_in: dataframe with special column (read below)
date_col: name of column which has data information, formatted as [start]YYYY[any 1 char]MM[anything][end]
column must not have Nones or nans for example
out: dataframe with extra columns for year and month
"""
df_in['year'] = df_in[date_col].apply(lambda x: None if x is None else x[0:4])
df_in['month'] = df_in[date_col].apply(lambda x: None if x is None else x[5:7])
df_in['month_since_2018'] = df_in.apply(lambda x: None if x.month is None else int(x.month) + (int(x.year)-2018)*12, axis=1)
#df_in.month.astype('int') + (df_in.year.astype('int')-2018)*12
df_in['month_name'] = df_in.month.apply(lambda x: None if x is None else calendar.month_name[int(x)])
return df_in
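# Usage sketch (made-up column name and dates): the date column must be formatted as
# YYYY<1 char>MM..., e.g. a scopus coverDate like '2019-03-15'; None values are passed through.
def _example_add_year_and_month():
    df_example = pd.DataFrame({'coverDate': ['2019-03-15', None]})
    # first row gets year='2019', month='03', month_since_2018=15, month_name='March'
    return add_year_and_month(df_example, 'coverDate')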
def add_pure_year(df_in, date_col='Current publication status > Date'):
"""" adds one columns to a dataframe: a 'pure_year' based on pure info.
The input must fit the PURE form as 'Anything+YY'
We assume the year is after 2000! there are no checks for this
in: df_in: dataframe with special column (read below)
date_col: name of column which has data information, formatted as [start][anything]YYYY[end]
column must not have Nones or nans for example
out: dataframe with an extra 'pure_year' column
"""
if date_col is None:
df_in['pure_year'] = np.nan
else:
df_in['pure_year'] = df_in[date_col].apply(lambda x: float('20' + x[-2:]))
return df_in
def get_scopus_abstract_info(paper_eid):
"""
Retrieves the scopus abstract object for a single paper, together with diagnostics
:param paper_eid: the scopus EID of the paper (or None)
:return: dict with the abstract object, a no-author-group warning flag and error diagnostics
"""
# init
no_author_group = True # we want this too
error = False
ab = None
error_message = 'no error'
if paper_eid == None:
# paper_without eid
error_message = 'paper eid is none'
error = True
else:
try:
ab = overloaded_abstract_retrieval(identifier=paper_eid, view='FULL', refresh=True, id_type='eid')
except:
error = True
error_message = 'abstract api error'
if not(error):
# chk if API errors out on authorgroup call and log it
try:
ab.authorgroup
no_author_group = False
except:
no_author_group = True
##### this belongs in another function, with its own diagnostics + only run ff if this succeeds in topfn
####if not(no_author_group):
#### (bool_got_vu_author, a, b) = find_first_vu_author() # yet to make this
# also if no error, save the result for returns
return {'abstract_object': ab,
'no_author_group_warning': no_author_group,
'abstract_error': error,
'abstract_error_message': error_message}
def split_scopus_subquery_affils(subquery_affils, number_of_splits=4,
subquery_time = ''):
"""
! This function needs testing
This function takes in subquery_affils from make_affiliation_dicts_afids()
and translates it into a list of subqueries to avoid query length limits
in: subquery_affils from make_affiliation_dicts_afids()
number_of_splits: an integer between 2 and 10
subquery_time: an optional query to paste after every subquery
out: a list of subqueries to constrain scopussearch to a subset of affils
during stacking be sure to de-duplicate (recommended on EID)
"""
if (number_of_splits <= 10) & (number_of_splits > 1) & (number_of_splits % 1 == 0):
pass # valid number_of_splits
# you do not have to worry about number_of_splits < #afids because
# in python asking indices range outside indices range yields empty lists
# s.t. stacking them here does nothing
# needs checking though
else:
print('invalid number_of_splits, replacing with 4')
number_of_splits = 4
affil_count = len(subquery_affils.split('OR')) # number of affiliation ids
if affil_count <= 12: # to avoid weird situations
print('affil_count is small, returning single subquery')
my_query_set = [subquery_affils + subquery_time]  # wrap in a list so the return type matches the docstring
else:
# do it
my_query_set = []
step_size = int(np.floor(affil_count / number_of_splits)+1)
counter = 0
for cur_step in np.arange(0,number_of_splits):
if counter == 0:
cur_subquery = 'OR'.join(subquery_affils.split('OR')[0:step_size]) + ' ) '
elif counter == number_of_splits-1: # this is the last one
cur_subquery = ' ( ' + 'OR'.join(subquery_affils.split('OR')[step_size*cur_step:step_size*(cur_step+1)]) # + ' ) ) '
else:
cur_subquery = ' ( ' + 'OR'.join(subquery_affils.split('OR')[step_size*cur_step:step_size*(cur_step+1)]) + ' ) '
# stack results in a list, check if we need extra [] or not !
cur_subquery = cur_subquery + subquery_time
my_query_set.append(cur_subquery)
counter = counter + 1 # useless but OK
#print('-----')
#print(my_query_set)
#print('-----')
return my_query_set
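# Usage sketch (made-up affiliation ids): builds a subquery with 20 AF-ID clauses and splits it
# into 4 smaller subqueries, each with the optional time constraint appended.
def _example_split_scopus_subquery_affils():
    fake_ids = ['AF-ID(600%05d)' % i for i in range(20)]  # hypothetical affiliation ids
    subquery_affils = ' ( ' + ' OR '.join(fake_ids) + ' ) '
    return split_scopus_subquery_affils(subquery_affils,
                                        number_of_splits=4,
                                        subquery_time=' AND PUBYEAR > 2017 ')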
def get_first_chosen_affiliation_author(ab, chosen_affid):
"""
:param ab: a scopus AbstractRetrieval object (with an authorgroup)
:param chosen_affid: collection of affiliation ids that count as the chosen university
:return: dict with the first chosen-affiliation author, their organization and an error flag
"""
# init
first_vu_author = None
cur_org = None
has_error = False
first_vu_author_position = None # care reverse!!! you need a length here or extra unreverse
try:
# loop over the authors in the author group, back to front, s.t. the 'first' vu author overwrites everything
# this is not ideal,
# because we would also want to check the second vu-author if first one can't be traced back to a faculty
for cntr, author in enumerate(ab.authorgroup[::-1]): # ensures the final vu_author result is the leading vu author
if author.affiliation_id == None:
# then we can't match as vu author (yet), so we just skip as we do non-vu authors
pass  # non-chosen author, nothing to do
else:
if not (set(author.affiliation_id.split(', ')).isdisjoint(set(chosen_affid))):
cur_org = author.organization
if author.given_name == None:
author_given_name = '?'
else:
author_given_name = author.given_name
if author.surname == None:
author_surname = '?'
else:
author_surname = author.surname
first_vu_author = author_given_name + ' ' + author_surname
except:
has_error = True
return {'first_affil_author': first_vu_author,
'first_affil_author_org': cur_org,
'first_affil_author_has_error': has_error}
def get_count_of_chosen_affiliation_authors(ab, chosen_affid):
"""
:param ab: a scopus AbstractRetrieval object (with an authorgroup)
:param chosen_affid: collection of affiliation ids that count as the chosen university
:return: dict with the chosen-affiliation author count, a validity flag and an error flag
"""
# init
author_count_valid = False
author_count = 0
has_error = False
try:
# loop over the authors in the author group, back to front, s.t. the 'first' vu author overwrites everything
# this is not ideal,
# because we would also want to check the second vu-author if first one can't be traced back to a faculty
for cntr, author in enumerate(ab.authorgroup[::-1]): # ensures the final vu_author result is the leading vu author
if author.affiliation_id == None:
# then we can't match as vu author (yet), so we just skip as we do non-vu authors
pass  # non-chosen author, nothing to do
else:
if not (set(author.affiliation_id.split(', ')).isdisjoint(set(chosen_affid))):
# then we have a vu-author. Count and continue
# notice there is no safety net if an author appears multiple times for some reason
author_count = author_count + 1
author_count_valid = True
except:
has_error = True
# then the author_count_valid remains False
return {'affil_author_count': author_count,
'affil_author_count_valid': author_count_valid,
'affil_author_count_has_error': has_error}
# upw start
## 1st at bottom
## 2nd
# remember, these are not for general purpose, but specific decorators for api-harvester-type functions crystal_()
def check_id_validity(func):
# first layer is a pass right now and that is OK
def decorator_check_id_validity(func):
@functools.wraps(func)
def wrapper_check_id_validity(cur_id, my_requests):
#
# pre-process
valid_doi_probably = False
if cur_id is not None:
if pd.notnull(cur_id):
if cur_id != 'nan':
try:
cur_id = cur_id.lower()
valid_doi_probably = True
except:
try:
cur_id = str(cur_id).lower() # not sure but OK
valid_doi_probably = True # stay on the safe side and lose a tiny bit of performance
except:
# then give up
print('warning: failed to str(cur_doi).lower()')
if not valid_doi_probably:
# change cur_id s.t. the crystal function can skip the checks and directly insert the invalid-id result
cur_id = 'invalid' # the only change
# end of pre-process
#
# run the core function
r, relevant_keys, cur_id_lower, prepend, id_type = func(cur_id, my_requests)
#
# no post-process
#
return r, relevant_keys, cur_id_lower, prepend, id_type
return wrapper_check_id_validity
return decorator_check_id_validity(func)
#############################################add_deal_info
## 3rd
def check_errors_and_parse_outputs(func):
# first layer is a pass right now and that is OK
def decorator_check_errors_and_parse_outputs(func):
@functools.wraps(func)
def wrapper_check_errors_and_parse_outputs(cur_id, my_requests=requests): # !!!!
#
# pre-processing
#
#
r, relevant_keys, cur_id_lower, prepend, id_type = func(cur_id, my_requests)
#
# post-processing
#
# init a dict and fill with right keys and zeros
dict_init = {} # values are filled with None as starting point
for key in relevant_keys:
dict_init[prepend + key] = None # really init empty and stays empty if error
dict_init[prepend + id_type] = None # can only be data['doi'] (!) # legacy
dict_init[prepend + id_type + '_lowercase'] = cur_id_lower
dict_init['own_' + id_type + '_lowercase'] = cur_id_lower
dict_init['orig_' + id_type] = cur_id # legacy
#
dict_to_add = dict_init
# ! somehow need to recognize doi_lowercase too...
#
try:
if 'error' in r.json().keys():
# the following code has been checked to work as intended
has_error = True
error_message = r.json()['message']
dict_to_add[prepend + 'error'] = has_error
dict_to_add[prepend + 'error_message'] = error_message
#
else:
# case: no error
#print(r)
#print(r.json())
has_error = False
error_message = 'no error'
dict_to_add[prepend + 'error'] = has_error
dict_to_add[prepend + 'error_message'] = error_message
#
# get data
try:
data = r.json()['results'][0]
except:
data = r.json()
# overwrite dict_to_add with data
for key in relevant_keys:
try:
dict_to_add[prepend + key] = data[key] # even upw_doi goes automatically : )
except KeyError:
dict_to_add[prepend + key] = None # if the key is not there, the result is None
dict_to_add[prepend + id_type] = cur_id # fix
except:
has_error = True
error_message = "error in r.json() or deeper"
dict_to_add[prepend + 'error'] = has_error
dict_to_add[prepend + 'error_message'] = error_message
#
return pd.Series(dict_to_add) # r, relevant_keys # different output # output has been changed
return wrapper_check_errors_and_parse_outputs
return decorator_check_errors_and_parse_outputs(func)
#############################################
## 4th
def faster(func):
# makes stuff for lists of ids and enables multi-threading and persistent sessions : ) amazing
# first layer is a pass right now and that is OK
def decorator_iterate_list(func):
@functools.wraps(func)
def wrapper_iterate_list(doi_list, silent=True, multi_thread=True, my_requests=None, allow_session_creation=True):
""" returns unpaywall info for a given doi list, includes result success/failure and diagnostics
:param doi_list: doi list as a list of strings, re-computes if doi are duplicate
does not de-dupe or dropna for generality, but you can do doi_list = df_in.doi.dropna().unique()
if you so desire
silent: whether you want silent behaviour or not, defaults to printing nothing
multi_thread: whether you want to multi_thread unpaywall (code has been tested), on by default
you do not have to worry about worker counts, a default law is integrated for that
my_requests: by default None, but can be exchanged for a requests-session on demand
with default, called functions will themselves enter 'requests' to reduce communication costs
allow_session_creation: if my_requests=None, this allows the fn to make its own session
:return: subset of unpaywall columns info + diagnostics as a pandas DataFrame, vertically doi's in lowercase-form.
duplicate doi's in the list are ignored, and the output has 1 row per unique DOI
Notice: this should be the only function to call fn_get_upw_info for more than 1 DOI (for developers)
, s.t. the multi-threading code can be here without duplicate code
"""
# all processing
# empty dataframe
df_unpaywall = pd.DataFrame()
if multi_thread: # valid across session used or not
max_num_workers = static.MAX_NUM_WORKERS
num_workers = np.max(
[1, int(np.floor(np.min([max_num_workers, np.floor(float(len(doi_list)) / 4.0)])))])
if (my_requests is None) & (allow_session_creation is True) & (len(doi_list) >= 20):
# then optionally make your own session # + avoid overhead for small jobs
# perform with a session
with requests.Session() as sessionA:
if multi_thread:
fn_get_upw_info_partial = partial(func,
my_requests=sessionA) # avoid communication costs
multi_result = multithreading(fn_get_upw_info_partial,
doi_list,
num_workers)
for cur_series in multi_result:
df_unpaywall = df_unpaywall.append(cur_series, ignore_index=True)
else: # single thread
for (counter, cur_doi) in enumerate(doi_list):
if silent == False:
print(
'unpaywall busy with number ' + str(counter + 1) + ' out of ' + str(len(doi_list)))
cur_res = func(cur_doi, my_requests=sessionA)
df_unpaywall = df_unpaywall.append(cur_res, ignore_index=True)
else:
# perform without a session
if multi_thread:
fn_get_upw_info_partial = partial(func,
my_requests=my_requests) # avoid communication costs
multi_result = multithreading(fn_get_upw_info_partial,
doi_list,
num_workers)
for cur_series in multi_result:
df_unpaywall = df_unpaywall.append(cur_series, ignore_index=True)
else: # single thread
for (counter, cur_doi) in enumerate(doi_list):
if silent == False:
print('unpaywall busy with number ' + str(counter + 1) + ' out of ' + str(len(doi_list)))
cur_res = func(cur_doi, my_requests=my_requests)
df_unpaywall = df_unpaywall.append(cur_res, ignore_index=True)
# either way, return the result
return df_unpaywall
return wrapper_iterate_list
return decorator_iterate_list(func)
## 5th
def appender(func, cur_id_name='doi'):
"""
Returns the given dataframe with extra columns with unpaywall info and result success/failure and diagnostics
Merging is done with lower-cased DOI's to avoid duplicate issues. The DOI name is case-insensitive
:param df_in: df_in as a pandas dataframe, must have a column named 'doi' with doi's as string
:return: pandas dataframe with extra columns with subset of unpaywall info and result success/failure and diagnostic
all new doi info is lowercase
"""
def decorator_appender(func):
@functools.wraps(func)
def wrapper_appender(df_in, silent=True, cut_dupes=False, avoid_double_work=True,
multi_thread=True, my_requests=None, allow_session_creation=True):
if cur_id_name == 'eid':
print('warning: scopus abstract accelerator has not been validated yet !')
# make doi_list
if avoid_double_work:
doi_list = df_in.drop_duplicates(cur_id_name)[cur_id_name].to_list() # notice no dropna to keep functionality the same
# also no lower-dropna for simplicity
else:
doi_list = df_in[cur_id_name].to_list()
if cut_dupes:
print('deprecated code running')
# I think it should yield exactly the same result, but needs testing that is all
# overwrites
doi_list = df_in[cur_id_name].dropna().unique()
# get unpaywall info
df_unpaywall = func(doi_list, silent, multi_thread, my_requests, allow_session_creation)
# merge to add columns
# prepare doi_lower
df_in.loc[:, 'id_lowercase'] = df_in[cur_id_name].str.lower()
df_merged = df_in.merge(df_unpaywall.drop_duplicates('own_' + cur_id_name + '_lowercase'),
left_on='id_lowercase', right_on='own_' + cur_id_name + '_lowercase', how='left')
# drop duplicates in df_unpaywall to avoid having duplicates in the result due repeating DOI's or Nones
# assumption: all none returns are the exact same
if not silent:
print('done with add_unpaywall_columns')
return df_merged
return wrapper_appender
return decorator_appender(func)
@appender
@faster
@check_errors_and_parse_outputs
@check_id_validity
def crystal_unpaywall(cur_id, my_requests):
# always use cur_id, my_requests for in and r, relevant_keys for out
# id is either cur_doi or 'invalid' if invalid
prepend = 'upw_'
id_type = 'doi'
cur_id_lower = cur_id.lower()
if my_requests is None:
my_requests = requests # avoids passing requests around everytime
relevant_keys = ['free_fulltext_url',
'is_boai_license', 'is_free_to_read', 'is_subscription_journal',
'license', 'oa_color'] # , 'doi', 'doi_lowercase' : you get these from callers
if cur_id == 'invalid':
# get the invalid-doi-response directly from disk to save time, you can run update_api_statics to update it
in_file = open(static.PATH_STATIC_RESPONSES, 'rb')
r = pickle.load(in_file)
in_file.close()
else:
r = my_requests.get("https://api.unpaywall.org/" + str(cur_id) + "?email=" + static.UNPAYWALL_EMAIL) # force string
# keep multi_thread to 16 to avoid issues with local computer and in rare occasions the api returns
# the try/except below was meant to catch those rare failures, but it made the code ~10x slower, so it is kept commented out
"""
try:
r = my_requests.get("https://api.unpaywall.org/" + str(cur_id) + "?email=" + UNPAYWALL_EMAIL) # force string
except:
print('request failed hard for unpaywall, filling blank')
in_file = open(PATH_STATIC_RESPONSES, 'rb')
r = pickle.load(in_file)
in_file.close()
"""
return r, relevant_keys, cur_id_lower, prepend, id_type
add_unpaywall_columns = crystal_unpaywall # the final function goes through the new pipe
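# Usage sketch (made-up DOI): add_unpaywall_columns expects a dataframe with a 'doi' column and
# appends upw_* columns; a real run needs network access, static.UNPAYWALL_EMAIL and the
# static.PATH_STATIC_RESPONSES pickle for invalid ids.
def _example_add_unpaywall_columns():
    df_example = pd.DataFrame({'doi': ['10.1000/xyz123', None]})  # hypothetical DOI
    return add_unpaywall_columns(df_example, silent=True, multi_thread=False)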
# recreate the legacy unpaywall functions for now
#
def legacy_crystal_unpaywall(cur_id, my_requests):
# always use cur_id, my_requests for in and r, relevant_keys for out
# id is either cur_doi or 'invalid' if invalid
prepend = 'upw_'
id_type = 'doi'
cur_id_lower = cur_id.lower()
if my_requests is None:
my_requests = requests # avoids passing requests around everytime
relevant_keys = ['free_fulltext_url',
'is_boai_license', 'is_free_to_read', 'is_subscription_journal',
'license', 'oa_color'] # , 'doi', 'doi_lowercase' : you get these from callers
if cur_id == 'invalid':
# get the invalid-doi-response directly from disk to save time, you can run update_api_statics to update it
in_file = open(static.PATH_STATIC_RESPONSES, 'rb')
r = pickle.load(in_file)
in_file.close()
else:
r = my_requests.get("https://api.unpaywall.org/" + str(cur_id) + "?email=" + static.UNPAYWALL_EMAIL) # force string
# keep multi_thread to 16 to avoid issues with local computer and in rare occasions the api returns
# the try/except below was meant to catch those rare failures, but it made the code ~10x slower, so it is kept commented out
"""
try:
r = my_requests.get("https://api.unpaywall.org/" + str(cur_id) + "?email=" + UNPAYWALL_EMAIL) # force string
except:
print('request failed hard for unpaywall, filling blank')
in_file = open(PATH_STATIC_RESPONSES, 'rb')
r = pickle.load(in_file)
in_file.close()
"""
return r, relevant_keys, cur_id_lower, prepend, id_type
fn_get_upw_info = check_errors_and_parse_outputs(check_id_validity(legacy_crystal_unpaywall)) # avoid, legacy
fn_get_all_upw_info = faster(fn_get_upw_info) # these are only for legacy and should be avoided
###add_unpaywall_columns = appender(fn_get_all_upw_info) # the final function goes through the new pipe
#
# I do not like this kind of handling as it breaks some functools functionality
# I will refactor legacy code later some time
@appender
@faster
@check_errors_and_parse_outputs
@check_id_validity
def crystal_altmetric(cur_id, my_requests):
"""
This is a bit annoying because this returns either None or a dictionary, and not a request object...
So I will just send requests without the package
"""
prepend = 'altmetric_'
id_type = 'doi'
cur_id_lower = cur_id.lower()
if my_requests is None:
my_requests = requests # avoids passing requests around everytime
# some settings
api_ver = 'v1' # may change in future, so here it is. For api-key re-edit with altmetric package
api_url = "http://api.altmetric.com/%s/" % api_ver
url = api_url + 'doi' + "/" + cur_id
relevant_keys = ['title', 'cited_by_policies_count', 'score'] # OK for now, care some may miss, patch for that !
# , 'doi', 'doi_lowercase' : you get these from callers
if cur_id == 'invalid':
# get the invalid-doi-response directly from disk to save time, you can run update_api_statics to update it
in_file = open(static.PATH_STATIC_RESPONSES_ALTMETRIC, 'rb')
r = pickle.load(in_file)
in_file.close()
else:
# r = my_requests.get("https://api.unpaywall.org/" + str(cur_id) + "?email=" + UNPAYWALL_EMAIL) # force string
r = my_requests.get(url, params={}, headers={})
return r, relevant_keys, cur_id_lower, prepend, id_type
add_altmetric_columns = crystal_altmetric
###@appender(cur_id_name='eid')
@faster
@check_errors_and_parse_outputs
@check_id_validity
def crystal_scopus_abstract(cur_id, my_requests):
"""
This is a bit annoying because AbstractRetrieval returns a scopus object and not a request object...
So the result is wrapped in a mocked requests Response to fit the decorator pipeline
"""
prepend = 'scopus_abstract_'
id_type = 'eid'
cur_id_lower = cur_id.lower() # irrelevant but OK
### not used
###if my_requests is None:
#### my_requests = requests # avoids passing requests around everytime
# some settings
# None
# the issue is that ab is not a requests-type
# but we need requests-type
# also, I do not want to use homebrew request code for it because scopus apis are an outsourced mess
# instead we will use a mock
relevant_keys = ['obje', 'retries'] # all in one, care integration
# , 'doi', 'doi_lowercase' : you get these from callers
if cur_id == 'invalid':
# get the invalid-doi-response directly from disk to save time, you can run update_api_statics to update it
in_file = open(static.PATH_STATIC_RESPONSES_SCOPUS_ABS, 'rb')
r = pickle.load(in_file)
in_file.close()
else:
# r = my_requests.get("https://api.unpaywall.org/" + str(cur_id) + "?email=" + UNPAYWALL_EMAIL) # force string
# r = my_requests.get(url, params={}, headers={})
#
# scopus api is not friendly so I need a try/except here
#
# wait-and-retry
one_shot = False
if one_shot:
retries = 0
try:
ab = overloaded_abstract_retrieval(identifier=cur_id, view='FULL', refresh=True, id_type='eid')
r = Mock(spec=Response)
r.json.return_value = {'obje': pickle.dumps(ab), 'message': 'hi', 'retries':retries}
r.status_code = 999
# requirements:
# r.json().keys
# r.json()['message']
# r.json()['results'] # if not present, will not unpack and use json().keys()
except:
# if so, fall back to invalid routine
#
# get the invalid-doi-response directly from disk to save time, you can run update_api_statics to update it
in_file = open(static.PATH_STATIC_RESPONSES_SCOPUS_ABS, 'rb')
r = pickle.load(in_file)
in_file.close()
else:
# print(one_shot)
retry = True
retries = -1
while retry:
#retry = False # removes retries
retries = retries + 1
try:
ab = overloaded_abstract_retrieval(identifier=cur_id, view='FULL', refresh=True, id_type='eid')
qq = ab.title
qqx = qq + 'x'
#
# if the api does not error and we have a title, then the call is correct and we got info back successfully
#
# then do rest of actions
r = Mock(spec=Response)
r.json.return_value = {'obje': pickle.dumps(ab), 'message': 'hi', 'retries': retries}
r.status_code = 999
retry = False
except:
# we had an api error or a return with empty information
# either way, just fillna and continue
if retries < 30:
retry = True
time.sleep(1)
if retries > 2:
print('retrying ' + str(retries))
### some returns are caught here as well sadly...
else:
retry = False
# prepare for exit
in_file = open(static.PATH_STATIC_RESPONSES_SCOPUS_ABS, 'rb')
r = pickle.load(in_file)
in_file.close()
# this code still needs validation: the scopus api has quirks under heavy load that can corrupt the returned data
return r, relevant_keys, cur_id_lower, prepend, id_type
crystal_scopus_abstract = appender(func=crystal_scopus_abstract, cur_id_name='eid')
###@appender(cur_id_name='eid')
@faster
@check_errors_and_parse_outputs
@check_id_validity
def crystal_scopus_abstract2(cur_id, my_requests):
"""
This is a bit annoying because AbstractRetrieval returns a scopus object and not a request object...
So the result is wrapped in a mocked requests Response to fit the decorator pipeline
This variant (2) only returns the abstract text
"""
prepend = 'scopus_abstract_'
id_type = 'eid'
cur_id_lower = cur_id.lower() # irrelevant but OK
### not used
###if my_requests is None:
#### my_requests = requests # avoids passing requests around everytime
# some settings
# None
# the issue is that ab is not a requests-type
# but we need requests-type
# also, I do not want to use homebrew request code for it because scopus apis are an outsourced mess
# instead we will use a mock
relevant_keys = ['text', 'retries'] # all in one, care integration
# , 'doi', 'doi_lowercase' : you get these from callers
if cur_id == 'invalid':
# get the invalid-doi-response directly from disk to save time, you can run update_api_statics to update it
in_file = open(static.PATH_STATIC_RESPONSES_SCOPUS_ABS, 'rb')
r = pickle.load(in_file)
in_file.close()
else:
# r = my_requests.get("https://api.unpaywall.org/" + str(cur_id) + "?email=" + UNPAYWALL_EMAIL) # force string
# r = my_requests.get(url, params={}, headers={})
#
# scopus api is not friendly so I need a try/except here
#
# wait-and-retry
one_shot = False
if one_shot:
retries = 0
try:
ab = overloaded_abstract_retrieval(identifier=cur_id, view='FULL', refresh=True, id_type='eid')
r = Mock(spec=Response)
try:
ab_abstract = ab.abstract
except:
# error in getting the abstract out (outside the API call)
ab_abstract = np.nan
r.json.return_value = {'text': ab_abstract, 'message': 'hi', 'retries':retries}
r.status_code = 999
# requirements:
# r.json().keys
# r.json()['message']
# r.json()['results'] # if not present, will not unpack and use json().keys()
except:
# if so, fall back to invalid routine
#
# get the invalid-doi-response directly from disk to save time, you can run update_api_statics to update it
in_file = open(static.PATH_STATIC_RESPONSES_SCOPUS_ABS, 'rb')
r = pickle.load(in_file)
in_file.close()
else:
# print(one_shot)
retry = True
retries = -1
while retry:
#retry = False # removes retries
retries = retries + 1
try:
ab = overloaded_abstract_retrieval(identifier=cur_id, view='FULL', refresh=True, id_type='eid')
qq = ab.title
qqx = qq + 'x'
#
# if the api does not error and we have a title, then the call is correct and we got info back successfully
#
# then do rest of actions
r = Mock(spec=Response)
try:
ab_abstract = ab.abstract
except:
# error in getting the abstract out (outside the API call)
ab_abstract = np.nan
r.json.return_value = {'text': ab_abstract, 'message': 'hi', 'retries': retries}
r.status_code = 999
retry = False
except:
# we had an api error or a return with empty information
# either way, just fillna and continue
if retries < 30:
retry = True
time.sleep(1)
if retries > 2:
print('retrying ' + str(retries))
else:
retry = False
# prepare for exit
in_file = open(static.PATH_STATIC_RESPONSES_SCOPUS_ABS, 'rb')
r = pickle.load(in_file)
in_file.close()
# this code still needs validation: the scopus api has quirks under heavy load that can corrupt the returned data
return r, relevant_keys, cur_id_lower, prepend, id_type
crystal_scopus_abstract2 = appender(func=crystal_scopus_abstract2, cur_id_name='eid')
class api_extractor:
"""
DEPRECATED: please stop using this... I will make a new one later, for now updates and patches are stopped
This class is an api extractor: it extracts info across api's.
Has multi-threading :)
Is not an eager operator so ScopusSearch query is only executed when needed and not on initialization
source_list: which sources to use, like unpaywall
query: query to put in scopussearch
Under construction: only does unpaywall data right now to test multi-threading
Also, I need an extra step for scopussearch datacleaning split-off
Double-check that you have the right functions, e.g. the unpaywall drop_dupe step in the merge
Plan for now: add a ScopusSearch bypass and use it to test the speed gain on larger volumes
"""
def __init__(self,
query='TITLE(DATA SCIENCE) AND PUBDATETXT(February 2018)',
source_list=['all'],
max_num_workers=32):
self.source_list = source_list
self.query = query
self.scopus_search_info = None
self.scopus_search_info_ready = False
self.max_num_workers = max_num_workers
def get_scopus_search_info(self, cur_query):
"""
Gets the scopus search info and return it as dataframe of obj.results
Not yet handling errors of API...
"""
use_sleep_and_retry = True
if use_sleep_and_retry:
no_res = True
cntr=0
while no_res:
try:
res = pd.DataFrame(ScopusSearch(cur_query, refresh=True).results)
no_res = False
except:
cntr = cntr + 1
print(str(cntr) + ' ' + cur_query)
time.sleep(1)
else:
res = pd.DataFrame(ScopusSearch(cur_query, refresh=True).results)
return res
def feed_scopus_search_info(self, df_in, do_return=False, do_overwrite=False):
"""
This methods allows you to directly feed in a dataframe with scopussearch info,
of the form pandas.DataFrame(ScopusSearch().results)
"""
if (self.scopus_search_info_ready is False) or (do_overwrite is True):
self.scopus_search_info = df_in
self.scopus_search_info_ready = True
if do_return:
return self.scopus_search_info
else:
print('scopus search info not updated because info was already entered and do_overwrite was provided False')
def extract(self, use_multi_thread=True, skip_scopus_search=False, skip_unpaywall=False,
use_parallel_apis=False):
"""
extract all chosen info
"""
# the functions like get_scopus_search_info and fn_get_upw_info,
# should always be single-thread in themselves,
# and we make them multi-thread outside of their own functions
#
# !!! we can further speed up by requesting from api providers in parallel
# that way we can further avoid api rate limits
# for this we need advanced functionality
# after writing the code, turn the default use_parallel_apis to True
#
#
# always redo scopus-search unless explicitly asked skip_scopus_search
# init
if not(self.scopus_search_info is None):
df_temp = self.scopus_search_info.copy()
doi_list = df_temp[~df_temp.DOI.isnull()].DOI.drop_duplicates().to_list()
#
# doi list issue happens here and in getupwdata line 161: search to_list, and doi/DOI difference
# here: add fn (read jupyter)
df_upw = pd.DataFrame()
df_ss = pd.DataFrame()
if use_multi_thread:
#ss
if skip_scopus_search is False:
# !!! please thoroughly test this
print('untested functionality called: multithread scopus search: careful!') # see fast_scopus_search_test.py for dev!
my_query = self.query # use own query
mini_queries = split_query_to_months(my_query)
count_queries = len(mini_queries)
# num_workers law: PLEASE TEST IT for optimum point or not
num_workers = np.max([1, int(np.floor(np.min([self.max_num_workers, np.floor(float(count_queries)/4.0)])))])
#
multi_result = multithreading(self.get_scopus_search_info, mini_queries, num_workers)
for cur_series in multi_result:
# we are appending dataframes, not series
df_ss = df_ss.append(cur_series, ignore_index=True)
###doi_list = df_ss.doi # check this !
## This is the point where parallel-api functionality should start(!)
if use_parallel_apis:
pass  # parallel-api extraction is not implemented yet; see the roadmap in the comments below
# please first make the apis work in single_thread
# then in regular multi-thread
# and finally in parallel_apis_multi_thread.
# 1. set sources using the skip_ arguments
# 2. choose max_workers using not on #dois but #dois*doi-apis + #eids*eid-apis
# 3. make a list with 1 element per job, including all details like
# [ [doi_1,'unpaywall'], [doi_1,'unpaywall'], [eid_1,'scival']. ...]
# 4. push that into multi-threading, but use a different function
# use the function I started below named get_parallel_api_info()
# this function picks up the source in element2 in a list element and
# directs to the right api function
# this makes the code superclean to support all forms of threading
# while keeping full functionality
# also, it needs to add a column with 'source' for differentiation
# 5. follow the unpaywall code below and append and done
# 6. for proper testing, split by source column back into df_upw/etc/etc
# and give the serial_api routine also a combined df for comparability
# 7. do extensive testing
# 8. do timing: how large is the speed gain quantitatively?
# this is probably best to test on high-end of very-high-end machines
# because we need to hit the api rate limits with serial_apis to see an effect
else:
#upw
if skip_unpaywall is False:
num_workers = np.max([1, int(np.floor(np.min([self.max_num_workers, np.floor(float(len(doi_list))/4.0)])))])
multi_result = multithreading(fn_get_upw_info, doi_list, num_workers)
for cur_series in multi_result:
df_upw = df_upw.append(cur_series, ignore_index=True)
#if ~skip_scival:
# 1
else:
# single-thread
# ss
if skip_scopus_search is False:
# query fed separately btw
# 2 lines for clarity for now
scopus_search_results = self.get_scopus_search_info(self.query) # care
self.feed_scopus_search_info(scopus_search_results) # store in properties
df_ss = scopus_search_results # combining results is trivial for single-thread
###doi_list = df_ss.doi # check this !
# upw
if skip_unpaywall is False:
for cur_doi in doi_list:
series_to_add = fn_get_upw_info(cur_doi)
df_upw = df_upw.append(series_to_add, ignore_index=True)
# scopussearch: the save and .self are issue for multithread, incl
# overwrite of results properties
# you need to fix that
# also, the num_workers law: you need to decide that differently too
# you probably have 1-120 months, and 1 worker does 1 month at a time
# so you need like #months/3 or a comparable version of the law below
return df_upw, df_ss # ! merge or combine or store properly later
def get_parallel_api_info(self, cur_id, source):
# please check if the multi-threader unpacks list elements, if so use 1 argument
# and unpack within the function to id/source
# to distinguish later, add the source as a column (is per DOI/EID)
source_dict = {'api_source' : source }
if source == 'unpaywall':
series_to_add = fn_get_upw_info(cur_id) # cur_id:cur_doi here
if source == 'scival':
pass  # scival support is not implemented yet
series_to_add = series_to_add.append(pd.Series(source_dict))
return series_to_add
def change_max_num_workers(self, max_num_workers):
self.max_num_workers = max_num_workers
def split_query_to_months(query, silent=False):
"""
warning: did not pass testing, some data records may not be retrieved
This function splits a ScopusSearch query into multiple ones
It takes a query with a year indication, and splits it into 1 query per month
This in turn allows the multi-threading functions of this import framework
to reduce the computation time
Otherwise, you will wait a very long serverside wait time and then get a
lot of data at once with massive download times and possibly more failures
input: a valid ScopusSearch query string which ends with exactly:
PUBYEAR > XXXX AND PUBYEAR < YYYY
with no other appearance of PUBYEAR text
and there is at least one valid year
Also, there should not be any month specification, only complete years
And incomplete years are not allowed (current year at time of call)
Also, the pubyear clauses should be extra clauses with ands at top level
please respect this format as the regex functionality is not perfect
advanced: the month january is also split up, because it generally is twice as large
as the other months
"""
# this code can be improved with regex
# extract years
final_year = str(int(query.split('PUBYEAR < ')[1]) - 1)
first_year = str(int(query.split('PUBYEAR > ')[1][0:4]) + 1)
rest_of_query = query.split('PUBYEAR > ')[0] # probably ending with ' AND'
# make year list
years = np.arange(int(first_year), int(final_year)+1)
# define month abbreviations (can split out later)
#calendar.month_name[ value between 1 and 12]
# example: PUBDATETXT(February 2018)
query_parts = []
for year in years:
for month_number in np.arange(1,12+1):
if month_number == 1:
# january is split again in two by open access y/n
query_parts.append(rest_of_query
+ 'PUBDATETXT('
+ calendar.month_name[month_number]
+ ' '
+ str(year)
+ ')'
+ ' AND OPENACCESS(1)')
query_parts.append(rest_of_query
+ 'PUBDATETXT('
+ calendar.month_name[month_number]
+ ' '
+ str(year)
+ ')'
+ ' AND OPENACCESS(0)')
else:
query_parts.append(rest_of_query
+ 'PUBDATETXT('
+ calendar.month_name[month_number]
+ ' '
+ str(year)
+ ')')
# careful with using ints and strs together
if not silent:  # 'not' rather than bitwise '~', which is always truthy on a bool
print('query has been split up in ' + str(len(query_parts)) + ' queries for multi-threading')
return query_parts
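# Usage sketch (made-up query): the query follows the documented format exactly, so it is split
# into one PUBDATETXT query per month (January split again by open access) for 2018 and 2019.
def _example_split_query_to_months():
    q = 'TITLE(data science) AND PUBYEAR > 2017 AND PUBYEAR < 2020'
    return split_query_to_months(q, silent=True)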
def multithreading(func, args,
workers):
with ThreadPoolExecutor(workers) as ex:
res = ex.map(func, args)
return list(res)
def multithreading_starmap(func, args,
                           workers):
    # ThreadPoolExecutor has no starmap(); unpack each argument tuple manually
    with ThreadPoolExecutor(workers) as ex:
        res = ex.map(lambda p: func(*p), args)
    return list(res)
def multiprocessing(func, args,
workers):
with ProcessPoolExecutor(workers) as ex:
res = ex.map(func, args)
return list(res)
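# Usage sketch: multithreading() maps a one-argument function over a list of arguments with a
# thread pool; useful for I/O-bound work such as the API calls above. The worker function is made up.
def _example_multithreading():
    def _square(x):
        time.sleep(0.01)  # pretend to be I/O bound
        return x * x
    return multithreading(_square, [1, 2, 3, 4], 2)  # -> [1, 4, 9, 16]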
def my_timestamp():
# return a string with current time info
now = datetime.now()
return '_'.join(['', str(now.year), str(now.month), str(now.day), str(now.hour), str(now.minute), str(now.second)])
def add_deal_info(path_deals, path_isn, df_b):
"""
This function adds columns with deal information to your dataframe
:param path_deals: path to csv with deals, must have columns: 'ISN':'deal_ISN',
'Titel':'deal_journal_title',
'Deal naam':'deal_name',
'Deal korting':'deal_discount',
'Deal type':'deal_owner',
'Deal bijgewerkt':'deal_modified',
'ISSN':'deal_ISSN'
:param path_isn: path to csv with table from isn to issn numbers, must have columns ISN and ISSN as translation,
:param df_b: dataframe with at least the columns: issn, eIssn, upw_oa_color
The parameters should not have any columns matching the names of columns the function is trying to add
:return: your input dataframe df_b with extra columns
"""
# load in data from apc deals and isn-issn translation table
# apc deals
df_d_base = pd.read_csv(path_deals)
# isn issn translation table
df_t = pd.read_csv(path_isn)
# cleaning
df_b.at[df_b[df_b.issn.apply(lambda x: True if isinstance(x, list) else False)].index.tolist(), 'issn'] = None
# now translate isn<>issn
df_d = df_d_base.merge(df_t, left_on='ISN', right_on='ISN', how='left')
# rename columns for readability
df_d = df_d.rename(columns={'ISN': 'deal_ISN',
'Titel': 'deal_journal_title',
'Deal naam': 'deal_name',
'Deal korting': 'deal_discount',
'Deal type': 'deal_owner',
'Deal bijgewerkt': 'deal_modified',
'ISSN': 'deal_ISSN'})
# remove punctuation in ISSN
df_d['deal_ISSN_short'] = df_d.deal_ISSN.apply(lambda x: np.nan if x is np.nan else x[0:4] + x[5::])
# drop deals without ISSN to avoid bad merges (can upgrade later to match on j-names)
df_d = df_d[~df_d.deal_ISSN.isnull()]
# merge on both issn and eIssn (extensive exploration show this is safe, see file apcdeals1.ipnyb)
#
# complex merge-strategy here with dropping columns
df_m = df_b.merge(df_d, left_on='issn', right_on='deal_ISSN_short', how='left')
df_m = df_m.reset_index().rename(columns={'index': 'my_index'})
cols_d = list(df_d)
df_m_part_1 = df_m[~df_m.deal_ISSN.isnull()]
df_m_part_2 = df_m[df_m.deal_ISSN.isnull()].drop(cols_d, axis=1).merge(df_d, left_on='eIssn',
right_on='deal_ISSN_short', how='left')
df_m = df_m_part_1.append(df_m_part_2)
df_m = df_m.sort_values('my_index').reset_index().drop(['index', 'my_index'], axis=1)
#
# give nans some intuition
df_m['deal_discount_verbose'] = df_m['deal_discount'].apply(lambda x: 'no known deal' if x is np.nan else x)
# df_m['upw_oa_color_verbose'] = df_m['upw_oa_color'].apply(lambda x: 'unknown' if x is np.nan else x) # wrongplace
df_m['deal_owner_verbose'] = df_m['deal_owner'].apply(lambda x: 'no known deal' if x is np.nan else x)
return df_m
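# Usage sketch (hypothetical file paths and ISSN): df_b only needs issn, eIssn and upw_oa_color
# columns; the deal and ISN-ISSN csv's must have the columns documented in add_deal_info.
def _example_add_deal_info():
    df_pubs = pd.DataFrame({'issn': ['12345678'], 'eIssn': [np.nan], 'upw_oa_color': ['gold']})
    return add_deal_info(path_deals='deals.csv', path_isn='isn_issn.csv', df_b=df_pubs)  # hypothetical paths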
def pre_process_pure_data(df,
org_info,
path_to_save=None,
test_mode_upw=False,
do_save=False,
silent=False):
"""
Sorry for the sparse documentation; timelines are tight.
This goes in:
df = dataframe from pure, conditions are tight (will follow)
org_info is an excel with 2 columns, 1 'Naam' and 1 'Faculteit' which map groups to faculties
path_to_save: path to where to save as string
test_mode_upw: whether you want to do unpaywall load for first few records or all of them
do_save: whether you want to save or not
This comes out:
the cleaned, preprocessed dataframe with unpaywall info added
"""
# clean column numbering first
df.columns = [re.sub(r'^\d+.', "", x) for x in
df.columns] # remove at start of string where 1 or more digits
df.columns = [re.sub(r'^\d+', "", x) for x in df.columns]
df.columns = [re.sub('^ ', "", x) for x in df.columns]
df.columns = [re.sub(r'^.\d+', "", x) for x in df.columns]
df.columns = [re.sub('^ ', "", x) for x in df.columns]
# hidden settings
#
df = df[[
'Title of the contribution in original language',
'Current publication status > Date',
#'5.1 Publication statuses and dates > E-pub ahead of print[1]',
'Subtitle of the contribution in original language', # new
'Type',
'Workflow > Step',
'Original language',
'Electronic version(s) of this work > DOI (Digital Object Identifier)[1]',
'Organisations > Organisational unit[1]',
'Organisations > Organisational unit[2]',
'Organisations > Organisational unit[3]',
'Organisations > Organisational unit[4]',
'Organisations > Organisational unit[5]',
'Organisations > Organisational unit[6]',
'Organisations > Organisational unit[7]',
'Organisations > Organisational unit[8]',
'Organisations > Organisational unit[9]',
'Organisations > Organisational unit[10]',
'Journal > Journal[1]:Titles',
'Journal > Journal[1]:ISSNs',
# '14.3 Journal > Journal[1]:Additional searchable ISSN (Electronic)',
'UUID',
# '18 Created',
# "33.1 Keywords in 'Open Access classification'[1]"
]]
admitted_types = ['Chapter in Book / Report / Conference proceeding - Chapter',
'Contribution to Journal - Article',
'Contribution to Conference - Paper',
'Book / Report - Report',
'Book / Report - Book',
'Chapter in Book / Report / Conference proceeding - Conference contribution',
'Contribution to Journal - Review article',
] ## OVERWRITES LATER
# I will play safe for now, can always post-filter it
accepted_amsco_types_sample = ['Contribution to journal - Article',
'Chapter in Book/Report/Conference proceeding - Chapter',
'Chapter in Book/Report/Conference proceeding - Foreword/postscript',
'Book/Report - Book',
'Contribution to journal - Review article',
###'Contribution to journal - Comment/Letter to the editor',
#'Thesis - Thesis: Research University of Amsterdam, graduation University of Amsterdam',
'Book/Report - Report',
#'Non-textual form - Web publication/site',
#'Book/Report - Book editing',
#'Thesis - Thesis: Research external, graduation external',
'Contribution to journal - Editorial',
'Chapter in Book/Report/Conference proceeding - Conference contribution',
#'Book/Report - Inaugural speech',
#'Working paper - Working paper',
'Contribution to conference - Paper',
'Contribution to conference - Abstract',
# 'Case note - Case note',
'Contribution to journal - Meeting Abstract',
'Contribution to journal - Book/Film/Article review',
#'Contribution to conference - Poster',
'Contribution to journal - Special issue',
###'Contribution to journal - Erratum/Corrigendum',
#'Non-textual form - Exhibition',
'Chapter in Book/Report/Conference proceeding - Entry for encyclopedia/dictionary',
#'Thesis - Thesis: Research University of Amsterdam, graduation external',
'Contribution to journal - Letter',
'Contribution to journal - Short survey',
#'Book/Report - Valedictory speech',
#'Contribution to journal - Literature review (NOT USED)',
#'Thesis - Thesis: Research external, graduation University of Amsterdam',
#'Non-textual form - Digital or Visual Products'
]
admitted_types = ['Chapter in Book / Report / Conference proceeding - Chapter',
'Contribution to Journal - Article',
'Contribution to Conference - Paper',
'Book / Report - Report',
'Book / Report - Book',
'Chapter in Book / Report / Conference proceeding - Conference contribution',
'Contribution to Journal - Review article',
] + accepted_amsco_types_sample
# pre-processing
#
# some robustness needed... some asserts too
#
admitted_types_lower = pd.DataFrame(admitted_types)[0].str.lower().to_list()
print('pure unprocessed has this many rows: ' + str(len(df)))
df = df[df['Type'].str.lower().isin(admitted_types_lower)]
print('pure processed has this many rows: ' + str(len(df)))
###df = df[df['Type'].isin(admitted_types)]
df['DOI'] = df['Electronic version(s) of this work > DOI (Digital Object Identifier)[1]']
# add unpaywall info
#
ae = api_extractor(max_num_workers=16) # care: not tested sufficiently, may give too many error returns
if test_mode_upw:
ae.feed_scopus_search_info(df_in=df.iloc[0:1,:], do_overwrite=True) # 0:1 saves 15sec wait per year of data
df_res_upw, _ = ae.extract(use_multi_thread=False, skip_scopus_search=True, skip_unpaywall=False,
use_parallel_apis=False)
else:
print('multithread is not used')
ae.feed_scopus_search_info(df_in=df, do_overwrite=True)
df_res_upw, _ = ae.extract(use_multi_thread=False, skip_scopus_search=True, skip_unpaywall=False,
use_parallel_apis=False)
#
# merge back in with orig_doi no nans
# cleaning is done in the import framework, saving us work and duplicate code : )
# ! Not sure if dois in pure have an error, causing a mismatch with scopus and unpaywall
print(list(df_res_upw))
print(df_res_upw.head(1))
df = df.merge(df_res_upw, left_on = 'DOI', right_on = 'orig_doi', how = 'left')
df['upw_oa_color_verbose'] = df['upw_oa_color'].apply(lambda x: 'unknown' if x is np.nan else x)
###df_m['pure_oa_class_verbose'] = df_m["33.1 Keywords in 'Open Access classification'[1]"].apply(lambda x: 'unknown' if x is np.nan else x)
# add faculty_finder info exploiting pure org columns
#
ff = faculty_finder(organizational_chart=org_info)
#
#
if silent is False:
trysize = 100
start = time.time()
df.loc[0:trysize,"Organisations > Organisational unit[1]"].apply(lambda x: ff.match(x))
end = time.time()
print(end-start)
print('that was time for 100 entries, but total df is: ')
print(len(df))
print('now doing all of em')
print('this will probably take ' + str(float(len(df))/trysize*(end-start)) + ' seconds')
#
#
df['ff'] = df.loc[:,"Organisations > Organisational unit[1]"].apply(lambda x: ff.match(x))
df.loc[:, 'ff_provided_organization_string'] = df.ff.apply(lambda x: x['ff_provided_organization_string'])
df.loc[:, 'ff_match'] = df.ff.apply(lambda x: x['ff_match'])
df.loc[:, 'ff_score'] = df.ff.apply(lambda x: x['ff_score'])
df.loc[:, 'ff_terms'] = df.ff.apply(lambda x: x['ff_terms'])
df.loc[:, 'ff_message'] = df.ff.apply(lambda x: x['ff_message'])
df.loc[:, 'ff_match_subgroup'] = df.ff.apply(lambda x: x['ff_match_subgroup'])
#
# evaluation is in pure_integratie.ipynb
# for completeness, I also want ff_match based on org_info
# extra processing
df['DOI_isnull'] = df.DOI.isnull()
df['pub_uuid'] = df['UUID']
# now save
if do_save:
df.to_csv(path_to_save)
return df
def get_eid_uuid_data(host, database, user, pw, silent=False):
"""
This function obtains the EID<>PURE_PUB_UUID table from our extrapure database
It immediately works for all years at once
:param host: host database (IP)
:param database: database name
:param user: user to log into database with
:param pw: password to log into database with
:param silent: whether you want to silence extra prints or not
:return: 1. a dataframe with two columns (the EID<>PURE_PUB_UUID table) on success, otherwise None
2. a boolean which is True iff the query succeeded, otherwise False
"""
try:
connection = mysql.connector.connect(host=host,
database=database,
user=user,
password=pw)
sql_select_Query = "select * from scopus_has_publication"
cursor = connection.cursor()
cursor.execute(sql_select_Query)
records = cursor.fetchall()
df_t = pd.DataFrame(records).rename(columns={0: 'eid', 1: 'pub_uuid'})
if silent is False:
print("Total number of rows is: ", cursor.rowcount)
success = True
except Error as e:
#always print this, later also add logging
print("Error reading data from MySQL table", e)
print('returning None')
df_t = None
success = False
finally:
if (connection.is_connected()):
# close the cursor before closing the connection
cursor.close()
connection.close()
if silent is False:
print("MySQL connection is closed")
return df_t, success
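# Illustrative call of get_eid_uuid_data (host and credentials below are placeholders, not real settings):
#
#   df_eid_uuid, ok = get_eid_uuid_data(host='127.0.0.1', database='xpure_db', user='reader', pw='***')
#   if ok:
#       # df_eid_uuid has exactly two columns, 'eid' and 'pub_uuid',
#       # which is the shape merge_pure_with_scopus_data() expects for its df_t argument
#       print(len(df_eid_uuid))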
def fn_cats(row):
if row == 'closed':
result = 1
elif row == 'hybrid':
result = 2
elif row == 'bronze':
result = 3
elif row == 'green':
result = 4
elif row == 'gold':
result = 5
else:
result = 0 # nans etc
return result
def left_pad(my_str):
if len(my_str) < 2:
return '0' + my_str
else:
return my_str
def get_today():
return str(datetime.now().year) + '-' + left_pad(str(datetime.now().month)) + '-' + left_pad(
str(datetime.now().day))
def get_today_for_pubdatetxt():
return left_pad(calendar.month_name[datetime.now().month]) + ' ' + str(datetime.now().year)
def get_today_for_pubdatetxt_integers(year, month):
return left_pad(calendar.month_name[month]) + ' ' + str(year)
def get_today_for_pubdatetxt_super(months_back=0):
# remove datetime. later
# dt_obj = datetime.datetime.now() - datetime.timedelta(days=datetime.datetime.now().day)
if months_back == 0:
dt_obj = datetime.now()
else:
cntr = months_back
dt_obj = datetime.now()
while cntr > 0:
dt_obj = dt_obj - timedelta(days=dt_obj.day)
#print(dt_obj)
cntr -= 1
return left_pad(calendar.month_name[dt_obj.month]) + ' ' + str(dt_obj.year)
def make_types_native_basic(lst):
res = []
for ii in lst:
#print(type(ii))
if (type(ii) == np.int32) | (type(ii) == np.int64):
#print('aa')
res.append(int(ii))
else:
res.append(ii)
return res
def add_abstract_to_scopus(start_path,
year,
do_save_csv=True):
"""
Combines the scopus pickle and old scopus csv into a new one with cleaned abstract text
:param start_path: the starting path where all input/output goes. Subdirectories are required.
this function requires the subfolder:
- 'scopus_processed' with 'pickle_OA_VU'+year+'_met_corresponding_authors.pkl' and for every year
'knip_OA_VU'+year+'_met_corresponding_authors.csv'
:param do_save_csv: whether you want to output a csv or not (will overwrite)
:return: Nothing
"""
#
# get scopus pkl file
print('started reading a pickle ')
df_pickle = pd.read_pickle(start_path + '/scopus_processed/pickle_OA_VU' \
+ str(year) + '_met_corresponding_authors.pkl')
print('finished reading a pickle ')
# make abstract text and clean it
df_pickle['abstract_text'] = df_pickle.apply(get_abstract_if_any, axis=1)
df_pickle['abstract_text_clean'] = (df_pickle['abstract_text']
.apply(comma_space_fix)
.apply(remove_punctuation))
df_pickle = df_pickle[['eid', 'abstract_text_clean']]
if ((len(df_pickle[df_pickle.eid.isnull()]) > 0)
| (df_pickle.eid.apply(lambda x: x is None).max())
| (df_pickle.eid.apply(lambda x: x == 'None').max())):
print('merge issue: df_pickle for abstract text has some null eids')
#
# read scopus
df_k = pd.read_csv(start_path + '/scopus_processed/knip_OA_VU' + str(
year) + '_met_corresponding_authors.csv')
#
# merge with scopus
df_m = df_k.merge(df_pickle[['eid', 'abstract_text_clean']], on='eid', how='left')
if len(df_m) != len(df_k):
print('len messed up')
#
# save it
if do_save_csv:
df_m.to_csv(start_path + '/scopus_processed/knip_OA_VU' \
+ str(year) + '_met_abstract_tekst.csv')
return None
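# Illustrative call (the start_path below is a placeholder):
#
#   add_abstract_to_scopus(start_path='/path/to/oa_data', year=2018)
#
# this reads scopus_processed/pickle_OA_VU2018_met_corresponding_authors.pkl plus the matching
# knip_OA_VU2018_met_corresponding_authors.csv, and writes
# scopus_processed/knip_OA_VU2018_met_abstract_tekst.csv with the cleaned abstract text added.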
def merge_pure_with_scopus_data(df_p, df_s, df_t):
"""
This functions merges a pre-processed Pure dataframe with a pre-processed Scopus dataframe, also uses extrapure
It is a mega-merge using EID, DOI and title with advanced rule sets. Soft-title-match is not included.
There is room for improvement: a DOI cleaner that normalizes every entry to a '10.' prefix would be nice
This function is year_range-indifferent and will work with any year-range or period-range
:param df_p: Dataframe from pure, must be preprocessed with pre_process_pure_data()
:param df_s: Dataframe from scopus, must be enriched through open-access-pipeline (! not yet in Pycharm !)
:param df_t: Dataframe from xpure with eid to uuid. Careful with UUID: every PURE repo has different uuids.
:return: df_combined (the merged dataframe including merge_source), diagnostics (is None right now)
"""
# we need to clean the dois otherwise the doi merge will fail
# I am going to take a small risk and do an overwrite...
df_p_backup = df_p.copy()
df_s_backup = df_s.copy()
df_p['DOI'] = df_p.DOI.apply(lambda x: x.replace('https://doi.org/', '') if pd.notnull(x) else x)
df_s['doi'] = df_s.doi.apply(lambda x: x.replace('https://doi.org/', '') if pd.notnull(x) else x)
# 1. use df_t to enrich df_p with eids, continue with df_m
df_m = df_p.merge(df_t, left_on='pub_uuid', right_on='pub_uuid', how='left')
df_m['has_eid'] = ~df_m.eid.isnull()
if len(df_m[df_m['Title of the contribution in original language'].isnull()]) > 0:
print('there are records with empty titles; those will be discarded before the merge')
# 2. de-duplicate left=pure and right=scopus
# 2A. de-dupe for eids
# assumption: last duplicate entry is correct, rest is false
# we need to preserve records which have NaNs in their eids
# plan of attack: split part with eid, de-dupe it w/o worrying about nan eids, then re-append the part w/o eid
df_m = df_m[df_m.eid.isnull()].append(df_m[~df_m.eid.isnull()].drop_duplicates(subset=['eid'], keep='last'))
if df_m[~df_m.eid.isnull()].eid.value_counts().max() != 1:
print('eid de-duplication failed somehow, you can ignore this if you used AMSCO-data')
# 2B. de-duplicate on DOI
# some are marked as 'do_not_merge_on_DOI' which is an advanced feature
# assumptions:
# step 1. all records with double DOI except for books and book chapters: keep=last, drop other records
# step 2. all records with double DOI and =book or =bookchapter: add a flag to not merge on DOI at all,
# keep rest as is so we can unpaywall it later
#
# prepare support variables
doi_counts = df_m[~df_m.DOI.isnull()].DOI.value_counts().sort_values(ascending=False)
double_doi = doi_counts[doi_counts > 1].index.to_list() # for future use or to mail Reinout or whatever
df_m['type_contains_book'] = df_m.Type.str.lower().str.contains('book')
#
# step 1: drop some of the DOI duplicates (see step 1/2 disc above)
df_m = (df_m[(~df_m.DOI.isin(double_doi)) | (df_m.type_contains_book)]
.append(df_m[(df_m.DOI.isin(double_doi)) & (~df_m.type_contains_book)]
.drop_duplicates(subset='DOI', keep='last')))
#
# step 2: prepare 'do_not_merge_on_DOI' tag
###df_m['do_not_merge_on_DOI'] = ((df_m.DOI.isin(double_doi)) & (df_m.type_contains_book))
#
df_m['do_not_merge_on_DOI'] = (df_m.DOI.isin(double_doi))
#
doi_counts = df_m[~df_m.DOI.isnull()].DOI.value_counts().sort_values(ascending=False)
double_doi = doi_counts[doi_counts > 1].index.to_list() # for future use or to mail Reinout or whatever
if df_m[df_m.DOI.isin(double_doi)].do_not_merge_on_DOI.mean() != 1:
print('doi de-duplication failed somehow')
# this is not caused by the doi-https-cleaning above
# it happens when there are book-types with duplicate dois: a rare occurrence, and it will mess things up
# why: you will get issues during merging
# proposed solution: if two different bookparts have the same doi, do not merge on doi at all
# that is the safest, as STM has a good chance of post-fixing it.
# but do not delete those records though, just do not merge on doi (they are different pieces after all)
# 2C. de-duplicate on titles
#
# drop records whose title is duplicated and contains more than one word
# where the duplicated title is a single word, we cannot safely drop, and we should not merge either, so isolate those
# a one-word title like 'introduction' can be a title-dupe and still belong to a unique article
#
# this is a hard choice, but it is probably best to remove dupes and add flags before any merge happens,
# in order to avoid having dupes with different eids appear twice in merged and unmerged form
# the total affected records are 0.7% and the chance on a missing merge is even smaller
# this is an assumption: we assume the kept dupes are the correct and best ones here
#
# helper variables
df_double_titles = df_m['Title of the contribution in original language'].value_counts()
double_titles = df_double_titles[df_double_titles > 1].index.to_list()
#
# btw: these are exclusive sets, any record can belong to maximally one of these two groups
df_m['is_dupe_based_on_long_title_dupe'] = (
(df_m['Title of the contribution in original language'].isin(double_titles))
& (df_m['Title of the contribution in original language'].str.split().str.len() > 1))
df_m['do_not_merge_on_title'] = (
(df_m['Title of the contribution in original language'].isin(double_titles))
& (df_m['Title of the contribution in original language'].str.split().str.len() == 1))
#
# now we need to remove dupes
# split into two, drop dupes, then combine back
df_m = (df_m[df_m['is_dupe_based_on_long_title_dupe']]
.drop_duplicates(subset=['Title of the contribution in original language'], keep='last')
.append(df_m[~df_m['is_dupe_based_on_long_title_dupe']]))
#
# end of de-duplication and tagging 'do_not_merge_on_DOI' and 'do_not_merge_on_title'
# 3. Perform the mega-merge
#
# drop where title is empty
n_no_title = len(df_m[df_m['Title of the contribution in original language'].isnull()])
df_m = df_m[~df_m['Title of the contribution in original language'].isnull()]
if n_no_title > 0:
print('dropped ' + str(n_no_title) + ' records for no title present')
#
# all variables of step 1
#
# first part of pure with eid
df_A = df_m[~df_m.eid.isnull()]
df_BC = df_m[df_m.eid.isnull()]
#
# inner-merged part of A and Scopus
df_Amerged_SA = df_A.merge(df_s, on='eid', how='inner')
#
# find out which eids were merged on
merged_eids = set(df_Amerged_SA.eid.unique())
# merged parts of left and right
df_Amerged = df_A[df_A.eid.isin(merged_eids)]
df_SA = df_s[
df_s.eid.isin(merged_eids)] # remember we de-duplicated for eids, dois and titles, therefore this should work
# unmerged parts left and right
df_Aunmerged = df_A[~df_A.eid.isin(merged_eids)]
df_Sunmerged1 = df_s[~df_s.eid.isin(merged_eids)]
#
# reflux df_Aunmerged
df_BC_Aunmerged = df_BC.append(df_Aunmerged)
#
# all variables of step 2
# do respect 'do_not_merge_on_DOI'
#
# grab from PURE table the B, the C and the Aunmerged parts only
# do not grab Amerged because we do not want to merge the merged parts again ever
# from these parts, isolate the parts which fulfill the two conditions: has DOI and has no flag to not merge on DOI
# these should be attempted to merge on DOI with Scopus (again, do not merge twice, use Sunmerged1 for this)
# after the merge can obtain the DOIs that merged and use that to split Bmerged and Bunmerged
# notice that there is a difference with the initial plan: Bunmerged will not contain do_not_merge_on_DOI-set at all
# To reduce complexity and adhere to the original plan, we will append the do_not_merge_on_DOI-set to Bunmerged
#
# also, df_BC_Aunmerged splits up in 3 parts
# first we cut off the do_not_merge_on_DOI part
# then we cut the rest in two: one part without DOI and one part with DOI
# this last part is the merge_candidate for step 2/B
df_merge_candidate_B = df_BC_Aunmerged[(~df_BC_Aunmerged.DOI.isnull()) & (~df_BC_Aunmerged.do_not_merge_on_DOI)]
df_BC_Aunmerged_wo_DOI_may_merge = df_BC_Aunmerged[
(df_BC_Aunmerged.DOI.isnull()) & (~df_BC_Aunmerged.do_not_merge_on_DOI)]
df_do_not_merge_on_DOI = df_BC_Aunmerged[df_BC_Aunmerged.do_not_merge_on_DOI]
#
# merge
# assumption: we assume flat doi merge is perfect (we do not lowercase or clean starts or anything)
# diagnostics: this merges 15 out of 328 pure entries with DOI
# lowercasing only affects 20% roughly, but merge stays at 15
# 8 records in total have start different than '10.'
# I will leave it as uncleaned doi-merging here because the added value is very small
df_Bmerged_SB = df_merge_candidate_B.merge(df_Sunmerged1, left_on='DOI', right_on='doi', how='inner')
#
# find out which dois were merged on
merged_dois = set(df_Bmerged_SB.DOI.unique())
# merged parts of left and right
df_Bmerged = df_merge_candidate_B[df_merge_candidate_B.DOI.isin(merged_dois)]
df_SB = df_Sunmerged1[df_Sunmerged1.doi.isin(merged_dois)]
# unmerged parts left and right
df_Bunmerged_temp = df_merge_candidate_B[~df_merge_candidate_B.DOI.isin(merged_dois)]
df_Sunmerged2 = df_Sunmerged1[~df_Sunmerged1.doi.isin(merged_dois)]
#
# append the do_not_merge_on_DOI-set to Bunmerged afterwards
# remember to add the do_not_merge_on_DOI set to df_Bunmerged
# notice that defining every part explicitly makes this less difficult
df_Bunmerged = df_Bunmerged_temp.append(df_do_not_merge_on_DOI)
#
# info:
# in step 2 the unmerged parts together were df_BC_Aunmerged
# we split that now into:
# 1. df_do_not_merge_on_DOI
# 2. df_BC_Aunmerged_wo_DOI_may_merge
# 3. df_merge_candidate_B, which consists of df_Bmerged and df_Bunmerged_temp
# Also, df_Bunmerged is basically df_Bunmerged_temp + df_do_not_merge_on_DOI
#
# so what will be the unmerged part for the next step then?
# df_do_not_merge_on_DOI + df_BC_Aunmerged_wo_DOI_may_merge + df_Bunmerged_temp
# or equivalently:
# df_Bunmerged + df_BC_Aunmerged_wo_DOI_may_merge
# or equivalently:
# the unmerged set of the next step is the unmerged set of this step, minus df_Bmerged because that part merged
# but we'd rather append than 'subtract' so we build it up as (in reflux formulation):
#
# unmerged part for the next step = df_BC_Aunmerged_wo_DOI_may_merge + df_Bunmerged
# verified logically a few times now, let's continue
#
# reflux df_Bunmerged
df_C_Bunmerged = df_BC_Aunmerged_wo_DOI_may_merge.append(df_Bunmerged)
#
# all variables of step 3
# do respect 'do_not_merge_on_title'
#
# the unmerged set is exactly df_C_Bunmerged
# but not everything is merge candidate
# we have to isolate the do_not_merge_on_title set
df_do_not_merge_on_title = df_C_Bunmerged[df_C_Bunmerged.do_not_merge_on_title]
df_merge_candidate_C = df_C_Bunmerged[~df_C_Bunmerged.do_not_merge_on_title]
# notice that we do not split into whether title is present, because title-less records were discarded (0 in 2018)
#
# now we have to try to merge on title
# first we do an exact match merge,
# for the rest we evaluate the levenshtein distance
# exploration indicated that we expect very favourable 0/1 splits and no gray zone, but let's try it out
#
# first exact match on title
df_Cmerged_SC_exact = df_merge_candidate_C.merge(df_Sunmerged2,
left_on='Title of the contribution in original language',
right_on='title',
how='inner')
# now split merged, unmerged and do_not_merge
# find out which eids were merged on
merged_titles = set(df_Cmerged_SC_exact.title.unique())
# merged parts of left and right
df_Cmerged = df_merge_candidate_C[
df_merge_candidate_C['Title of the contribution in original language'].isin(merged_titles)]
df_SC = df_Sunmerged2[df_Sunmerged2.title.isin(merged_titles)]
# unmerged parts left and right
df_Cunmerged_temp = df_merge_candidate_C[
~df_merge_candidate_C['Title of the contribution in original language'].isin(merged_titles)]
df_Sunmerged3 = df_Sunmerged2[~df_Sunmerged2.title.isin(merged_titles)]
# and we have the do_not_merge_on_title set ready, do not forget, better add it now
df_Cunmerged = df_Cunmerged_temp.append(df_do_not_merge_on_title)
#
#
# This is without soft-title-matching!
# generate resulting combined table (name it SP)
# ! careful! you can't just add stuff, we absorbed Aunmerged for example!
# first append cols to unmerged parts
if len(df_Amerged_SA) > 0:
df_Amerged_SA.loc[:, 'merge_source'] = 'both'
else:
df_Amerged_SA['merge_source'] = None
df_Bmerged_SB.loc[:, 'merge_source'] = 'both'
df_Cmerged_SC_exact.loc[:, 'merge_source'] = 'both'
df_Cunmerged.loc[:, 'merge_source'] = 'pure'
df_Sunmerged3.loc[:, 'merge_source'] = 'scopus'
df_combined = (df_Amerged_SA
.append(df_Bmerged_SB, sort=False)
.append(df_Cmerged_SC_exact, sort=False)
.append(df_Cunmerged, sort=False)
.append(df_Sunmerged3, sort=False))
diagnostics = None
return df_combined, diagnostics
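# Sketch of how the three inputs are expected to flow into the mega-merge above
# (variable names are illustrative; pre_process_pure_data is the preprocessing step named in the docstring):
#
#   df_p = pre_process_pure_data(...)                       # preprocessed Pure export
#   df_s = ...                                              # Scopus dataframe enriched by the OA pipeline
#   df_t, ok = get_eid_uuid_data(host, database, user, pw)  # EID <-> pub_uuid table from xpure
#   df_combined, _ = merge_pure_with_scopus_data(df_p, df_s, df_t)
#   # every record in df_combined then carries merge_source in {'both', 'pure', 'scopus'}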
def prepare_combined_data(start_path,
year_range,
xpure_pack,
add_abstract=True,
skip_preprocessing_pure_instead_load_cache=False, # safe
remove_ultra_rare_class_other=True,
org_info=pd.read_excel( static.PATH_START + 'raw data algemeen/vu_organogram_2.xlsx', skiprows=0)):
"""
This function prepares the combined data for a chosen year_range
The raw pure files and processed scopus files per year should be available
Next step: test this function!
Remember that you must do a fresh run if you want any different year range!
The results cannot be stacked across different executions of this function (including any soft-title-match),
because stacking them would introduce duplicates.
:param start_path: the starting path where all input/output goes. Subdirectories are required.
this function requires the subfolder:
- 'scopus_processed' with 'pickle_OA_VU'+year+'_met_corresponding_authors.pkl' and for every year
'knip_OA_VU'+year+'_met_corresponding_authors.csv'
in year_range
-
:param year_range:
:param add_abstract:
:param remove_ultra_rare_class_other:
:param skip_preprocessing_pure_instead_load_cache:
:return:
"""
# 0. unpack xpure settings
[host, database, user, pw] = xpure_pack
# 1. prepare helper variables
# 1A. wrap immutable parameters
year_range = list(year_range)
# 1B. load xpure user/pass
#host = pd.read_csv(path_pw + '/password_xpure.csv').host[0]
#database = pd.read_csv(path_pw + '/password_xpure.csv').database[0]
#user = pd.read_csv(path_pw + '/password_xpure.csv').user[0]
#pw = pd.read_csv(path_pw + '/password_xpure.csv').pw[0]
# 2. add abstract
if add_abstract:
# add the abstract and set scopus_variant to use this enriched csv
scopus_variant = '_met_abstract_tekst.csv'
for year in year_range:
add_abstract_to_scopus(start_path, year) # verified: safe for per-year run (scopus<>scopus only)
else:
# do not add an abstract and use the original csv
scopus_variant = '_met_corresponding_authors.csv'
print('start 3')
# 3. Obtain df_combined for a single year
# includes obtaining processed pure, scopus and xpure data, then merging it and saving csvs
df_p_multi_year = pd.DataFrame()
df_s_multi_year =
|
pd.DataFrame()
|
pandas.DataFrame
|
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
import codecs
import csv
from datetime import datetime
from io import StringIO
import os
import platform
from tempfile import TemporaryFile
from urllib.error import URLError
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas import DataFrame, Index, MultiIndex, Series, compat, concat
import pandas._testing as tm
from pandas.io.parsers import CParserWrapper, TextFileReader, TextParser
def test_override_set_noconvert_columns():
# see gh-17351
#
# Usecols needs to be sorted in _set_noconvert_columns based
# on the test_usecols_with_parse_dates test from test_usecols.py
class MyTextFileReader(TextFileReader):
def __init__(self):
self._currow = 0
self.squeeze = False
class MyCParserWrapper(CParserWrapper):
def _set_noconvert_columns(self):
if self.usecols_dtype == "integer":
# self.usecols is a set, which is documented as unordered
# but in practice, a CPython set of integers is sorted.
# In other implementations this assumption does not hold.
# The following code simulates a different order, which
# before GH 17351 would cause the wrong columns to be
# converted via the parse_dates parameter
self.usecols = list(self.usecols)
self.usecols.reverse()
return CParserWrapper._set_noconvert_columns(self)
data = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = DataFrame(cols, columns=["c_d", "a"])
parser = MyTextFileReader()
parser.options = {
"usecols": [0, 2, 3],
"parse_dates": parse_dates,
"delimiter": ",",
}
parser._engine = MyCParserWrapper(StringIO(data), **parser.options)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_empty_decimal_marker(all_parsers):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = "Only length-1 decimal markers supported"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), decimal="")
def test_bad_stream_exception(all_parsers, csv_dir_path):
# see gh-13652
#
# This test validates that both the Python engine and C engine will
# raise UnicodeDecodeError instead of C engine raising ParserError
# and swallowing the exception that caused read to fail.
path = os.path.join(csv_dir_path, "sauron.SHIFT_JIS.csv")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup("utf-8")
parser = all_parsers
msg = "'utf-8' codec can't decode byte"
# Stream must be binary UTF8.
with open(path, "rb") as handle, codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter
) as stream:
with pytest.raises(UnicodeDecodeError, match=msg):
parser.read_csv(stream)
def test_read_csv_local(all_parsers, csv1):
prefix = "file:///" if compat.is_platform_windows() else "file://"
parser = all_parsers
fname = prefix + str(os.path.abspath(csv1))
result = parser.read_csv(fname, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
def test_1000_sep(all_parsers):
parser = all_parsers
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]})
result = parser.read_csv(StringIO(data), sep="|", thousands=",")
tm.assert_frame_equal(result, expected)
def test_squeeze(all_parsers):
data = """\
a,1
b,2
c,3
"""
parser = all_parsers
index = Index(["a", "b", "c"], name=0)
expected = Series([1, 2, 3], name=1, index=index)
result = parser.read_csv(StringIO(data), index_col=0, header=None, squeeze=True)
tm.assert_series_equal(result, expected)
# see gh-8217
#
# Series should not be a view.
assert not result._is_view
def test_malformed(all_parsers):
# see gh-6607
parser = all_parsers
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = "Expected 3 fields in line 4, saw 5"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), header=1, comment="#")
@pytest.mark.parametrize("nrows", [5, 3, None])
def test_malformed_chunks(all_parsers, nrows):
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
parser = all_parsers
msg = "Expected 3 fields in line 6, saw 5"
reader = parser.read_csv(
StringIO(data), header=1, comment="#", iterator=True, chunksize=1, skiprows=[2]
)
with pytest.raises(ParserError, match=msg):
reader.read(nrows)
def test_unnamed_columns(all_parsers):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
parser = all_parsers
expected = DataFrame(
[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],
dtype=np.int64,
columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"],
)
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_csv_mixed_type(all_parsers):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
parser = all_parsers
expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]})
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_read_csv_low_memory_no_rows_with_index(all_parsers):
# see gh-21141
parser = all_parsers
if not parser.low_memory:
pytest.skip("This is a low-memory specific test")
data = """A,B,C
1,1,1,2
2,2,3,4
3,3,4,5
"""
result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)
expected = DataFrame(columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_read_csv_dataframe(all_parsers, csv1):
parser = all_parsers
result = parser.read_csv(csv1, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
def test_read_csv_no_index_name(all_parsers, csv_dir_path):
parser = all_parsers
csv2 = os.path.join(csv_dir_path, "test2.csv")
result = parser.read_csv(csv2, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738, "foo"],
[1.047916, -0.041232, -0.16181208307, 0.212549, "bar"],
[0.498581, 0.731168, -0.537677223318, 1.346270, "baz"],
[1.120202, 1.567621, 0.00364077397681, 0.675253, "qux"],
[-0.487094, 0.571455, -1.6116394093, 0.103469, "foo2"],
],
columns=["A", "B", "C", "D", "E"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
]
),
)
tm.assert_frame_equal(result, expected)
def test_read_csv_wrong_num_columns(all_parsers):
# Too few columns.
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
parser = all_parsers
msg = "Expected 6 fields in line 3, saw 7"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
def test_read_duplicate_index_explicit(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=0)
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"], name="index"),
)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(all_parsers):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"A,B\nTrue,1\nFalse,2\nTrue,3",
dict(),
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nYES,1\nno,2\nyes,3\nNo,3\nYes,3",
dict(true_values=["yes", "Yes", "YES"], false_values=["no", "NO", "No"]),
DataFrame(
[[True, 1], [False, 2], [True, 3], [False, 3], [True, 3]],
columns=["A", "B"],
),
),
(
"A,B\nTRUE,1\nFALSE,2\nTRUE,3",
dict(),
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nfoo,bar\nbar,foo",
dict(true_values=["foo"], false_values=["bar"]),
DataFrame([[True, False], [False, True]], columns=["A", "B"]),
),
],
)
def test_parse_bool(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
def test_int_conversion(all_parsers):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [3, 3.0])
def test_read_nrows(all_parsers, nrows):
# see gh-10476
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
expected = DataFrame(
[["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]],
columns=["index", "A", "B", "C", "D"],
)
parser = all_parsers
result = parser.read_csv(StringIO(data), nrows=nrows)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [1.2, "foo", -1])
def test_read_nrows_bad(all_parsers, nrows):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
msg = r"'nrows' must be an integer >=0"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), nrows=nrows)
@pytest.mark.parametrize("index_col", [0, "index"])
def test_read_chunksize_with_index(all_parsers, index_col):
parser = all_parsers
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = parser.read_csv(StringIO(data), index_col=0, chunksize=2)
expected = DataFrame(
[
["foo", 2, 3, 4, 5],
["bar", 7, 8, 9, 10],
["baz", 12, 13, 14, 15],
["qux", 12, 13, 14, 15],
["foo2", 12, 13, 14, 15],
["bar2", 12, 13, 14, 15],
],
columns=["index", "A", "B", "C", "D"],
)
expected = expected.set_index("index")
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
@pytest.mark.parametrize("chunksize", [1.3, "foo", 0])
def test_read_chunksize_bad(all_parsers, chunksize):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
msg = r"'chunksize' must be an integer >=1"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), chunksize=chunksize)
@pytest.mark.parametrize("chunksize", [2, 8])
def test_read_chunksize_and_nrows(all_parsers, chunksize):
# see gh-15755
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0, nrows=5)
reader = parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(concat(reader), expected)
def test_read_chunksize_and_nrows_changing_size(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0, nrows=5)
reader = parser.read_csv(StringIO(data), chunksize=8, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(reader.get_chunk(size=2), expected.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), expected.iloc[2:5])
with pytest.raises(StopIteration, match=""):
reader.get_chunk(size=3)
def test_get_chunk_passed_chunksize(all_parsers):
parser = all_parsers
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
reader = parser.read_csv(StringIO(data), chunksize=2)
result = reader.get_chunk()
expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [dict(), dict(index_col=0)])
def test_read_chunksize_compat(all_parsers, kwargs):
# see gh-12185
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
reader = parser.read_csv(StringIO(data), chunksize=2, **kwargs)
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(concat(reader), result)
def test_read_chunksize_jagged_names(all_parsers):
# see gh-23509
parser = all_parsers
data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)])
expected = DataFrame([[0] + [np.nan] * 9] * 7 + [[0] * 10])
reader = parser.read_csv(StringIO(data), names=range(10), chunksize=4)
result = concat(reader)
tm.assert_frame_equal(result, expected)
def test_read_data_list(all_parsers):
parser = all_parsers
kwargs = dict(index_col=0)
data = "A,B,C\nfoo,1,2,3\nbar,4,5,6"
data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]]
expected = parser.read_csv(StringIO(data), **kwargs)
parser = TextParser(data_list, chunksize=2, **kwargs)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_iterator(all_parsers):
# see gh-6607
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
expected = parser.read_csv(StringIO(data), **kwargs)
reader = parser.read_csv(StringIO(data), iterator=True, **kwargs)
first_chunk = reader.read(3)
tm.assert_frame_equal(first_chunk, expected[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, expected[3:])
def test_iterator2(all_parsers):
parser = all_parsers
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = parser.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(result[0], expected)
def test_reader_list(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
lines = list(csv.reader(StringIO(data)))
reader = TextParser(lines, chunksize=2, **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
def test_reader_list_skiprows(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = dict(index_col=0)
lines = list(csv.reader(StringIO(data)))
reader = TextParser(lines, chunksize=2, skiprows=[1], **kwargs)
expected = parser.read_csv(StringIO(data), **kwargs)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[1:3])
def test_iterator_stop_on_chunksize(all_parsers):
# gh-3967: stopping iteration when chunksize is specified
parser = all_parsers
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = parser.read_csv(StringIO(data), chunksize=1)
result = list(reader)
assert len(result) == 3
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(concat(result), expected)
@pytest.mark.parametrize(
"kwargs", [dict(iterator=True, chunksize=1), dict(iterator=True), dict(chunksize=1)]
)
def test_iterator_skipfooter_errors(all_parsers, kwargs):
msg = "'skipfooter' not supported for 'iteration'"
parser = all_parsers
data = "a\n1\n2"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=1, **kwargs)
def test_nrows_skipfooter_errors(all_parsers):
msg = "'skipfooter' not supported with 'nrows'"
data = "a\n1\n2\n3\n4\n5\n6"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=1, nrows=5)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"""foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
""",
dict(index_col=0, names=["index", "A", "B", "C", "D"]),
DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
index=Index(["foo", "bar", "baz", "qux", "foo2", "bar2"], name="index"),
columns=["A", "B", "C", "D"],
),
),
(
"""foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
""",
dict(index_col=[0, 1], names=["index1", "index2", "A", "B", "C", "D"]),
DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
index=MultiIndex.from_tuples(
[
("foo", "one"),
("foo", "two"),
("foo", "three"),
("bar", "one"),
("bar", "two"),
],
names=["index1", "index2"],
),
columns=["A", "B", "C", "D"],
),
),
],
)
def test_pass_names_with_index(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", [[0, 1], [1, 0]])
def test_multi_index_no_level_names(all_parsers, index_col):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
headless_data = "\n".join(data.split("\n")[1:])
names = ["A", "B", "C", "D"]
parser = all_parsers
result = parser.read_csv(
StringIO(headless_data), index_col=index_col, header=None, names=names
)
expected = parser.read_csv(StringIO(data), index_col=index_col)
# No index names in headless data.
expected.index.names = [None] * 2
tm.assert_frame_equal(result, expected)
def test_multi_index_no_level_names_implicit(all_parsers):
parser = all_parsers
data = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=MultiIndex.from_tuples(
[
("foo", "one"),
("foo", "two"),
("foo", "three"),
("bar", "one"),
("bar", "two"),
]
),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,expected,header",
[
("a,b", DataFrame(columns=["a", "b"]), [0]),
(
"a,b\nc,d",
DataFrame(columns=MultiIndex.from_tuples([("a", "c"), ("b", "d")])),
[0, 1],
),
],
)
@pytest.mark.parametrize("round_trip", [True, False])
def test_multi_index_blank_df(all_parsers, data, expected, header, round_trip):
# see gh-14545
parser = all_parsers
data = expected.to_csv(index=False) if round_trip else data
result = parser.read_csv(StringIO(data), header=header)
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(all_parsers):
parser = all_parsers
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
result = parser.read_csv(StringIO(data), sep=" ")
expected = DataFrame(
[[0, 1, 0, "a", "b"], [1, 2, 0, "c", "d"], [2, 2, 2, "e", "f"]],
columns=["Unnamed: 0", "id", "c0", "c1", "c2"],
)
tm.assert_frame_equal(result, expected)
def test_read_csv_parse_simple_list(all_parsers):
parser = all_parsers
data = """foo
bar baz
qux foo
foo
bar"""
result = parser.read_csv(StringIO(data), header=None)
expected =
|
DataFrame(["foo", "bar baz", "qux foo", "foo", "bar"])
|
pandas.DataFrame
|
from __future__ import division, print_function
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from highway_env.road.road import Road
from sklearn import linear_model
from highway_env.vehicle.behavior import IDMVehicle, LinearVehicle
from highway_env.vehicle.dynamics import Obstacle
from highway_env.wrappers.simulation import Simulation
def generate_data(count):
# Vehicle.COLLISIONS_ENABLED = False
vehicle_type = LinearVehicle
road = Road.create_random_road(lanes_count=2, lane_width=4.0, vehicles_count=5, vehicles_type=vehicle_type)
sim = Simulation(road, ego_vehicle_type=vehicle_type, displayed=True)
sim.RECORD_VIDEO = False
road.add_random_vehicles(5, vehicles_type=vehicle_type)
road.vehicles.append(Obstacle(road, np.array([50., 0])))
road.vehicles.append(Obstacle(road, np.array([130., 4.])))
for v in road.vehicles:
v.target_velocity = LinearVehicle.VELOCITY_WANTED
# v.enable_lane_change = False
for _ in range(count):
sim.handle_events()
sim.act()
sim.road.dump()
sim.step()
sim.display()
sim.quit()
return [v.get_log() for v in road.vehicles if not isinstance(v, Obstacle)]
def get_features(data):
v0 = LinearVehicle.VELOCITY_WANTED
d_safe = LinearVehicle.DISTANCE_WANTED + data['v'] * LinearVehicle.TIME_WANTED + LinearVehicle.LENGTH
velocity = v0 - data['v']
front_velocity = np.minimum(data['front_v'] - data['v'], 0)
front_distance = np.minimum(data['front_distance'] - d_safe, 0)
return pd.concat([velocity, front_velocity, front_distance], axis=1).fillna(0)
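# Each row returned above is [v0 - v, min(front_v - v, 0), min(front_distance - d_safe, 0)]:
# the velocity error, the clipped closing speed and the clipped gap shortfall that the linear
# regression in fit() maps onto acceleration; NaNs (no front vehicle) are filled with 0.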
def fit(dump):
regr = linear_model.LinearRegression()
data = dump
X = get_features(data)
y = data['acceleration']
data_fit = data[(-0.95*IDMVehicle.ACC_MAX < data['acceleration'])
& (data['acceleration'] < 0.95*IDMVehicle.ACC_MAX)].reset_index(drop=True)
X_fit = get_features(data_fit)
y_fit = data_fit['acceleration']
regr.fit(X_fit, y_fit)
print(regr.coef_)
y_pred = np.clip(regr.predict(X), -IDMVehicle.ACC_MAX, IDMVehicle.ACC_MAX)
# display(y, y_pred)
return y, y_pred
def display(y, y_pred):
plt.figure()
plt.scatter(y, y, label=r'True')
plt.scatter(y, y_pred, label=r'Model')
plt.legend()
plt.xlabel(r'True acceleration [$m/s^2$]')
plt.ylabel(r'Acceleration [$m/s^2$]')
plt.show()
plt.figure()
plt.plot(np.arange(np.size(y)), y, label=r'True')
plt.plot(np.arange(np.size(y)), y_pred, label=r'Model')
plt.xlabel(r'Time [step]')
plt.ylabel(r'Acceleration [$m/s^2$]')
plt.show()
plt.figure()
plt.hist(y_pred - y, bins=30, weights=np.zeros_like(y) + 100. / y.size)
plt.xlabel(r'Acceleration error [$m/s^2$]')
plt.ylabel(r'Frequency [%]')
plt.show()
def main():
print('Generating data...')
dumps = generate_data(30 * 30)
print('Generation done.')
yy = np.array([])
yyp = np.array([])
dumps = [
|
pd.concat(dumps)
|
pandas.concat
|
"""
author: <NAME>
references:
1.https://arxiv.org/pdf/1505.04597.pdf (the original research paper)
2.https://machinelearningmastery.com/convolutional-layers-for-deep-learning-neural-networks/
3.https://towardsdatascience.com/unet-line-by-line-explanation-9b191c76baf5
4.https://github.com/ashishrana160796/nalu-cell-counting/blob/master/exploring-cell-counting/model.py
The convolutional neural network, or CNN,
is a kind of neural network model designed
to work with two-dimensional image data.
It makes use of a convolutional layer that
gives the network its name. This layer
performs an operation called a convolution,
which is essentially taking the dot product
of a set of weights, or filters, and an
array derived from an input image.
U-Net, introduced in 2015, was an innovative
approach to addressing the issue of image
segmentation...
We use tensorflow, a machine learning library,
and keras, a neural network library, to help
make it possible.
"""
# For type hinting
from typing import List, Tuple
# For image preprocessing
import numpy as np
import pandas as pd
# For accessing the dataset
from cell_counter.import_dataset import get_dataset_info, load_images_from_dataframe
# For creating and using CNN
import tensorflow as tf
# For unet
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as keras
def unet_preprocess_data(
path: str = None, num: int = 2500, df =
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta, date, time
import numpy as np
import pandas as pd
import pandas.lib as lib
import pandas.util.testing as tm
from pandas import Index
from pandas.compat import long, u, PY2
class TestInference(tm.TestCase):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
self.assertEqual(pd.lib.infer_dtype(arr), compare)
# object array of bytes
arr = arr.astype(object)
self.assertEqual(pd.lib.infer_dtype(arr), compare)
def test_isinf_scalar(self):
# GH 11352
self.assertTrue(lib.isposinf_scalar(float('inf')))
self.assertTrue(lib.isposinf_scalar(np.inf))
self.assertFalse(lib.isposinf_scalar(-np.inf))
self.assertFalse(lib.isposinf_scalar(1))
self.assertFalse(lib.isposinf_scalar('a'))
self.assertTrue(lib.isneginf_scalar(float('-inf')))
self.assertTrue(lib.isneginf_scalar(-np.inf))
self.assertFalse(lib.isneginf_scalar(np.inf))
self.assertFalse(lib.isneginf_scalar(1))
self.assertFalse(lib.isneginf_scalar('a'))
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = set(['', 'NULL', 'nan'])
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with tm.assertRaisesRegexp(ValueError, msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = set([-999, -999.0])
for coerce_type in (True, False):
out = lib.maybe_convert_numeric(data, nan_values, coerce_type)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
self.assertTrue(result.dtype == np.float64)
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
self.assertTrue(result.dtype == np.float64)
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
self.assertTrue(np.all(np.isnan(result)))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handing non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
class TestTypeInference(tm.TestCase):
_multiprocess_can_split_ = True
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
self.assertEqual(result, 'integer')
result = lib.infer_dtype([])
self.assertEqual(result, 'empty')
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'integer')
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed-integer')
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'integer')
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'boolean')
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'boolean')
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed')
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
self.assertEqual(result, 'boolean')
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'floating')
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed-integer')
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'floating')
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'floating')
def test_string(self):
pass
def test_unicode(self):
pass
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
self.assertEqual(index.inferred_type, 'datetime64')
def test_date(self):
dates = [date(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
self.assertEqual(index.inferred_type, 'date')
def test_to_object_array_tuples(self):
r = (5, 6)
values = [r]
result = lib.to_object_array_tuples(values)
try:
# make sure record array works
from collections import namedtuple
record = namedtuple('record', 'x y')
r = record(5, 6)
values = [r]
result = lib.to_object_array_tuples(values) # noqa
except ImportError:
pass
def test_to_object_array_width(self):
# see gh-13320
rows = [[1, 2, 3], [4, 5, 6]]
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows)
tm.assert_numpy_array_equal(out, expected)
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows, min_width=1)
tm.assert_numpy_array_equal(out, expected)
expected = np.array([[1, 2, 3, None, None],
[4, 5, 6, None, None]], dtype=object)
out = lib.to_object_array(rows, min_width=5)
tm.assert_numpy_array_equal(out, expected)
def test_object(self):
# GH 7431
# cannot infer more than this as only a single element
arr = np.array([None], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed')
def test_categorical(self):
# GH 8974
from pandas import Categorical, Series
arr = Categorical(list('abc'))
result = lib.infer_dtype(arr)
self.assertEqual(result, 'categorical')
result = lib.infer_dtype(Series(arr))
self.assertEqual(result, 'categorical')
arr = Categorical(list('abc'), categories=['cegfab'], ordered=True)
result = lib.infer_dtype(arr)
self.assertEqual(result, 'categorical')
result = lib.infer_dtype(Series(arr))
self.assertEqual(result, 'categorical')
class TestConvert(tm.TestCase):
def test_convert_objects(self):
arr = np.array(['a', 'b', np.nan, np.nan, 'd', 'e', 'f'], dtype='O')
result = lib.maybe_convert_objects(arr)
self.assertTrue(result.dtype == np.object_)
def test_convert_objects_ints(self):
# test that we can detect many kinds of integers
dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
for dtype_str in dtypes:
arr = np.array(list(np.arange(20, dtype=dtype_str)), dtype='O')
self.assertTrue(arr[0].dtype == np.dtype(dtype_str))
result = lib.maybe_convert_objects(arr)
self.assertTrue(issubclass(result.dtype.type, np.integer))
def test_convert_objects_complex_number(self):
for dtype in np.sctypes['complex']:
arr = np.array(list(1j * np.arange(20, dtype=dtype)), dtype='O')
self.assertTrue(arr[0].dtype == np.dtype(dtype))
result = lib.maybe_convert_objects(arr)
self.assertTrue(issubclass(result.dtype.type, np.complexfloating))
class Testisscalar(tm.TestCase):
def test_isscalar_builtin_scalars(self):
self.assertTrue(lib.isscalar(None))
self.assertTrue(lib.isscalar(True))
self.assertTrue(lib.isscalar(False))
self.assertTrue(lib.isscalar(0.))
self.assertTrue(lib.isscalar(np.nan))
self.assertTrue(lib.isscalar('foobar'))
self.assertTrue(lib.isscalar(b'foobar'))
self.assertTrue(lib.isscalar(u('efoobar')))
self.assertTrue(lib.isscalar(datetime(2014, 1, 1)))
self.assertTrue(lib.isscalar(date(2014, 1, 1)))
self.assertTrue(lib.isscalar(time(12, 0)))
self.assertTrue(lib.isscalar(timedelta(hours=1)))
self.assertTrue(lib.isscalar(pd.NaT))
def test_isscalar_builtin_nonscalars(self):
self.assertFalse(lib.isscalar({}))
self.assertFalse(lib.isscalar([]))
self.assertFalse(lib.isscalar([1]))
self.assertFalse(lib.isscalar(()))
self.assertFalse(lib.isscalar((1, )))
self.assertFalse(lib.isscalar(slice(None)))
self.assertFalse(lib.isscalar(Ellipsis))
def test_isscalar_numpy_array_scalars(self):
self.assertTrue(lib.isscalar(np.int64(1)))
self.assertTrue(lib.isscalar(np.float64(1.)))
self.assertTrue(lib.isscalar(np.int32(1)))
self.assertTrue(lib.isscalar(np.object_('foobar')))
self.assertTrue(lib.isscalar(np.str_('foobar')))
self.assertTrue(lib.isscalar(np.unicode_(u('foobar'))))
self.assertTrue(lib.isscalar(np.bytes_(b'foobar')))
self.assertTrue(lib.isscalar(np.datetime64('2014-01-01')))
self.assertTrue(lib.isscalar(np.timedelta64(1, 'h')))
def test_isscalar_numpy_zerodim_arrays(self):
for zerodim in [np.array(1), np.array('foobar'),
np.array(np.datetime64('2014-01-01')),
np.array(np.timedelta64(1, 'h')),
np.array(np.datetime64('NaT'))]:
self.assertFalse(lib.isscalar(zerodim))
self.assertTrue(lib.isscalar(lib.item_from_zerodim(zerodim)))
def test_isscalar_numpy_arrays(self):
self.assertFalse(lib.isscalar(np.array([])))
self.assertFalse(lib.isscalar(np.array([[]])))
self.assertFalse(lib.isscalar(np.matrix('1; 2')))
def test_isscalar_pandas_scalars(self):
self.assertTrue(lib.isscalar(pd.Timestamp('2014-01-01')))
self.assertTrue(lib.isscalar(pd.Timedelta(hours=1)))
self.assertTrue(lib.isscalar(pd.Period('2014-01-01')))
def test_lisscalar_pandas_containers(self):
self.assertFalse(lib.isscalar(pd.Series()))
self.assertFalse(lib.isscalar(pd.Series([1])))
self.assertFalse(lib.isscalar(pd.DataFrame()))
self.assertFalse(lib.isscalar(pd.DataFrame([[1]])))
self.assertFalse(lib.isscalar(pd.Panel()))
self.assertFalse(lib.isscalar(pd.Panel([[[1]]])))
self.assertFalse(lib.isscalar(pd.Index([])))
self.assertFalse(lib.isscalar(pd.Index([1])))
class TestParseSQL(tm.TestCase):
def test_convert_sql_column_floats(self):
arr = np.array([1.5, None, 3, 4.2], dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array([1.5, np.nan, 3, 4.2], dtype='f8')
self.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_strings(self):
arr = np.array(['1.5', None, '3', '4.2'], dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array(['1.5', np.nan, '3', '4.2'], dtype=object)
self.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_unicode(self):
arr = np.array([u('1.5'), None, u('3'), u('4.2')],
dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array([u('1.5'), np.nan,
|
u('3')
|
pandas.compat.u
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pyplyr import Pyplyr
from pandas import (
Series,
DataFrame
)
from pandas.core.groupby import GroupBy
from pandas.util.testing import (
assert_series_equal,
assert_frame_equal
)
class TestPyplyr(object):
def setup(self):
self.columns = [
"label1", "col0", "col1",
"col2", "col3", "col4"
]
self.df = DataFrame([
["hoge", 0, 1, 2, 3, 4],
["fuga", 5, 6, 7, 8, 9],
["hoge", 10, 11, 12, 13, 14],
["fuga", 15, 16, 17, 18, 19],
["hoge", 20, 21, 22, 23, 24]
], columns=self.columns)
self.pyplyr = Pyplyr(self.df)
def teardown(self):
self.columns = None
self.df = None
self.pyplyr = None
def test_constructor(self):
assert self.pyplyr._Pyplyr__df is self.df
def test_data(self):
result = self.pyplyr.data()
assert_frame_equal(result, self.df)
def test_select(self):
cols = ["col1", "label1"]
result = self.pyplyr.select(*cols)
assert isinstance(result, Pyplyr)
result_df = result._Pyplyr__df
        expected = self.df.loc[:, cols]
assert_frame_equal(result_df, expected)
def test_filter_with_default(self):
result = self.pyplyr.filter(
label1=lambda x: x == "hoge",
col1=lambda x: x > 10
)
assert isinstance(result, Pyplyr)
result_df = result._Pyplyr__df
        expected = self.df.loc[[0,2,3,4], :]
assert_frame_equal(result_df, expected)
def test_filter_with_or_operator(self):
result = self.pyplyr.filter(
operator="or",
label1=lambda x: x == "hoge",
col1=lambda x: x > 10
)
assert isinstance(result, Pyplyr)
result_df = result._Pyplyr__df
        expected = self.df.loc[[0,2,3,4], :]
assert_frame_equal(result_df, expected)
def test_filter_with_and_operator(self):
result = self.pyplyr.filter(
operator="and",
label1=lambda x: x == "hoge",
col1=lambda x: x > 10
)
assert isinstance(result, Pyplyr)
result_df = result._Pyplyr__df
        expected = self.df.loc[[2,4], :]
|
assert_frame_equal(result_df, expected)
|
pandas.util.testing.assert_frame_equal
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: andreypoletaev
"""
# =============================================================================
# %% Block 1: initial imports
# =============================================================================
import os, sys, re, glob
if os.path.join(os.path.abspath(os.getcwd()), "utils") not in sys.path :
sys.path.append(os.path.join(os.path.abspath(os.getcwd()), "utils"))
import numpy as np
import pandas as pd
import hop_utils as hu
from crystal_utils import read_lmp
from scipy.optimize import curve_fit as cf
from scipy.interpolate import interp1d
from datetime import datetime as dt
from matplotlib import pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.patches import Rectangle
from batlow import cm_data
batlow_cm = LinearSegmentedColormap.from_list('batlow', cm_data)
batlow_even = LinearSegmentedColormap.from_list('batlow_even', hu.batlow_lightness_scaled(0.4,0.6))
from cycler import cycler
linecycler = cycler(linestyle=['-', '--', ':', '-.'])
markcycler = cycler(marker=['o', 's', 'v', 'd', '^'])
from itertools import cycle
markers = cycle(['o', 's', 'v', 'd', '^','D','<','>'])
lines = cycle(['-', '--', '-.', ':'])
## linear fitting
linfit = lambda x, *p : p[0] * x + p[1]
## cosmetic defaults for matplotlib plotting
plt.rc('legend', fontsize=10)
plt.rc('axes', labelsize=14)
plt.rc('axes', titlesize=14)
plt.rc('xtick', labelsize=12)
plt.rc('ytick', labelsize=12)
plt.rc('errorbar', capsize=3)
plt.rc('markers', fillstyle='none')
plt.rc("savefig", format='pdf')
## variables by which it is possible to plot
relevant_vars = ['metal','phase','T1','config','stoich','exclude','z']
## which atoms to query for species
## conductivity from bulk diffusion coefficient. Takes D_bulk [cm^2/sec], cell [AA]
## output is [Kelvin/ohm/cm] i.e. [Kelvin * siemens / cm]
## note that there is only one q in the formula because hu.kb is [eV/K]
q = 1.602e-19 ## [Coulomb] elementary charge
AA = 1e-8 ## [cm] 1 angstrom in cm
sigma_T = lambda N, cell, d_com : q * N / np.prod(np.diag(cell*AA))*d_com / hu.kb
unit_conv = 1e-4 ## [cm^2/sec] 1 AA^2/psec = 0.0001 cm^2/sec. No need to change this.
eps_0 = 8.854187e-12 ## [A^2 m^-3 kg^-1 sec^4]
T1 = 300
## dictionary of units
units = {'T1':'K', 'metal':'', 'stoich':'', 'exclude':'', 'config':'', 'z':'',
'phase':f' {T1}K'}
## shorthands for labels
bdp = r'$\beta^{\prime\prime}$'
beta = r'$\beta$'
phases = {'beta':beta, 'bdp':bdp}
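## ---------------------------------------------------------------------------
## Editor's sketch (hypothetical numbers, not values from this study): a quick
## sanity check of the sigma_T unit bookkeeping defined above, using a made-up
## orthorhombic cell and a made-up center-of-mass diffusivity.
cell_demo = np.diag([33.0, 33.0, 67.0])  ## [AA] hypothetical cell edge lengths
N_demo, d_demo = 100, 1e-6               ## ion count and D_com [cm^2/sec]
print(f'demo sigma*T = {sigma_T(N_demo, cell_demo, d_demo):.3e} K*S/cm')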
# =============================================================================
# %% Block 2 : load files based on the index of conduction planes created in
# ## analysis_steadystate.py
# ## The a2_...fix files are assumed to be located in the same folders as their
# ## corresponding lammps structure files.
# =============================================================================
## database of all the hops: only combined planes matter for macro analyses.
all_planes =
|
pd.read_csv('./sample_data/all_hop_planes.csv')
|
pandas.read_csv
|
"""
Classes for analyzing RSMTool predictions, metrics, etc.
:author: <NAME> (<EMAIL>)
:author: <NAME> (<EMAIL>)
:author: <NAME> (<EMAIL>)
:organization: ETS
"""
import warnings
from functools import partial
import numpy as np
import pandas as pd
from scipy.stats import kurtosis, pearsonr
from sklearn.decomposition import PCA
from sklearn.metrics import confusion_matrix, mean_squared_error, r2_score
from skll.metrics import kappa
from .container import DataContainer
from .utils.metrics import (agreement,
difference_of_standardized_means,
partial_correlations,
quadratic_weighted_kappa,
standardized_mean_difference)
from .utils.prmse import get_true_score_evaluations
class Analyzer:
"""Class to perform analysis on all metrics, predictions, etc."""
@staticmethod
def check_frame_names(data_container, dataframe_names):
"""
Check that all specified dataframes are available.
This method checks to make sure all specified DataFrames
are in the given data container object.
Parameters
----------
data_container : container.DataContainer
A DataContainer object
dataframe_names : list of str
The names of the DataFrames expected in the
DataContainer object.
Raises
------
KeyError
If a given dataframe_name is not in the DataContainer object.
"""
for dataframe_name in dataframe_names:
if dataframe_name not in data_container:
raise KeyError('The DataFrame `{}` does not exist in the '
'DataContainer object.'.format(dataframe_name))
@staticmethod
def check_param_names(configuration_obj, parameter_names):
"""
Check that all specified parameters are available.
This method checks to make sure all specified parameters
are in the given configuration object.
Parameters
----------
configuration_obj : configuration_parser.Configuration
A configuration object
parameter_names : list of str
The names of the parameters (keys) expected in the
Configuration object.
Raises
------
KeyError
If a given parameter_name is not in the Configuration object.
"""
for parameter_name in parameter_names:
if parameter_name not in configuration_obj:
raise KeyError('The parameter `{}` does not exist in the '
'Configuration object.'.format(parameter_name))
@staticmethod
def analyze_excluded_responses(df,
features,
header,
exclude_zero_scores=True,
exclude_listwise=False):
"""
Compute statistics for responses excluded from analyses.
This method computes various statistics for the responses that
were excluded from analyses, either in the training set or in
the test set.
Parameters
----------
df : pandas DataFrame
Data frame containing the excluded responses
features : list of str
List of column names containing the features
to which we want to restrict the analyses.
header : str
String to be used as the table header for the
output data frame.
exclude_zero_scores : bool, optional
Whether or not the zero-score responses
should be counted in the exclusion statistics.
Defaults to ``True``.
exclude_listwise : bool, optional
Whether or not the candidates were excluded
based on minimal number of responses.
Defaults to ``False``.
Returns
-------
df_full_crosstab : pandas DataFrame
Two-dimensional data frame containing the
exclusion statistics.
"""
# create an empty output data frame
df_full_crosstab = pd.DataFrame({'all features numeric': [0, 0, 0],
'non-numeric feature values': [0, 0, 0]},
index=['numeric non-zero human score',
'zero human score',
'non-numeric human score'])
if not df.empty:
# re-code human scores into numeric, missing or zero
df['score_category'] = 'numeric non-zero human score'
df.loc[df['sc1'].isnull(), 'score_category'] = 'non-numeric human score'
df.loc[df['sc1'].astype(float) == 0, 'score_category'] = 'zero human score'
# recode feature values: a response with at least one
# missing feature is assigned 'non-numeric feature values'
df_features_only = df[features + ['spkitemid']]
null_feature_rows = df_features_only.isnull().any(axis=1)
df_null_features = df_features_only[null_feature_rows]
df['feat_category'] = 'all features numeric'
df.loc[df['spkitemid'].isin(df_null_features['spkitemid']),
'feat_category'] = 'non-numeric feature values'
# crosstabulate
df_crosstab = pd.crosstab(df['score_category'],
df['feat_category'])
df_full_crosstab.update(df_crosstab)
# convert back to integers as these are all counts
df_full_crosstab = df_full_crosstab.astype(int)
df_full_crosstab.insert(0, header, df_full_crosstab.index)
if not exclude_listwise:
# if we are not excluding listwise, rename the first cell so
# that it is not set to zero
assert(df_full_crosstab.loc['numeric non-zero human score',
'all features numeric'] == 0)
df_full_crosstab.loc['numeric non-zero human score',
'all features numeric'] = '-'
# if we are not excluding the zeros, rename the corresponding cells
# so that they are not set to zero. We do not do this for listwise exclusion
if not exclude_zero_scores:
assert(df_full_crosstab.loc['zero human score',
'all features numeric'] == 0)
df_full_crosstab.loc['zero human score',
'all features numeric'] = '-'
return df_full_crosstab
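    # Editor's sketch (synthetic rows, not an RSMTool fixture): a minimal call to
    # analyze_excluded_responses, kept here only to show the input columns it
    # expects ('spkitemid', 'sc1', and the feature columns passed in).
    @staticmethod
    def _sketch_analyze_excluded_responses():
        df_demo = pd.DataFrame({'spkitemid': ['r1', 'r2', 'r3'],
                                'sc1': [3.0, 0.0, None],
                                'feat1': [None, 1.0, 2.0]})
        return Analyzer.analyze_excluded_responses(df_demo,
                                                   ['feat1'],
                                                   'excluded responses')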
@staticmethod
def analyze_used_responses(df_train, df_test, subgroups, candidate_column):
"""
Compute statistics for responses used in analyses.
This method computes various statistics on the responses that
were used in analyses, either in the training set or in the
test set.
Parameters
----------
df_train : pandas DataFrame
Data frame containing the response information
for the training set.
df_test : pandas DataFrame
Data frame containing the response information
for the test set.
subgroups : list of str
List of column names that contain grouping
information.
candidate_column : str
Column name that contains candidate
identification information.
Returns
-------
df_analysis : pandas DataFrame
Data frame containing information about the used
responses.
"""
# create a basic data frame for responses only
train_responses = set(df_train['spkitemid'])
test_responses = set(df_test['spkitemid'])
rows = [{'partition': 'Training', 'responses': len(train_responses)},
{'partition': 'Evaluation', 'responses': len(test_responses)},
{'partition': 'Overlapping', 'responses': len(train_responses & test_responses)},
{'partition': 'Total', 'responses': len(train_responses | test_responses)}]
df_analysis =
|
pd.DataFrame.from_dict(rows)
|
pandas.DataFrame.from_dict
|
import matplotlib.pyplot as plt
from scipy.stats import logistic
import statsmodels.formula.api as smf
import pandas as pd
import numpy as np
def get_panel_estimates(estimator, df):
assert estimator in ["naive", "diff"]
subset = df.loc[(slice(None), 10), :]
if estimator == "naive":
rslt = smf.ols(formula="Y ~ D", data=subset).fit()
elif estimator == "diff":
subset.loc[(slice(None), slice(None)), "S"] = subset["Y"] - subset["Y_8"]
rslt = smf.ols(formula="S ~ D ", data=subset).fit()
return rslt
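# Editor's sketch (made-up panel, not the simulated sample below): shows the
# (Identifier, Grade) layout get_panel_estimates expects and how the naive and
# difference estimators are called; both recover the constant effect of 2 here.
def _demo_panel_estimates():
    idx = pd.MultiIndex.from_tuples(
        [(i, grade) for i in range(4) for grade in (8, 9, 10)],
        names=("Identifier", "Grade"))
    demo = pd.DataFrame(index=idx)
    demo["D"] = [i % 2 for i, _ in idx]        # treat every other agent
    demo["Y_8"] = 98.0                         # common pretest outcome
    demo["Y"] = demo["Y_8"] + 2.0 * demo["D"]  # constant treatment effect
    return (get_panel_estimates("naive", demo).params["D"],
            get_panel_estimates("diff", demo).params["D"])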
def get_propensity_score(selection, o, u, additional_effect, y0):
if selection == "baseline":
idx = -3.8 + o + u
elif selection == "self-selection on gains":
idx = -7.3 + o + u + 5 * additional_effect
elif selection == "self-selection on pretest":
idx = -3.8 + o + u + 0.05 * (y0[0] - 98)
else:
raise NotImplementedError
return np.exp(idx) / (1 + np.exp(idx))
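# Editor's sketch (hypothetical inputs): the expression returned above is the
# logistic CDF of the selection index, so it matches scipy.stats.logistic.cdf,
# which is already imported at the top of this script.
def _demo_propensity_is_logistic_cdf():
    p = get_propensity_score("baseline", o=1.0, u=0.5,
                             additional_effect=0.0, y0=(98.0, 0.0))
    assert np.isclose(p, logistic.cdf(-3.8 + 1.0 + 0.5))
    return p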
def get_sample_panel_demonstration(num_agents, selection, trajectory):
assert trajectory in ["parallel", "divergent"]
columns = ["Y", "D", "O", "X", "E", "U", "Y_1", "Y_0", "Y_8"]
index = list()
for i in range(num_agents):
for j in [8, 9, 10]:
index.append((i, j))
index =
|
pd.MultiIndex.from_tuples(index, names=("Identifier", "Grade"))
|
pandas.MultiIndex.from_tuples
|
"""
MIT License
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pandas as pd
import logging as lg
from queue import Queue
from .product import GdaxProducts
from sqlalchemy import and_, or_, func
from sqlalchemy.orm import scoped_session
from sqlalchemy.exc import IntegrityError
from stocklook.utils.database import DatabaseLoadingThread
from .tables import (GdaxSQLQuote,
GdaxSQLProduct,
GdaxSQLTickerFeedEntry,
GdaxOHLC5)
from stocklook.utils.timetools import (timestamp_to_local,
timestamp_to_utc_int,
now_local)
from pandas import (DataFrame,
DatetimeIndex,
infer_freq,
DateOffset,
Timestamp,
read_sql)
logger = lg.getLogger(__name__)
class GdaxDatabase:
def __init__(self, gdax=None, base=None, engine=None, session_maker=None):
if gdax is None:
from . import Gdax
gdax = Gdax()
self.gdax = gdax
self._base = None
self._engine = None
self._session_maker = None
self._stock_ids = dict()
self.setup(base, engine, session_maker)
@property
def stock_ids(self):
if not self._stock_ids:
session = self.get_session()
self.load_stocks(session)
session.close()
return self._stock_ids
def get_stock_id(self, pair):
return self.stock_ids[pair]
def get_loading_thread(self, obj, queue=None, commit_interval=10, raise_on_error=True, **kwargs):
if queue is None:
queue = Queue()
return DatabaseLoadingThread(self._session_maker,
queue,
obj,
raise_on_error=raise_on_error,
commit_interval=commit_interval,
**kwargs)
def setup(self, base=None, engine=None, session_maker=None):
"""
Configures SQLAlchemy connection.
Assigns configured objects to
GdaxDatabase._base
GdaxDatabase._engine
GdaxDatabase._session_maker
:param base:
:param engine:
:param session_maker:
:return:
"""
if base is None:
from .tables import GdaxBase
base = GdaxBase
if engine is None:
from sqlalchemy import create_engine
from ...config import config
from ...utils.security import Credentials
from sqlalchemy.engine.url import URL
# Look for a postgres or mysql database first
url_kwargs = config.get('GDAX_FEED_URL_KWARGS', dict()).copy()
# Popping off a copy
d = url_kwargs.pop('drivername', '')
if url_kwargs and d:
if d.startswith('mysql') \
or d.startswith('postgres') \
or d.startswith('sqlite'):
pw = url_kwargs.get('password', None)
if not pw and not d.startswith('sqlite'):
# Make the user input the password securely.
# if it's not stored in the KeyRing
user = url_kwargs['username']
c = Credentials(data=config, allow_input=True)
svc_name = '{}_{}'.format(c.GDAX_DB, d)
pw = c.get(svc_name, username=user, api=False)
url_kwargs['password'] = pw
url = URL(d, **url_kwargs)
engine = create_engine(url)
else:
# Don't feel like supporting databases other
# than mysql, pgsql, sqlite...other dbs are lame anyways.
raise NotImplementedError("Unsupported drivername: "
"{}".format(d))
else:
# Go for a sqlite engine as a backup.
try:
db_path = config['GDAX_DB_PATH']
except KeyError:
import os
db_dir = config['DATA_DIRECTORY']
db_path = os.path.join(db_dir, 'gdax.sqlite3')
db_path = db_path.replace("\\", "/")
pfx = 'sqlite:///'
if not db_path.startswith(pfx):
db_path = pfx + db_path
engine = create_engine(db_path)
if session_maker is None:
from sqlalchemy.orm import sessionmaker
session_maker = sessionmaker(bind=engine)
try:
session_maker = scoped_session(session_maker)
except Exception as e:
print("Error creating scoped session maker: {}".format(e))
base.metadata.create_all(bind=engine, checkfirst=True)
for p in self.gdax.products.values():
            # We don't want prices cached for more
            # than 30 seconds at a time
p.sync_interval = 30
self._base = base
self._engine = engine
self._session_maker = session_maker
def get_session(self):
return self._session_maker()
def load_stocks(self, session):
qry = session.query(GdaxSQLProduct)
res = qry.all()
prods = self.gdax.products.values()
stocks = list()
if len(prods) == len(res):
stocks = res
elif not res:
for p in prods:
i = GdaxSQLProduct(name=p.name,
currency=p.currency)
session.add(i)
stocks.append(i)
session.commit()
elif res:
for p in prods:
v = [r for r in res
if r.name == p.name]
if v:
continue
i = GdaxSQLProduct(name=p.name,
currency=p.currency)
stocks.append(i)
session.add(i)
session.commit()
for s in stocks:
self._stock_ids[s.name] = s.stock_id
def get_stock(self, session, name):
qry = session.query(GdaxSQLProduct)
res = qry.filter(GdaxSQLProduct.name == name).first()
if not res:
if name not in GdaxProducts.LIST:
raise Exception("Invalid gdax "
"product name {}".format(name))
self.load_stocks(session)
return self.get_stock(session, name)
return res
def sync_quotes(self, session=None):
if session is None:
session = self.get_session()
close = True
else:
close = False
for stock_name, stock_id in self.stock_ids.items():
p = self.gdax.get_product(stock_name)
q = GdaxSQLQuote(stock_id=stock_id,
close=p.price,
volume=p.volume24hr,
quote_date=now_local())
print(q)
session.add(q)
session.commit()
if close:
session.close()
def register_quote(self,
session,
stock_id,
open=None,
high=None,
low=None,
close=None,
volume=None,
quote_date=None):
if quote_date is None:
quote_date = now_local()
try:
int(stock_id)
except:
# Convert string to numeric stock id
stock_id = self._stock_ids[stock_id]
q = GdaxSQLQuote(stock_id=stock_id,
open=open,
high=high,
low=low,
close=close,
volume=volume,
quote_date=quote_date)
session.add(q)
return q
def get_quotes(self, stock_id):
int(stock_id)
from pandas import read_sql
tbl = GdaxSQLQuote.__tablename__
sql = "SELECT * FROM {}" \
"WHERE {}={}".format(tbl,
GdaxSQLQuote.stock_id.name,
stock_id)
parse_dates = [GdaxSQLQuote.date_added.name,
GdaxSQLQuote.quote_date.name]
return read_sql(sql, self._engine, parse_dates=parse_dates)
def to_frame(self, query_set, cols):
        data = [tuple(getattr(rec, c) for c in cols)
                for rec in query_set]
return DataFrame(data=data,
columns=cols,
index=range(len(data)))
def get_prices(self, session, from_date, to_date, products):
"""
Returns price data available between from_date and to_date
from the GdaxSQLTickerFeedEntry.__tablename__.
This method would be useful if you're maintaining this table
in the database by subscribing to the 'ticker' websocket channel.
:param session:
:param from_date:
:param to_date:
:param products:
:return:
"""
t = GdaxSQLTickerFeedEntry
crit = and_(t.time >= from_date,
t.time <= to_date,
t.product_id.in_(products))
res = session.query(t).filter(crit).all()
cols = [t.best_ask.name,
t.best_bid.name,
t.last_size.name,
t.side.name,
t.time.name,
t.product_id.name,
t.price.name]
return self.to_frame(res, cols)
class GdaxOHLCViewer:
FREQ = '5T'
GRANULARITY = 60*5
MAX_SPAN = 1
def __init__(self, pair=None, db=None, obj=None):
if db is None:
db = GdaxDatabase()
if obj is None:
obj = GdaxOHLC5
self.pair = None
self.stock_id = None
self.db = db
self.gdax = db.gdax
self._loading_threads = list()
self.obj = obj
self.span_secs = self.MAX_SPAN * 24 * 60 * 60
if pair is not None:
self.set_pair(pair)
def load_df(self, df, thread=True, raise_on_error=True):
id_label = self.obj.stock_id.name
if id_label not in df.columns or df[id_label].dropna().index.size != df.index.size:
df.loc[:, id_label] = self.stock_id
missing = self.get_missing_columns(df.columns)
if missing:
raise KeyError("Data missing columns: "
"{}".format(missing))
# Ensure UTC time
t = self.obj.time.name
dtype = str(df[t].dtype)
logger.info("time column data type: {}".format(dtype))
if 'int' not in dtype and 'float' not in dtype:
logger.debug("Converting time to UTC")
logger.info(df[t].head(5))
df.loc[:, t] = df.loc[:, t].apply(timestamp_to_utc_int).astype(int)
logger.info(df[t].head(5))
else:
logger.debug("Confirmed UTC time dtype: {}".format(dtype))
# load database (thread vs non-thread)
if thread is True:
t = self.db.get_loading_thread(self.obj,
queue=None,
commit_interval=df.index.size)
t.start()
q = t.queue
for idx, rec in df.iterrows():
q.put(rec)
q.put(t.STOP_SIGNAL)
self._loading_threads.append(t)
else:
session = self.db.get_session()
cols = [c.name for c in self.obj.__table__.columns]
recs = list()
for idx, rec in df.iterrows():
o = self.obj()
[setattr(o, k, str(v))
for k, v in rec.items()
if k in cols]
recs.append(o)
try:
# Attempt to load all at once
session.add_all(recs)
session.commit()
except (IntegrityError, Exception) as e:
session.rollback()
# Attempt individual load
# Since adding all at once didn't work.
e = str(e).upper()
if raise_on_error or 'UNIQUE' not in e:
logger.error(e.lower())
raise
logger.error("Attempting individual insert/commits"
"as a solution to: {}".format(e))
errs = 0
for rec in recs:
try:
session.add(rec)
session.commit()
except:
session.rollback()
errs += 1
c = len(recs)
logger.info("{}/{} records successfully "
"imported.".format(c - errs/c))
else:
session.close()
session.close()
def set_pair(self, pair):
self.stock_id = self.db.get_stock_id(pair)
self.pair = pair
def get_missing_columns(self, df_cols):
cols = [c.name for c in self.obj.__table__.columns]
cols = [c for c in cols if (not c.endswith('_id') or c == 'stock_id')]
return [c for c in cols if c not in df_cols]
def get_min_max_times(self, session):
crit = self.obj.stock_id == self.stock_id
max_sel = func.max(self.obj.time)
min_sel = func.min(self.obj.time)
max_date = session.query(max_sel).filter(crit).one()[0]
min_date = session.query(min_sel).filter(crit).one()[0]
try:
return timestamp_to_local(min_date), timestamp_to_local(max_date)
except ValueError:
return None, None
def slice_frame(self, df):
if df.empty:
return df
t = self.obj.time.name
t_ser = df[t]
min_t, max_t = t_ser.min(), t_ser.max()
try:
min_t + 3
except:
raise ValueError("Expected integer (UTC) min/max "
"time to slice data, not {}".format(type(min_t)))
bump = self.GRANULARITY
osize = df.index.size
kwargs = {'time': t,
'id_label': self.obj.stock_id.name,
'id': self.stock_id,
'table': self.obj.__tablename__,
'start': min_t + bump,
'end': max_t + bump,
}
sql = "select {time} " \
"from {table} " \
"where {id_label} = {id} " \
"and {time} between {start} and {end};".format(**kwargs)
s = self.read_sql(sql, convert_dates=False)
if not s.empty:
if s[t].dtype != df[t].dtype:
t_temp = '__{}'.format(t)
logger.debug("Using temp column '{}' "
"to slice data.".format(t_temp))
for frame in (df, s):
                    frame.loc[:, t_temp] = pd.to_numeric(frame.loc[:, t],
                                                         errors='coerce',
                                                         downcast='integer').astype(int)
label = t_temp
else:
t_temp = None
label = t
df = df.loc[~df[label].isin(s[label]), :]
if t_temp:
df.drop([t_temp], axis=1, errors='raise', inplace=True)
        diff = osize - df.index.size
        if diff > 0:
            logger.debug("slice_frame: Removed {} records from "
                         "data, was {}.".format(diff, osize))
return df
def request_ohlc(self, start, end, convert_dates=False):
df = self.gdax.get_candles(self.pair,
start,
end,
self.GRANULARITY,
convert_dates=convert_dates,
to_frame=True)
if not df.empty:
df.sort_values(['time'], ascending=[False], inplace=True)
df.loc[:, self.obj.stock_id.name] = self.stock_id
return df
def read_sql(self, sql, convert_dates=False, **kwargs):
kwargs['coerce_float'] = kwargs.get('coerce_float', False)
df = read_sql(sql, self.db._engine, **kwargs)
if not df.empty:
t = self.obj.time.name
if t in df.columns and convert_dates:
logger.debug("pre-converted time: {}".format(df[t].head(5)))
df.loc[:, t] = df[t].apply(timestamp_to_local)
df.loc[:, t] =
|
pd.to_datetime(df.loc[:, t], errors='coerce')
|
pandas.to_datetime
|
"""
Custom indicators.
These indicators are meant to supplement the TA-Lib. See:
https://ta-lib.org/function.html
"""
import math
import numpy as np
import pandas as pd
from talib.abstract import *
import pinkfish as pf
class IndicatorError(Exception):
"""
Base indicator exception.
"""
########################################################################
# CROSSOVER
class TradeCrossOverError(IndicatorError):
"""
Invalid timeperiod specified.
"""
class _CrossOver:
"""
This is a helper class to implement the CROSSOVER function.
The class provides the apply callback for pd.DataFrame.apply()
in CROSSOVER. It also keeps track of _r, explained below.
_r indicates regime direction and duration, i.e. 50 means a bull
market that has persisted for 50 days, whereas -20 means a bear
market that has persisted for 20 days.
_r is incremented(decremented) each day a bull(bear) market persists
_r remains unchanged when fast_ma within band of slow_ma
_r indicates the number of trading days a trend has persisted
_r is nan, then sma_slow is nan
_r > 0, then bull market, fast_ma > slow_ma
_r < 0, then bear market, fast_ma < slow_ma
_r == 0, no trend established yet
"""
def __init__(self):
"""
Initialize instance variables.
Attributes
----------
_r : int
Indicates regime direction and duration.
"""
self._r = 0
def apply(self, row, band=0):
"""
Implements the regime change logic.
Parameters
----------
row : pd.Series
A row of data from the dataframe.
band : int {0-100}
Percent band (default is 0, which is no band).
Returns
-------
_r : int
Indicates regime direction and duration.
"""
if pd.isnull(row['__sma_slow__']):
self._r = np.nan
elif row['__sma_fast__'] > row['__sma_slow__']*(1+band/100):
self._r = self._r + 1 if self._r > 0 else 1
elif row['__sma_fast__'] < row['__sma_slow__']*(1-band/100):
self._r = self._r -1 if self._r < 0 else -1
else:
pass
return self._r
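# Editor's sketch (synthetic values, not part of pinkfish): a tiny illustration of
# how the _r counter above evolves row by row. Pre-computed fast and slow averages
# are fed straight to the helper, so no TA-Lib call is needed.
def _demo_crossover_counter():
    demo = pd.DataFrame({'__sma_fast__': [np.nan, 1.0, 2.0, 3.0, 1.0, 0.5],
                         '__sma_slow__': [np.nan, 2.0, 2.0, 2.0, 2.0, 2.0]})
    regimes = demo.apply(_CrossOver().apply, band=0, axis=1)
    # -> [nan, -1, -1, 1, -1, -2]: the sign tracks the regime, the magnitude its age
    return regimes.tolist()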
def CROSSOVER(ts, timeperiod_fast=50, timeperiod_slow=200,
func_fast=SMA, func_slow=SMA, band=0,
price='close', prevday=False):
"""
This indicator is used to represent regime direction and duration.
For example, an indicator value of 50 means a bull market that has
persisted for 50 days, whereas -20 means a bear market that has
persisted for 20 days.
More generally, this is a crossover indicator for two moving
averages. The indicator is positive when the fast moving average
    is above the slow moving average, and negative when the fast
moving average is below the slow moving average.
Parameters
----------
    ts : pd.DataFrame
A dataframe with 'open', 'high', 'low', 'close', 'volume'.
timeperiod_fast : int, optional
The timeperiod for the fast moving average (default is 50).
timeperiod_slow : int, optional
The timeperiod for the slow moving average (default is 200).
func_fast : ta_lib.Function, optional
{SMA, DEMA, EMA, KAMA, T3, TEMA, TRIMA, WMA}
The talib function for fast moving average (default is SMA).
MAMA not compatible.
func_slow : ta_lib.Function, optional
{SMA, DEMA, EMA, KAMA, T3, TEMA, TRIMA, WMA}
The talib function for slow moving average. (default is SMA).
MAMA not compatible.
band : float, {0-100}, optional
Percent band around the slow moving average.
(default is 0, which implies no band is used).
price : str, optional {'close', 'open', 'high', 'low'}
Input_array column to use for price (default is 'close').
prevday : bool, optional
True will shift the series forward. Unless you are buying
on the close, you'll likely want to set this to True.
It gives you the previous day's CrossOver (default is False).
Returns
-------
s : pd.Series
Series that contains the rolling regime indicator values.
Raises
------
TradeCrossOverError
If one of the timeperiods specified is invalid.
Examples
--------
>>> ts['regime'] = pf.CROSSOVER(ts, timeperiod_fast=50,
timeperiod_slow=200)
"""
if (timeperiod_fast < 1 or timeperiod_slow < 2
or timeperiod_fast >= timeperiod_slow):
raise TradeCrossOverError
ts['__sma_fast__'] = ts[price] if timeperiod_fast == 1 else \
func_fast(ts, timeperiod=timeperiod_fast, price=price)
ts['__sma_slow__'] = \
func_slow(ts, timeperiod=timeperiod_slow, price=price)
func = _CrossOver().apply
s = ts.apply(func, band=band, axis=1)
if prevday:
s = s.shift()
ts.drop(['__sma_fast__', '__sma_slow__'], axis=1, inplace=True)
return s
########################################################################
# MOMENTUM
def MOMENTUM(ts, lookback=1, time_frame='monthly', price='close', prevday=False):
"""
    This indicator is used to represent momentum in security prices.
Percent price change is used to calculate momentum. Momentum
is positive if the price since the lookback period has increased.
Likewise, if price has decreased since the lookback period,
momentum is negative. Percent change is used to normalize
asset prices for comparison.
Parameters
----------
    ts : pd.DataFrame
A dataframe with 'open', 'high', 'low', 'close', 'volume'.
lookback : int, optional
The number of time frames to lookback, e.g. 2 months
(default is 1).
    time_frame : str, optional {'monthly', 'daily', 'weekly', 'yearly'}
The unit or timeframe type of lookback (default is 'monthly').
price : str, optional {'close', 'open', 'high', 'low'}
Input_array column to use for price (default is 'close').
prevday : bool, optional
True will shift the series forward. Unless you are buying
on the close, you'll likely want to set this to True.
It gives you the previous day's Momentum (default is False).
Returns
-------
s : pd.Series
Series that contains the rolling momentum indicator values.
Raises
------
ValueError
If the lookback is not positive or the time_frame is invalid.
Examples
--------
>>> ts['mom'] = pf.MOMENTUM(ts, lookback=6, time_frame='monthly')
"""
if lookback < 1:
raise ValueError('lookback must be positive')
if time_frame =='daily': factor = 1
elif time_frame =='weekly': factor = pf.statistics.TRADING_DAYS_PER_WEEK
elif time_frame =='monthly': factor = pf.statistics.TRADING_DAYS_PER_MONTH
elif time_frame =='yearly': factor = pf.statistics.TRADING_DAYS_PER_YEAR
else:
raise ValueError('invalid time_frame "{}"'.format(time_frame))
s = ts[price].pct_change(periods=lookback*factor)
if prevday:
s = s.shift()
return s
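# Editor's sketch (synthetic prices, not part of pinkfish): with a daily
# time_frame the factor is 1, so MOMENTUM reduces to a plain one-period
# percent change that is easy to verify by hand.
def _demo_momentum():
    demo = pd.DataFrame({'close': [100.0, 110.0, 121.0]})
    # -> approximately [nan, 0.10, 0.10]
    return MOMENTUM(demo, lookback=1, time_frame='daily').tolist()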
########################################################################
# VOLATILITY
def VOLATILITY(ts, lookback=20, time_frame='yearly', downside=False,
price='close', prevday=False):
"""
This indicator is used to represent volatility in security prices.
Volatility is represented as the standard deviation. Volatility
is calculated over the lookback period, then we scale to the
time frame. Volatility scales with the square root of time.
For example, if the market’s daily volatility is 0.5%, then
volatility for two days is the square root of 2 times
the daily volatility (0.5% * 1.414 = 0.707%). We use the square
    root of time to scale from daily to weekly, monthly, or yearly.
Parameters
----------
    ts : pd.DataFrame
A dataframe with 'open', 'high', 'low', 'close', 'volume'.
    lookback : int, optional
        The number of trading days in the rolling window used to
        compute the volatility (default is 20).
    time_frame : str, optional {'yearly', 'daily', 'weekly', 'monthly'}
The unit or timeframe used for scaling. For example, if the
lookback is 20 and the timeframe is 'yearly', then we compute
the 20 day volatility and scale to 1 year.
(default is 'yearly').
downside : bool, optional
True to calculate the downside volatility (default is False).
price : str, optional {'close', 'open', 'high', 'low'}
Input_array column to use for price (default is 'close').
prevday : bool, optional
True will shift the series forward. Unless you are buying
on the close, you'll likely want to set this to True.
It gives you the previous day's Volatility (default is False).
Returns
-------
s : pd.Series
A new column that contains the rolling volatility.
Raises
------
ValueError
If the lookback is not positive or the time_frame is invalid.
Examples
--------
>>> ts['vola'] = pf.VOLATILITY(ts, lookback=20, time_frame='yearly')
"""
if lookback < 1:
raise ValueError('lookback must be positive')
if time_frame == 'daily': factor = 1
elif time_frame == 'weekly': factor = pf.statistics.TRADING_DAYS_PER_WEEK
elif time_frame == 'monthly': factor = pf.statistics.TRADING_DAYS_PER_MONTH
elif time_frame == 'yearly': factor = pf.statistics.TRADING_DAYS_PER_YEAR
else:
raise ValueError('invalid time_frame "{}"'.format(time_frame))
s = ts[price].pct_change()
if downside:
s[s > 0] = 0
s = s.rolling(window=lookback).std() * np.sqrt(factor)
if prevday:
s = s.shift()
return s
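# Editor's sketch (synthetic prices, not part of pinkfish): alternating +1%/-1%
# daily moves give a constant rolling std, so the ratio of the 'yearly' to the
# 'daily' result is exactly sqrt(TRADING_DAYS_PER_YEAR), as described above.
def _demo_volatility_scaling():
    demo = pd.DataFrame({'close': 100 * np.cumprod(1 + 0.01 * np.resize([1, -1], 60))})
    daily = VOLATILITY(demo, lookback=20, time_frame='daily')
    yearly = VOLATILITY(demo, lookback=20, time_frame='yearly')
    return (yearly / daily).dropna().round(3).unique()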
########################################################################
# ANNUALIZED_RETURNS
def ANNUALIZED_RETURNS(ts, lookback=5, price='close', prevday=False):
"""
Calculate the rolling annualized returns.
Parameters
----------
    ts : pd.DataFrame
A dataframe with 'open', 'high', 'low', 'close', 'volume'.
lookback : float, optional
The number of years to lookback, e.g. 5 years. 1/12 can be
used for 1 month. Likewise 3/12 for 3 months, etc...
(default is 5).
price : str, optional {'close', 'open', 'high', 'low'}
Input_array column to use for price (default is 'close').
prevday : bool, optional
True will shift the series forward. Unless you are buying
on the close, you'll likely want to set this to True.
It gives you the previous day's Volatility (default is False).
Returns
-------
s : pd.Series
Series that contains the rolling annualized returns.
Raises
------
ValueError
If the lookback is not positive.
Examples
--------
>>> annual_returns_1mo = pf.ANNUALIZED_RETURNS(ts, lookback=1/12)
>>> annual_returns_3mo = pf.ANNUALIZED_RETURNS(ts, lookback=3/12)
>>> annual_returns_1yr = pf.ANNUALIZED_RETURNS(ts, lookback=1)
>>> annual_returns_3yr = pf.ANNUALIZED_RETURNS(ts, lookback=3)
>>> annual_returns_5yr = pf.ANNUALIZED_RETURNS(ts, lookback=5)
"""
def _cagr(s):
"""
Calculate compound annual growth rate.
B = end balance; A = begin balance; n = num years
"""
A = s[0]
B = s[-1]
n = len(s)
if B < 0: B = 0
return (math.pow(B / A, 1 / n) - 1) * 100
if lookback <= 0:
raise ValueError('lookback must be positive')
window = int(lookback * pf.statistics.TRADING_DAYS_PER_YEAR)
s = pd.Series(ts[price]).rolling(window).apply(_cagr)
if prevday:
s = s.shift()
return s
########################################################################
# ANNUALIZED_STANDARD_DEVIATION
def ANNUALIZED_STANDARD_DEVIATION(ts, lookback=3, price='close', prevday=False):
"""
Calculate the rolling annualized standard deviation.
Parameters
----------
    ts : pd.DataFrame
A dataframe with 'open', 'high', 'low', 'close', 'volume'.
lookback : float, optional
The number of years to lookback, e.g. 5 years. 1/12 can be
used for 1 month. Likewise 3/12 for 3 months, etc...
        (default is 3).
price : str, optional {'close', 'open', 'high', 'low'}
Input_array column to use for price (default is 'close').
prevday : bool, optional
True will shift the series forward. Unless you are buying
on the close, you'll likely want to set this to True.
It gives you the previous day's Volatility (default is False).
Returns
-------
s : pd.Series
Series that contains the rolling annualized standard deviation.
Raises
------
ValueError
If the lookback is not positive.
Examples
--------
>>> std_dev_1mo = pf.ANNUALIZED_STANDARD_DEVIATION(ts,lookback=1/12)
>>> std_dev_3mo = pf.ANNUALIZED_STANDARD_DEVIATION(ts, lookback=3/12)
>>> std_dev_1yr = pf.ANNUALIZED_STANDARD_DEVIATION(ts, lookback=1)
>>> std_dev_3yr = pf.ANNUALIZED_STANDARD_DEVIATION(ts, lookback=3)
>>> std_dev_5yr = pf.ANNUALIZED_STANDARD_DEVIATION(ts, lookback=5)
"""
def _std_dev(s):
"""
Calculate the annualized standard deviation.
"""
return np.std(s, axis=0) * math.sqrt(pf.statistics.TRADING_DAYS_PER_YEAR)
if lookback <= 0:
raise ValueError('lookback must be positive')
window = int(lookback * pf.statistics.TRADING_DAYS_PER_YEAR)
pc = ts[price].pct_change()
s = pd.Series(pc).rolling(window).apply(_std_dev)
if prevday:
s = s.shift()
return s
########################################################################
# ANNUALIZED_SHARPE_RATIO
def ANNUALIZED_SHARPE_RATIO(ts, lookback=5, price='close', prevday=False,
risk_free=0):
"""
Calculate the rolling annualized sharpe ratio.
Parameters
----------
    ts : pd.DataFrame
A dataframe with 'open', 'high', 'low', 'close', 'volume'.
lookback : float, optional
The number of years to lookback, e.g. 5 years. 1/12 can be
used for 1 month. Likewise 3/12 for 3 months, etc...
(default is 5).
price : str, optional {'close', 'open', 'high', 'low'}
Input_array column to use for price (default is 'close').
prevday : bool, optional
True will shift the series forward. Unless you are buying
on the close, you'll likely want to set this to True.
It gives you the previous day's Volatility (default is False).
risk_free: float, optional
The risk free rate (default is 0).
Returns
-------
s : pd.Series
Series that contains the rolling annualized sharpe ratio.
Raises
------
ValueError
If the lookback is not positive.
Examples
--------
>>> sharpe_ratio_1mo = pf.ANNUALIZED_SHARPE_RATIO(ts, lookback=1/12)
>>> sharpe_ratio_3mo = pf.ANNUALIZED_SHARPE_RATIO(ts, lookback=3/12)
>>> sharpe_ratio_1yr = pf.ANNUALIZED_SHARPE_RATIO(ts, lookback=1)
>>> sharpe_ratio_3yr = pf.ANNUALIZED_SHARPE_RATIO(ts, lookback=3)
>>> sharpe_ratio_5yr = pf.ANNUALIZED_SHARPE_RATIO(ts, lookback=5)
"""
def _sharpe_ratio(s):
"""
Calculate the annualized sharpe ratio.
"""
dev = np.std(s, axis=0)
mean = np.mean(s, axis=0)
period = len(s)
sharpe = (mean*period - risk_free) / (dev * np.sqrt(period))
return sharpe
if lookback <= 0:
raise ValueError('lookback must be positive')
window = int(lookback * pf.statistics.TRADING_DAYS_PER_YEAR)
pc = ts[price].pct_change()
s =
|
pd.Series(pc)
|
pandas.Series
|
# -*- coding: utf-8 -*-
import os
import pickle
from copy import deepcopy
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from mabwiser.mab import MAB, LearningPolicy, NeighborhoodPolicy
from tests.test_base import BaseTest
class MABTest(BaseTest):
#################################################
# Test property decorator methods
################################################
def test_learning_policy_property(self):
for lp in BaseTest.lps:
mab = MAB([1, 2], lp)
test_lp = mab.learning_policy
self.assertTrue(type(test_lp) is type(lp))
for para_lp in BaseTest.para_lps:
mab = MAB([1, 2], para_lp)
test_lp = mab.learning_policy
self.assertTrue(type(test_lp) is type(para_lp))
for cp in BaseTest.cps:
for lp in BaseTest.lps:
mab = MAB([1, 2], lp, cp)
test_lp = mab.learning_policy
self.assertTrue(type(test_lp) is type(lp))
for cp in BaseTest.cps:
            for para_lp in BaseTest.para_lps:
mab = MAB([1, 2], para_lp, cp)
test_lp = mab.learning_policy
self.assertTrue(type(test_lp) is type(para_lp))
def test_learning_policy_values(self):
lp = LearningPolicy.EpsilonGreedy(epsilon=0.6)
mab = MAB([0, 1], lp)
self.assertEqual(lp.epsilon, mab.learning_policy.epsilon)
data = np.array([[1, 2, 3], [3, 2, 1]])
sc = StandardScaler()
sc.fit(data)
arm_to_scaler = {0: sc, 1: sc}
lp = LearningPolicy.LinUCB(alpha=2.0, l2_lambda=0.3, arm_to_scaler=arm_to_scaler)
mab = MAB([0, 1], lp)
self.assertEqual(lp.alpha, mab.learning_policy.alpha)
self.assertEqual(lp.l2_lambda, mab.learning_policy.l2_lambda)
self.assertIs(sc, mab.learning_policy.arm_to_scaler[0])
self.assertIs(sc, mab.learning_policy.arm_to_scaler[1])
lp = LearningPolicy.Softmax(tau=0.5)
mab = MAB([0, 1], lp)
self.assertEqual(lp.tau, mab.learning_policy.tau)
def binary(arm, reward):
return reward == 1
lp = LearningPolicy.ThompsonSampling(binarizer=binary)
mab = MAB([0, 1], lp)
self.assertIs(lp.binarizer, mab.learning_policy.binarizer)
lp = LearningPolicy.UCB1(alpha=0.7)
mab = MAB([0, 1], lp)
self.assertEqual(lp.alpha, mab.learning_policy.alpha)
def test_neighborhood_policy_property(self):
for cp in BaseTest.cps:
for lp in BaseTest.lps:
mab = MAB([1, 2], lp, cp)
test_np = mab.neighborhood_policy
self.assertTrue(type(test_np) is type(cp))
for cp in BaseTest.cps:
            for para_lp in BaseTest.para_lps:
mab = MAB([1, 2], para_lp, cp)
test_np = mab.neighborhood_policy
self.assertTrue(type(test_np) is type(cp))
def test_neighborhood_policy_values(self):
lp = LearningPolicy.EpsilonGreedy()
np = NeighborhoodPolicy.Clusters(n_clusters=3)
mab = MAB([0, 1], lp, np)
self.assertEqual(np.n_clusters, mab.neighborhood_policy.n_clusters)
self.assertFalse(mab.neighborhood_policy.is_minibatch)
np = NeighborhoodPolicy.Clusters(n_clusters=5, is_minibatch=True)
mab = MAB([0, 1], lp, np)
self.assertEqual(np.n_clusters, mab.neighborhood_policy.n_clusters)
self.assertTrue(mab.neighborhood_policy.is_minibatch)
np = NeighborhoodPolicy.KNearest(k=10, metric='cityblock')
mab = MAB([0, 1], lp, np)
self.assertEqual(np.k, mab.neighborhood_policy.k)
self.assertEqual(np.metric, mab.neighborhood_policy.metric)
np = NeighborhoodPolicy.Radius(radius=1.5, metric='canberra', no_nhood_prob_of_arm=[0.2, 0.8])
mab = MAB([0, 1], lp, np)
self.assertEqual(np.radius, mab.neighborhood_policy.radius)
self.assertEqual(np.metric, mab.neighborhood_policy.metric)
self.assertEqual(np.no_nhood_prob_of_arm, mab.neighborhood_policy.no_nhood_prob_of_arm)
np = NeighborhoodPolicy.LSHNearest(n_dimensions=2, n_tables=2, no_nhood_prob_of_arm=[0.2, 0.8])
mab = MAB([0, 1], lp, np)
self.assertEqual(np.n_dimensions, mab.neighborhood_policy.n_dimensions)
self.assertEqual(np.n_tables, mab.neighborhood_policy.n_tables)
self.assertEqual(np.no_nhood_prob_of_arm, mab.neighborhood_policy.no_nhood_prob_of_arm)
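    # Editor's sketch (not one of the parametrized cases above): the bare
    # fit/predict cycle the property checks rely on, with a purely greedy
    # policy so the predicted arm is deterministic.
    def test_fit_predict_sketch(self):
        mab = MAB([1, 2], LearningPolicy.EpsilonGreedy(epsilon=0.0), seed=7)
        mab.fit(decisions=[1, 1, 2, 2], rewards=[0, 0, 1, 1])
        self.assertEqual(mab.predict(), 2)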
#################################################
# Test context free predict() method
################################################
def test_arm_list_int(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_arm_list_str(self):
for lp in MABTest.lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_decision_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_series_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_array_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
#################################################
# Test context free predict_expectation() method
################################################
def test_exp_arm_list_int(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_arm_list_str(self):
for lp in MABTest.lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_series_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_array_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_context_history_series(self):
contexts = pd.DataFrame({'column1': [1, 2, 3], 'column2': [2, 3, 1]})
for lp in BaseTest.para_lps:
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1],
rewards=[0, 0, 0],
learning_policy=lp,
context_history=contexts['column1'],
contexts=[[1]],
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(mab._imp.arm_to_model[0].beta.shape[0], 1)
for cp in BaseTest.nps:
for lp in BaseTest.lps:
if not self.is_compatible(lp, cp) or isinstance(cp, NeighborhoodPolicy.TreeBandit):
continue
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1],
rewards=[0, 0, 0],
learning_policy=lp,
neighborhood_policy=cp,
context_history=contexts['column1'],
contexts=[[1]],
seed=123456,
num_run=1,
is_predict=True)
# Tree Bandit does not store contexts
if not isinstance(cp, NeighborhoodPolicy.TreeBandit):
self.assertEqual(np.ndim(mab._imp.contexts), 2)
for cp in BaseTest.cps:
for lp in BaseTest.lps:
if not self.is_compatible(lp, cp):
continue
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1],
rewards=[0, 0, 0],
learning_policy=lp,
neighborhood_policy=cp,
context_history=contexts['column1'],
contexts=[[1]],
seed=123456,
num_run=1,
is_predict=True)
# Tree Bandit does not store contexts
if not isinstance(cp, NeighborhoodPolicy.TreeBandit):
self.assertEqual(np.ndim(mab._imp.contexts), 2)
def test_context_series(self):
contexts = pd.DataFrame({'column1': [1, 2, 3, 3, 2, 1], 'column2': [2, 3, 1, 1, 2, 3]})
for lp in BaseTest.para_lps:
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1, 1, 1, 1],
rewards=[0, 0, 0, 0, 0, 0],
learning_policy=lp,
context_history=contexts['column1'],
contexts=pd.Series([1]),
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(mab._imp.arm_to_model[0].beta.shape[0], 1)
for cp in BaseTest.nps:
for lp in BaseTest.lps:
if not self.is_compatible(lp, cp):
continue
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1, 1, 1, 1],
rewards=[0, 0, 0, 0, 0, 0],
learning_policy=lp,
neighborhood_policy=cp,
context_history=contexts['column1'],
contexts=pd.Series([1]),
seed=123456,
num_run=1,
is_predict=True)
# Tree Bandit does not store contexts
if not isinstance(cp, NeighborhoodPolicy.TreeBandit):
self.assertEqual(np.ndim(mab._imp.contexts), 2)
for cp in BaseTest.cps:
for lp in BaseTest.lps:
if not self.is_compatible(lp, cp):
continue
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1, 1, 1, 1],
rewards=[0, 0, 0, 0, 0, 0],
learning_policy=lp,
neighborhood_policy=cp,
context_history=contexts['column1'],
contexts=pd.Series([1]),
seed=123456,
num_run=1,
is_predict=True)
# Tree Bandit does not store contexts
if not isinstance(cp, NeighborhoodPolicy.TreeBandit):
self.assertEqual(np.ndim(mab._imp.contexts), 2)
#################################################
# Test contextual predict() method
################################################
def test_context_arm_list_int(self):
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3, 4],
decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.nps:
for lp in MABTest.lps:
if not self.is_compatible(lp, cp):
continue
self.predict(arms=[1, 2, 3, 4],
decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.cps:
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3, 4],
decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_context_arm_list_str(self):
for lp in MABTest.para_lps:
self.predict(arms=["A", "B", "C", "D"],
decisions=["A", "A", "A", "B", "B", "C", "C", "C", "C", "C"],
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.nps:
for lp in MABTest.lps:
if not self.is_compatible(lp, cp):
continue
self.predict(arms=["A", "B", "C", "D"],
decisions=["A", "A", "A", "B", "B", "C", "C", "C", "C", "C"],
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.cps:
for lp in MABTest.lps:
if not self.is_compatible(lp, cp):
continue
self.predict(arms=["A", "B", "C", "D"],
decisions=["A", "A", "A", "B", "B", "C", "C", "C", "C", "C"],
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, -2, 2, 3, 11], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, -5, 2, 3, 10], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, -2, 4, 3, 9], [20, 19, 18, 17, 16], [1, 2, 1, 1, 3],
[17, 18, 17, 19, 18]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_context_decision_series(self):
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3, 4],
decisions=pd.Series([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.nps:
for lp in MABTest.lps:
if not self.is_compatible(lp, cp):
continue
self.predict(arms=[1, 2, 3, 4],
decisions=pd.Series([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.cps:
for lp in MABTest.lps:
if not self.is_compatible(lp, cp):
continue
self.predict(arms=[1, 2, 3, 4],
decisions=pd.Series([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_context_reward_series(self):
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3, 4],
decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
rewards=pd.Series([0, 1, 1, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.nps:
for lp in MABTest.lps:
if not self.is_compatible(lp, cp):
continue
self.predict(arms=[1, 2, 3, 4],
decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
rewards=
|
pd.Series([0, 1, 1, 0, 0, 0, 0, 1, 1, 1])
|
pandas.Series
|
import copy
import pandas as pd
import pandas.testing
from mvtk.supervisor.processing import (
replace_nulls,
normalize_ts_columns,
)
def test_replace_nulls():
for col_list in [["col1"], ["col2"], ["col1", "col2"]]:
init_rows = [
{"col1": "test1_1", "col2": "test1_2"},
{"col1": None, "col2": "test2_2"},
{"col1": "test3_1", "col2": None},
{"col1": None, "col2": None},
]
expect_rows = copy.deepcopy(init_rows)
for i in range(0, len(expect_rows)):
for col in col_list:
if expect_rows[i][col] is None:
expect_rows[i][col] = "1"
init_df = pd.DataFrame(init_rows)
expect_df = pd.DataFrame(expect_rows)
actual = replace_nulls(init_df, "1", col_list)
expect = expect_df
pandas.testing.assert_frame_equal(actual, expect)
def time_to_seconds(time):
return int(time[:2]) * 3600 + int(time[2:4]) * 60 + int(time[4:6])
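# For illustration: time_to_seconds("101112") == 10*3600 + 11*60 + 12 == 36672,
# which the test below normalizes to str(round(36672 / 86400, 5)) == "0.42444".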
def test_process_ts_columns():
format_map = {"col2": "%H:%M:%S.%f", "col3": "%H%M%S.%f", "col4": "%H%M%S"}
for col_list in [
["col2"],
["col3"],
["col4"],
["col2", "col3"],
["col2", "col4"],
["col3", "col4"],
["col2", "col3", "col4"],
]:
init_rows = [
{
"col1": "test1",
"col2": "10:11:12.123456",
"col3": "101112.123456",
"col4": "101112",
},
{
"col1": "test2",
"col2": None,
"col3": "202123.123456",
"col4": "202124",
},
{
"col1": "test3",
"col2": "10:31:32.123456",
"col3": None,
"col4": "103134",
},
{
"col1": "test4",
"col2": "20:41:42.123456",
"col3": "204143.123456",
"col4": None,
},
]
expect_rows = copy.deepcopy(init_rows)
for i in range(0, len(expect_rows)):
for col in col_list:
if expect_rows[i][col] is None:
expect_rows[i][col] = -1
else:
expect_rows[i][col] = str(
round(
time_to_seconds(expect_rows[i][col].replace(":", ""))
/ 86400,
5,
)
)
init_df = pd.DataFrame(init_rows)
expect =
|
pd.DataFrame(expect_rows)
|
pandas.DataFrame
|
import gzip
import json
import os
import pandas as pd
from sqlalchemy import String, DateTime
from panoramix import app, db, models, utils
config = app.config
DATA_FOLDER = os.path.join(config.get("BASE_DIR"), 'data')
def get_or_create_db(session):
print("Creating database reference")
DB = models.Database
dbobj = session.query(DB).filter_by(database_name='main').first()
if not dbobj:
dbobj = DB(database_name="main")
print(config.get("SQLALCHEMY_DATABASE_URI"))
dbobj.sqlalchemy_uri = config.get("SQLALCHEMY_DATABASE_URI")
session.add(dbobj)
session.commit()
return dbobj
def load_world_bank_health_n_pop():
tbl = 'wb_health_population'
with gzip.open(os.path.join(DATA_FOLDER, 'countries.json.gz')) as f:
pdf = pd.read_json(f)
pdf.year = pd.to_datetime(pdf.year)
pdf.to_sql(
tbl,
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'year': DateTime(),
'country_code': String(3),
'country_name': String(255),
'region': String(255),
},
index=False)
print("Creating table reference")
TBL = models.SqlaTable
obj = db.session.query(TBL).filter_by(table_name=tbl).first()
if not obj:
obj = TBL(table_name='wb_health_population')
obj.description = utils.readfile(os.path.join(DATA_FOLDER, 'countries.md'))
obj.main_dttm_col = 'year'
obj.database = get_or_create_db(db.session)
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
def load_birth_names():
session = db.session
with gzip.open(os.path.join(DATA_FOLDER, 'birth_names.json.gz')) as f:
pdf =
|
pd.read_json(f)
|
pandas.read_json
|
import subprocess
from pathlib import Path
from typing import Tuple
import matplotlib.pyplot as plt
import pandas
from io import StringIO
def calculate_samtools_coverage(bam_filename: Path, output_filename: Path) -> Path:
samtools = "/home/cld100/applications/samtools/bin/samtools"
dfs = list()
for cutoff in range(0, 40, 10):
print(cutoff)
        command = [samtools, "depth", "-q", str(cutoff), "-a", bam_filename]
        output = subprocess.check_output(command, universal_newlines=True)
        df = pandas.read_csv(StringIO(output), delimiter="\t", names=['chrom', 'position', 'depthOfCoverage'])
df['coverageCutoff'] = cutoff
dfs.append(df)
dfs =
|
pandas.concat(dfs)
|
pandas.concat
|
import pandas as pd
import numpy as np
from pathlib import Path
from datetime import datetime as dt
def mergeManagers(managers, gameLogs):
#Get visiting managers
    visitingManagers = gameLogs[['row','Date','Visiting team manager ID']].copy()  # copy to avoid SettingWithCopyWarning on the assignment below
visitingManagers['yearID'] = pd.DatetimeIndex(pd.to_datetime(visitingManagers['Date'])).year-1
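    # Note: yearID is shifted back one year, presumably so each game is matched to the
    # previous season's manager record in the merge on ['yearID', 'Visiting team manager ID'] below.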
visitingManagers =
|
pd.merge(visitingManagers, managers, left_on=['yearID','Visiting team manager ID'], right_on=['yearID','playerID'], how="left")
|
pandas.merge
|
# This code was developed by <NAME>, 2021. <EMAIL>
import pandas as pd
import numpy as np
import copy
from simple_dispatch import StorageModel
from simple_dispatch import generatorData
from simple_dispatch import bidStack
from simple_dispatch import dispatch
from simple_dispatch import generatorDataShort
import scipy
class FutureGrid(object):
"""By <NAME>. This class manages the model of the future grid and implements dispatch / capacity calculations.
:param gd_short: The generator model
:type gd_short: An object of class `generatorDataShort` from `simple_dispatch.py`
:param unit_drops: Information about which generators are retired in each year
:type unit_drops: Dataframe
:param additions_df: Information about which generators are added each year
:type additions_df: Dataframe
:param year: Year for the future grid
:type year: int
:param future: Future grid demand, including EV demand
:type future: An object of class `FutureDemand` from later in this file
:param stor_df: Demand that needs to be met by storage; passed to storage model object
:type stor_df: Dataframe
:param storage: Storage model
:type storage: An object of the class `StorageModel` from `simple_dispatch.py`
:param bs: Bidstack
:type bs: An object of the class `bidStack` by Thomas Deetjen from `simple_dispatch.py`
:param dp: Dispatch
:type dp: An object of the class `dispatch` by Thomas Deetjen from `simple_dispatch.py`
"""
def __init__(self, gd_short):
self.gd_short = gd_short
self.gd_short_original = copy.deepcopy(gd_short)
self.unit_drops = pd.read_csv('IntermediateOutputs/scheduled_retirements_2019.csv', index_col=0)
self.additions_df = pd.read_csv('IntermediateOutputs/generator_additions.csv', index_col=0)
self.year = None
self.future = None
self.stor_df = None
self.storage = None
self.bs = None
self.dp = None
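    # Typical usage (illustrative sketch; `gd_short` is assumed to be a generatorDataShort
    # object built elsewhere with simple_dispatch, and the CSVs above must exist on disk):
    #     grid = FutureGrid(gd_short)
    #     grid.add_generators(future_year=2030)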
def add_generators(self, future_year):
"""Duplicate generators to simulate new additions in the future WECC grid."""
gd_short_final = copy.deepcopy(self.gd_short)
added_units = self.additions_df[self.additions_df['Year']<future_year]['orispl_unit'].values
for i, val in enumerate(added_units):
idx = len(gd_short_final.df)
loc1 = gd_short_final.df[gd_short_final.df['orispl_unit']==val].index
gd_short_final.df =
|
pd.concat((gd_short_final.df, gd_short_final.df.loc[loc1]), ignore_index=True)
|
pandas.concat
|
import json
import os
import shutil
import uuid
from io import BytesIO
from typing import List
from typing import Optional
import albumentations as A
import augmentations
import cv2
import folder_actions
import numpy as np
import pandas as pd
from balance import Balance
from fastapi import Cookie
from fastapi import FastAPI
from fastapi import File
from fastapi import Form
from fastapi import HTTPException
from fastapi import Response
from fastapi import UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from model_output import Model_output
from PIL import Image
from split import SplitDataset
SERVER_BASE_URL = os.environ["SERVER_BASE_URL"]
app = FastAPI()
app.mount("/images", StaticFiles(directory="images"), name="images")
app.mount("/test_dataset",
StaticFiles(directory="test_dataset"),
name="test_dataset")
app.mount("/models", StaticFiles(directory="models"), name="models")
app.mount("/model_output",
StaticFiles(directory="model_output"),
name="model_output")
app.mount("/img_dataset",
StaticFiles(directory="img_dataset"),
name="img_dataset")
app.mount("/image_previews",
StaticFiles(directory="image_previews"),
name="image_previews")
app.mount("/train_val_csv",
StaticFiles(directory="train_val_csv"),
name="train_val_csv")
origins = [
"http://localhost",
"http://localhost:3000",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
def load_image_into_numpy_array(data):
return np.array(Image.open(BytesIO(data)).convert("RGB"))
def hinted_tuple_hook(obj):
if "__tuple__" in obj:
return tuple(obj["items"])
else:
return obj
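# For example, json.loads('{"__tuple__": true, "items": [256, 256]}',
#                         object_hook=hinted_tuple_hook) yields the tuple (256, 256).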
@app.post("/transform_image", status_code=200)
async def transform_image(
response: Response,
preview: bool = False,
parameters: Optional[str] = Form(None),
transformation: Optional[int] = Form(None),
transformation_step: Optional[int] = Form(None),
img_url: Optional[str] = Form(None),
preview_url: Optional[str] = Form(None),
image: Optional[UploadFile] = File(None),
id: Optional[str] = Cookie(None),
step_count: Optional[str] = Cookie(None),
):
if id is None:
if image is None:
raise HTTPException(status_code=400,
detail="Image has to be uploaded")
elif img_url is not None or preview_url is not None:
raise HTTPException(
status_code=400,
detail=
"Image has to be added to the history before refering it",
)
id = str(uuid.uuid4())
response.set_cookie(key="id", value=id)
folder_actions.mkdir_p("images/" + id)
step_count = "0"
if step_count is None or image is not None:
step_count = "0"
response.set_cookie(key="step_count", value=step_count)
elif not preview:
step_count = str(int(step_count) + 1)
response.set_cookie(key="step_count", value=step_count)
img_extension = ".png"
if image is not None:
img_extension = "." + image.filename.split(".")[-1]
image = load_image_into_numpy_array(await image.read())
folder_actions.delete_files(folder="images/" + str(id))
elif img_url is not None or preview_url is not None:
img_url_new = "images/" + str(id)
if img_url is not None:
if transformation_step is None:
raise HTTPException(
status_code=400,
detail="transformation_step field required")
img_extension = "." + img_url.split(".")[-1]
img_url_new += ("/transformed_img_" + str(transformation_step) +
img_extension)
if not preview:
step_count = str(int(transformation_step) + 1)
response.set_cookie(key="step_count", value=step_count)
folder_actions.delete_files(
folder="images/" + str(id),
split_string="transformed_img_",
low=int(transformation_step),
)
elif preview_url is not None:
img_extension = "." + preview_url.split(".")[-1]
img_url_new = "image_previews/" + str(id) + img_extension
image = np.array(Image.open(img_url_new).convert("RGB"))
else:
raise HTTPException(status_code=400,
detail="img_url or preview_url required")
transformed_image = image
if img_url is not None or preview_url is None:
parameters = json.loads(json.loads(parameters),
object_hook=hinted_tuple_hook)
transform = augmentations.augmentations_dict[transformation](
**parameters)
transformed = transform(image=image)
transformed_image = transformed["image"]
im = Image.fromarray(transformed_image)
img_path = "images/" + str(id)
if preview:
img_path = "image_previews/" + str(id) + img_extension
else:
img_path += "/transformed_img_" + str(step_count) + img_extension
im.save(img_path)
return {"img_path": SERVER_BASE_URL + img_path}
@app.post("/reset_images")
async def reset_images(response: Response, id: Optional[str] = Cookie(None)):
if not id:
return {"done": False}
response.delete_cookie("step_count")
folder_actions.delete_files("images/" + str(id))
return {"done": True}
@app.get("/transformed_images")
async def get_transformed_images(id: Optional[str] = Cookie(None)):
if id is None:
return {"transformed_images": []}
transformed_images = [
SERVER_BASE_URL + "images/" + str(id) + "/" + filename
for filename in folder_actions.get_file_names("images/" + str(id))
]
return {"transformed_images": list(transformed_images)}
@app.post("/transform_images")
async def transform_images(
response: Response,
parameters: str = Form(...),
transformations: str = Form(...),
num_iterations: int = Form(...),
class_id: str = Form(...),
images: List[UploadFile] = File(...),
id: Optional[str] = Cookie(None),
):
if id is None:
id = str(uuid.uuid4())
response.set_cookie(key="id", value=id)
folder_actions.mkdir_p("images/" + id)
base_img_path = "img_dataset/" + class_id + "/"
folder_actions.mkdir_p(base_img_path)
parameters = json.loads(json.loads(parameters),
object_hook=hinted_tuple_hook)
transformations = json.loads(json.loads(transformations),
object_hook=hinted_tuple_hook)
transform = A.Compose([
augmentations.augmentations_dict[transformation](**parameters[idx])
for idx, transformation in enumerate(transformations)
])
img_names = [image.filename for image in images]
images = [
load_image_into_numpy_array(await image.read()) for image in images
]
base_img_path += str(id)
transformed_images = []
for idx in range(num_iterations):
for i in range(len(images)):
transformed = transform(image=images[i])
temp = transformed["image"]
transformed_images.append({
"image":
temp,
"path":
base_img_path + str(idx) + "_" + str(uuid.uuid1()) + "_" +
img_names[i],
})
for i in range(len(transformed_images)):
image = transformed_images[i]
im = Image.fromarray(image["image"])
im.save(image["path"])
return {
"done":
True,
"images":
[SERVER_BASE_URL + image["path"] for image in transformed_images],
}
@app.get("/class_counts")
async def get_class_counts():
img_df = pd.DataFrame(columns=["image", "label"])
for dirname, _, filenames in os.walk("img_dataset"):
for filename in filenames:
class_id = dirname.split("/")[1]
img_df.loc[len(
img_df.index)] = [os.path.join(dirname, filename), class_id]
img_df = img_df.groupby(["label"]).size().reset_index(name="counts")
return {"class_counts": img_df.set_index("label").T.to_dict()}
@app.post("/balance_dataset")
async def balance_dataset(min_samples: Optional[int] = Form(None)):
img_df = pd.DataFrame(columns=["image", "label"])
done = False
for dirname, _, filenames in os.walk("img_dataset"):
for filename in filenames:
done = True
class_id = dirname.split("/")[1]
img_df.loc[len(
img_df.index)] = [os.path.join(dirname, filename), class_id]
if not done:
return {"done": done}
balance_obj = Balance(img_df, min_samples)
balanced_class_counts, balanced_img_paths = balance_obj.balance()
return {
"done": True,
"balanced_class_counts": balanced_class_counts,
"balanced_img_paths": balanced_img_paths,
}
@app.post("/split_dataset")
async def split_dataset(
response: Response,
split_percentage: Optional[float] = Form(None),
id: Optional[str] = Cookie(None),
):
if id is None:
id = str(uuid.uuid4())
response.set_cookie(key="id", value=id)
folder_actions.mkdir_p("images/" + id)
img_df =
|
pd.DataFrame(columns=["image", "label"])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
@author: Elie
"""
import os
import sys
import argparse
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
if os.name == 'posix' and "DISPLAY" not in os.environ:
mpl.use('Agg')
from matplotlib.patches import Rectangle
pd.options.mode.chained_assignment = None
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--segmentation_file", help="Path to seg file (tab seperated with header=[chr, start, end, CNt] each row describing a genomic segments position and copynumber)", dest="seg_path", type=str, required=True)
parser.add_argument("-c", "--centromere_file", help="Path to centromere file (http://hgdownload.cse.ucsc.edu/goldenPath/hg38/database/cytoBand.txt.gz)", dest="centro_path", type=str, required=True)
parser.add_argument("-o", "--output_path", help="Output directory for the output files, tsv and figures" ,dest="output_dir", type=str, required=True)
parser.add_argument("-sn", "--sample_name", help="All files will have prefix sample_name_", dest="sn", type=str, required=True)
args = parser.parse_args()
args.seg_path = os.path.expanduser(args.seg_path)
args.centro_path = os.path.expanduser(args.centro_path)
args.output_dir = os.path.expanduser(args.output_dir)
args.sn = str(args.sn)
# =============================================================================
# load data
# =============================================================================
"""use for testing"""
# seg_path = "C:/Users/Elie/Desktop/signature_apr2020/scripts/seggenerator_v3/test_cnmatrixgenerator/M1RP_ID1_MLN4_seg.txt"
# centro_path = "C:/Users/Elie/Desktop/signature_apr2020/scripts/seggenerator_v3/test_cnmatrixgenerator/cytoBandhg38.txt"
# out_path = "C:/Users/Elie/Desktop/signature_apr2020/scripts/seggenerator_v3/test_cnmatrixgenerator/copy_sig_matrix.tsv"
seg = pd.read_csv(args.seg_path, sep="\t", low_memory=False, dtype={"chromosome":str, "start.pos":np.int32, "end.pos":np.int32, "CNt":np.int32}) #load seg file
seg = seg[["chromosome", "start.pos", "end.pos", "CNt"]].rename(columns={"chromosome":"chr", "start.pos":"start", "end.pos":"end"})
centro = pd.read_csv(args.centro_path, sep="\t", low_memory=False, names=["chr", "start", "end", "band", "annot"], dtype={"chr":str, "start":np.int32, "end":np.int32, "band":str, "annot":str})
# =============================================================================
# Number of categories for each class
# =============================================================================
BCper10mb_categories = 4 # 0, 1, 2, >2
CN_categories = 9 # 0, 1, 2, 3, 4, 5, 6, 7, >7
CNCP_categories = 8 # 0, 1, 2, 3, 4, 5, 6, >6
BCperCA_categories = 6 # 0, 1, 2, 3, 4, >4
SegSize_categories = 11 # 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, >9
CNFraction_categories = 7 # 0, 1, 2, 3, 4, 5, >5
# =============================================================================
# make cytoband file into df with arm starts and ends
# inputs = path to cytoband file and segfile data frame
# outputs = chromosome arm list, chromosome list and chr_arm defining arm boundaries
# =============================================================================
'''Turn the cytoband file from http://hgdownload.cse.ucsc.edu/goldenPath/hg38/database/cytoBand.txt.gz
into a dataframe with columns ['chr', 'start', 'end', 'arm', 'chr_arm'].
The p arm starts at 0 and runs to the start of the centromere; the q arm starts at the
centromere and runs to the maximum position observed for that chromosome in the seg file.'''
def make_centro(seg,centro):
centro.columns = ["chr", "start", "end", "band", "annot"] #centromere position file
    # strip the "chr" prefix from chromosome names
centro["chr"] = centro["chr"].replace("chr", "", regex=True)
    # "acen" is the Giemsa stain value that marks centromeres;
    # drop all cytobands that aren't centromeres
centro = centro.query('annot == "acen"')
centro["arm"] = centro["band"].replace("[0-9.]", "", regex=True)
#p arm goes 0 to centromere
#q goes centromere to max seg position
centro.loc[centro["arm"] == "p", "start"] = 0
maxseg = seg.groupby("chr")["end"].max().reset_index()
centro =
|
pd.merge(centro, maxseg, how="left", on="chr")
|
pandas.merge
|
import numpy as np
import datajoint as dj
from PIL import ImageColor
from collections import Counter
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
import pandas as pd
from pipeline import experiment, ephys, psth, lab, histology, ccf, psth_foraging
from pipeline.plot.util import (_plot_with_sem, _extract_one_stim_dur,
_plot_stacked_psth_diff, _plot_avg_psth, _jointplot_w_hue)
from pipeline.plot import unit_psth
from pipeline.util import (_get_units_hemisphere, _get_trial_event_times,
_get_stim_onset_time, _get_clustering_method)
from . import PhotostimError
_plt_xmin = -3
_plt_xmax = 2
def plot_clustering_quality(probe_insertion, clustering_method=None, axs=None):
probe_insertion = probe_insertion.proj()
if clustering_method is None:
try:
clustering_method = _get_clustering_method(probe_insertion)
except ValueError as e:
raise ValueError(str(e) + '\nPlease specify one with the kwarg "clustering_method"')
amp, snr, spk_rate, isi_violation = (ephys.Unit * ephys.UnitStat * ephys.ProbeInsertion.InsertionLocation
& probe_insertion & {'clustering_method': clustering_method}).fetch(
'unit_amp', 'unit_snr', 'avg_firing_rate', 'isi_violation')
metrics = {'amp': amp,
'snr': snr,
'isi': np.array(isi_violation) * 100, # to percentage
'rate': np.array(spk_rate)}
label_mapper = {'amp': 'Amplitude',
'snr': 'Signal to noise ratio (SNR)',
'isi': 'ISI violation (%)',
'rate': 'Firing rate (spike/s)'}
fig = None
if axs is None:
fig, axs = plt.subplots(2, 3, figsize = (12, 8))
fig.subplots_adjust(wspace=0.4)
assert axs.size == 6
for (m1, m2), ax in zip(itertools.combinations(list(metrics.keys()), 2), axs.flatten()):
ax.plot(metrics[m1], metrics[m2], '.k')
ax.set_xlabel(label_mapper[m1])
ax.set_ylabel(label_mapper[m2])
# cosmetic
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
return fig
def plot_unit_characteristic(probe_insertion, clustering_method=None, axs=None):
probe_insertion = probe_insertion.proj()
if clustering_method is None:
try:
clustering_method = _get_clustering_method(probe_insertion)
except ValueError as e:
raise ValueError(str(e) + '\nPlease specify one with the kwarg "clustering_method"')
    if clustering_method in ('kilosort2',):
q_unit = (ephys.Unit * ephys.ProbeInsertion.InsertionLocation.proj('depth') * ephys.UnitStat
* lab.ElectrodeConfig.Electrode.proj() * lab.ProbeType.Electrode.proj('x_coord', 'y_coord')
& probe_insertion & {'clustering_method': clustering_method} & 'unit_quality != "all"').proj(
..., x='x_coord', y='y_coord')
else:
q_unit = (ephys.Unit * ephys.ProbeInsertion.InsertionLocation.proj('depth') * ephys.UnitStat
& probe_insertion & {'clustering_method': clustering_method} & 'unit_quality != "all"').proj(
..., x='unit_posx', y='unit_posy')
amp, snr, spk_rate, x, y, insertion_depth = q_unit.fetch(
'unit_amp', 'unit_snr', 'avg_firing_rate', 'x', 'y', 'depth')
metrics = pd.DataFrame(list(zip(*(amp/amp.max(), snr/snr.max(), spk_rate/spk_rate.max(),
x, insertion_depth.astype(float) + y))))
metrics.columns = ['amp', 'snr', 'rate', 'x', 'y']
# --- prepare for plotting
shank_count = (ephys.ProbeInsertion & probe_insertion).aggr(lab.ElectrodeConfig.Electrode * lab.ProbeType.Electrode,
shank_count='count(distinct shank)').fetch1('shank_count')
m_scale = get_m_scale(shank_count)
ymin = metrics.y.min() - 100
ymax = metrics.y.max() + 200
xmax = 1.3 * metrics.x.max()
xmin = -1/6*xmax
cosmetic = {'legend': None,
'linewidth': 1.75,
'alpha': 0.9,
'facecolor': 'none', 'edgecolor': 'k'}
# --- plot
fig = None
if axs is None:
fig, axs = plt.subplots(1, 3, figsize=(10, 8))
fig.subplots_adjust(wspace=0.6)
assert axs.size == 3
sns.scatterplot(data=metrics, x='x', y='y', s=metrics.amp*m_scale, ax=axs[0], **cosmetic)
sns.scatterplot(data=metrics, x='x', y='y', s=metrics.snr*m_scale, ax=axs[1], **cosmetic)
sns.scatterplot(data=metrics, x='x', y='y', s=metrics.rate*m_scale, ax=axs[2], **cosmetic)
# manually draw the legend
lg_ypos = ymax
data = pd.DataFrame({'x': [0.1*xmax, 0.4*xmax, 0.75*xmax], 'y': [lg_ypos, lg_ypos, lg_ypos],
'size_ratio': np.array([0.2, 0.5, 0.8])})
for ax, ax_maxval in zip(axs.flatten(), (amp.max(), snr.max(), spk_rate.max())):
sns.scatterplot(data=data, x='x', y='y', s=data.size_ratio*m_scale, ax=ax, **dict(cosmetic, facecolor='k'))
for _, r in data.iterrows():
ax.text(r['x']-4, r['y']+70, (r['size_ratio']*ax_maxval).astype(int))
# cosmetic
for title, ax in zip(('Amplitude', 'SNR', 'Firing rate'), axs.flatten()):
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_title(title)
ax.set_xlim((xmin, xmax))
ax.plot([0.5*xmin, xmax], [lg_ypos-80, lg_ypos-80], '-k')
ax.set_ylim((ymin, ymax + 150))
return fig
def plot_unit_selectivity(probe_insertion, clustering_method=None, axs=None):
probe_insertion = probe_insertion.proj()
if clustering_method is None:
try:
clustering_method = _get_clustering_method(probe_insertion)
except ValueError as e:
raise ValueError(str(e) + '\nPlease specify one with the kwarg "clustering_method"')
    if clustering_method in ('kilosort2',):
q_unit = (psth.PeriodSelectivity * ephys.Unit * ephys.ProbeInsertion.InsertionLocation
* lab.ElectrodeConfig.Electrode.proj() * lab.ProbeType.Electrode.proj('x_coord', 'y_coord')
* experiment.Period & probe_insertion & {'clustering_method': clustering_method}
& 'period_selectivity != "non-selective"').proj(..., x='unit_posx', y='unit_posy').proj(
..., x='x_coord', y='y_coord')
else:
q_unit = (psth.PeriodSelectivity * ephys.Unit * ephys.ProbeInsertion.InsertionLocation
* experiment.Period & probe_insertion & {'clustering_method': clustering_method}
& 'period_selectivity != "non-selective"').proj(..., x='unit_posx', y='unit_posy')
attr_names = ['unit', 'period', 'period_selectivity', 'contra_firing_rate',
'ipsi_firing_rate', 'x', 'y', 'depth']
selective_units = q_unit.fetch(*attr_names)
selective_units = pd.DataFrame(selective_units).T
selective_units.columns = attr_names
    selective_units.period_selectivity = selective_units.period_selectivity.astype('category')
# --- account for insertion depth (manipulator depth)
selective_units.y = selective_units.depth.values.astype(float) + selective_units.y
# --- get ipsi vs. contra firing rate difference
f_rate_diff = np.abs(selective_units.ipsi_firing_rate - selective_units.contra_firing_rate)
selective_units['f_rate_diff'] = f_rate_diff / f_rate_diff.max()
# --- prepare for plotting
shank_count = (ephys.ProbeInsertion & probe_insertion).aggr(lab.ElectrodeConfig.Electrode * lab.ProbeType.Electrode,
shank_count='count(distinct shank)').fetch1('shank_count')
m_scale = get_m_scale(shank_count)
cosmetic = {'legend': None,
'linewidth': 0.0001}
ymin = selective_units.y.min() - 100
ymax = selective_units.y.max() + 100
xmax = 1.3 * selective_units.x.max()
xmin = -1/6*xmax
    # a bit of a hack to get the 'open circle' marker
pts = np.linspace(0, np.pi * 2, 24)
circ = np.c_[np.sin(pts) / 2, -np.cos(pts) / 2]
vert = np.r_[circ, circ[::-1] * .7]
open_circle = mpl.path.Path(vert)
# --- plot
fig = None
if axs is None:
fig, axs = plt.subplots(1, 3, figsize=(10, 8))
fig.subplots_adjust(wspace=0.6)
assert axs.size == 3
for (title, df), ax in zip(((p, selective_units[selective_units.period == p])
for p in ('sample', 'delay', 'response')), axs):
sns.scatterplot(data=df, x='x', y='y',
s=df.f_rate_diff.values.astype(float)*m_scale,
hue='period_selectivity', marker=open_circle,
palette={'contra-selective': 'b', 'ipsi-selective': 'r'},
ax=ax, **cosmetic)
contra_p = (df.period_selectivity == 'contra-selective').sum() / len(df) * 100
# cosmetic
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_title(f'{title}\n% contra: {contra_p:.2f}\n% ipsi: {100-contra_p:.2f}')
ax.set_xlim((xmin, xmax))
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_ylim((ymin, ymax))
return fig
def plot_unit_bilateral_photostim_effect(probe_insertion, clustering_method=None, axs=None):
probe_insertion = probe_insertion.proj()
if not (psth.TrialCondition().get_trials('all_noearlylick_both_alm_stim') & probe_insertion):
raise PhotostimError('No Bilateral ALM Photo-stimulation present')
if clustering_method is None:
try:
clustering_method = _get_clustering_method(probe_insertion)
except ValueError as e:
raise ValueError(str(e) + '\nPlease specify one with the kwarg "clustering_method"')
dv_loc = (ephys.ProbeInsertion.InsertionLocation & probe_insertion).fetch1('depth')
no_stim_cond = (psth.TrialCondition
& {'trial_condition_name':
'all_noearlylick_nostim'}).fetch1('KEY')
bi_stim_cond = (psth.TrialCondition
& {'trial_condition_name':
'all_noearlylick_both_alm_stim'}).fetch1('KEY')
units = ephys.Unit & probe_insertion & {'clustering_method': clustering_method} & 'unit_quality != "all"'
metrics = pd.DataFrame(columns=['unit', 'x', 'y', 'frate_change'])
# get photostim onset and duration
stim_durs = np.unique((experiment.Photostim & experiment.PhotostimEvent
* psth.TrialCondition().get_trials('all_noearlylick_both_alm_stim')
& probe_insertion).fetch('duration'))
stim_dur = _extract_one_stim_dur(stim_durs)
stim_time = _get_stim_onset_time(units, 'all_noearlylick_both_alm_stim')
# XXX: could be done with 1x fetch+join
for u_idx, unit in enumerate(units.fetch('KEY', order_by='unit')):
        if clustering_method in ('kilosort2',):
x, y = (ephys.Unit * lab.ElectrodeConfig.Electrode.proj()
* lab.ProbeType.Electrode.proj('x_coord', 'y_coord') & unit).fetch1('x_coord', 'y_coord')
else:
x, y = (ephys.Unit & unit).fetch1('unit_posx', 'unit_posy')
# obtain unit psth per trial, for all nostim and bistim trials
nostim_trials = ephys.Unit.TrialSpikes & unit & psth.TrialCondition.get_trials(no_stim_cond['trial_condition_name'])
bistim_trials = ephys.Unit.TrialSpikes & unit & psth.TrialCondition.get_trials(bi_stim_cond['trial_condition_name'])
nostim_psths, nostim_edge = psth.compute_unit_psth(unit, nostim_trials.fetch('KEY'), per_trial=True)
bistim_psths, bistim_edge = psth.compute_unit_psth(unit, bistim_trials.fetch('KEY'), per_trial=True)
# compute the firing rate difference between contra vs. ipsi within the stimulation time window
ctrl_frate = np.array([nostim_psth[np.logical_and(nostim_edge >= stim_time,
nostim_edge <= stim_time + stim_dur)].mean()
for nostim_psth in nostim_psths])
stim_frate = np.array([bistim_psth[np.logical_and(bistim_edge >= stim_time,
bistim_edge <= stim_time + stim_dur)].mean()
for bistim_psth in bistim_psths])
frate_change = (stim_frate.mean() - ctrl_frate.mean()) / ctrl_frate.mean()
frate_change = abs(frate_change) if frate_change < 0 else 0.0001
metrics.loc[u_idx] = (int(unit['unit']), x, float(dv_loc) + y, frate_change)
metrics.frate_change = metrics.frate_change / metrics.frate_change.max()
# --- prepare for plotting
shank_count = (ephys.ProbeInsertion & probe_insertion).aggr(lab.ElectrodeConfig.Electrode * lab.ProbeType.Electrode,
shank_count='count(distinct shank)').fetch1('shank_count')
m_scale = get_m_scale(shank_count)
fig = None
if axs is None:
fig, axs = plt.subplots(1, 1, figsize=(4, 8))
xmax = 1.3 * metrics.x.max()
xmin = -1/6*xmax
cosmetic = {'legend': None,
'linewidth': 1.75,
'alpha': 0.9,
'facecolor': 'none', 'edgecolor': 'k'}
sns.scatterplot(data=metrics, x='x', y='y', s=metrics.frate_change*m_scale,
ax=axs, **cosmetic)
axs.spines['right'].set_visible(False)
axs.spines['top'].set_visible(False)
axs.set_title('% change')
axs.set_xlim((xmin, xmax))
return fig
def plot_pseudocoronal_slice(probe_insertion, shank_no=1):
# ---- Electrode sites ----
annotated_electrodes = (lab.ElectrodeConfig.Electrode * lab.ProbeType.Electrode
* ephys.ProbeInsertion
* histology.ElectrodeCCFPosition.ElectrodePosition
& probe_insertion & {'shank': shank_no})
electrode_coords = np.array(list(zip(*annotated_electrodes.fetch(
'ccf_z', 'ccf_y', 'ccf_x', order_by='ccf_y')))) # (AP, DV, ML)
probe_track_coords = np.array(list(zip(*(histology.LabeledProbeTrack.Point
& probe_insertion & {'shank': shank_no}).fetch(
'ccf_z', 'ccf_y', 'ccf_x', order_by='ccf_y'))))
voxel_res = ccf.CCFLabel.CCF_R3_20UM_RESOLUTION
lr_max, dv_max, _ = ccf.get_ccf_xyz_max()
pseudocoronal_points, shank_ccfs = histology.retrieve_pseudocoronal_slice(probe_insertion, shank_no)
dv_pts, lr_pts, ap_pts, color_codes = pseudocoronal_points.T
dv_pts = dv_pts.astype(int)
lr_pts = lr_pts.astype(int)
color_codes = color_codes.astype(str)
# ---- paint annotation color code ----
coronal_slice = np.full((dv_max + 1, lr_max + 1, 3), np.nan)
for color in set(color_codes):
matched_ind = np.where(color_codes == color)[0]
dv_ind = dv_pts[matched_ind] # rows
lr_ind = lr_pts[matched_ind] # cols
try:
c_rgb = ImageColor.getcolor("#" + color, "RGB")
except ValueError as e:
print(str(e))
continue
coronal_slice[dv_ind, lr_ind, :] = np.full((len(matched_ind), 3), c_rgb)
# ---- paint the interpolated track of this probe/shank in gray ----
in_probe_range = np.logical_and(shank_ccfs[:, 1] >= probe_track_coords[:, 1].min(),
shank_ccfs[:, 1] <= probe_track_coords[:, 1].max())
in_electrode_range = np.logical_and(shank_ccfs[:, 1] >= electrode_coords[:, 1].min(),
shank_ccfs[:, 1] <= electrode_coords[:, 1].max())
tracks_coords = shank_ccfs[np.logical_and(in_probe_range, ~in_electrode_range), :]
coronal_slice[tracks_coords[:, 1], tracks_coords[:, 0], :] = np.full(
(tracks_coords.shape[0], 3), ImageColor.getcolor("#FFFFFF", "RGB"))
# ---- paint electrode sites on this probe/shank in black ----
coronal_slice[electrode_coords[:, 1], electrode_coords[:, 2], :] = np.full(
(electrode_coords.shape[0], 3), ImageColor.getcolor("#080808", "RGB"))
# ---- downsample the 2D slice to the voxel resolution ----
coronal_slice = coronal_slice[::voxel_res, ::voxel_res, :]
# paint outside region white
nan_r, nan_c = np.where(np.nansum(coronal_slice, axis=2) == 0)
coronal_slice[nan_r, nan_c, :] = np.full((len(nan_r), 3), ImageColor.getcolor("#FFFFFF", "RGB"))
# ---- plot ----
fig, ax = plt.subplots(1, 1)
ax.imshow(coronal_slice.astype(np.uint8), extent=[0, lr_max, dv_max, 0])
ax.invert_xaxis()
ax.set_xticks([])
ax.set_yticks([])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
return fig
def plot_driftmap(probe_insertion, clustering_method=None, shank_no=1):
probe_insertion = probe_insertion.proj()
assert histology.InterpolatedShankTrack & probe_insertion
if clustering_method is None:
try:
clustering_method = _get_clustering_method(probe_insertion)
except ValueError as e:
raise ValueError(str(e) + '\nPlease specify one with the kwarg "clustering_method"')
units = (ephys.Unit * lab.ElectrodeConfig.Electrode
& probe_insertion & {'clustering_method': clustering_method}
& 'unit_quality != "all"')
units = (units.proj('spike_times', 'spike_depths', 'unit_posy')
* ephys.ProbeInsertion.proj()
* lab.ProbeType.Electrode.proj('shank') & {'shank': shank_no})
# ---- ccf region ----
annotated_electrodes = (lab.ElectrodeConfig.Electrode * lab.ProbeType.Electrode
* ephys.ProbeInsertion
* histology.ElectrodeCCFPosition.ElectrodePosition
* ccf.CCFAnnotation * ccf.CCFBrainRegion.proj(..., annotation='region_name')
& probe_insertion & {'shank': shank_no})
pos_y, ccf_y, color_code = annotated_electrodes.fetch(
'y_coord', 'ccf_y', 'color_code', order_by='y_coord DESC')
# CCF position of most ventral recording site
last_electrode_site = np.array((histology.InterpolatedShankTrack.DeepestElectrodePoint
& probe_insertion & {'shank': shank_no}).fetch1(
'ccf_x', 'ccf_y', 'ccf_z'))
# CCF position of the brain surface where this shank crosses
brain_surface_site = np.array((histology.InterpolatedShankTrack.BrainSurfacePoint
& probe_insertion & {'shank': shank_no}).fetch1(
'ccf_x', 'ccf_y', 'ccf_z'))
# CCF position of most ventral recording site, with respect to the brain surface
y_ref = -np.linalg.norm(last_electrode_site - brain_surface_site)
    # ---- spikes ----
spike_times, spike_depths = units.fetch('spike_times', 'spike_depths', order_by='unit')
spike_times = np.hstack(spike_times)
spike_depths = np.hstack(spike_depths)
# histogram
# time_res = 10 # time resolution: 1sec
# depth_res = 10 # depth resolution: 10um
#
# spike_bins = np.arange(0, spike_times.max() + time_res, time_res)
# depth_bins = np.arange(spike_depths.min() - depth_res, spike_depths.max() + depth_res, depth_res)
# time-depth 2D histogram
time_bin_count = 1000
depth_bin_count = 200
spike_bins = np.linspace(0, spike_times.max(), time_bin_count)
depth_bins = np.linspace(0, np.nanmax(spike_depths), depth_bin_count)
spk_count, spk_edges, depth_edges = np.histogram2d(spike_times, spike_depths, bins=[spike_bins, depth_bins])
spk_rates = spk_count / np.mean(np.diff(spike_bins))
spk_edges = spk_edges[:-1]
depth_edges = depth_edges[:-1]
# region colorcode, by depths
binned_hexcodes = []
y_spacing = np.abs(np.nanmedian(np.where(np.diff(pos_y)==0, np.nan, np.diff(pos_y))))
anno_depth_bins = np.arange(0, depth_bins[-1], y_spacing)
for s, e in zip(anno_depth_bins[:-1], anno_depth_bins[1:]):
hexcodes = color_code[np.logical_and(pos_y > s, pos_y <= e)]
if len(hexcodes):
binned_hexcodes.append(Counter(hexcodes).most_common()[0][0])
else:
binned_hexcodes.append('FFFFFF')
region_rgba = np.array([list(ImageColor.getcolor("#" + chex, "RGBA")) for chex in binned_hexcodes])
region_rgba = np.repeat(region_rgba[:, np.newaxis, :], 10, axis=1)
# canvas setup
fig = plt.figure(figsize=(16, 8))
grid = plt.GridSpec(12, 12)
ax_main = plt.subplot(grid[1:, 0:9])
ax_cbar = plt.subplot(grid[0, 0:9])
ax_spkcount = plt.subplot(grid[1:, 9:11])
ax_anno = plt.subplot(grid[1:, 11:])
# -- plot main --
im = ax_main.imshow(spk_rates.T, aspect='auto', cmap='gray_r',
extent=[spike_bins[0], spike_bins[-1], depth_bins[-1], depth_bins[0]])
# cosmetic
ax_main.invert_yaxis()
ax_main.set_xlabel('Time (sec)')
ax_main.set_ylabel('Distance from tip sites (um)')
ax_main.set_ylim(depth_edges[0], depth_edges[-1])
ax_main.spines['right'].set_visible(False)
ax_main.spines['top'].set_visible(False)
cb = fig.colorbar(im, cax=ax_cbar, orientation='horizontal')
cb.outline.set_visible(False)
cb.ax.xaxis.tick_top()
cb.set_label('Firing rate (Hz)')
cb.ax.xaxis.set_label_position('top')
# -- plot spikecount --
ax_spkcount.plot(spk_count.sum(axis=0) / 10e3, depth_edges, 'k')
ax_spkcount.set_xlabel('Spike count (x$10^3$)')
ax_spkcount.set_yticks([])
ax_spkcount.set_ylim(depth_edges[0], depth_edges[-1])
ax_spkcount.spines['right'].set_visible(False)
ax_spkcount.spines['top'].set_visible(False)
ax_spkcount.spines['bottom'].set_visible(False)
ax_spkcount.spines['left'].set_visible(False)
# -- plot colored region annotation
ax_anno.imshow(region_rgba, aspect='auto',
extent=[0, 10, (anno_depth_bins[-1] + y_ref) / 1000, (anno_depth_bins[0] + y_ref) / 1000])
ax_anno.invert_yaxis()
ax_anno.spines['right'].set_visible(False)
ax_anno.spines['top'].set_visible(False)
ax_anno.spines['bottom'].set_visible(False)
ax_anno.spines['left'].set_visible(False)
ax_anno.set_xticks([])
ax_anno.yaxis.tick_right()
ax_anno.set_ylabel('Depth in the brain (mm)')
ax_anno.yaxis.set_label_position('right')
return fig
def plot_stacked_contra_ipsi_psth(units, axs=None):
units = units.proj()
# get event start times: sample, delay, response
period_names, period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, 'good_noearlylick_hit')
hemi = _get_units_hemisphere(units)
conds_i = (psth.TrialCondition
& {'trial_condition_name':
'good_noearlylick_left_hit' if hemi == 'left' else 'good_noearlylick_right_hit'}).fetch1('KEY')
conds_c = (psth.TrialCondition
& {'trial_condition_name':
'good_noearlylick_right_hit' if hemi == 'left' else 'good_noearlylick_left_hit'}).fetch1('KEY')
sel_i = (ephys.Unit * psth.UnitSelectivity
& 'unit_selectivity = "ipsi-selective"' & units)
sel_c = (ephys.Unit * psth.UnitSelectivity
& 'unit_selectivity = "contra-selective"' & units)
# ipsi selective ipsi trials
psth_is_it = (psth.UnitPsth * sel_i.proj('unit_posy') & conds_i).fetch(order_by='unit_posy desc')
# ipsi selective contra trials
psth_is_ct = (psth.UnitPsth * sel_i.proj('unit_posy') & conds_c).fetch(order_by='unit_posy desc')
# contra selective contra trials
psth_cs_ct = (psth.UnitPsth * sel_c.proj('unit_posy') & conds_c).fetch(order_by='unit_posy desc')
# contra selective ipsi trials
psth_cs_it = (psth.UnitPsth * sel_c.proj('unit_posy') & conds_i).fetch(order_by='unit_posy desc')
fig = None
if axs is None:
fig, axs = plt.subplots(1, 2, figsize=(20, 20))
assert axs.size == 2
_plot_stacked_psth_diff(psth_cs_ct, psth_cs_it, ax=axs[0], vlines=period_starts, flip=True)
axs[0].set_title('Contra-selective Units')
axs[0].set_ylabel('Unit (by depth)')
axs[0].set_xlabel('Time to go (s)')
_plot_stacked_psth_diff(psth_is_it, psth_is_ct, ax=axs[1], vlines=period_starts)
axs[1].set_title('Ipsi-selective Units')
axs[1].set_ylabel('Unit (by depth)')
axs[1].set_xlabel('Time to go (s)')
return fig
def plot_avg_contra_ipsi_psth(units, axs=None):
units = units.proj()
# get event start times: sample, delay, response
period_names, period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, 'good_noearlylick_hit')
hemi = _get_units_hemisphere(units)
good_unit = ephys.Unit & 'unit_quality != "all"'
conds_i = (psth.TrialCondition
& {'trial_condition_name':
'good_noearlylick_left_hit' if hemi == 'left' else 'good_noearlylick_right_hit'}).fetch('KEY')
conds_c = (psth.TrialCondition
& {'trial_condition_name':
'good_noearlylick_right_hit' if hemi == 'left' else 'good_noearlylick_left_hit'}).fetch('KEY')
sel_i = (ephys.Unit * psth.UnitSelectivity
& 'unit_selectivity = "ipsi-selective"' & units)
sel_c = (ephys.Unit * psth.UnitSelectivity
& 'unit_selectivity = "contra-selective"' & units)
psth_is_it = (((psth.UnitPsth & conds_i)
* ephys.Unit.proj('unit_posy'))
& good_unit.proj() & sel_i.proj()).fetch(
'unit_psth', order_by='unit_posy desc')
psth_is_ct = (((psth.UnitPsth & conds_c)
* ephys.Unit.proj('unit_posy'))
& good_unit.proj() & sel_i.proj()).fetch(
'unit_psth', order_by='unit_posy desc')
psth_cs_ct = (((psth.UnitPsth & conds_c)
* ephys.Unit.proj('unit_posy'))
& good_unit.proj() & sel_c.proj()).fetch(
'unit_psth', order_by='unit_posy desc')
psth_cs_it = (((psth.UnitPsth & conds_i)
* ephys.Unit.proj('unit_posy'))
& good_unit.proj() & sel_c.proj()).fetch(
'unit_psth', order_by='unit_posy desc')
fig = None
if axs is None:
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
assert axs.size == 2
_plot_avg_psth(psth_cs_it, psth_cs_ct, period_starts, axs[0],
'Contra-selective')
_plot_avg_psth(psth_is_it, psth_is_ct, period_starts, axs[1],
'Ipsi-selective')
ymax = max([ax.get_ylim()[1] for ax in axs])
for ax in axs:
ax.set_ylim((0, ymax))
return fig
def plot_psth_photostim_effect(units, condition_name_kw=['both_alm'], axs=None):
"""
For the specified `units`, plot PSTH comparison between stim vs. no-stim with left/right trial instruction
The stim location (or other appropriate search keywords) can be specified in `condition_name_kw` (default: both ALM)
"""
units = units.proj()
fig = None
if axs is None:
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
assert axs.size == 2
hemi = _get_units_hemisphere(units)
# no photostim:
psth_n_l = psth.TrialCondition.get_cond_name_from_keywords(['_nostim', '_left'])[0]
psth_n_r = psth.TrialCondition.get_cond_name_from_keywords(['_nostim', '_right'])[0]
psth_n_l = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name': psth_n_l} & 'unit_psth is not NULL').fetch('unit_psth')
psth_n_r = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name': psth_n_r} & 'unit_psth is not NULL').fetch('unit_psth')
# with photostim
psth_s_l = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim_left'])[0]
psth_s_r = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim_right'])[0]
psth_s_l = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name': psth_s_l} & 'unit_psth is not NULL').fetch('unit_psth')
psth_s_r = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name': psth_s_r} & 'unit_psth is not NULL').fetch('unit_psth')
# get event start times: sample, delay, response
period_names, period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, 'good_noearlylick_hit')
# get photostim onset and duration
stim_trial_cond_name = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim'])[0]
stim_durs = np.unique((experiment.Photostim & experiment.PhotostimEvent
* psth.TrialCondition().get_trials(stim_trial_cond_name)
& units).fetch('duration'))
stim_dur = _extract_one_stim_dur(stim_durs)
stim_time = _get_stim_onset_time(units, stim_trial_cond_name)
if hemi == 'left':
psth_s_i = psth_s_l
psth_n_i = psth_n_l
psth_s_c = psth_s_r
psth_n_c = psth_n_r
else:
psth_s_i = psth_s_r
psth_n_i = psth_n_r
psth_s_c = psth_s_l
psth_n_c = psth_n_l
_plot_avg_psth(psth_n_i, psth_n_c, period_starts, axs[0],
'Control')
_plot_avg_psth(psth_s_i, psth_s_c, period_starts, axs[1],
'Photostim')
# cosmetic
ymax = max([ax.get_ylim()[1] for ax in axs])
for ax in axs:
ax.set_ylim((0, ymax))
ax.set_xlim([_plt_xmin, _plt_xmax])
# add shaded bar for photostim
axs[1].axvspan(stim_time, stim_time + stim_dur, alpha=0.3, color='royalblue')
return fig
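# Illustrative call (assumes `insertion_key` identifies a single ephys.ProbeInsertion):
#     units = ephys.Unit & insertion_key
#     plot_psth_photostim_effect(units, condition_name_kw=['both_alm'])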
def plot_coding_direction(units, time_period=None, label=None, axs=None):
# get event start times: sample, delay, response
period_names, period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, 'good_noearlylick_hit')
_, proj_contra_trial, proj_ipsi_trial, time_stamps, _ = psth.compute_CD_projected_psth(
units.fetch('KEY'), time_period=time_period)
fig = None
if axs is None:
fig, axs = plt.subplots(1, 1, figsize=(8, 6))
# plot
_plot_with_sem(proj_contra_trial, time_stamps, ax=axs, c='b')
_plot_with_sem(proj_ipsi_trial, time_stamps, ax=axs, c='r')
for x in period_starts:
axs.axvline(x=x, linestyle = '--', color = 'k')
# cosmetic
axs.spines['right'].set_visible(False)
axs.spines['top'].set_visible(False)
axs.set_ylabel('CD projection (a.u.)')
axs.set_xlabel('Time (s)')
if label:
axs.set_title(label)
return fig
def plot_paired_coding_direction(unit_g1, unit_g2, labels=None, time_period=None):
"""
Plot trial-to-trial CD-endpoint correlation between CD-projected trial-psth from two unit-groups (e.g. two brain regions)
Note: coding direction is calculated on selective units, contra vs. ipsi, within the specified time_period
"""
_, proj_contra_trial_g1, proj_ipsi_trial_g1, time_stamps, unit_g1_hemi = psth.compute_CD_projected_psth(
unit_g1.fetch('KEY'), time_period=time_period)
_, proj_contra_trial_g2, proj_ipsi_trial_g2, time_stamps, unit_g2_hemi = psth.compute_CD_projected_psth(
unit_g2.fetch('KEY'), time_period=time_period)
# get event start times: sample, delay, response
period_names, period_starts = _get_trial_event_times(['sample', 'delay', 'go'], unit_g1, 'good_noearlylick_hit')
if labels:
assert len(labels) == 2
else:
labels = ('unit group 1', 'unit group 2')
# plot projected trial-psth
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
_plot_with_sem(proj_contra_trial_g1, time_stamps, ax=axs[0], c='b')
_plot_with_sem(proj_ipsi_trial_g1, time_stamps, ax=axs[0], c='r')
_plot_with_sem(proj_contra_trial_g2, time_stamps, ax=axs[1], c='b')
_plot_with_sem(proj_ipsi_trial_g2, time_stamps, ax=axs[1], c='r')
# cosmetic
for ax, label in zip(axs, labels):
for x in period_starts:
ax.axvline(x=x, linestyle = '--', color = 'k')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel('CD projection (a.u.)')
ax.set_xlabel('Time (s)')
ax.set_title(label)
# plot trial CD-endpoint correlation - if 2 unit-groups are from 2 hemispheres,
# then contra-ipsi definition is based on the first group
p_start, p_end = time_period
contra_cdend_1 = proj_contra_trial_g1[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
ipsi_cdend_1 = proj_ipsi_trial_g1[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
    if unit_g1_hemi == unit_g2_hemi:
contra_cdend_2 = proj_contra_trial_g2[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
ipsi_cdend_2 = proj_ipsi_trial_g2[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
else:
contra_cdend_2 = proj_ipsi_trial_g2[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
ipsi_cdend_2 = proj_contra_trial_g2[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
c_df =
|
pd.DataFrame([contra_cdend_1, contra_cdend_2])
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from xarray.core.indexes import PandasIndex, PandasMultiIndex, _asarray_tuplesafe
from xarray.core.variable import IndexVariable
def test_asarray_tuplesafe() -> None:
res = _asarray_tuplesafe(("a", 1))
assert isinstance(res, np.ndarray)
assert res.ndim == 0
assert res.item() == ("a", 1)
res = _asarray_tuplesafe([(0,), (1,)])
assert res.shape == (2,)
assert res[0] == (0,)
assert res[1] == (1,)
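# Context: a plain np.asarray(("a", 1)) would split the tuple into its elements, whereas
# _asarray_tuplesafe wraps it as a single 0-d object element so tuple labels survive
# label-based indexing.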
class TestPandasIndex:
def test_constructor(self) -> None:
pd_idx = pd.Index([1, 2, 3])
index = PandasIndex(pd_idx, "x")
assert index.index is pd_idx
assert index.dim == "x"
def test_from_variables(self) -> None:
var = xr.Variable(
"x", [1, 2, 3], attrs={"unit": "m"}, encoding={"dtype": np.int32}
)
index, index_vars = PandasIndex.from_variables({"x": var})
xr.testing.assert_identical(var.to_index_variable(), index_vars["x"])
assert index.dim == "x"
assert index.index.equals(index_vars["x"].to_index())
var2 = xr.Variable(("x", "y"), [[1, 2, 3], [4, 5, 6]])
with pytest.raises(ValueError, match=r".*only accepts one variable.*"):
PandasIndex.from_variables({"x": var, "foo": var2})
with pytest.raises(
ValueError, match=r".*only accepts a 1-dimensional variable.*"
):
PandasIndex.from_variables({"foo": var2})
def test_from_pandas_index(self) -> None:
pd_idx = pd.Index([1, 2, 3], name="foo")
index, index_vars = PandasIndex.from_pandas_index(pd_idx, "x")
assert index.dim == "x"
assert index.index is pd_idx
assert index.index.name == "foo"
xr.testing.assert_identical(index_vars["foo"], IndexVariable("x", [1, 2, 3]))
# test no name set for pd.Index
pd_idx.name = None
index, index_vars = PandasIndex.from_pandas_index(pd_idx, "x")
assert "x" in index_vars
assert index.index is not pd_idx
assert index.index.name == "x"
    def test_to_pandas_index(self) -> None:
pd_idx = pd.Index([1, 2, 3], name="foo")
index = PandasIndex(pd_idx, "x")
assert index.to_pandas_index() is pd_idx
def test_query(self) -> None:
# TODO: add tests that aren't just for edge cases
index = PandasIndex(pd.Index([1, 2, 3]), "x")
with pytest.raises(KeyError, match=r"not all values found"):
index.query({"x": [0]})
with pytest.raises(KeyError):
index.query({"x": 0})
with pytest.raises(ValueError, match=r"does not have a MultiIndex"):
index.query({"x": {"one": 0}})
def test_query_datetime(self) -> None:
index = PandasIndex(
pd.to_datetime(["2000-01-01", "2001-01-01", "2002-01-01"]), "x"
)
actual = index.query({"x": "2001-01-01"})
expected = (1, None)
assert actual == expected
actual = index.query({"x": index.to_pandas_index().to_numpy()[1]})
assert actual == expected
def test_query_unsorted_datetime_index_raises(self) -> None:
index = PandasIndex(pd.to_datetime(["2001", "2000", "2002"]), "x")
with pytest.raises(KeyError):
# pandas will try to convert this into an array indexer. We should
# raise instead, so we can be sure the result of indexing with a
# slice is always a view.
index.query({"x": slice("2001", "2002")})
def test_equals(self) -> None:
index1 = PandasIndex([1, 2, 3], "x")
index2 = PandasIndex([1, 2, 3], "x")
assert index1.equals(index2) is True
def test_union(self) -> None:
index1 = PandasIndex([1, 2, 3], "x")
index2 = PandasIndex([4, 5, 6], "y")
actual = index1.union(index2)
assert actual.index.equals(
|
pd.Index([1, 2, 3, 4, 5, 6])
|
pandas.Index
|
import json
import pandas as pd
from datetime import datetime
import codecs
import csv
import pymongo
# Connect to the database
client = pymongo.MongoClient("mongodb://share:<EMAIL>:27017/linux_commits_analysis")
db = client["linux_commits_analysis"]
users = db["users"]
commits = db["commits"]
# Generate usersfile.csv
usersCursor = users.find()
with codecs.open('csvfolder/usersfile.csv', 'w', 'utf-8') as csvfile:
writer = csv.writer(csvfile)
    # write the column names first
writer.writerow(["_id", "user", "avatar", "html"])
    # use writerows to write multiple rows at once
for data in usersCursor:
writer.writerows([[data["_id"], data["user"], data["avatar"], data["html"]]])
# Generate datefile.csv
commitsCursor = commits.find()
with codecs.open('csvfolder/datefile.csv', 'w', 'utf-8') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["author_date", "committer_date"])
for data in commitsCursor:
data["author_date"] = data["author_date"].strftime('%Y-%m-%d %H:%M:%S')
data["committer_date"] = datetime.strftime(data["committer_date"], '%Y-%m-%d %H:%M:%S')
writer.writerows([[data["author_date"], data["committer_date"]]])
# Generate authorcommitfile.csv
commitsCursor = commits.find()
with codecs.open('csvfolder/authorcommitfile.csv', 'w', 'utf-8') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["author_login", "committer_login"])
for data in commitsCursor:
if data["author_login"] == None:
data["author_login"] = str(data["author_login"])
if data["committer_login"] == None:
data["committer_login"] = str(data["author_login"])
writer.writerows([[data["author_login"], data["committer_login"]]])
# Generate temp.csv
author_commit = pd.read_csv("csvfolder/authorcommitfile.csv")
author_commit_df = pd.DataFrame(author_commit)
def getAuthorCount(login):
    return list(author_commit_df["author_login"]).count(login)
def getCommitCount(login):
    return list(author_commit_df["committer_login"]).count(login)
with codecs.open('csvfolder/temp.csv', 'w', 'utf-8') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["user", "author_login", "committer_login"])
    for _, row in author_commit_df.iterrows():
authorCount = getAuthorCount(row["author_login"])
commitCount = getCommitCount(row["committer_login"])
writer.writerows([[row["author_login"], authorCount, commitCount]])
# Generate user.json
newUser = pd.read_csv('csvfolder/temp.csv')
users =
|
pd.read_csv('csvfolder/usersfile.csv')
|
pandas.read_csv
|
import os
import re
import warnings
from uuid import uuid4, UUID
import shapely.geometry
import geopandas as gpd
import pandas as pd
import numpy as np
from geojson import LineString, Point, Polygon, Feature, FeatureCollection, MultiPolygon
try:
import simplejson as json
except ImportError:
import json
from .config import get_settings
from ..static import UriType
def _abs_path(path, mkdir=True):
"""Gets the absolute path for a file to be within the Quest directory,
and will create a directory of that filename.
Args:
path (string): A string that is a filename.
mkdir (bool): A boolean if the user wants to create the directory.
Returns:
A string of an absolute path with a file from somewhere with in the Quest directory.
"""
if not os.path.isabs(path):
path = os.path.join(get_quest_dir(), path)
if mkdir:
os.makedirs(path, exist_ok=True)
return path
def bbox2poly(x1, y1, x2, y2, reverse_order=False, as_geojson=False, as_shapely=False):
"""Converts a bounding box to a polygon.
Args:
x1 (int): An int for the first x coordinate.
y1 (int): An int for the first y coordinate.
x2 (int): An int for the second x coordinate.
y2 (int): An int for the second y coordinate.
reverse_order (bool): A boolean to switch the order of the x and y coordinates.
as_geojson (bool): A bool to convert the polygon to a geojson object.
as_shapely (bool): A bool to convert the polygon to a shapely object.
Returns:
        If both as_geojson and as_shapely are False, a plain list of coordinates is returned.
        If both are True, a shapely object is returned (as_shapely takes precedence).
        If only as_geojson is True, a geojson object is returned.
        If only as_shapely is True, a shapely object is returned.
"""
if reverse_order:
x1, y1 = y1, x1
x2, y2 = y2, x2
xmin, xmax = [float(x1), float(x2)]
ymin, ymax = [float(y1), float(y2)]
poly = list([[xmin, ymin], [xmin, ymax], [xmax, ymax], [xmax, ymin]])
poly.append(poly[0])
if not (as_geojson or as_shapely):
return poly
if as_geojson:
polygon = Polygon
multi_polygon = MultiPolygon
if as_shapely:
polygon = shapely.geometry.Polygon
multi_polygon = shapely.geometry.MultiPolygon
xmin2 = xmax2 = None
if xmin < -180:
xmin2 = 360 + xmin
xmin = -180
if xmax > 180:
xmax2 = xmax - 360
xmax = 180
if xmin2 is None and xmax2 is None:
return polygon(poly)
# else bbox spans 180 longitude so create multipolygon
poly1 = list([[xmin, ymin], [xmin, ymax], [xmax, ymax], [xmax, ymin]])
poly1.append(poly1[0])
xmin = xmin2 or -180
xmax = xmax2 or 180
poly2 = list([[xmin, ymin], [xmin, ymax], [xmax, ymax], [xmax, ymin]])
poly2.append(poly2[0])
return multi_polygon(polygons=[polygon(poly1), polygon(poly2)])
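# Sketch of bbox2poly's behaviour (values traced from the code above, added for clarity):
#     bbox2poly(0, 0, 1, 1) returns the closed ring
#         [[0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [1.0, 0.0], [0.0, 0.0]]
#     while a box crossing the antimeridian, e.g. bbox2poly(170, -10, 190, 10, as_shapely=True),
#     comes back as a shapely MultiPolygon split at +/-180 degrees longitude.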
def classify_uris(uris, grouped=True, as_dataframe=True, require_same_type=False, exclude=None, raise_if_empty=True):
"""Converts a list of uris into a pandas dataframe.
Notes:
Classified by resource type.
Args:
uris (list or string): List of Quest uris to classify into the following types: 'collections', 'services',
'publishers', or 'datasets'.
grouped (bool): If True returns
Pandas GroupBy object (see: https://pandas.pydata.org/pandas-docs/stable/groupby.html)
as_dataframe (bool): If True returns a Pandas DataFrame
require_same_type (bool): If True raises a `ValueError` if uris of more than one type are passed in.
exclude (list or string): List of uri types to not allow. If a uri of an excluded type is passed in
then a `ValueError` will be raised.
Returns:
A pandas dataframe.
"""
uris = listify(uris)
df =
|
pd.DataFrame(uris, columns=['uri'])
|
pandas.DataFrame
|
'''
This file includes all the locally differentially private mechanisms we designed for the SIGMOD work.
I am aware that this code could be cleaned up a bit and that there is some redundancy, but it helps keep the code plug-n-play:
I can simply copy a class and use it in a different context.
http://dimacs.rutgers.edu/~graham/pubs/papers/sigmod18.pdf
'''
import numpy as np
import itertools
from scipy.linalg import hadamard
import pandas as pd
import xxhash
import sys
import random
#np.seterr(all='raise')
BIG_PRIME = 9223372036854775783
def rr2 (bit,bern):
if bern:
return bit
return -bit
def pop_probmat(prob,sz):
probmat =np.zeros((sz,sz))
d = np.log2(sz)
for i in range(0,sz):
for j in range(0,sz):
perturbed = count_1(np.bitwise_xor(i,j))
#print i,bin(i),j,bin(j) ,bin(np.bitwise_xor(i,j)),perturbed
probmat[i][j] = np.power(1.0-prob,perturbed) * np.power(prob,d-perturbed)
return probmat
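# pop_probmat above builds the bitwise randomized-response transition matrix
#     P[i][j] = (1 - prob)^hamming(i, j) * prob^(d - hamming(i, j)),
# i.e. each of the d bits is kept with probability prob and flipped otherwise.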
def mps (num,bern,rnum):
if bern:
return num
return rnum
def L1(a,b):
a = np.abs(a)
b= np.abs(b)
return round(np.abs(a-b).sum(),4)
def count_1(num):
cnt =0
while num !=0:
num = np.bitwise_and(num,num-1)
cnt+=1
return cnt
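# count_1 above is Kernighan's popcount: each num & (num - 1) step clears the lowest set bit,
# so, for example, count_1(0b1011) == 3.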
def random_number():
return random.randrange(1, BIG_PRIME - 1)
def compute_marg(misc_vars
,irr_estimate
,ips_estimate
,iht_pert_ns_estimate
,iolh_estimate
,mps_pert_dict
,mrr_pert_dict
,mht_pert_dict
,icms_estimate
,icmsht_estimate
):
### These lists store L1 error for each k way marginal.
irr_l1_array = []
iht_l1_array = []
ips_l1_array =[]
iolh_l1_array =[]
icms_l1_array = []
icmsht_l1_array = []
mps_l1_array= []
mrr_l1_array=[]
mht_l1_array = []
s = misc_vars.allsubsetsint.shape[0]
temp_array2= np.zeros(s)
input_dist_margs = np.zeros(np.power(2,misc_vars.d))
marg_from_irr = np.zeros(np.power(2,misc_vars.d))
marg_from_iht = np.zeros(s)
marg_from_ips = np.zeros(np.power(2,misc_vars.d))
marg_from_iolh = np.zeros(np.power(2,misc_vars.d))
marg_from_icms = np.zeros(np.power(2,misc_vars.d))
marg_from_icmsht = np.zeros(np.power(2,misc_vars.d))
all_cords = np.array(range(0, np.power(2,misc_vars.d)))
temp_array = np.zeros(np.power(2, misc_vars.d))
### We now evaluate each marginal using the method described in Barak et al's paper.
for beta in misc_vars.allsubsetsint:
if count_1(beta) != misc_vars.k:
continue
alphas=misc_vars.alphas_cache[beta]["alphas"]
gammas = alphas
marg_from_irr.fill(0.0)
marg_from_ips.fill(0.0)
marg_from_iht.fill(0.0)
marg_from_iolh.fill(0.0)
marg_from_icms.fill(0.0)
marg_from_icmsht.fill(0.0)
input_dist_margs.fill(0.0)
real_indices = []
for alpha in alphas:
temp_array.fill(0.0)
temp_array2.fill(0.0)
try:
f_alpha = misc_vars.f[alpha]
except:
f_alpha = np.zeros(np.power(2,misc_vars.d))
for i in all_cords:
f_alpha[i] = np.power(-1.0, count_1(np.bitwise_and(alpha, i)))
misc_vars.f[alpha] = f_alpha
for gamma in gammas:
temp_array[gamma]+=misc_vars.f[alpha][gamma]
temp_array2[misc_vars.coef_dict[gamma]] +=np.power(-1.0,count_1(np.bitwise_and(gamma,alpha)))
try:
input_dist_margs += (temp_array * misc_vars.f[alpha].dot(misc_vars.input_dist))
marg_from_irr += (temp_array * misc_vars.f[alpha].dot(irr_estimate))
marg_from_ips += (temp_array * misc_vars.f[alpha].dot(ips_estimate))
marg_from_icms += (temp_array * misc_vars.f[alpha].dot(icms_estimate))
marg_from_icmsht += (temp_array * misc_vars.f[alpha].dot(icmsht_estimate))
marg_from_iolh += (temp_array * misc_vars.f[alpha].dot(iolh_estimate))
except:
print ("Unexpected error:", sys.exc_info())
marg_from_iht += (temp_array2 * iht_pert_ns_estimate[misc_vars.coef_dict[alpha]])
real_indices.append(misc_vars.coef_dict[alpha])
### input######
m_inp = np.abs(np.take(input_dist_margs,gammas)) ## Extracting counts from marginal indices specified by "gammas".
m_inp/=m_inp.sum()
#### INPUT_HT #############
m_inp_ht = np.abs(np.take(marg_from_iht,real_indices)) ## Extracting counts from marginal indices specified by "gammas".
m_inp_ht/=m_inp_ht.sum()
iht_l1_array.append(L1(m_inp_ht,m_inp))
######## INPUT_PS ###########
ips_marg = np.abs(np.take(marg_from_ips,gammas)) ## Extracting counts from marginal indices specified by "gammas".
ips_marg/=ips_marg.sum()
ips_l1_array.append(L1(ips_marg,m_inp))
######## INPUT_RR ##########
m_irr = np.abs(np.take(marg_from_irr, gammas)) ## Extracting counts from marginal indices specified by "gammas".
m_irr /= m_irr.sum()
irr_l1_array.append(L1(m_irr,m_inp))
######### INPUT_OLH ##########
try:
m_iolh = np.abs(np.take(marg_from_iolh,gammas)) ## Extracting counts from marginal indices specified by "gammas".
m_iolh/=m_iolh.sum()
iolh_l1_array.append(L1(m_iolh,m_inp))
except:
            ## in case we drop INPUT_OLH from execution.
#print ("Unexpected error:", sys.exc_info())
iolh_l1_array.append(0.0)
try:
icms_marg = np.abs(np.take(marg_from_icms,gammas)) ## Extracting counts from marginal indices specified by "gammas".
icms_marg/=icms_marg.sum()
icms_l1_array.append(L1(icms_marg,m_inp))
except:
            # in case we drop INPUT_CMS from execution.
#print ("Unexpected error:", sys.exc_info())
icms_l1_array.append(0.0)
try:
icmsht_marg = np.abs(np.take(marg_from_icmsht,gammas)) ## Extracting counts from marginal indices specified by "gammas".
icmsht_marg/=icmsht_marg.sum()
icmsht_l1_array.append(L1(icmsht_marg,m_inp))
except:
            # in case we drop INPUT_HTCMS from execution.
#print (icms_marg)
#print ("Unexpected error:", sys.exc_info())
icmsht_l1_array.append(0.0)
######### MARG_RR ###############
mrr_l1_array.append(L1(m_inp,mrr_pert_dict[np.binary_repr(beta,width=misc_vars.d)[::-1]]))
#print (m_inp)
######### MARG_HT #####################
mht_l1_array.append(L1(mht_pert_dict[np.binary_repr(beta,width=misc_vars.d)[::-1]],m_inp))
########## MARG_PS #####################
mps_l1_array.append(L1(mps_pert_dict[np.binary_repr(beta, width=misc_vars.d)[::-1]], m_inp))
irr_l1 = np.array(irr_l1_array).mean(axis=0)
ips_l1 = np.array(ips_l1_array).mean(axis=0)
iht_l1 = np.array(iht_l1_array).mean(axis=0)
iolh_l1 = np.array(iolh_l1_array).mean(axis=0)
icms_l1 = np.array(icms_l1_array).mean(axis=0)
icmsht_l1 = np.array(icmsht_l1_array).mean(axis=0)
mrr_l1 = np.array(mrr_l1_array).mean(axis=0)
mps_l1 = np.array(mps_l1_array).mean(axis=0)
mht_l1 = np.array(mht_l1_array).mean(axis=0)
#print (irr_l1_array,mrr_l1,iht_l1_array,mht_l1,ips_l1,mps_l1,iolh_l1_array,icms_l1_array,icmsht_l1_array)
return (irr_l1,mrr_l1,iht_l1, mht_l1, ips_l1, mps_l1, iolh_l1, icms_l1, icmsht_l1)
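# Sketch of the reconstruction above (our reading of the Barak et al. method, added for clarity):
# with f_alpha(i) = (-1)^popcount(alpha & i), the marginal over the bits in beta is rebuilt as
#     marg = sum over {alpha subset of beta} of  <f_alpha, estimate> * f_alpha,
# restricted to the cells indexed by "gammas"; the Hadamard variant (marg_from_iht) applies the
# same sum using the perturbed coefficients iht_pert_ns_estimate instead of the dot products.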
class INPUT_RR(object):
def perturb2(self):
return
def perturb(self,index_of_1,p):
i = 0
while i < self.sz:
item = 0.0
if i == index_of_1:
item = 1.0
if self.bern_irr[p][i]:
self.irr[i] += item
else:
self.irr[i] += (1.0 - item)
i += 1
## It is possible to simulate InputRR using Binomial distributions. We
## use this simulation for rapid completion.
def correction2(self,miscvar):
i=0
irr2 = np.zeros(self.sz)
while i < self.sz:
irr2[i] = np.random.binomial(miscvar.input_dist[i],0.5,size=1)[0] +\
np.random.binomial(self.population- miscvar.input_dist[i],1.0-self.prob,size=1)[0]
irr2[i]/=self.population
irr2[i] = (self.irr[i] + self.prob - 1.0) / (2.0 * self.prob - 1.0)
i+=1
np.copyto(self.irr,irr2)
#print (irr2)
## just repeat reconstruction of each index to reduce variance.
def correction3(self,miscvar):
i=0
while i <self.sz:
j=0
while j<5:
self.irr[i] += (np.random.binomial(miscvar.input_dist[i],0.5,size=1)[0] +\
np.random.binomial(self.population- miscvar.input_dist[i],self.prob,size=1)[0])
j+=1
self.irr[i]/=(5.0*self.population)
self.irr[i] = (self.irr[i]-self.prob) / (0.5 -self.prob);
#self.irr[i] = (self.irr[i] + self.prob - 1.0) / (2.0 * self.prob - 1.0)
i+=1
#print (self.irr)
def correction(self):
self.irr/=self.population
#print (self.irr)
for i in range(0,self.sz):
self.irr[i] = (self.irr[i]+self.prob-1.0)/(2.0*self.prob-1.0)
#self.irr/=self.irr.sum()
#print (self.irr.round(4))
def __init__(self,e_eps,d,population):
self.population=population
self.d = d
self.sz = np.power(2, self.d)
self.eps = np.log(e_eps)
self.e_eps = np.power(np.e,(self.eps/2.0))
self.prob = self.e_eps/(1.0+self.e_eps)
#print (self.prob,"input-RR")
self.problist = [self.prob,1.0-self.prob]
#self.bern_irr = np.random.choice([True,False], size=self.sz * self.population, p=self.problist).reshape(self.population, self.sz)
#self.sample_index = np.random.choice(range(0, self.sz), size=self.population)
self.irr = np.zeros(np.power(2,self.d))
class MARG_RR(object):
def perturb(self,index_of_1,p,rand_quests):
i = 0
if not rand_quests in self.marg_dict:
self.marg_dict[rand_quests] = np.zeros(self.sz)
self.marg_freq[rand_quests] = 0.0
self.marg_freq[rand_quests] += 1.0
while i < self.sz:
item = 0.0
if i == index_of_1:
item = 1.0
if self.bern[p][i]:
self.marg_dict[rand_quests][i] += item
else:
self.marg_dict[rand_quests][i] += (1.0 - item)
i += 1
def perturb2(self,index_of_1,p,rand_quests):
i = 0
if not rand_quests in self.marg_dict:
self.marg_dict[rand_quests] = np.zeros(self.sz)
self.marg_freq[rand_quests] = 0.0
self.marg_freq[rand_quests] += 1.0
while i < self.sz:
item = 0.0
b = self.bern_q
if i == index_of_1:
item = 1.0
b = self.bern_p
if b[p][i]:
self.marg_dict[rand_quests][i] += item
else:
self.marg_dict[rand_quests][i] += (1.0 - item)
i += 1
def perturb3(self,index_of_1,p,rand_quests):
try:
self.marg_freq[rand_quests] += 1.0
self.true_marg[rand_quests][index_of_1]+= 1.0
except:
self.marg_dict[rand_quests] = np.zeros(self.sz)
self.marg_freq[rand_quests] = 0.0
self.true_marg[rand_quests] = np.zeros(self.sz)
self.marg_freq[rand_quests] += 1.0
self.true_marg[rand_quests][index_of_1]+= 1.0
def correction(self):
#print ("--------------------------------")
for marg in self.marg_dict:
self.marg_dict[marg] /= self.marg_freq[marg]
for i in range(0,self.sz):
self.marg_dict[marg][i] = (self.marg_dict[marg][i]+self.prob-1.0)/(2.0*self.prob-1.0)
self.marg_dict[marg]/=self.marg_dict[marg].sum()
def correction2(self):
for marg in self.marg_dict:
#print ("--------------------------------")
self.marg_dict[marg] /= self.marg_freq[marg]
for i in range(0,self.sz):
#self.marg_dict[marg][i] = (self.marg_dict[marg][i]+self.prob-1.0)/(2.0*self.prob-1.0)
self.marg_dict[marg][i] = (self.marg_dict[marg][i]-(self.prob)) / (0.5 -(self.prob))
self.marg_dict[marg]/=self.marg_dict[marg].sum()
def correction3(self):
for marg in self.marg_dict:
#self.marg_dict[marg] /= self.marg_freq[marg]
i=0
#print (self.marg_dict[marg])
total = self.marg_freq[marg]
while i <self.sz:
j=0
while j <5:
self.marg_dict[marg][i] += (np.random.binomial(self.true_marg[marg][i],0.5,size=1)[0] +\
np.random.binomial(self.marg_freq[marg]- self.true_marg[marg][i],self.prob,size=1)[0])
j+=1
self.marg_dict[marg][i] /= (5.0*total)
#self.marg_dict[marg][i] = (self.marg_dict[marg][i]+self.prob-1.0)/(2.0*self.prob-1.0)
self.marg_dict[marg][i] = (self.marg_dict[marg][i]-(self.prob)) / (0.5 -(self.prob))
i+=1
self.marg_dict[marg]/=self.marg_dict[marg].sum()
def __init__(self,d,k,e_eps,population,k_way):
self.d = d
self.k = k
self.population= population
self.k_way = k_way
self.sz = np.power(2,self.k)
self.eps = np.log(e_eps)
self.e_eps = np.power(np.e,self.eps/2.0)
self.prob = self.e_eps / (1.0+self.e_eps)
#print (self.prob,"marg-RR")
self.problist = [self.prob,1.0-self.prob]
self.k_way_marg_ps = np.random.choice(self.k_way,size=self.population)
self.bern = np.random.choice([True, False], size=self.sz * self.population, p=self.problist).reshape(self.population, self.sz)
self.bern_p = np.random.choice([True, False], size=self.sz * self.population).reshape(self.population, self.sz)
self.bern_q = np.random.choice([True, False], size=self.sz * self.population, p=self.problist[::-1]).reshape(self.population, self.sz)
self.marg_dict = {}
self.marg_freq={}
self.true_marg={}
class MARG_HT(object):
def perturb(self,index_of_1,p,rand_quests):
if not rand_quests in self.marg_dict:
self.marg_dict[rand_quests] = np.zeros(self.sz)
self.marg_freq[rand_quests] = np.zeros(self.sz)
cf =self.rand_coef[p]
self.marg_freq[rand_quests][cf] += 1.0
htc = self.f[index_of_1][cf]
if self.bern[p]:
self.marg_dict[rand_quests][cf] += htc
else:
self.marg_dict[rand_quests][cf] += -htc
def correction(self):
for rm in self.marg_dict:
self.marg_freq[rm][self.marg_freq[rm] == 0.0] = 1.0
self.marg_dict[rm]/=self.marg_freq[rm]
self.marg_dict[rm]/=(2.0*self.prob-1.0)
self.marg_dict[rm][0]=1.0
#print ("-------------------")
#print (self.marg_dict[rm])
self.marg_dict[rm]= np.abs(self.marg_dict[rm].dot(self.f))
self.marg_dict[rm]/=self.marg_dict[rm].sum()
#print (self.marg_dict[rm].round(4))
def pop_probmat(self):
probmat =np.zeros((self.sz,self.sz))
for i in range(0,self.sz):
for j in range(0,self.sz):
if i ==j:
probmat[i][j]= self.prob
else:
probmat[i][j]= (1.0-self.prob)/(self.sz-1.0)
return probmat
def compute_all_marginals(self):
for marg_int in self.k_way:
self.correct_noise_mps(marg_int)
def __init__(self,d,k,e_eps,population,k_way,cls):
self.d = d
self.k = k
self.population= population
self.sz = np.power(2,self.k)
self.e_eps = e_eps
self.f = hadamard(self.sz).astype("float64")
self.prob = (self.e_eps/(1.0+self.e_eps))
self.problist = [self.prob,1.0-self.prob]
self.coef_dist = np.zeros(cls)
self.k_way = k_way
self.k_way_marg_ps = np.random.choice(self.k_way,size=self.population)
self.rand_coef= np.random.choice(range(0,self.sz),size=population)
self.bern = np.random.choice([True, False], size= self.population, p=self.problist)#.reshape(self.population, self.sz)
self.marg_freq = {}
self.marg_dict = {}
self.marg_noisy = np.zeros(self.sz)
class MARG_PS(object):
def perturb(self,index_of_1,p,rand_quests):
try:
freq = self.rand_cache[index_of_1]["freq"]
except:
i = 0
while i < self.sz:
options = list(range(0, self.sz))
options.remove(i)
self.rand_cache[i] = {"rnum": np.random.choice(np.array(options), size=10000), "freq": 0}
i += 1
freq = self.rand_cache[index_of_1]["freq"]
if freq > 9990:
options = list(range(0, self.sz))
options.remove(index_of_1)
self.rand_cache[index_of_1]["rnum"] = np.random.choice(np.array(options), size=10000)
self.rand_cache[index_of_1]["freq"] = 0
rnum = self.rand_cache[index_of_1]["rnum"][freq]
try:
self.marg_ps_pert_aggr[rand_quests].append(mps(index_of_1, self.bern[p], rnum))
except:
self.marg_ps_pert_aggr[rand_quests] = [mps(index_of_1, self.bern[p], rnum)]
self.rand_cache[index_of_1]["freq"] += 1
def correct_noise_mps(self,marg_int):
self.marg_int=marg_int
self.marg_ps_noisy.fill(0.0)
        if not isinstance(self.marg_ps_pert_aggr[marg_int], np.ndarray):
for rm in self.marg_ps_pert_aggr:
self.marg_ps_pert_aggr[rm] = np.array(self.marg_ps_pert_aggr[rm])
#print (self.marg_ps_pert_aggr.keys())
for index in self.marg_ps_pert_aggr[marg_int]:
self.marg_ps_noisy[index]+=1.0
self.marg_ps_noisy/=self.marg_ps_noisy.sum()
#marg_ps_recon = np.copy(marg_noisy)
self.marg_ps_recon = self.mat_inv.dot(self.marg_ps_noisy)
self.marg_ps_recon/=self.marg_ps_recon.sum()
#print (self.marg_ps_recon.round(4))
return self.marg_ps_recon
def pop_probmat(self):
probmat =np.zeros((self.sz,self.sz))
for i in range(0,self.sz):
for j in range(0,self.sz):
if i ==j:
probmat[i][j]= self.prob
else:
probmat[i][j]= (1.0-self.prob)/(self.sz-1.0)
return probmat
def compute_all_marginals(self):
for marg_int in self.k_way:
self.marg_dict[marg_int]=self.correct_noise_mps(marg_int)
def __init__(self,d,k,e_eps,population,k_way):
self.d = d
self.k = k
self.population= population
self.k_way = k_way
self.sz = np.power(2,self.k)
#self.data = data
self.e_eps = e_eps
self.prob = (self.e_eps/(self.e_eps+self.sz-1.0))
#print self.prob,"marg-ps"
self.probmat = self.pop_probmat()
self.problist = [self.prob,1.0-self.prob]
self.mat = self.pop_probmat()
self.mat_inv = np.linalg.inv(self.mat)
self.k_way_marg_ps = np.random.choice(self.k_way,size=self.population)
self.bern = np.random.choice([True, False], p=self.problist, size=self.population)
self.marg_ps_pert_aggr = {}
self.rand_cache = {}
self.marg_int = None
self.marg_ps_noisy = np.zeros(self.sz)
self.marg_dict = {}
    ## From <NAME> et al.'s USENIX paper.
## https://www.usenix.org/system/files/conference/usenixsecurity17/sec17-wang-tianhao.pdf
## This algorithm indeed does well for high-order marginals but doesn't outperform INPUT_HT
## for small k's, i.e. 2 or 3, the ones that are the most interesting.
## We trade the gain in accuracy for computational cost. The encoding (or decoding) cost is O(dN).
class INPUT_OLH(object):
def __init__(self,e_eps, d, population,g=1):
self.d = d
self.population= population
self.sz = int(np.power(2,self.d))
#self.data = data
self.e_eps = e_eps
if g == 1:
self.g = int(np.ceil(e_eps+1.0))
else:
self.g = g
#print (self.g)
self.prob = (self.e_eps/(self.e_eps+self.g-1.0))
self.problist = [self.prob,1.0-self.prob]
self.bern_ps = np.random.choice([False,True], size=self.population, p=self.problist)
self.uni_dist = np.random.choice(range(self.g),size=self.population).astype("int32")
#self.hash_cache = np.array( map(str,range(self.sz)),dtype="str") ## works with Python2
self.hash_cache = np.array(range(self.sz),dtype="str")
#self.hashed_pdist = np.zeros(self.population)
self.estimate = np.zeros(self.sz)
def perturb(self,x,p):
if self.bern_ps[p]:
#x_hash= (xxhash.xxh32(self.hash_cache[x], seed=p).intdigest()) % self.g
pert_val= (xxhash.xxh32(self.hash_cache[x], seed=p).intdigest()) % self.g
else:
pert_val=self.uni_dist[p]
dom_index = 0
while dom_index<self.sz:
if pert_val == (xxhash.xxh32(self.hash_cache[dom_index], seed=p).intdigest() % self.g):
self.estimate[dom_index]+=1.0
dom_index+=1
def correction(self):
p=0
while p <self.sz:
self.estimate[p]=(self.estimate[p] - (self.population/self.g))/(self.prob -(1.0/self.g))
p+=1
self.estimate/=self.estimate.sum()
#print(self.estimate.round(4))
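    # Illustrative round trip (hypothetical parameters, not part of the original driver):
    #     olh = INPUT_OLH(e_eps=3.0, d=3, population=1000)
    #     for p in range(olh.population):
    #         olh.perturb(np.random.randint(olh.sz), p)   # each user reports one hashed, perturbed value
    #     olh.correction()   # olh.estimate is now a normalized histogram over the 2**d cells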
class INPUT_HT(object):
def perturb(self,index_of_1,p):
rc = self.rand_coefs[p]
index = self.misc_vars.coef_dict[rc]
self.coef_dist[index] += 1.0
cf = np.power(-1.0, count_1(np.bitwise_and(index_of_1, rc)))
self.iht_pert_ns_estimate[index] += rr2(cf, self.bern_ht[p])
def correction(self):
self.coef_dist[self.coef_dist==0.0]=1.0
self.iht_pert_ns_estimate/=self.coef_dist
self.iht_pert_ns_estimate/=(2.0*self.prob-1.0)
self.iht_pert_ns_estimate[0] = 1.0
self.coef_dist[self.coef_dist<=0.0]=0.0
def __init__(self,d,k,e_eps,population,misc_vars):
self.d = d
self.k = k
self.misc_vars = misc_vars
self.population= population
self.sz = np.power(2,self.k)
self.e_eps = e_eps
self.prob = self.e_eps/(1.0+self.e_eps)
self.problist = [self.prob,1.0-self.prob]
self.bern_ht = np.random.choice([True,False],p=self.problist,size=self.population)
self.rand_coefs = np.random.choice(self.misc_vars.allsubsetsint,size=self.population)
self.iht_pert_ns_estimate = np.zeros(self.misc_vars.allsubsetsint.shape[0])
#iht_pert_ns_estimate.fill(0.0)
self.coef_dist = np.zeros(self.misc_vars.cls)
## From Apple's paper.
## https://machinelearning.apple.com/2017/12/06/learning-with-privacy-at-scale.html
## This algorithm might be a bad performer, but we add it for comparison.
class INPUT_CMS:
def __init__(self, w, d,population,e_eps,domain):
'''
if delta <= 0 or delta >= 1:
raise ValueError("delta must be between 0 and 1, exclusive")
if epsilonh <= 0 or epsilonh >= 1:
raise ValueError("epsilon must be between 0 and 1, exclusive")
#self.w = int(np.ceil(np.e / epsilonh))
#self.d = int(np.ceil(np.log(1 / delta)))
'''
self.w=w
self.d =d
self.population=population
self.hash_functions = [self.__generate_hash_function() for i in range(self.d)]
self.M = np.zeros(shape=(self.d, self.w))
#print (self.w,self.d,self.w*self.d,self.M.shape)
self.hash_chooser = np.random.choice(range(self.d),size=self.population)
self.epsilon = np.log(e_eps)
self.flip_prob = 1.0/(1.0+np.power(np.e,self.epsilon/2.0))
problist = [self.flip_prob,1.0-self.flip_prob]
self.bern = np.random.choice([True,False],p=problist,size=self.population*self.w).reshape(self.population,self.w)
self.c_eps = (np.power(np.e,self.epsilon/2.0)+1.0)/(np.power(np.e,self.epsilon/2.0)-1.0)
self.estimate = np.zeros(int(np.power(2,domain)))
def __generate_hash_function(self):
a = random_number()
b= random_number()
return lambda x: (a * x + b) % BIG_PRIME % self.w
def perturb(self, key,p):
hash_choice = self.hash_chooser[p]
hashed_key = self.hash_functions[hash_choice](abs(hash(str(key))))
cnt = 0
while cnt< self.w:
item = -1.0
if cnt == hashed_key:
item = 1.0
if self.bern[p][cnt]:
item = -item
self.M[hash_choice][cnt]+=(self.d * (item*self.c_eps*0.5+0.5))
cnt+=1
def query(self,key):
l =0
avg=0.0
hsh_str= abs(hash(str(key)))
while l < self.d:
hashed_key = self.hash_functions[l](hsh_str)
avg+=self.M[l][hashed_key]
l+=1
avg/=self.d
est = ((1.0*self.w)/(self.w-1.0))* (avg- (1.0*self.population)/self.w)
return est
def correction(self):
cnt=0
while cnt <self.estimate.shape[0]:
self.estimate[cnt]=self.query(cnt)
cnt+=1
self.estimate[self.estimate < 0.0] = 0.0
self.estimate/=self.estimate.sum()
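    # The de-biasing in query() above follows the count-mean-sketch estimator
    #     est = (w / (w - 1)) * (avg_row_count - N / w),
    # i.e. subtract the expected collision mass N / w and rescale (our reading of the Apple paper).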
## From Apple's paper.
## https://machinelearning.apple.com/2017/12/06/learning-with-privacy-at-scale.html
## This algorithm might be a bad performer, but we add it for comparison.
class INPUT_HTCMS:
#def __init__(self, delta, epsilonh,population,e_eps):
def __init__(self, w, d,population,e_eps,domain):
self.w=int(w)
self.d =int(d)
self.ht = hadamard(self.w, dtype="float32")
self.population=population
self.hash_functions = [self.__generate_hash_function() for i in range(self.d)]
self.M = np.zeros(shape=(self.d, self.w))
#print (self.w,self.d,self.w*self.d,self.M.shape)
self.hash_chooser = np.random.choice(range(self.d),size=self.population).astype("int32")
self.coef_chooser = np.random.choice(range(self.w),size=self.population).astype("int32")
#self.hash_choice_counter = np.zeros(self.d)
self.flip_prob = 1.0/(1.0+e_eps)
problist = [self.flip_prob,1.0-self.flip_prob]
self.bern = np.random.choice([True,False],p=problist,size=self.population)
self.c_eps = (e_eps+1.0)/(e_eps-1.0)
self.estimate = np.zeros(int(np.power(2,domain)))
def __generate_hash_function(self):
a = random_number()
b= random_number()
return lambda x: (a * x + b) % BIG_PRIME % self.w
def perturb(self, key,p):
hash_choice = self.hash_chooser[p]
#self.hash_choice_counter[hash_choice]+=1.0
hashed_key = self.hash_functions[hash_choice](abs(hash(str(key))))
rand_coef = self.coef_chooser[p]
item = self.ht[rand_coef][hashed_key]
if self.bern[p]:
item = -item
self.M[hash_choice][rand_coef]+=(self.d * item*self.c_eps)
def correction(self):
cnt = 0
while cnt < self.d:
#print self.M[cnt]
self.M[cnt] = self.ht.dot(self.M[cnt])
cnt+=1
cnt=0
while cnt <self.estimate.shape[0]:
self.estimate[cnt]=self.query(cnt)
cnt+=1
self.estimate[self.estimate < 0.0] = 0.0
self.estimate/=self.estimate.sum()
def query(self,key):
l =0
avg=0.0
hsh_str= abs(hash(str(key)))
while l < self.d:
hashed_key = self.hash_functions[l](hsh_str)
avg+=self.M[l][hashed_key]
l+=1
avg/=self.d
est = ((1.0*self.w)/(self.w-1.0))* (avg- (1.0*self.population)/self.w)
return est
class INPUT_PS(object):
def perturb2(self,index_of_1,p):
if self.bern_ps[p]:
self.ips_ps_pert_aggr[index_of_1] += 1.0
else:
self.ips_ps_pert_aggr[self.rand_coef_ps[p]] += 1.0
def perturb(self,index_of_1,p):
try:
freq = self.rand_cache[index_of_1]["freq"]
except:
i = 0
while i < self.sz:
options = list(range(0, self.sz))
options.remove(i)
self.rand_cache[i] = {"rnum": np.random.choice(np.array(options), size=10000), "freq": 0}
i += 1
freq = self.rand_cache[index_of_1]["freq"]
if freq > 9990:
options = list(range(0, self.sz))
options.remove(index_of_1)
self.rand_cache[index_of_1]["rnum"] = np.random.choice(np.array(options), size=10000)
self.rand_cache[index_of_1]["freq"] = 0
rnum = self.rand_cache[index_of_1]["rnum"][freq]
ips_output = mps(index_of_1, self.bern[p], rnum)
self.ips_ps_pert_aggr[ips_output] += 1.0
self.rand_cache[index_of_1]["freq"] += 1
def correction2(self):
self.ips_ps_pert_aggr /= self.population
#print self.ips_ps_pert_aggr, "pert",self.ips_ps_pert_aggr.sum()
for i in range(0, self.sz):
self.ips_ps_pert_aggr[i] = (self.ips_ps_pert_aggr[i] * self.sz + self.probps - 1.0) / (self.probps * (self.sz + 1.0) - 1.0)
#print self.ips_ps_pert_aggr.round(4)
def correction(self):
self.ips_ps_pert_aggr /= self.ips_ps_pert_aggr.sum()
for i in range(0,self.sz):
self.ips_ps_pert_aggr[i] = (self.ips_ps_pert_aggr[i]*self.sz+self.prob-1.0)/(self.prob*(self.sz+1.0)-1.0)
#print self.marg_ps_recon.round(4)
'''
self.ips_ps_pert_aggr /= self.ips_ps_pert_aggr.sum()
# marg_ps_recon = np.copy(marg_noisy)
self.ips_ps_pert_aggr = np.abs(self.mat_inv.dot(self.ips_ps_pert_aggr))
self.ips_ps_pert_aggr /= self.ips_ps_pert_aggr.sum()
'''
#return self.ips_ps_pert_aggr
def pop_probmat(self):
probmat =np.zeros((self.sz,self.sz))
for i in range(0,self.sz):
for j in range(0,self.sz):
if i ==j:
probmat[i][j]= self.prob
else:
probmat[i][j]= (1.0-self.prob)/(self.sz-1.0)
return probmat
def __init__(self,d,k,e_eps,population,misc_vars):
self.d = d
self.k = k
self.population= population
self.k_way = misc_vars.k_way
self.sz = np.power(2,self.d)
self.e_eps = e_eps
self.prob = (self.e_eps/(self.e_eps+self.sz-1.0))
#print (self.prob,"input-ps")
self.problist = [self.prob,1.0-self.prob]
self.probps = (self.e_eps - 1.0) / (self.e_eps + self.sz - 1.0)
self.problist2 = [self.probps, 1.0 - self.probps]
self.rand_coef_ps = np.random.choice(np.array(range(0, self.sz)), size=self.population)
self.bern_ps = np.random.choice([True, False], size=self.population, p=[self.probps, 1.0 - self.probps])
#self.mat = self.pop_probmat()
        #self.mat_inv = np.linalg.inv(self.mat)
self.bern = np.random.choice([True, False], p=self.problist, size=self.population)
self.ips_ps_pert_aggr = np.zeros(self.sz)
self.rand_cache = {}
self.marg_int = None
self.rand_cache = {}
#inp_trans_menthods.loc[l]=np.array([population,d,len(iway),input_ht_pert,iht_pert_ns_estimate,had_coefs,input_ps,input_rr],dtype="object")
def change_mapping(d):
if d:
return "1"
return "0"
def get_real_data(population,d):
data = pd.read_pickle("data/nyc_taxi_bin_sample.pkl").sample(population,replace=True)
    data = data.to_numpy()  # DataFrame.as_matrix() is deprecated/removed in newer pandas
f = np.vectorize(change_mapping)
i = data.shape[1]
remainder = d % i
    ncopies = d // i
copies = []
j = 0
while j < ncopies:
copies.append(data)
j+=1
#print data[:,range(0,remainder)]
copies.append(data[:,range(0,remainder)])
#rand_perm = np.random.choice(range(0,d),replace=False,size=d)
#print rand_perm
data_high = np.concatenate(tuple(copies),axis=1)#[:,rand_perm]
#print (data_high.shape)
#columns= data.columns.tolist()
#print columns
#data = f(data_high)
return f(data_high).astype("str")
class MARGINAL_VARS(object):
#We cache the set of necessary and sufficient indices to evaluate each <= k way marginal.
def compute_downward_closure(self):
all_cords = np.array(range(0, np.power(2, self.d)))
## iterate over all possible <=k way marginals.
for beta in self.allsubsetsint:
marg_str = bin(beta)[2:]
marg_str = "0" * (self.d - len(marg_str)) + marg_str
parity = np.power(2, count_1(beta))
alphas = np.zeros(parity, dtype="int64")
cnt = 0
for alpha in all_cords:
if np.bitwise_and(alpha, beta) == alpha:
alphas[cnt] = alpha
cnt += 1
            ### we add marginals in string formats in case needed.
self.alphas_cache[marg_str] = {"alphas": alphas, "probps": ((self.e_eps - 1.0) / (parity + self.e_eps - 1.0))}
self.alphas_cache[beta] = {"alphas": alphas, "probps": ((self.e_eps - 1.0) / (parity + self.e_eps - 1.0))}
## This method finds the set of <=k way marginal indices i.e. list of all subsets of length <=k from d.
def get_k_way_marginals(self):
j = 0
marginal = np.array(["0"] * self.d)
while j <= self.k:
subsets = list(itertools.combinations(range(0, self.d), j))
subsets = np.array([list(elem) for elem in subsets])
for s in subsets:
marginal.fill("0")
for b in s:
marginal[b] = "1"
self.allsubsetsint.append(int("".join(marginal)[::-1], 2))
if j == self.k:
# k_way.append(int("".join(marginal),2))
self.k_way.append("".join(marginal)[::-1])
self.k_way_bit_pos.append(s)
# print s,marginal,"".join(marginal)
j += 1
self.allsubsetsint = np.array(self.allsubsetsint, dtype="int64")
self.k_way = np.array(self.k_way, dtype="str")
self.k_way_bit_pos = np.array(self.k_way_bit_pos, dtype="int64")
self.allsubsetsint.sort()
#print (self.allsubsetsint)
## We tie marginals indices and corresponding bit positions together.
#print (dict(zip(self.k_way, self.k_way_bit_pos)))
return dict(zip(self.k_way, self.k_way_bit_pos))
def __init__(self,d,k,e_eps):
self.d = d
self.k = k
self.input_dist = np.zeros(np.power(2, self.d))
self.allsubsetsint = []
self.k_way = []
self.k_way_bit_pos = []
self.e_eps = e_eps
#self.f = hadamard(np.power(2,self.d)).astype("float64")
self.f = {}
self.alphas_cache = {}
self.k_way_bit_pos_dict =self.get_k_way_marginals()
self.cls = self.allsubsetsint.shape[0]
self.coef_dict = dict(zip(self.allsubsetsint, np.array(range(0, self.cls), dtype="int64")))
self.compute_downward_closure()
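# Illustrative (small case worked out from the code above): for d = 3, k = 2,
# MARGINAL_VARS(3, 2, e_eps=3.0).allsubsetsint is [0, 1, 2, 3, 4, 5, 6] -- every subset of the
# 3 bits with at most 2 set bits -- and k_way holds the 2-way marginal masks '011', '101', '110'.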
'''
Main driver routine that accepts all parameters and
runs the perturbation simulation.
'''
def driver(d,k,e_eps,population,misc_vars):
width = 256
no_hash = 5
###### Use the NYC Taxi data.
#data = get_real_data(population, d)
####### Use synthetic data if you don't have the taxi data. ########
data = np.random.choice(["1","0"],p=[0.3,0.7],size=d*population).reshape(population,d)
misc_vars.input_dist.fill(0.0)
##### Input Based Algorithms ########
iht_obj = INPUT_HT(d, k, e_eps, population, misc_vars)
ips_obj = INPUT_PS(d, k, e_eps, population, misc_vars)
irr_obj = INPUT_RR(e_eps, d, population)
iolh_obj = INPUT_OLH(e_eps, d, population)
icms_obj = INPUT_CMS(width, no_hash,population,e_eps,d)
icmsht_obj = INPUT_HTCMS(width, no_hash,population,e_eps,d)
############ Marginal Based Algorithms #########
mps_obj = MARG_PS(d, k, e_eps, population, misc_vars.k_way)
mrr_obj = MARG_RR(d, k, e_eps, population, misc_vars.k_way)
mht_obj = MARG_HT(d, k, e_eps, population, misc_vars.k_way, misc_vars.cls)
p = 0
while p < population:
x = data[p]
index_of_1 = int("".join(x), 2)
misc_vars.input_dist[index_of_1] += 1.0
############# input_RR###############
#irr_obj.perturb(index_of_1,p)
#irr_obj.perturb2()
#########################input-PS #################################
ips_obj.perturb2(index_of_1,p)
########################################
iht_obj.perturb(index_of_1, p)
##########################INPUT_OLH ###############################
        #INPUT_OLH is a compute-intensive scheme, hence we don't run it for larger d's.
if d < 10:
iolh_obj.perturb(index_of_1,p)
##########################inp_CMS ########################
icms_obj.perturb(index_of_1,p)
##########################inp_HTCMS ########################
icmsht_obj.perturb(index_of_1,p)
########### marg-ps ###########
rand_questions = mps_obj.k_way_marg_ps[p]
responses = misc_vars.k_way_bit_pos_dict[rand_questions]
# print rand_questions,responses
index_of_1 = int("".join(data[p][responses]), 2)
mps_obj.perturb(index_of_1, p, rand_questions)
######################### marg-ht ############################
rand_questions = mht_obj.k_way_marg_ps[p]
responses = misc_vars.k_way_bit_pos_dict[rand_questions]
# print rand_quests,responses
index_of_1 = int("".join(data[p][responses]), 2)
mht_obj.perturb(index_of_1, p, rand_questions)
######################### marg-rs #################################
rand_questions = mrr_obj.k_way_marg_ps[p]
responses = misc_vars.k_way_bit_pos_dict[rand_questions]
index_of_1 = int("".join(data[p][responses]), 2)
mrr_obj.perturb3(index_of_1, p, rand_questions)
p += 1
irr_obj.correction3(misc_vars)
#irr_obj.correction2(misc_vars)
misc_vars.input_dist /= population
#irr_obj.correction()
#print (misc_vars.input_dist.round(4))
ips_obj.correction()
iht_obj.correction()
if d < 10:
iolh_obj.correction()
icms_obj.correction()
icmsht_obj.correction()
#print(icmsht_obj.estimate)
mht_obj.correction()
mrr_obj.correction3()
mps_obj.compute_all_marginals()
return compute_marg(misc_vars
, irr_obj.irr
, ips_obj.ips_ps_pert_aggr
, iht_obj.iht_pert_ns_estimate
, iolh_obj.estimate
, mps_obj.marg_dict
, mrr_obj.marg_dict
, mht_obj.marg_dict
, icms_obj.estimate
, icmsht_obj.estimate
)
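# Illustrative call (hypothetical small parameters; vary_k / vary_d below loop over such calls):
#     mv = MARGINAL_VARS(d=4, k=2, e_eps=3.0)
#     errs = driver(4, 2, 3.0, 2**14, mv)   # tuple of 9 mean L1 errors, one per mechanism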
'''
Call this method when you want to vary k while keeping d and eps fixed.
eps = 1.1
d = 9
'''
def vary_k():
## number of repetitions.
rpt = 5
e_eps = 3.0
d = 9
counter = 0
## dfmean and dfstd store the results. We use them in our plotting script.
l1 = np.zeros((rpt, 9))
dfmean = pd.DataFrame(columns=["population", "d", "k", "e_eps", "irr_l1", "mrr_l1", "iht_l1", "mht_l1", "ips_l1", "mps_l1","iolh_l1","icms_l1","icmsht_l1"])
dfstd = pd.DataFrame(columns=["irr_l1_std", "mrr_l1_std", "iht_l1_std", "mht_l1_std", "ips_l1_std", "mps_l1_std","iolh_l1_std","icms_l1_std","icmsht_l1_std"])
## parameters of the sketch
width = 256
no_hash = 5
    # Population size. We prefer to keep it a power of two.
population = np.power(2, 18)
for k in reversed(range(1,d)):
misc_vars = MARGINAL_VARS(d, k, e_eps)
l1.fill(0.0)
print ("------------------")
for itr in (range(rpt)):
irr_l1, mrr_l1, iht_l1, mht_l1, ips_l1, mps_l1, iolh_l1,icms_l1,icmsht_l1 = driver(d,k,e_eps,population,misc_vars)
l1[itr] = np.array([irr_l1, mrr_l1, iht_l1, mht_l1, ips_l1, mps_l1,iolh_l1,icms_l1,icmsht_l1])
print (l1[itr])
conf = [population, d, k, e_eps]
conf.extend(l1.mean(axis=0))
dfmean.loc[counter] = conf
dfstd.loc[counter] = l1.std(axis=0)
#print (conf)
counter += 1
dfstdcols = list(dfstd.columns.values)
for c in dfstdcols:
dfmean[c] = dfstd[c]
#print (dfmean)
dfmean.to_pickle("data/all_mechanisms_vary_"+str(d)+".pkl")
## (irr_l1,mrr_l1,iht_l1, mht_l1, ips_l1, mps_l1, iolh_l1, icms_l1, icmsht_l1)
#dfmean.to_pickle("all_mechanisms_vary_k_fo.pkl")
'''
Call this method when you want to vary d holding k, eps, N fixed.
Fixed values:
k = 3
eps = 1.1
N = 2^18
'''
def vary_d():
print ("------------------")
population = int(np.power(2,19))
e_eps = 3.0
rpt =4
l1 = np.zeros((rpt, 9))
## Parameters for sketches
width = 256
no_hash = 5
k=3
dfmean = pd.DataFrame(columns=["population", "d", "k", "e_eps", "irr_l1", "mrr_l1", "iht_l1", "mht_l1", "ips_l1", "mps_l1","iolh_l1","icms_l1","icmsht_l1"])
dfstd = pd.DataFrame(columns=["irr_l1_std", "mrr_l1_std", "iht_l1_std", "mht_l1_std", "ips_l1_std", "mps_l1_std","iolh_l1_std","icms_l1_std","icmsht_l1_std"])
counter =0
for d in ([4,6,8,10,12,16]):
l1.fill(0.0)
misc_vars = MARGINAL_VARS(d, k, e_eps)
for itr in (range(rpt)):
print (d, itr)
print ("computing marginals.")
irr_l1, mrr_l1, iht_l1, mht_l1, ips_l1, mps_l1, iolh_l1,icms_l1,icmsht_l1 = driver(d,k,e_eps,population,misc_vars)
l1[itr] = np.array([irr_l1, mrr_l1, iht_l1, mht_l1, ips_l1, mps_l1,iolh_l1,icms_l1,icmsht_l1])
print (l1[itr])
conf = [population, d, k, e_eps]
conf.extend(l1.mean(axis=0))
dfmean.loc[counter] = conf
dfstd.loc[counter] = l1.std(axis=0)
#print (conf)
counter += 1
dfstdcols = list(dfstd.columns.values)
for c in dfstdcols:
dfmean[c] = dfstd[c]
dfmean.fillna(0.0,inplace=True)
dfmean.to_pickle("data/all_mechanisms_vary_d.pkl")
'''
Call this method when you want to vary eps, d and k holding N fixed.
'''
def driver_vary_all():
rpt = 5
e_eps_arr = np.array([1.1,1.6,2.1,2.5, 3.0,3.5])
counter=0
## Parameters for sketches
width = 256
no_hash = 5
l1 = np.zeros((rpt, 9))
dfmean =
|
pd.DataFrame(columns=["population", "d", "k", "e_eps", "irr_l1", "mrr_l1", "iht_l1", "mht_l1", "ips_l1", "mps_l1","iolh_l1","icms_l1","icmsht_l1"])
|
pandas.DataFrame
|
# coding: utf-8
# In[ ]:
import scipy
from scipy import optimize
import scipy.misc
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
class ProportionalDiffusion(object):
def __init__(self, rt=None, accuracy=None, stimulus_strength=None, required_accuracy=None):
""" Initalizes
Parameters
-----------
rt: list-like (list or np.array or pd.Series; anything that pd.DataFrame understands)
Reaction times to fit
accuracy: list-like
Accuracies to fit
stimulus_strength: list-like
Stimulus strength corresponding to rt and accuracy arguments
required_accuracy: float
Accuracy you want to get the corresponding stimulus strength for
"""
if rt is not None and accuracy is not None and stimulus_strength is not None:
self.data = pd.DataFrame({'rt': rt, 'accuracy': accuracy, 'stimulus_strength': stimulus_strength})
# remove null responses
self.data = self.data.loc[
|
pd.notnull(self.data['rt'])
|
pandas.notnull
|
# -*- coding: utf-8 -*-
import datetime
import numpy as np
import pandas as pd
from ..signal import signal_period
def benchmark_ecg_preprocessing(function, ecg, rpeaks=None, sampling_rate=1000):
"""Benchmark ECG preprocessing pipelines.
Parameters
----------
function : function
        Must be a Python function whose first argument is the ECG signal and which has a
``sampling_rate`` argument.
ecg : pd.DataFrame or str
The path to a folder where you have an `ECGs.csv` file or directly its loaded DataFrame.
Such file can be obtained by running THIS SCRIPT (TO COMPLETE).
rpeaks : pd.DataFrame or str
        The path to a folder where you have an `Rpeaks.csv` file or directly its loaded DataFrame.
Such file can be obtained by running THIS SCRIPT (TO COMPLETE).
sampling_rate : int
The sampling frequency of `ecg_signal` (in Hz, i.e., samples/second). Only used if ``ecgs``
and ``rpeaks`` are single vectors.
Returns
--------
pd.DataFrame
A DataFrame containing the results of the benchmarking
Examples
--------
>>> import neurokit2 as nk
>>>
>>> # Define a preprocessing routine
>>> def function(ecg, sampling_rate):
>>> signal, info = nk.ecg_peaks(ecg, method='engzeemod2012', sampling_rate=sampling_rate)
>>> return info["ECG_R_Peaks"]
>>>
>>> # Synthetic example
>>> ecg = nk.ecg_simulate(duration=20, sampling_rate=200)
>>> true_rpeaks = nk.ecg_peaks(ecg, sampling_rate=200)[1]["ECG_R_Peaks"]
>>>
>>> nk.benchmark_ecg_preprocessing(function, ecg, true_rpeaks, sampling_rate=200)
>>>
>>> # Example using database (commented-out)
>>> # nk.benchmark_ecg_preprocessing(function, r'path/to/GUDB_database')
"""
# find data
if rpeaks is None:
rpeaks = ecg
if isinstance(ecg, str):
ecg =
|
pd.read_csv(ecg + "/ECGs.csv")
|
pandas.read_csv
|
import numpy as np
import pandas as pd
from numpy.testing import assert_array_equal
from pandas.testing import assert_frame_equal
from nose.tools import (assert_equal,
assert_almost_equal,
raises,
ok_,
eq_)
from rsmtool.preprocessor import (FeaturePreprocessor,
FeatureSubsetProcessor,
FeatureSpecsProcessor)
class TestFeaturePreprocessor:
def setUp(self):
self.fpp = FeaturePreprocessor()
def test_select_candidates_with_N_or_more_items(self):
data = pd.DataFrame({'candidate': ['a'] * 3 + ['b'] * 2 + ['c'],
'sc1': [2, 3, 1, 5, 6, 1]})
df_included_expected = pd.DataFrame({'candidate': ['a'] * 3 + ['b'] * 2,
'sc1': [2, 3, 1, 5, 6]})
df_excluded_expected = pd.DataFrame({'candidate': ['c'],
'sc1': [1]})
(df_included,
df_excluded) = FeaturePreprocessor.select_candidates(data, 2)
assert_frame_equal(df_included, df_included_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_select_candidates_with_N_or_more_items_all_included(self):
data = pd.DataFrame({'candidate': ['a'] * 2 + ['b'] * 2 + ['c'] * 2,
'sc1': [2, 3, 1, 5, 6, 1]})
(df_included,
df_excluded) = FeaturePreprocessor.select_candidates(data, 2)
assert_frame_equal(df_included, data)
assert_equal(len(df_excluded), 0)
def test_select_candidates_with_N_or_more_items_all_excluded(self):
data = pd.DataFrame({'candidate': ['a'] * 3 + ['b'] * 2 + ['c'],
'sc1': [2, 3, 1, 5, 6, 1]})
(df_included,
df_excluded) = FeaturePreprocessor.select_candidates(data, 4)
assert_frame_equal(df_excluded, data)
assert_equal(len(df_included), 0)
def test_select_candidates_with_N_or_more_items_custom_name(self):
data = pd.DataFrame({'ID': ['a'] * 3 + ['b'] * 2 + ['c'],
'sc1': [2, 3, 1, 5, 6, 1]})
df_included_expected = pd.DataFrame({'ID': ['a'] * 3 + ['b'] * 2,
'sc1': [2, 3, 1, 5, 6]})
df_excluded_expected = pd.DataFrame({'ID': ['c'],
'sc1': [1]})
(df_included,
df_excluded) = FeaturePreprocessor.select_candidates(data, 2, 'ID')
assert_frame_equal(df_included, df_included_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_rename_no_columns(self):
df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'length',
'raw', 'candidate', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'spkitemid', 'sc1', 'sc2',
'length', 'raw', 'candidate')
assert_array_equal(df.columns,
['spkitemid', 'sc1', 'sc2', 'length', 'raw',
'candidate', 'feature1', 'feature2'])
def test_rename_no_columns_some_values_none(self):
df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'spkitemid', 'sc1', 'sc2', None, None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'feature1', 'feature2'])
def test_rename_no_used_columns_but_unused_columns_with_default_names(self):
df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'length', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'spkitemid', 'sc1', 'sc2', None, None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2',
'##length##', 'feature1', 'feature2'])
def test_rename_used_columns(self):
df = pd.DataFrame(columns=['id', 'r1', 'r2', 'words', 'SR', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'id', 'r1', 'r2', 'words', 'SR', None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'length',
'raw', 'feature1', 'feature2'])
def test_rename_used_columns_and_unused_columns_with_default_names(self):
df = pd.DataFrame(columns=['id', 'r1', 'r2', 'words', 'raw', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'id', 'r1', 'r2', 'words', None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'length',
'##raw##', 'feature1', 'feature2'])
def test_rename_used_columns_with_swapped_names(self):
df = pd.DataFrame(columns=['id', 'sc1', 'sc2', 'raw', 'words', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'id', 'sc2', 'sc1', 'words', None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc2', 'sc1', '##raw##',
'length', 'feature1', 'feature2'])
def test_rename_used_columns_but_not_features(self):
df = pd.DataFrame(columns=['id', 'sc1', 'sc2', 'length', 'feature2'])
df = self.fpp.rename_default_columns(df, ['length'], 'id', 'sc1', 'sc2', None, None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'length', 'feature2'])
def test_rename_candidate_column(self):
df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'length',
'apptNo', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [],
'spkitemid', 'sc1', 'sc2', None, None, 'apptNo')
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', '##length##',
'candidate', 'feature1', 'feature2'])
def test_rename_candidate_named_sc2(self):
df = pd.DataFrame(columns=['id', 'sc1', 'sc2', 'question', 'l1', 'score'])
df_renamed = self.fpp.rename_default_columns(df, [],
'id', 'sc1', None, None, 'score', 'sc2')
assert_array_equal(df_renamed.columns, ['spkitemid', 'sc1',
'candidate', 'question', 'l1', 'raw'])
@raises(KeyError)
def test_check_subgroups_missing_columns(self):
df = pd.DataFrame(columns=['a', 'b', 'c'])
subgroups = ['a', 'd']
FeaturePreprocessor.check_subgroups(df, subgroups)
def test_check_subgroups_nothing_to_replace(self):
df = pd.DataFrame({'a': ['1', '2'],
'b': ['32', '34'],
'd': ['abc', 'def']})
subgroups = ['a', 'd']
df_out = FeaturePreprocessor.check_subgroups(df, subgroups)
assert_frame_equal(df_out, df)
def test_check_subgroups_replace_empty(self):
df = pd.DataFrame({'a': ['1', ''],
'b': [' ', '34'],
'd': ['ab c', ' ']})
subgroups = ['a', 'd']
df_expected = pd.DataFrame({'a': ['1', 'No info'],
'b': [' ', '34'],
'd': ['ab c', 'No info']})
df_out = FeaturePreprocessor.check_subgroups(df, subgroups)
assert_frame_equal(df_out, df_expected)
def test_filter_on_column(self):
bad_df = pd.DataFrame({'spkitemlab': np.arange(1, 9, dtype='int64'),
'sc1': ['00', 'TD', '02', '03'] * 2})
df_filtered_with_zeros = pd.DataFrame({'spkitemlab': [1, 3, 4, 5, 7, 8],
'sc1': [0.0, 2.0, 3.0] * 2})
df_filtered = pd.DataFrame({'spkitemlab': [3, 4, 7, 8], 'sc1': [2.0, 3.0] * 2})
(output_df_with_zeros,
output_excluded_df_with_zeros) = self.fpp.filter_on_column(bad_df, 'sc1',
'spkitemlab',
exclude_zeros=False)
output_df, output_excluded_df = self.fpp.filter_on_column(bad_df, 'sc1',
'spkitemlab',
exclude_zeros=True)
assert_frame_equal(output_df_with_zeros, df_filtered_with_zeros)
assert_frame_equal(output_df, df_filtered)
def test_filter_on_column_all_non_numeric(self):
bad_df = pd.DataFrame({'sc1': ['A', 'I', 'TD', 'TD'] * 2,
'spkitemlab': range(1, 9)})
expected_df_excluded = bad_df.copy()
expected_df_excluded.drop('sc1', axis=1, inplace=True)
df_filtered, df_excluded = self.fpp.filter_on_column(bad_df, 'sc1',
'spkitemlab',
exclude_zeros=True)
ok_(df_filtered.empty)
ok_("sc1" not in df_filtered.columns)
assert_frame_equal(df_excluded, expected_df_excluded, check_dtype=False)
def test_filter_on_column_std_epsilon_zero(self):
        # Test that the function excludes columns where the std is returned as
        # a very low value rather than 0
data = {'id': np.arange(1, 21, dtype='int64'),
'feature_ok': np.arange(1, 21),
'feature_zero_sd': [1.5601] * 20}
bad_df = pd.DataFrame(data=data)
output_df, output_excluded_df = self.fpp.filter_on_column(bad_df,
'feature_zero_sd',
'id',
exclude_zeros=False,
exclude_zero_sd=True)
good_df = bad_df[['id', 'feature_ok']].copy()
assert_frame_equal(output_df, good_df)
ok_(output_excluded_df.empty)
def test_filter_on_column_with_inf(self):
        # Test that the function excludes columns where the feature value is 'inf'
data = pd.DataFrame({'feature_1': [1.5601, 0, 2.33, 11.32],
'feature_ok': np.arange(1, 5)})
data['feature_with_inf'] = 1 / data['feature_1']
data['id'] = np.arange(1, 5, dtype='int64')
bad_df = data[np.isinf(data['feature_with_inf'])].copy()
good_df = data[~np.isinf(data['feature_with_inf'])].copy()
bad_df.reset_index(drop=True, inplace=True)
good_df.reset_index(drop=True, inplace=True)
output_df, output_excluded_df = self.fpp.filter_on_column(data, 'feature_with_inf',
'id',
exclude_zeros=False,
exclude_zero_sd=True)
assert_frame_equal(output_df, good_df)
assert_frame_equal(output_excluded_df, bad_df)
def test_filter_on_flag_column_empty_flag_dictionary(self):
# no flags specified, keep the data frame as is
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 0, 0, 0],
'flag2': [1, 2, 2, 1]})
flag_dict = {}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_int_column_and_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 1, 2, 3]})
flag_dict = {'flag1': [0, 1, 2, 3, 4]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_float_column_and_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0.5, 1.1, 2.2, 3.6]})
flag_dict = {'flag1': [0.5, 1.1, 2.2, 3.6, 4.5]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_str_column_and_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': ['a', 'b', 'c', 'd']})
flag_dict = {'flag1': ['a', 'b', 'c', 'd', 'e']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_float_column_int_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0.0, 1.0, 2.0, 3.0]})
flag_dict = {'flag1': [0, 1, 2, 3, 4]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_int_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 1, 2, 3]})
flag_dict = {'flag1': [0.0, 1.0, 2.0, 3.0, 4.5]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_str_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': ['4', '1', '2', '3.5']})
flag_dict = {'flag1': [0.0, 1.0, 2.0, 3.5, 4.0]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_float_column_str_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [4.0, 1.0, 2.0, 3.5]})
flag_dict = {'flag1': ['1', '2', '3.5', '4', 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_str_column_int_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': ['0.0', '1.0', '2.0', '3.0']})
flag_dict = {'flag1': [0, 1, 2, 3, 4]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_int_column_str_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 1, 2, 3]})
flag_dict = {'flag1': ['0.0', '1.0', '2.0', '3.0', 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_mixed_type_column_str_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, '1.0', 2, 3.5]})
flag_dict = {'flag1': ['0.0', '1.0', '2.0', '3.5', 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_mixed_type_column_int_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, '1.0', 2, 3.0]})
flag_dict = {'flag1': [0, 1, 2, 3, 4]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_mixed_type_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, '1.5', 2, 3.5]})
flag_dict = {'flag1': [0.0, 1.5, 2.0, 3.5, 4.0]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_int_column_mixed_type_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 1, 2, 3]})
flag_dict = {'flag1': [0, 1, 2, 3.0, 3.5, 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_float_column_mixed_type_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0.0, 1.0, 2.0, 3.5]})
flag_dict = {'flag1': [0, 1, 2, 3.0, 3.5, 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_str_column_mixed_type_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': ['0.0', '1.0', '2.0', '3.5']})
flag_dict = {'flag1': [0, 1, 2, 3.0, 3.5, 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_mixed_type_column_mixed_type_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [1, 2, 3.5, 'TD']})
flag_dict = {'flag1': [0, 1, 2, 3.0, 3.5, 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_mixed_type_column_mixed_type_dict_filter_preserve_type(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 1.5, 2, 3.5, 'TD', 'NS']})
flag_dict = {'flag1': [1.5, 2, 'TD']}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [1.5, 2, 'TD']})
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 3.5, 'NS']})
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_int_flag_column_int_dict(self):
df = pd.DataFrame({'spkitemid': [1, 2, 3, 4, 5, 6],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 2, 2, 3, 4, None]}, dtype=object)
flag_dict = {'flag1': [2, 4]}
df_new_expected = pd.DataFrame({'spkitemid': [2, 3, 5],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [2, 2, 4]}, dtype=object)
df_excluded_expected = pd.DataFrame({'spkitemid': [1, 4, 6],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 3, None]}, dtype=object)
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_float_flag_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1.2, 2.1, 2.1, 3.3, 4.2, None]})
flag_dict = {'flag1': [2.1, 4.2]}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [2.1, 2.1, 4.2]})
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1.2, 3.3, None]})
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_str_flag_column_str_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': ['a', 'b', 'b', 'c', 'd', None]})
flag_dict = {'flag1': ['b', 'd']}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': ['b', 'b', 'd']})
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': ['a', 'c', None]})
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_mixed_type_flag_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 1.5, 2.0, 'TD', 2.0, None]},
dtype=object)
flag_dict = {'flag1': [1.5, 2.0]}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [1.5, 2.0, 2.0]},
dtype=object)
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 'TD', None]},
dtype=object)
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_mixed_type_flag_column_int_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1.5, 2, 2, 'TD', 4, None]},
dtype=object)
flag_dict = {'flag1': [2, 4]}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [2, 2, 4]},
dtype=object)
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1.5, 'TD', None]},
dtype=object)
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_mixed_type_flag_column_mixed_type_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 1.5, 2, 3.5, 'TD', None]},
dtype=object)
flag_dict = {'flag1': [1.5, 2, 'TD']}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [1.5, 2, 'TD']}, dtype=object)
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 3.5, None]}, dtype=object)
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_two_flags_same_responses(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 1.5, 2, 3.5, 'TD', 'NS'],
'flag2': [1, 0, 0, 1, 0, 1]})
flag_dict = {'flag1': [1.5, 2, 'TD'], 'flag2': [0]}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [1.5, 2, 'TD'],
'flag2': [0, 0, 0]})
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 3.5, 'NS'],
'flag2': [1, 1, 1]})
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_two_flags_different_responses(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 1.5, 2, 3.5, 'TD', 'NS'],
'flag2': [2, 0, 0, 1, 0, 1]})
flag_dict = {'flag1': [1.5, 2, 'TD', 'NS'], 'flag2': [0, 2]}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [1.5, 2, 'TD'],
'flag2': [0, 0, 0]})
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 3.5, 'NS'],
'flag2': [2, 1, 1]})
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
@raises(KeyError)
def test_filter_on_flag_column_missing_columns(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': ['1', '1', '1', '1'],
'flag2': ['1', '2', '2', '1']})
flag_dict = {'flag3': ['0'], 'flag2': ['1', '2']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
@raises(ValueError)
def test_filter_on_flag_column_nothing_left(self):
bad_df = pd.DataFrame({'spkitemid': ['a1', 'b1', 'c1', 'd1'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [1, 0, 20, 14],
'flag2': [1, 1.0, 'TD', '03']})
flag_dict = {'flag1': [1, 0, 14], 'flag2': ['TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(bad_df, flag_dict)
def test_remove_outliers(self):
# we want to test that even if we pass in a list of
# integers, we still get the right clamped output
data = [1, 1, 2, 2, 1, 1] * 10 + [10]
ceiling = np.mean(data) + 4 * np.std(data)
clamped_data = FeaturePreprocessor.remove_outliers(data)
assert_almost_equal(clamped_data[-1], ceiling)
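# Hedged sketch (not the library's actual implementation): the test above
# implies that remove_outliers clamps values to mean +/- 4 standard deviations
# computed on the full input. A minimal version along those lines:
def _clamp_outliers_sketch(values, num_sd=4):
    arr = np.asarray(values, dtype=float)
    floor = arr.mean() - num_sd * arr.std()
    ceiling = arr.mean() + num_sd * arr.std()
    return np.clip(arr, floor, ceiling)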
def test_generate_feature_names_subset(self):
reserved_column_names = ['reserved_col1', 'reserved_col2']
expected = ['col_1']
df = pd.DataFrame({'reserved_col1': ['X', 'Y', 'Z'],
'reserved_col2': ['Q', 'R', 'S'],
'col_1': [1, 2, 3],
'col_2': ['A', 'B', 'C']})
subset = 'A'
feature_subset = pd.DataFrame({'Feature': ['col_1', 'col_2', 'col_3'],
'A': [1, 0, 0],
'B': [1, 1, 1]})
feat_names = self.fpp.generate_feature_names(df,
reserved_column_names,
feature_subset,
subset)
eq_(feat_names, expected)
def test_generate_feature_names_none(self):
reserved_column_names = ['reserved_col1', 'reserved_col2']
expected = ['col_1', 'col_2']
df = pd.DataFrame({'reserved_col1': ['X', 'Y', 'Z'],
'reserved_col2': ['Q', 'R', 'S'],
'col_1': [1, 2, 3],
'col_2': ['A', 'B', 'C']})
feat_names = self.fpp.generate_feature_names(df,
reserved_column_names,
feature_subset_specs=None,
feature_subset=None)
eq_(feat_names, expected)
def test_model_name_builtin_model(self):
model_name = 'LinearRegression'
model_type = self.fpp.check_model_name(model_name)
eq_(model_type, 'BUILTIN')
def test_model_name_skll_model(self):
model_name = 'AdaBoostRegressor'
model_type = self.fpp.check_model_name(model_name)
eq_(model_type, 'SKLL')
@raises(ValueError)
def test_model_name_wrong_name(self):
model_name = 'random_model'
self.fpp.check_model_name(model_name)
def test_trim(self):
values = np.array([1.4, 8.5, 7.4])
expected = np.array([1.4, 8.4998, 7.4])
actual = FeaturePreprocessor.trim(values, 1, 8)
assert_array_equal(actual, expected)
def test_trim_with_list(self):
values = [1.4, 8.5, 7.4]
expected = np.array([1.4, 8.4998, 7.4])
actual = FeaturePreprocessor.trim(values, 1, 8)
assert_array_equal(actual, expected)
def test_trim_with_custom_tolerance(self):
values = [0.6, 8.4, 7.4]
expected = np.array([0.75, 8.25, 7.4])
actual = FeaturePreprocessor.trim(values, 1, 8, 0.25)
assert_array_equal(actual, expected)
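# Hedged sketch inferred from the expected values above (not the library code):
# trim appears to clamp scores to [trim_min - tolerance, trim_max + tolerance],
# with the tolerance defaulting to 0.4998, e.g.
def _trim_sketch(values, trim_min, trim_max, tolerance=0.4998):
    arr = np.asarray(values, dtype=float)
    return np.clip(arr, trim_min - tolerance, trim_max + tolerance)
# _trim_sketch([1.4, 8.5, 7.4], 1, 8) -> array([1.4, 8.4998, 7.4])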
def test_preprocess_feature_fail(self):
np.random.seed(10)
values = np.random.random(size=1000)
values = np.append(values, np.array([10000000]))
mean = values.mean()
std = values.std()
expected = values.copy()
expected[-1] = mean + 4 * std
actual = self.fpp.preprocess_feature(values,
'A',
'raw',
mean,
std)
assert_array_equal(actual, expected)
def test_preprocess_feature_with_outlier(self):
np.random.seed(10)
values = np.random.random(size=1000)
values = np.append(values, np.array([10000000]))
mean = values.mean()
std = values.std()
expected = values.copy()
expected[-1] = mean + 4 * std
actual = self.fpp.preprocess_feature(values,
'A',
'raw',
mean,
std,
exclude_zero_sd=True)
assert_array_equal(actual, expected)
def test_preprocess_features(self):
train = pd.DataFrame({'A': [1, 2, 4, 3]})
test = pd.DataFrame({'A': [4, 3, 2, 1]})
train_expected = (train['A'] - train['A'].mean()) / train['A'].std()
train_expected = pd.DataFrame(train_expected)
test_expected = (test['A'] - test['A'].mean()) / test['A'].std()
test_expected = pd.DataFrame(test_expected)
info_expected = pd.DataFrame({'feature': ['A'],
'sign': [1],
'train_mean': [train.A.mean()],
'train_sd': [train.A.std()],
'train_transformed_mean': [train.A.mean()],
'train_transformed_sd': [test.A.std()],
'transform': ['raw']})
specs = pd.DataFrame({'feature': ['A'],
'transform': ['raw'],
'sign': [1]})
(train_processed,
test_processed,
info_processed) = self.fpp.preprocess_features(train, test, specs)
assert_frame_equal(train_processed.sort_index(axis=1),
train_expected.sort_index(axis=1))
assert_frame_equal(test_processed.sort_index(axis=1),
test_expected.sort_index(axis=1))
assert_frame_equal(info_processed.sort_index(axis=1),
info_expected.sort_index(axis=1))
def test_filter_data_features(self):
data = {'ID': [1, 2, 3, 4],
'LENGTH': [10, 12, 11, 12],
'h1': [1, 2, 3, 1],
'candidate': ['A', 'B', 'C', 'A'],
'h2': [1, 2, 3, 1],
'feature1': [1, 3, 4, 1],
'feature2': [1, 3, 2, 2]}
df_filtered_features_expected = pd.DataFrame({'spkitemid': [1, 2, 3, 4],
'sc1': [1.0, 2.0, 3.0, 1.0],
'feature1': [1.0, 3.0, 4.0, 1.0],
'feature2': [1.0, 3.0, 2.0, 2.0]})
df_filtered_features_expected = df_filtered_features_expected[['spkitemid',
'sc1',
'feature1',
'feature2']]
data = pd.DataFrame(data)
(df_filtered_features,
_,
_,
_,
_,
_,
_,
_,
_,
_) = self.fpp.filter_data(data,
'h1',
'ID',
'LENGTH',
'h2',
'candidate',
['feature1', 'feature2'],
['LENGTH', 'ID', 'candidate', 'h1'],
0,
6,
{},
[])
assert_frame_equal(df_filtered_features,
df_filtered_features_expected)
def test_filter_data_correct_features_and_length_in_other_columns(self):
data = {'ID': [1, 2, 3, 4],
'LENGTH': [10, 10, 10, 10],
'h1': [1, 2, 3, 1],
'candidate': ['A', 'B', 'C', 'A'],
'h2': [1, 2, 3, 1],
'feature1': [1, 3, 4, 1],
'feature2': [1, 3, 2, 2]}
data = pd.DataFrame(data)
(_,
_,
df_filtered_other_columns,
_,
_,
_,
_,
_,
_,
feature_names) = self.fpp.filter_data(data,
'h1',
'ID',
'LENGTH',
'h2',
'candidate',
['feature1', 'feature2'],
['LENGTH', 'ID', 'candidate', 'h1'],
0,
6,
{},
[])
eq_(feature_names, ['feature1', 'feature2'])
assert '##LENGTH##' in df_filtered_other_columns.columns
def test_filter_data_length_in_other_columns(self):
data = {'ID': [1, 2, 3, 4],
'LENGTH': [10, 10, 10, 10],
'h1': [1, 2, 3, 1],
'candidate': ['A', 'B', 'C', 'A'],
'h2': [1, 2, 3, 1],
'feature1': [1, 3, 4, 1],
'feature2': [1, 3, 2, 2]}
data = pd.DataFrame(data)
(_,
_,
df_filtered_other_columns,
_,
_,
_,
_,
_,
_,
feature_names) = self.fpp.filter_data(data,
'h1',
'ID',
'LENGTH',
'h2',
'candidate',
['feature1', 'feature2'],
['LENGTH', 'ID', 'candidate', 'h1'],
0,
6,
{},
[])
eq_(feature_names, ['feature1', 'feature2'])
assert '##LENGTH##' in df_filtered_other_columns.columns
@raises(ValueError)
def test_filter_data_min_candidates_raises_value_error(self):
data = {'ID': [1, 2, 3, 4],
'LENGTH': [10, 10, 10, 10],
'h1': [1, 2, 3, 1],
'candidate': ['A', 'B', 'C', 'A'],
'h2': [1, 2, 3, 1],
'feature1': [1, 3, 4, 1],
'feature2': [1, 3, 2, 2]}
data = pd.DataFrame(data)
self.fpp.filter_data(data,
'h1',
'ID',
'LENGTH',
'h2',
'candidate',
['feature1', 'feature2'],
['LENGTH', 'ID', 'candidate', 'h1'],
0,
6,
{},
[],
min_candidate_items=5)
def test_filter_data_with_min_candidates(self):
data = {'ID': [1, 2, 3, 4],
'LENGTH': [10, 10, 10, 10],
'h1': [1, 2, 3, 1],
'candidate': ['A', 'B', 'C', 'A'],
'h2': [1, 2, 3, 1],
'feature1': [1, 3, 4, 1],
'feature2': [1, 3, 2, 2]}
data = pd.DataFrame(data)
(df_filtered_features,
_,
_,
_,
_,
df_filtered_human_scores,
_,
_,
_,
_) = self.fpp.filter_data(data,
'h1',
'ID',
'LENGTH',
'h2',
'candidate',
['feature1', 'feature2'],
['LENGTH', 'ID', 'candidate', 'h1'],
0,
6,
{},
[],
min_candidate_items=2)
eq_(df_filtered_features.shape[0], 2)
assert all(col in df_filtered_human_scores.columns
for col in ['sc1', 'sc2'])
def test_filter_data_id_candidate_equal(self):
data = {'LENGTH': [10, 12, 18, 21],
'h1': [1, 2, 3, 1],
'candidate': ['A', 'B', 'C', 'D'],
'h2': [1, 2, 3, 1],
'feature1': [1, 3, 4, 1],
'feature2': [1, 3, 2, 2]}
data = pd.DataFrame(data)
(_,
df_filtered_metadata,
_,
_,
_,
_,
_,
_,
_,
_) = self.fpp.filter_data(data,
'h1',
'candidate',
'LENGTH',
'h2',
'candidate',
['feature1', 'feature2'],
['LENGTH', 'ID', 'candidate', 'h1'],
0,
6,
{},
[])
expected = pd.DataFrame({'spkitemid': ['A', 'B', 'C', 'D'],
'candidate': ['A', 'B', 'C', 'D']})
expected = expected[['spkitemid', 'candidate']]
assert_frame_equal(df_filtered_metadata, expected)
class TestFeatureSpecsProcessor:
def test_generate_default_specs(self):
fnames = ['Grammar', 'Vocabulary', 'Pronunciation']
df_specs = FeatureSpecsProcessor.generate_default_specs(fnames)
assert_equal(len(df_specs), 3)
assert_equal(df_specs['feature'][0], 'Grammar')
assert_equal(df_specs['transform'][1], 'raw')
assert_equal(df_specs['sign'][2], 1.0)
def test_generate_specs_from_data_with_negative_sign(self):
feature_subset_specs = pd.DataFrame({'Feature': ['Grammar',
'Vocabulary',
'Fluency',
'Content_coverage',
'Discourse'],
'Sign_SYS1': ['-', '+', '+', '+', '-']})
np.random.seed(10)
data = {'Grammar': np.random.randn(10),
'Fluency': np.random.randn(10),
'Discourse': np.random.randn(10),
'r1': np.random.choice(4, 10),
'spkitemlab': ['a-5'] * 10}
df = pd.DataFrame(data)
df_specs = FeatureSpecsProcessor.generate_specs(df,
['Grammar',
'Fluency',
'Discourse'],
'r1',
feature_subset_specs,
'SYS1')
assert_equal(len(df_specs), 3)
assert_array_equal(df_specs['feature'], ['Grammar', 'Fluency', 'Discourse'])
assert_array_equal(df_specs['sign'], [-1.0, 1.0, -1.0])
def test_generate_specs_from_data_with_default_sign(self):
feature_subset_specs = pd.DataFrame({'Feature': ['Grammar',
'Vocabulary',
'Fluency',
'Content_coverage',
'Discourse'],
'Sign_SYS1': ['-', '+', '+', '+', '-']})
np.random.seed(10)
data = {'Grammar': np.random.randn(10),
'Fluency': np.random.randn(10),
'Discourse': np.random.randn(10),
'r1': np.random.choice(4, 10),
'spkitemlab': ['a-5'] * 10}
df = pd.DataFrame(data)
df_specs = FeatureSpecsProcessor.generate_specs(df,
['Grammar',
'Fluency',
'Discourse'],
'r1',
feature_subset_specs,
feature_sign=None)
assert_equal(len(df_specs), 3)
assert_array_equal(df_specs['feature'], ['Grammar', 'Fluency', 'Discourse'])
assert_array_equal(df_specs['sign'], [1.0, 1.0, 1.0])
def test_generate_specs_from_data_with_transformation(self):
feature_subset_specs = pd.DataFrame({'Feature': ['Grammar',
'Vocabulary',
'Fluency',
'Content_coverage',
'Discourse'],
'Sign_SYS1': ['-', '+', '+', '+', '-']})
np.random.seed(10)
r1 = np.random.choice(range(1, 5), 10)
data = {'Grammar': np.random.randn(10),
'Vocabulary': r1**2,
'Discourse': np.random.randn(10),
'r1': r1,
'spkitemlab': ['a-5'] * 10}
df = pd.DataFrame(data)
df_specs = FeatureSpecsProcessor.generate_specs(df,
['Grammar',
'Vocabulary',
'Discourse'],
'r1',
feature_subset_specs,
'SYS1')
assert_array_equal(df_specs['feature'], ['Grammar', 'Vocabulary', 'Discourse'])
assert_equal(df_specs['transform'][1], 'sqrt')
def test_generate_specs_from_data_when_transformation_changes_sign(self):
feature_subset_specs = pd.DataFrame({'Feature': ['Grammar',
'Vocabulary',
'Fluency',
'Content_coverage',
'Discourse'],
'Sign_SYS1': ['-', '+', '+', '+', '-']})
np.random.seed(10)
r1 = np.random.choice(range(1, 5), 10)
data = {'Grammar': np.random.randn(10),
'Vocabulary': 1 / r1,
'Discourse': np.random.randn(10),
'r1': r1,
'spkitemlab': ['a-5'] * 10}
df = pd.DataFrame(data)
df_specs = FeatureSpecsProcessor.generate_specs(df,
['Grammar',
'Vocabulary',
'Discourse'],
'r1',
feature_subset_specs,
'SYS1')
assert_equal(df_specs['feature'][1], 'Vocabulary')
assert_equal(df_specs['transform'][1], 'addOneInv')
assert_equal(df_specs['sign'][1], -1)
def test_generate_specs_from_data_no_subset_specs(self):
np.random.seed(10)
data = {'Grammar': np.random.randn(10),
'Fluency': np.random.randn(10),
'Discourse': np.random.randn(10),
'r1': np.random.choice(4, 10),
'spkitemlab': ['a-5'] * 10}
df = pd.DataFrame(data)
df_specs = FeatureSpecsProcessor.generate_specs(df,
['Grammar',
'Fluency',
'Discourse'],
'r1')
assert_equal(len(df_specs), 3)
assert_array_equal(df_specs['feature'], ['Grammar', 'Fluency', 'Discourse'])
assert_array_equal(df_specs['sign'], [1.0, 1.0, 1.0])
def test_validate_feature_specs(self):
df_feature_specs = pd.DataFrame({'feature': ['f1', 'f2', 'f3'],
'sign': [1.0, 1.0, -1.0],
'transform': ['raw', 'inv', 'sqrt']})
df_new_feature_specs = FeatureSpecsProcessor.validate_feature_specs(df_feature_specs)
assert_frame_equal(df_feature_specs, df_new_feature_specs)
def test_validate_feature_specs_with_Feature_as_column(self):
df_feature_specs = pd.DataFrame({'Feature': ['f1', 'f2', 'f3'],
'sign': [1.0, 1.0, -1.0],
'transform': ['raw', 'inv', 'sqrt']})
df_expected_feature_specs = pd.DataFrame({'feature': ['f1', 'f2', 'f3'],
'sign': [1.0, 1.0, -1.0],
'transform': ['raw', 'inv', 'sqrt']})
df_new_feature_specs = FeatureSpecsProcessor.validate_feature_specs(df_feature_specs)
assert_frame_equal(df_new_feature_specs, df_expected_feature_specs)
def test_validate_feature_specs_sign_to_float(self):
df_feature_specs = pd.DataFrame({'feature': ['f1', 'f2', 'f3'],
'sign': ['1', '1', '-1'],
'transform': ['raw', 'inv', 'sqrt']})
df_expected_feature_specs = pd.DataFrame({'feature': ['f1', 'f2', 'f3'],
'sign': [1.0, 1.0, -1.0],
'transform': ['raw', 'inv', 'sqrt']})
df_new_feature_specs = FeatureSpecsProcessor.validate_feature_specs(df_feature_specs)
assert_frame_equal(df_new_feature_specs, df_expected_feature_specs)
# -*- coding: utf-8 -*-
"""
Created on Wed May 13 13:59:31 2020
@author: <NAME>
"""
import sys, os
sys.path.append('H:/cloud/cloud_data/Projects/DL/Code/src')
sys.path.append('H:/cloud/cloud_data/Projects/DL/Code/src/ct')
import pandas as pd
import ntpath
import datetime
from openpyxl.worksheet.datavalidation import DataValidation
from openpyxl.formatting.formatting import ConditionalFormattingList
from openpyxl.styles import Font, Color, Border, Side
from openpyxl.styles import Protection
from openpyxl.styles import PatternFill
from glob import glob
from shutil import copyfile
import numpy as np
from collections import defaultdict
from openpyxl.utils import get_column_letter
from CTDataStruct import CTPatient
import keyboard
from openpyxl.styles.differential import DifferentialStyle
from openpyxl.formatting import Rule
from settings import initSettings, saveSettings, loadSettings, fillSettingsTags
from classification import createRFClassification, initRFClassification, classifieRFClassification
from filterTenStepsGuide import filter_CACS_10StepsGuide, filter_CACS, filter_NCS, filterReconstruction, filter_CTA, filer10StepsGuide, filterReconstructionRF
from discharge_extract import extractDICOMTags
from tqdm import tqdm
#from reco.reco_filter import RecoFilter
patient_status = ['OK', 'EXCLUDED', 'MISSING_CACS', 'MISSING_CTA', 'MISSING_NC_CACS', 'MISSING_NC_CTA']
patient_status_manual = ['OK', 'EXCLUDED', 'UNDEFINED', 'INPROGRESS']
patient_status_manualStr = '"' + 'OK,' + 'EXCLUDED,' + 'UNDEFINED,' + 'INPROGRESS,' + '"'
scanClasses = defaultdict(lambda:None,{'UNDEFINED': 0, 'CACS': 1, 'CTA': 2, 'NCS_CACS': 3, 'NCS_CTA': 4, 'ICA': 5, 'OTHER': 6})
scanClassesInv = defaultdict(lambda:None,{0: 'UNDEFINED', 1: 'CACS', 2: 'CTA', 3: 'NCS_CACS', 4: 'NCS_CTA', 5: 'ICA', 6: 'OTHER'})
scanClassesStr = '"' + 'UNDEFINED,' + 'CACS,' + 'CTA,' + 'NCS_CACS,' + 'NCS_CTA,' + 'ICA,' + 'OTHER' +'"'
scanClassesManualStr = '"' + 'UNDEFINED,' + 'CACS,' + 'CTA,' + 'NCS_CACS,' + 'NCS_CTA,' + 'ICA,' + 'OTHER,' + 'PROBLEM,' + 'QUESTION,' +'"'
imageQualityStr = '"' + 'UNDEFINED,' + 'GOOD,' + 'BAD' +'"'
recoClasses = ['FBP', 'IR', 'UNDEFINED']
changeClasses = ['NO_CHANGE', 'SOURCE_CHANGE', 'MASTER_CHANGE', 'MASTER_SOURCE_CHANGE']
def setColor(workbook, sheet, rows, NumColumns, color):
for r in rows:
if r % 100 == 0:
print('index:', r, '/', max(rows))
for c in range(1,NumColumns):
cell = sheet.cell(r, c)
cell.fill = PatternFill(start_color=color, end_color=color, fill_type = 'solid')
def setColorFormula(sheet, formula, color, NumRows, NumColumns):
column_letter = get_column_letter(NumColumns+1)
colorrange="B2:" + str(column_letter) + str(NumRows)
dxf = DifferentialStyle(font=Font(color=color))
r = Rule(type="expression", dxf=dxf, stopIfTrue=True)
r.formula = [formula]
sheet.conditional_formatting.add(colorrange, r)
def setBorderFormula(sheet, formula, NumRows, NumColumns):
column_letter = get_column_letter(NumColumns+1)
colorrange="B1:" + str(column_letter) + str(NumRows)
thin = Side(border_style="thin", color="000000")
border = Border(bottom=thin)
dxf = DifferentialStyle(border=border)
r = Rule(type="expression", dxf=dxf, stopIfTrue=True)
r.formula = [formula]
sheet.conditional_formatting.add(colorrange, r)
# Set border for index
for i in range(1, NumRows + 1):
cell = sheet.cell(i, 1)
cell.border = Border()
return sheet
def sortFilepath(filepathList):
filenameList=[]
folderpathList=[]
for filepath in filepathList:
folderpath, filename, _ = splitFilePath(filepath)
filenameList.append(filename)
folderpathList.append(folderpath)
dates_str = [x.split('_')[-1] for x in filenameList]
dates = [datetime.datetime(int(x[4:8]), int(x[2:4]), int(x[0:2])) for x in dates_str]
idx = list(np.argsort(dates))
filepathlistsort=[]
for i in idx:
filepathlistsort.append(folderpathList[i] + '/' + '_'.join(filenameList[i].split('_')[0:-1]) + '_' + dates[i].strftime("%d%m%Y") + '.xlsx')
return filepathlistsort
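# Illustrative note (hypothetical paths): sortFilepath assumes filenames that
# end in _DDMMYYYY before the extension and returns them in chronological order,
# e.g.
# sortFilepath(['H:/data/discharge_master_01032020.xlsx',
#               'H:/data/discharge_master_15012020.xlsx'])
# -> ['H:/data/discharge_master_15012020.xlsx',   # 15.01.2020
#     'H:/data/discharge_master_01032020.xlsx']   # 01.03.2020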
def sortFolderpath(folderpath, folderpathList):
dates_str = [x.split('_')[-1] for x in folderpathList]
dates = [datetime.datetime(int(x[4:8]), int(x[2:4]), int(x[0:2])) for x in dates_str]
date_str = folderpath.split('_')[-1]
date = datetime.datetime(int(date_str[4:8]), int(date_str[2:4]), int(date_str[0:2]))
idx = list(np.argsort(dates))
folderpathSort=[]
for i in idx:
folderpathSort.append(folderpathList[i])
if dates[i] == date:
break
return folderpathSort
def isNaN(num):
return num != num
def splitFilePath(filepath):
""" Split filepath into folderpath, filename and file extension
:param filepath: Filepath
:type filepath: str
"""
folderpath, _ = ntpath.split(filepath)
head, file_extension = os.path.splitext(filepath)
folderpath, filename = ntpath.split(head)
return folderpath, filename, file_extension
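# Usage sketch (hypothetical path, for illustration only):
# splitFilePath('H:/data/discharge_master_01032020.xlsx')
# -> ('H:/data', 'discharge_master_01032020', '.xlsx')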
def update_CACS_10StepsGuide(df_CACS, sheet):
for index, row in df_CACS.iterrows():
cell_str = 'AB' + str(index+2)
cell = sheet[cell_str]
cell.value = row['CACS10StepsGuide']
#cell.protection = Protection(locked=False)
return sheet
def mergeITT(df_ITT, df_data):
# Merge ITT table
print('Merge ITT table')
for i in range(len(df_data)):
patient = df_ITT[df_ITT['ID']==df_data.loc[i, 'PatientID']]
if len(patient)==1:
df_data.loc[i, 'ITT'] = patient.iloc[0]['ITT']
df_data.loc[i, 'Date CT'] = patient.iloc[0]['Date CT']
df_data.loc[i, 'Date ICA'] = patient.iloc[0]['Date ICA']
return df_data
def mergeDicom(df_dicom, df_data_old=None):
print('Merge dicom table')
if df_data_old is None:
df_data = df_dicom.copy()
else:
idx = df_dicom['SeriesInstanceUID'].isin(df_data_old['SeriesInstanceUID'])
df_data = pd.concat([df_data_old, df_dicom[idx==False]], axis=0)
return df_data
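# Toy illustration of the merge rule above (made-up data): rows of df_dicom
# whose SeriesInstanceUID already exists in df_data_old are skipped, the rest
# are appended.
# old = pd.DataFrame({'SeriesInstanceUID': ['1', '2'], 'val': [10, 20]})
# new = pd.DataFrame({'SeriesInstanceUID': ['2', '3'], 'val': [99, 30]})
# mergeDicom(new, old)  # keeps UID '2' from old and appends only the UID '3' row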
def mergeTracking(df_tracking, df_data, df_data_old=None):
if df_data_old is None:
df_data = df_data.copy()
df_tracking = df_tracking.copy()
df_data.replace(to_replace=[np.nan], value='', inplace=True)
df_tracking.replace(to_replace=[np.nan], value='', inplace=True)
# Merge tracking table
print('Merge tracking table')
df_data['Responsible Person Problem'] = ''
df_data['Date Query'] = ''
df_data['Date Answer'] = ''
df_data['Problem Summary'] = ''
df_data['Results'] = ''
for index, row in df_tracking.iterrows():
patient = row['PatientID']
df_patient = df_data[df_data['PatientID']==patient]
for indexP, rowP in df_patient.iterrows():
# Update 'Problem Summary'
if df_data.loc[indexP, 'Problem Summary']=='':
df_data.loc[indexP, 'Problem Summary'] = row['Problem Summary']
else:
df_data.loc[indexP, 'Problem Summary'] = df_data.loc[indexP, 'Problem Summary'] + ' | ' + row['Problem Summary']
# Update 'results'
if df_data.loc[indexP, 'Results']=='':
df_data.loc[indexP, 'Results'] = row['results']
else:
df_data.loc[indexP, 'Results'] = df_data.loc[indexP, 'Results'] + ' | ' + row['results']
else:
df_data = df_data.copy()
df_data_old = df_data_old.copy()
df_tracking = df_tracking.copy()
df_data.replace(to_replace=[np.nan], value='', inplace=True)
df_data_old.replace(to_replace=[np.nan], value='', inplace=True)
df_tracking.replace(to_replace=[np.nan], value='', inplace=True)
l = len(df_data_old)
df_data['Responsible Person Problem'] = ''
df_data['Date Query'] = ''
df_data['Date Answer'] = ''
df_data['Problem Summary'] = ''
df_data['Results'] = ''
df_data['Responsible Person Problem'][0:l] = df_data_old['Responsible Person Problem']
df_data['Date Query'][0:l] = df_data_old['Date Query']
df_data['Date Answer'][0:l] = df_data_old['Date Answer']
df_data['Problem Summary'][0:l] = df_data_old['Problem Summary']
df_data['Results'][0:l] = df_data_old['Results']
for index, row in df_tracking.iterrows():
patient = row['PatientID']
df_patient = df_data[df_data['PatientID']==patient]
for indexP, rowP in df_patient.iterrows():
# Update 'Problem Summary'
if df_data.loc[indexP, 'Problem Summary']=='':
df_data.loc[indexP, 'Problem Summary'] = row['Problem Summary']
else:
if not row['Problem Summary'] in df_data.loc[indexP, 'Problem Summary']:
df_data.loc[indexP, 'Problem Summary'] = df_data.loc[indexP, 'Problem Summary'] + ' | ' + row['Problem Summary']
# Update 'results'
if df_data.loc[indexP, 'Results']=='':
df_data.loc[indexP, 'Results'] = row['results']
else:
if not row['results'] in df_data.loc[indexP, 'Results']:
df_data.loc[indexP, 'Results'] = df_data.loc[indexP, 'Results'] + ' | ' + row['results']
return df_data
def mergeEcrf(df_ecrf, df_data):
# Merge ecrf table
print('Merge ecrf table')
df_data['1. Date of CT scan'] = ''
for index, row in df_ecrf.iterrows():
patient = row['Patient identifier']
df_patient = df_data[df_data['PatientID']==patient]
for indexP, rowP in df_patient.iterrows():
# Update '1. Date of CT scan'
df_data.loc[indexP, '1. Date of CT scan'] = row['1. Date of CT scan']
return df_data
def mergePhase_exclude_stenosis(df_phase_exclude_stenosis, df_data):
# Merge phase_exclude_stenosis
print('Merge phase_exclude_stenosis table')
df_data['phase_i0011'] = ''
df_data['phase_i0012'] = ''
for index, row in df_phase_exclude_stenosis.iterrows():
patient = row['mnpaid']
df_patient = df_data[df_data['PatientID']==patient]
for indexP, rowP in df_patient.iterrows():
# Update tags
if df_data.loc[indexP, 'phase_i0011']=='':
df_data.loc[indexP, 'phase_i0011'] = str(row['phase_i0011'])
else:
df_data.loc[indexP, 'phase_i0011'] = str(df_data.loc[indexP, 'phase_i0011']) + ', ' + str(row['phase_i0011'])
if df_data.loc[indexP, 'phase_i0012']=='':
df_data.loc[indexP, 'phase_i0012'] = str(row['phase_i0012'])
else:
df_data.loc[indexP, 'phase_i0012'] = str(df_data.loc[indexP, 'phase_i0012']) + ', ' + str(row['phase_i0012'])
return df_data
def mergePrct(df_prct, df_data):
# Merge prct table
print('Merge prct table')
df_data['other_best_phase'] = ''
df_data['rca_best_phase'] = ''
df_data['lad_best_phase'] = ''
df_data['lcx_best_phase'] = ''
for index, row in df_prct.iterrows():
patient = row['PatientId']
df_patient = df_data[df_data['PatientID']==patient]
for indexP, rowP in df_patient.iterrows():
# Update tags
df_data.loc[indexP, 'other_best_phase'] = row['other_best_phase']
df_data.loc[indexP, 'rca_best_phase'] = row['rca_best_phase']
df_data.loc[indexP, 'lad_best_phase'] = row['lad_best_phase']
df_data.loc[indexP, 'lcx_best_phase'] = row['lcx_best_phase']
return df_data
def mergeStenosis_bigger_20_phase(df_stenosis_bigger_20_phases, df_data):
# Merge stenosis_bigger_20_phases table
print('Merge Stenosis_bigger_20_phase table')
df_data['STENOSIS'] = ''
patientnames = df_stenosis_bigger_20_phases['mnpaid'].unique()
df_stenosis_bigger_20_phases.replace(to_replace=[np.nan], value='', inplace=True)
for patient in patientnames:
patientStenose = df_stenosis_bigger_20_phases[df_stenosis_bigger_20_phases['mnpaid']==patient]
sten = ''
for index, row in patientStenose.iterrows():
art=''
if row['LAD']==1:
art = 'LAD'
if row['RCA']==1:
art = 'RCA'
if row['LMA']==1:
art = 'LMA'
if row['LCX']==1:
art = 'LCX'
if sten =='':
if not art=='':
sten = art + ':' + str(row['sten_i0231 (Phase #1)']) + ':' + str(row['sten_i0241']) + ':' + str(row['sten_i0251'])
else:
if not art=='':
sten = sten + ', ' + art + ':' + str(row['sten_i0231 (Phase #1)']) + ':' + str(row['sten_i0241']) + ':' + str(row['sten_i0251'])
df_patient = df_data[df_data['PatientID']==patient]
for indexP, rowP in df_patient.iterrows():
df_data.loc[indexP, 'STENOSIS'] = sten
return df_data
def freeze(writer, sheetname, df):
NumRows=1
NumCols=1
df.to_excel(writer, sheet_name = sheetname, freeze_panes = (NumCols, NumRows))
def highlight_columns(sheet, columns=[], color='A5A5A5', offset=2):
for col in columns:
cell = sheet.cell(1, col+offset)
cell.fill = PatternFill(start_color=color, end_color=color, fill_type = 'solid')
return sheet
def setAccessRights(sheet, columns=[], promt='', promptTitle='', formula1='"Dog,Cat,Bat"'):
for column in columns:
column_letter = get_column_letter(column+2)
dv = DataValidation(type="list", formula1=formula1, allow_blank=True)
dv.prompt = promt
dv.promptTitle = promptTitle
column_str = column_letter + str(1) + ':' + column_letter + str(1048576)
dv.add(column_str)
sheet.add_data_validation(dv)
return sheet
def setComment(sheet, columns=[], comment=''):
for column in columns:
column_letter = get_column_letter(column+2)
dv = DataValidation()
dv.prompt = comment
column_str = column_letter + str(1) + ':' + column_letter + str(1048576)
dv.add(column_str)
sheet.add_data_validation(dv)
return sheet
def checkTables(settings):
print('Checking existence of required tables.')
# Check if required tables exist
tables=['filepath_dicom', 'filepath_ITT', 'filepath_ecrf', 'filepath_prct',
'filepath_phase_exclude_stenosis', 'filepath_stenosis_bigger_20_phases', 'filepath_tracking']
for table in tables:
if not os.path.isfile(settings[table]):
raise ValueError("Source file " + settings[table] + ' does not exist. Please copy file in the correct directory!')
return True
def createData(settings, NumSamples=None):
""" Create data columns from dicom metadata
:param settings: Dictionary of settings
:type settings: dict
"""
XA=False
# Extract dicom data
df_dicom = pd.read_excel(settings['filepath_dicom'], index_col=0)
# Reorder datafame
df_dicom = df_dicom[settings['dicom_tags_order']]
if XA:
df_dicom = df_dicom[(df_dicom['Modality']=='CT') | (df_dicom['Modality']=='OT') | (df_dicom['Modality']=='XA')]
else:
df_dicom = df_dicom[(df_dicom['Modality']=='CT') | (df_dicom['Modality']=='OT')]
df_dicom = df_dicom.reset_index(drop=True)
cols = df_dicom.columns.tolist()
cols_new = settings['dicom_tags_first'] + [x for x in cols if x not in settings['dicom_tags_first']]
df_dicom = df_dicom[cols_new]
df_data = df_dicom.copy()
df_data = df_data.reset_index(drop=True)
if NumSamples is not None:
df_data = df_data[0:NumSamples]
# Extract ecrf data
df_ecrf = pd.read_excel(settings['filepath_ecrf'])
df_data = mergeEcrf(df_ecrf, df_data)
# Extract ITT
df_ITT = pd.read_excel(settings['filepath_ITT'], 'Tabelle1')
df_data = mergeITT(df_ITT, df_data)
# Extract phase_exclude_stenosis
df_phase_exclude_stenosis = pd.read_excel(settings['filepath_phase_exclude_stenosis'])
df_data = mergePhase_exclude_stenosis(df_phase_exclude_stenosis, df_data)
# Extract prct
df_prct = pd.read_excel(settings['filepath_prct'])
df_data = mergePrct(df_prct, df_data)
# Extract stenosis_bigger_20_phases
df_stenosis_bigger_20_phases = pd.read_excel(settings['filepath_stenosis_bigger_20_phases'])
df_data = mergeStenosis_bigger_20_phase(df_stenosis_bigger_20_phases, df_data)
# Reorder columns
cols = df_data.columns.tolist()
cols_new = settings['dicom_tags_first'] + [x for x in cols if x not in settings['dicom_tags_first']]
#filepath_master_data = os.path.join(settings['folderpath_components'], 'discharge_master_data_' + settings['date'] + '.xlsx')
#df_data.to_excel(settings['filepath_data'])
df_data.to_pickle(settings['filepath_data'])
def createPredictions(settings):
""" Create prediction columns
:param settings: Dictionary of settings
:type settings: dict
"""
df_data = pd.read_pickle(settings['filepath_data'])
df_pred = pd.DataFrame()
# Filter by CACS based on 10-Steps-Guide
df = filter_CACS_10StepsGuide(df_data)
df_pred['CACS10StepsGuide'] = df['CACS10StepsGuide']
# Filter by CACS based selection
df = filter_CACS(df_data)
df_pred['CACS'] = df['CACS']
# Filter by NCS_CACS and NCS_CTA based on criteria
df = filter_NCS(df_data)
df_pred['NCS_CTA'] = df['NCS_CTA']
df_pred['NCS_CACS'] = df['NCS_CACS']
# Filter by CTA
df = filter_CTA(settings)
df_pred['CTA'] = df['CTA'].astype('bool')
df_pred['CTA_phase'] = df['phase']
df_pred['CTA_arteries'] = df['arteries']
df_pred['CTA_source'] = df['source']
# Filter by ICA
df = pd.DataFrame('', index=np.arange(len(df_pred)), columns=['ICA'])
df_pred['ICA'] = df['ICA']
# Filter by reconstruction
df = filterReconstruction(df_data, settings)
df_pred['RECO'] = df['RECO']
# Predict CLASS
classes = ['CACS', 'CTA', 'NCS_CTA', 'NCS_CACS']
for i in range(len(df_pred)):
if i % 1000 == 0:
print('index:', i, '/', len(df_pred))
value=''
for c in classes:
if df_pred.loc[i, c]:
if value=='':
value = value + c
else:
value = value + '+' + c
if value == '':
value = 'UNDEFINED'
df_pred.loc[i, 'CLASS'] = value
# Save predictions
df_pred.to_pickle(settings['filepath_prediction'])
def updateRFClassification(folderpath_master, folderpath_master_before):
""" Update random forest classification
:param folderpath_master: Path of the current master folder
:type folderpath_master: str
:param folderpath_master_before: Folder that contains the previous master folders
:type folderpath_master_before: str
"""
date = folderpath_master.split('_')[-1]
folderpath_components = os.path.join(folderpath_master, 'discharge_components_' + date)
filepath_rfc = os.path.join(folderpath_components, 'discharge_rfc_' + date + '.xlsx')
folderpath_master_before_list = glob(folderpath_master_before + '/*master*')
folderpath_master_before_list = sortFolderpath(folderpath_master, folderpath_master_before_list)
filepathMasters = glob(folderpath_master_before_list[-2] + '/*process*.xlsx')
date_before = folderpath_master_before_list[-2].split('_')[-1]
df_master = pd.read_excel(filepathMasters[0], sheet_name='MASTER_' + date_before)
columns = ['RFCLabel', 'RFCClass', 'RFCConfidence']
df_rfc = pd.DataFrame('UNDEFINED', index=np.arange(len(df_master)), columns=columns)
df_rfc[columns] = df_master[columns]
df_rfc.to_excel(filepath_rfc)
def createManualSelection(settings):
""" Create manual selection columns
:param settings: Dictionary of settings
:type settings: dict
"""
print('Create manual selection')
#df_data = pd.read_excel(settings['filepath_data'], index_col=0)
df_data = pd.read_pickle(settings['filepath_data'])
df_manual0 = pd.DataFrame('UNDEFINED', index=np.arange(len(df_data)), columns=['ClassManualCorrection'])
df_manual1 = pd.DataFrame('', index=np.arange(len(df_data)), columns=['Comment'])
df_manual2 = pd.DataFrame('', index=np.arange(len(df_data)), columns=['Responsible Person'])
df_manual3 = pd.DataFrame('UNDEFINED', index=np.arange(len(df_data)), columns=['Image Quality'])
df_manual = pd.concat([df_manual0, df_manual1, df_manual2, df_manual3], axis=1)
#df_manual.to_excel(settings['filepath_manual'])
df_manual.to_pickle(settings['filepath_manual'])
def createTrackingTable(settings):
""" Create tracking table
:param settings: Dictionary of settings
:type settings: dict
"""
print('Create tracking table')
df_track = pd.DataFrame(columns=settings['columns_tracking'])
df_track.to_pickle(settings['filepath_master_track'])
# Update master
writer = pd.ExcelWriter(settings['filepath_master'], engine="openpyxl", mode="a")
# Remove sheet if already exist
sheet_name = 'TRACKING' + '_' + settings['date']
workbook = writer.book
sheetnames = workbook.sheetnames
if sheet_name in sheetnames:
sheet = workbook[sheet_name]
workbook.remove(sheet)
# Add patient to master
df_track.to_excel(writer, sheet_name=sheet_name)
writer.save()
print('Update tracking table')
# Read tracking table
df_tracking = pd.read_excel(settings['filepath_tracking'], 'tracking table')
df_tracking.replace(to_replace=[np.nan], value='', inplace=True)
df_track = pd.read_excel(settings['filepath_master'], 'TRACKING_' + settings['date'], index_col=0)
columns_track = df_track.columns
columns_tracking = df_tracking.columns
#columns_union = ['ProblemID', 'PatientID', 'Problem Summary', 'Problem']
columns_union = columns_track
if len(df_track)==0:
ProblemIDMax=-1
df_track = df_tracking[columns_union]
else:
ProblemIDMax = max([int(x) for x in list(df_track['ProblemID'])])
ProblemIDInt = 0
for index, row in df_tracking.iterrows():
ProblemID = row['ProblemID']
if not ProblemID == '':
index = df_track['ProblemID'][df_track['ProblemID'] == ProblemID].index[0]
for col in columns_union:
df_track.loc[index,col] = row[col]
else:
ProblemIDInt = ProblemIDMax + 1
ProblemIDMax = ProblemIDInt
row['ProblemID'] = str(ProblemIDInt).zfill(6)
row_new = pd.DataFrame('', index=[0], columns=columns_union)
for col in columns_union:
row_new.loc[0,col] = row[col]
df_track = df_track.append(row_new, ignore_index=True)
df_tracking.loc[index,'ProblemID'] = str(ProblemIDInt).zfill(6)
# Update master
writer = pd.ExcelWriter(settings['filepath_master'], engine="openpyxl", mode="a")
# Remove sheet if already exist
sheet_name = 'TRACKING' + '_' + settings['date']
workbook = writer.book
sheetnames = workbook.sheetnames
if sheet_name in sheetnames:
sheet = workbook[sheet_name]
workbook.remove(sheet)
# Add patient to master
df_track.to_excel(writer, sheet_name=sheet_name)
writer.save()
# Update tracking
writer = pd.ExcelWriter(settings['filepath_tracking'], engine="openpyxl", mode="a")
# Remove sheet if already exist
sheet_name = 'tracking table'
workbook = writer.book
sheetnames = workbook.sheetnames
if sheet_name in sheetnames:
sheet = workbook[sheet_name]
workbook.remove(sheet)
# Add patient to master
df_tracking.to_excel(writer, sheet_name=sheet_name, index=False)
writer.save()
def orderMasterData(df_master, settings):
""" Order columns of the master
:param settings: Dictionary of settings
:type settings: dict
"""
# Reorder columns
cols = df_master.columns.tolist()
cols_new = settings['columns_first'] + [x for x in cols if x not in settings['columns_first']]
df_master = df_master[cols_new]
df_master = df_master.sort_values(['PatientID', 'StudyInstanceUID', 'SeriesInstanceUID'], ascending = (True, True, True))
df_master.reset_index(inplace=True, drop=True)
return df_master
def mergeMaster(settings):
""" Merge master file
:param settings: Dictionary of settings
:type settings: dict
"""
print('Create master')
# Read tables
print('Read discharge_data')
df_data = pd.read_pickle(settings['filepath_data'])
print('Read discharge_pred')
df_pred = pd.read_pickle(settings['filepath_prediction'])
df_pred['CTA'] = df_pred['CTA'].astype('bool')
print('Read discharge_reco')
df_reco_load = pd.read_excel(settings['filepath_reco'], index_col=0)
df_reco = pd.DataFrame()
df_reco['RECO'] = df_reco_load['PredClass']
df_reco['RECO_PROP'] = df_reco_load['Prop']
print('Read discharge_rfc')
df_rfc = pd.read_pickle(settings['filepath_rfc'])
from mpl_toolkits.mplot3d import axes3d
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import csv
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.ticker as mtick
import plotly.graph_objects as go
import plotly.express as px
import publico as func
pd.options.mode.chained_assignment = None # default='warn'
from dateutil import parser
def MediaFileRede(res_select, interval_time=5):
res_select.drop_duplicates(subset=None, keep="first", inplace=True)
# # cria campos
# res_select['Timer2'] = 0
# res_select['Media2'] = 0.0
# velo_total = 0.0
# count=0
# timer_atual = 0.0
# timer_ant = 0.0
# elapset_atual= 0.0
# elapset_cumulativo = 0.0
# count_timer=interval_time
# for index, row in res_select.iterrows():
# timer_atual = row['Tempo']
# if (timer_ant!=0.0):
# elapset_atual = float(row['Tempo']) - float(timer_ant)
# # print(abs(elapset_atual))
# elapset_cumulativo+=float(elapset_atual)
# if ((elapset_cumulativo >= interval_time)):
# # print('Chegou')
# # break
# media_velo = velo_total / count
# res_select.at[index,"Media2"] = media_velo
# res_select.at[index,"Timer2"] = count_timer
# elapset_cumulativo=0.0
# timer_ant = 0.0
# velo_total=0.0
# media_velo=0.0
# count=0
# count_timer+=interval_time
# if (timer_atual != timer_ant):
# timer_ant = timer_atual
# velo_total = velo_total + row['Download']
# count+=1
# remove zeros
# res_select = res_select[(res_select['Timer2']!=0) & (res_select['Timer2']<=280) & (res_select['Media2']<300) ]
return res_select
EXP30="30"
EXP50="50"
EXP70="70"
print("Loading Dataframe...")
# BASELINE 30 ***************************************************
baseline_30 = pd.read_csv("../repositorio/" + EXP30 + "/REDE_GERAL.csv")
baseline_30['Download'] = baseline_30['Download'].astype(float)
baseline_30['Upload'] = baseline_30['Upload'].astype(float)
baseline_30['Tempo'] = baseline_30['Tempo'].astype(float)
baseline_30['Source'] = "BASELINE"
baseline_30['Carros'] = 30
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
baseline_30_select = baseline_30[['Download', 'Source', 'Tempo', 'Carros']]
baseline_30_select = MediaFileRede(baseline_30_select)
# *************************************************************************
# BASELINE 50 ***************************************************
baseline_50 = pd.read_csv("../repositorio/" + EXP50 + "/REDE_GERAL.csv")
baseline_50['Download'] = baseline_50['Download'].astype(float)
baseline_50['Upload'] = baseline_50['Upload'].astype(float)
baseline_50['Tempo'] = baseline_50['Tempo'].astype(float)
baseline_50['Source'] = "BASELINE"
baseline_50['Carros'] = 50
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
baseline_50_select = baseline_50[['Download', 'Source', 'Tempo', 'Carros']]
baseline_50_select = MediaFileRede(baseline_50_select)
# *************************************************************************
# BASELINE 70 ***************************************************
baseline_70 = pd.read_csv("../repositorio/" + EXP70 + "/REDE_GERAL.csv")
baseline_70['Download'] = baseline_70['Download'].astype(float)
baseline_70['Upload'] = baseline_70['Upload'].astype(float)
baseline_70['Tempo'] = baseline_70['Tempo'].astype(float)
baseline_70['Source'] = "BASELINE"
baseline_70['Carros'] = 70
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
baseline_70_select = baseline_70[['Download', 'Source', 'Tempo', 'Carros']]
baseline_70_select = MediaFileRede(baseline_70_select)
# *************************************************************************
baseline = res = pd.concat([baseline_30_select,baseline_50_select,baseline_70_select], sort=False)
#
#
#
# ONETO2 30 ***************************************************
oneTo2_30 = pd.read_csv("../repositorio/" + EXP30 + "/REDE_BASELINE_1TO2.csv")
oneTo2_30['Download'] = oneTo2_30['Download'].astype(float)
oneTo2_30['Upload'] = oneTo2_30['Upload'].astype(float)
oneTo2_30['Tempo'] = oneTo2_30['Tempo'].astype(float)
oneTo2_30['Source'] = "1to2"
oneTo2_30['Carros'] = 30
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
oneTo2_30_select = oneTo2_30[['Download', 'Source', 'Tempo', 'Carros']]
oneTo2_30_select = MediaFileRede(oneTo2_30_select)
# *************************************************************************
# ONETO2 50 ***************************************************
oneTo2_50 = pd.read_csv("../repositorio/" + EXP50 + "/REDE_BASELINE_1TO2.csv")
oneTo2_50['Download'] = oneTo2_50['Download'].astype(float)
oneTo2_50['Upload'] = oneTo2_50['Upload'].astype(float)
oneTo2_50['Tempo'] = oneTo2_50['Tempo'].astype(float)
oneTo2_50['Source'] = "1to2"
oneTo2_50['Carros'] = 50
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
oneTo2_50_select = oneTo2_50[['Download', 'Source', 'Tempo', 'Carros']]
oneTo2_50_select = MediaFileRede(oneTo2_50_select)
# *************************************************************************
# 1TO2 70 ***************************************************
oneTo2_70 = pd.read_csv("../repositorio/" + EXP70 + "/REDE_BASELINE_1TO2.csv")
oneTo2_70['Download'] = oneTo2_70['Download'].astype(float)
oneTo2_70['Upload'] = oneTo2_70['Upload'].astype(float)
oneTo2_70['Tempo'] = oneTo2_70['Tempo'].astype(float)
oneTo2_70['Source'] = "1to2"
oneTo2_70['Carros'] = 70
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
oneTo2_70_select = oneTo2_70[['Download', 'Source', 'Tempo', 'Carros']]
oneTo2_70_select = MediaFileRede(oneTo2_70_select)
# *************************************************************************
oneTo2 = res = pd.concat([oneTo2_30_select,oneTo2_50_select,oneTo2_70_select], sort=False)
#
#
#
# RANDOM 30 ***************************************************
random_30 = pd.read_csv("../repositorio/" + EXP30 + "/REDE_BASELINE_RANDOM.csv")
random_30['Download'] = random_30['Download'].astype(float)
random_30['Upload'] = random_30['Upload'].astype(float)
random_30['Tempo'] = random_30['Tempo'].astype(float)
random_30['Source'] = "Rand"
random_30['Carros'] = 30
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
random_30_select = random_30[['Download', 'Source', 'Tempo', 'Carros']]
random_30_select = MediaFileRede(random_30_select)
# *************************************************************************
# RANDOM 50 ***************************************************
random_50 = pd.read_csv("../repositorio/" + EXP50 + "/REDE_BASELINE_RANDOM.csv")
random_50['Download'] = random_50['Download'].astype(float)
random_50['Upload'] = random_50['Upload'].astype(float)
random_50['Tempo'] = random_50['Tempo'].astype(float)
random_50['Source'] = "Rand"
random_50['Carros'] = 50
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
random_50_select = random_50[['Download', 'Source', 'Tempo', 'Carros']]
random_50_select = MediaFileRede(random_50_select)
# *************************************************************************
# RANDOM 70 ***************************************************
random_70 = pd.read_csv("../repositorio/" + EXP70 + "/REDE_BASELINE_RANDOM.csv")
random_70['Download'] = random_70['Download'].astype(float)
random_70['Upload'] = random_70['Upload'].astype(float)
random_70['Tempo'] = random_70['Tempo'].astype(float)
random_70['Source'] = "Rand"
random_70['Carros'] = 70
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
random_70_select = random_70[['Download', 'Source', 'Tempo', 'Carros']]
random_70_select = MediaFileRede(random_70_select)
# *************************************************************************
random = res = pd.concat([random_30_select,random_50_select,random_70_select], sort=False)
#
#
#
# LIMITE 30 ***************************************************
limite_30 = pd.read_csv("../repositorio/" + EXP30 + "/REDE_BASELINE_THRESHOLD.csv")
limite_30['Download'] = limite_30['Download'].astype(float)
limite_30['Upload'] = limite_30['Upload'].astype(float)
limite_30['Tempo'] = limite_30['Tempo'].astype(float)
limite_30['Source'] = "Lim"
limite_30['Carros'] = 30
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
limite_30_select = limite_30[['Download', 'Source', 'Tempo', 'Carros']]
limite_30_select = MediaFileRede(limite_30_select)
# *************************************************************************
# LIMITE 50 ***************************************************
limite_50 = pd.read_csv("../repositorio/" + EXP50 + "/REDE_BASELINE_THRESHOLD.csv")
limite_50['Download'] = limite_50['Download'].astype(float)
limite_50['Upload'] = limite_50['Upload'].astype(float)
limite_50['Tempo'] = limite_50['Tempo'].astype(float)
limite_50['Source'] = "Lim"
limite_50['Carros'] = 50
# df1_filtro = df1.loc[(df1['Bytes'] > 0)]
limite_50_select = limite_50[['Download', 'Source', 'Tempo', 'Carros']]
limite_50_select = MediaFileRede(limite_50_select)
# *************************************************************************
# LIMITE 70 ***************************************************
limite_70 = pd.read_csv("../repositorio/" + EXP70 + "/REDE_BASELINE_THRESHOLD.csv")
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
dataset=pd.read_csv(r'C:\Users\santosh\Downloads\PAASBAAN-crime-prediction-master\data.csv')
data=pd.read_csv(r'C:\Users\santosh\Downloads\PAASBAAN-crime-prediction-master\data.csv')
print(dataset.head())
for col in data:
print (type(data[col][1]))
# parse timestamps once, with the explicit day-first format
data['timestamp'] = pd.to_datetime(data['timestamp'], format='%d/%m/%Y %H:%M:%S')
data['timestamp']
column_1 = data.iloc[:, 0]  # .ix was removed from pandas; .iloc keeps the positional access
db=pd.DataFrame({"year": column_1.dt.year,
"month": column_1.dt.month,
"day": column_1.dt.day,
"hour": column_1.dt.hour,
"dayofyear": column_1.dt.dayofyear,
"week": column_1.dt.week,
"weekofyear": column_1.dt.weekofyear,
"dayofweek": column_1.dt.dayofweek,
"weekday": column_1.dt.weekday,
"quarter": column_1.dt.quarter,
})
dataset1=dataset.drop('timestamp',axis=1)
data1 = pd.concat([db, dataset1], axis=1)
## > Imports
# > Standard libraries
import datetime
# > 3rd party dependencies
import pandas as pd
# > Discord dependencies
import discord
from discord.ext import commands
from discord.ext.tasks import loop
# Local dependencies
from util.vars import config, get_json_data
from util.disc_util import get_channel
class StockTwits(commands.Cog):
"""
This class contains the cog for posting the most discussed StockTwits tickers.
It can be enabled / disabled in the config under ["LOOPS"]["STOCKTWITS"].
Methods
-------
get_data(e, keyword) -> discord.Embed:
    Gets the data from StockTwits and formats it into an embed.
stocktwits() -> None:
The function posts the StockTwits embeds in the configured channel.
"""
def __init__(self, bot: commands.Bot) -> None:
self.bot = bot
self.channel = get_channel(self.bot, config["LOOPS"]["STOCKTWITS"]["CHANNEL"])
self.stocktwits.start()
async def get_data(self, e: discord.Embed, keyword: str) -> discord.Embed:
"""
Gets the data from StockTwits based on the passed keywords and returns a discord.Embed.
Parameters
----------
e : discord.Embed
The discord.Embed where the data will be added to.
keyword : str
The specific keyword to get the data for. Options are: ts, m_day, wl_ct_day.
Returns
-------
discord.Embed
The discord.Embed with the data added to it.
"""
# Keyword can be "ts", "m_day", "wl_ct_day"
data = await get_json_data("https://api.stocktwits.com/api/2/charts/" + keyword)
table = pd.DataFrame(data["table"][keyword])
"""
Base Model Module
-----------------
This is the base class for all model modules. This class does not contain a particular model, but it does include all of the functions to run a model, capture model statistics, and visualize model data.
"""
__author__ = 'krishnab'
__version__ = '0.1.0'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
## Initialize Constants
PROFESSOR_LEVEL_NAMES = list(['f1n', 'f2n', 'f3n', 'm1n', 'm2n', 'm3n'])
PROBABILITY_ARRAY_COLUMN_NAMES = list(['param','prof_group_mean', 'probability'])
class Base_model():
def __init__(self, number_of_females_1,
number_of_females_2,
number_of_females_3,
number_of_males_1,
number_of_males_2,
number_of_males_3,
number_of_initial_vacancies_1,
number_of_initial_vacancies_2,
number_of_initial_vacancies_3,
hiring_rate_women_1,
hiring_rate_women_2,
hiring_rate_women_3,
attrition_rate_women_1,
attrition_rate_women_2,
attrition_rate_women_3,
attrition_rate_men_1,
attrition_rate_men_2,
attrition_rate_men_3,
probablity_of_outside_hire_1,
probability_of_outside_hire_2,
probability_of_outside_hire_3,
duration,
female_promotion_probability_1,
female_promotion_probability_2,
male_promotion_probability_1,
male_promotion_probability_2,
max_threshold,
prob_random_growth):
self.name = 'replication m'
self.label = 'replication m'
self.nf1 = number_of_females_1
self.nf2 = number_of_females_2
self.nf3 = number_of_females_3
self.nm1 = number_of_males_1
self.nm2 = number_of_males_2
self.nm3 = number_of_males_3
self.vac3 = number_of_initial_vacancies_3
self.vac2 = number_of_initial_vacancies_2
self.vac1 = number_of_initial_vacancies_1
self.bf1 = hiring_rate_women_1
self.bf2 = hiring_rate_women_2
self.bf3 = hiring_rate_women_3
self.df1 = attrition_rate_women_1
self.df2 = attrition_rate_women_2
self.df3 = attrition_rate_women_3
self.dm1 = attrition_rate_men_1
self.dm2 = attrition_rate_men_2
self.dm3 = attrition_rate_men_3
self.phire2 = probability_of_outside_hire_2
self.phire3 = probability_of_outside_hire_3
self.duration = duration
self.female_promotion_probability_1 = female_promotion_probability_1
self.female_promotion_probability_2 = female_promotion_probability_2
self.male_promotion_probability_1 = male_promotion_probability_1
self.male_promotion_probability_2 = male_promotion_probability_2
self.max_threshold = max_threshold
self.prob_random_growth = prob_random_growth
self.run = 0
self.runarray = 0
self.mean_matrix = 0
self.model_summary_stats = 0
self.std_matrix = 0
self.pd_last_row_data = 0
def run_model(self):
self.res = np.zeros([self.duration, 12], dtype=np.float32)
df_ = pd.DataFrame(self.res)
import os
import pandas as pd
import numpy as np
import re
import logging
DATA_PATH = os.getenv('DATA_PATH')
if DATA_PATH is None:
raise ValueError("DATA_PATH needs to be set")
def changeTrade(eba, rightba, wrongba, start=None, end=None, tol=1):
logger = logging.getLogger("clean")
ind = [True]*len(eba.df.index)
if start is not None:
ind &= eba.df.index > start
if end is not None:
ind &= eba.df.index < end
ind_diff = ((
(eba.df.loc[:, eba.KEY["ID"] % (rightba, wrongba)] + eba.df.loc[
:, eba.KEY["ID"] % (wrongba, rightba)]).abs() > tol)
| eba.df.loc[:, eba.KEY["ID"] % (wrongba, rightba)].isna())
ind_diff &= ind
eba.df.loc[ind_diff, eba.KEY["ID"] % (wrongba, rightba)] = (
-eba.df.loc[ind_diff, eba.KEY["ID"] % (rightba, wrongba)])
nchange = sum(ind_diff)
if nchange > 0:
logger.debug("Picking %s over %s for %d pts" % (
rightba, wrongba, nchange))
return eba
def fillNAs(eba, col, pad_limit=2, limit=3):
logger = logging.getLogger("clean")
ind_na = eba.df.loc[:, col].isna()
nchange = ind_na.sum()
if nchange > 0:
logger.debug("%s: %d NA values to deal with" % (
col, nchange))
# first try pad for 2 hours
eba.df.loc[:, col] = eba.df.loc[:, col].fillna(
method='pad', limit=pad_limit)
ind_na = eba.df.loc[:, col].isna()
nchange = ind_na.sum()
if nchange > 0:
logger.debug("%s: replacing %d NA values with next/prev week" % (
col, nchange))
if nchange > 50:
logger.warning("%s: replacing %d NA values with next/prev week" % (
col, nchange))
for ts in eba.df.index[ind_na]:
try:
eba.df.loc[ts, col] = eba.df.loc[
ts-pd.Timedelta("%dH" % (7*24)), col]
except KeyError:
eba.df.loc[ts, col] = eba.df.loc[
ts+pd.Timedelta("%dH" % (7*24)), col]
# If we didn't manage to get the right value, look forward
cnt = 0
while np.isnan(eba.df.loc[ts, col]):
cnt += 1
if cnt > limit:
logger.error("Tried to look %d times ahead for %s" %
(limit, str(ts)))
raise ValueError("Can't fill this NaN")
eba.df.loc[ts, col] = eba.df.loc[
ts+pd.Timedelta("%dH" % (cnt*7*24)), col]
return eba
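# Toy sketch of the weekly-offset fill used above (assumes an hourly index):
# a missing hour is filled with the value at the same hour one week earlier,
# falling back to one week later if the earlier timestamp is absent.
# s = pd.Series(range(24 * 14), index=pd.date_range("2016-01-01", periods=24 * 14, freq="H"))
# ts = pd.Timestamp("2016-01-10 05:00")
# fallback = s.loc[ts - pd.Timedelta("168H")]  # same hour, one week before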
def removeOutliers(eba, col, start=None, end=None, thresh_u=None,
thresh_l=None, remove=True, limit=4):
logger = logging.getLogger("clean")
if start is None:
start = pd.to_datetime("2016-01-01")
if end is None:
end = pd.to_datetime("2017-01-02")
if (thresh_u is None) and (thresh_l is None):
mu = eba.df.loc[start:end, col].mean()
sigma = eba.df.loc[start:end, col].std()
ind_out = np.abs(eba.df.loc[:, col]-mu) > (3*sigma)
else:
if thresh_l is None:
thresh_l = -np.inf
if thresh_u is None:
thresh_u = +np.inf
ind_out = (eba.df.loc[:, col] < thresh_l)
ind_out |= (eba.df.loc[:, col] > thresh_u)
ind_out &= (eba.df.index > start) & (eba.df.index < end)
nchange = sum(ind_out)
logger.debug("%s: %d outliers out of [%.2g, %.2g]" % (
col, nchange, thresh_l, thresh_u))
if nchange > 10:
logger.warning("%s: %d outliers out of [%.2g, %.2g]" % (
col, nchange, thresh_l, thresh_u))
if remove:
eba.df.loc[ind_out, col] = np.nan
return eba
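# Quick sketch of the default (no-threshold) branch above: points more than
# three standard deviations from the mean within [start, end] are set to NaN.
# s = pd.Series([1.0] * 50 + [100.0])       # toy data with one clear outlier
# mu, sigma = s.mean(), s.std()
# outliers = (s - mu).abs() > 3 * sigma     # flags only the 100.0 entry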
def applyFixes3(eba, log_level=logging.INFO):
logger = logging.getLogger("clean")
log_level_old = logger.level
logger.setLevel(log_level)
# special changes
logger.debug("\tSpecial changes")
eba = removeOutliers(eba, "EBA.NSB-FPC.ID.H", thresh_u=-5,
start=pd.to_datetime("2016-02-12"),
end=pd.to_datetime("2016-02-14"))
eba = removeOutliers(eba, "EBA.NSB-FPC.ID.H", thresh_u=-5,
start=pd.to_datetime("2016-08-01"),
end=pd.to_datetime("2016-08-15"))
eba = removeOutliers(eba, "EBA.NSB-FPL.ID.H", thresh_u=-5.,
start=pd.to_datetime("2016-08-01"),
end=pd.to_datetime("2016-08-15"))
eba = removeOutliers(eba, "EBA.NSB-FPC.ID.H", thresh_u=-5,
start=pd.to_datetime("2016-10-07"),
end=pd.to_datetime("2016-10-08 03:00"))
eba = removeOutliers(eba, "EBA.NSB-FPL.ID.H", thresh_u=-5.,
start=pd.to_datetime("2016-10-07"),
end=pd.to_datetime("2016-10-08 03:00"))
for ba, ba2 in [("IID", "CISO"), ("PJM", "CPLW"), ("PJM", "DUK"),
("PJM", "TVA"),
("FPL", "SOCO"), ("SC", "SOCO"), ("SEPA", "SOCO"),
("CPLW", "TVA"), ("DUK", "TVA"),
("FMPP", "FPL"), ("FPC", "FPL"), ("JEA", "FPL"),
("SEC", "FPL"),
("CPLW", "DUK"), ("YAD", "DUK"), ("SEPA", "DUK"),
("DOPD", "BPAT"), ("LDWP", "BPAT"),
("FMPP", "FPC"), ("SEC", "FPC"),
("LDWP", "PACE"),
("LDWP", "NEVP"),
("SEPA", "SC"),
("FMPP", "TEC"),
("SEC", "JEA"),
("NSB", "FPC"), ("NSB", "FPL")]:
eba = fillNAs(eba, eba.KEY["ID"] % (ba, ba2))
eba = changeTrade(eba, ba, ba2, tol=0.)
for field in ["D", "NG"]:
eba = removeOutliers(eba, eba.get_cols(
r="FPC", field=field)[0], thresh_l=200.)
eba = removeOutliers(eba, eba.get_cols(
r="TVA", field=field)[0], thresh_l=3000.)
eba = removeOutliers(eba, eba.get_cols(r="PSCO", field=field)[
0], thresh_l=2000., thresh_u=10000.)
eba = removeOutliers(eba, eba.get_cols(
r="PACE", field=field)[0], thresh_u=10000.)
eba = removeOutliers(
eba, eba.get_cols(r="SRP", field=field)[0], thresh_l=1000.,
thresh_u=5000., start=pd.to_datetime("2016-12-01"),
end=pd.to_datetime("2016-12-31"))
eba = removeOutliers(
eba, eba.get_cols(r="SRP", field=field)[0], thresh_u=4900.,
start=pd.to_datetime("2016-01-01"),
end=pd.to_datetime("2016-05-01"))
eba = removeOutliers(eba, eba.get_cols(
r="LDWP", field=field)[0], thresh_l=100.)
eba = removeOutliers(
eba, eba.get_cols(r="IPCO", field=field)[0], thresh_l=800.,
start=
|
pd.to_datetime("2016-08-01")
|
pandas.to_datetime
|
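# The fillNAs routine above first pads short gaps and then falls back to the value recorded one
# week earlier (or later). A minimal, standalone sketch of that weekly fallback on a synthetic
# hourly series -- the index, values and gap below are invented, not the real EBA data:
import numpy as np
import pandas as pd

idx = pd.date_range("2016-01-01", periods=24 * 21, freq="H")
demand = pd.Series(5.0 + np.sin(np.arange(len(idx)) * 2 * np.pi / 24), index=idx)
demand.iloc[24 * 8: 24 * 8 + 6] = np.nan                       # knock out a 6-hour block
filled = demand.fillna(method="pad", limit=2)                  # short gaps: pad up to 2 steps
still_na = filled[filled.isna()].index
filled.loc[still_na] = filled.reindex(still_na - pd.Timedelta("7D")).values  # copy the prior week
assert not filled.isna().any()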
import sys
sys.path.append("../")
import numpy as np
import pandas as pd
import networkx as nx
import pickle
# katz centrality: computes the relative influence of a node by measuring the number of
# immediate neighbors (first degree nodes) and also all other nodes that
# connect to the node under consideration through these immediate neighbors
data_list=['conference','hospital','primary_school','workplace','high_school']
for data in data_list:
original_df=pd.read_csv('../data/'+data+'.txt', sep='\t', header=None, names=['ID1','ID2','start_time','end_time'])
reverse_df=pd.read_csv('../data/'+data+'.txt', sep='\t', header=None, names=['ID2','ID1','start_time','end_time'])
df=
|
pd.concat([original_df,reverse_df])
|
pandas.concat
|
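# The comment above describes Katz centrality; a minimal sketch of how the imported networkx
# could compute it for one of these contact networks. The file path and alpha value are
# illustrative, and the graph is built undirected for brevity (the script above instead
# symmetrises the edge list by concatenating a reversed copy).
import pandas as pd
import networkx as nx

edges = pd.read_csv('../data/conference.txt', sep='\t', header=None,
                    names=['ID1', 'ID2', 'start_time', 'end_time'])
G = nx.from_pandas_edgelist(edges, source='ID1', target='ID2')
katz = nx.katz_centrality(G, alpha=0.005)   # alpha must stay below 1/lambda_max to converge
top5 = sorted(katz.items(), key=lambda kv: kv[1], reverse=True)[:5]
print(top5)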
# --------------
import time
import pandas as pd
import numpy as np
from nltk import pos_tag
import matplotlib.pyplot as plt
# code starts here
df=pd.read_csv(path)
tagged_titles=df['nominee'].str.split().map(pos_tag)
print(tagged_titles)
tagged_titles_df=pd.DataFrame(tagged_titles)
# code ends here
# --------------
#tagged_titles_df already defined in the last task
def count_tags(title_with_tags):
tag_count = {}
for word, tag in title_with_tags:
if tag in tag_count:
tag_count[tag] += 1
else:
tag_count[tag] = 1
return(tag_count)
# code starts here
tagged_titles_df['tag_counts']=tagged_titles_df['nominee'].map(count_tags)
# Tagset containing all the possible tags
tag_set = list(set([tag for tags in tagged_titles_df['tag_counts'] for tag in tags]))
print(tagged_titles_df)
# Creating tag column frequency for each tags
for tag in tag_set:
tagged_titles_df[tag] = tagged_titles_df['tag_counts'].map(lambda x: x.get(tag, 0))
top_pos=tagged_titles_df[tag_set]
top_pos=top_pos.sum().sort_values().tail(10)
plt.bar(x=top_pos.index, height=top_pos.values)
plt.show()
# code ends here
# --------------
# Function to create vocabulary of the tags
def vocab_creator(tagged_titles):
vocab = {}
for row in tagged_titles['nominee']:
for word, tag in row:
if word in vocab:
if tag in vocab[word]:
vocab[word][tag] += 1
else:
vocab[word][tag] = 1
else:
vocab[word] = {tag: 1}
return vocab
# Creating vocab of our tagged titles dataframe
vocab= vocab_creator(tagged_titles_df)
# Creating dataframe from vocab dictionary
vocab_df = pd.DataFrame.from_dict(vocab,orient='index')
# Fill the nan values of dataframe
vocab_df.fillna(value=0, inplace=True)
# Saving the top 10 most frequent VBG tagged words
size = 10
tag = 'VBG'
top_verb_nominee=vocab_df[tag].sort_values().tail(size)
# Plotting the top 10 most frequent VBG tagged words
title = 'Top {} Most Frequent Words for {} Tag'.format(size, tag)
top_verb_nominee.plot(kind='barh', figsize=(12,6), title=title)
plt.show()
# Saving the top 10 most frequent NN tagged words
size = 10
tag = 'NN'
top_noun_nominee=vocab_df[tag].sort_values().tail(size)
# Plotting the top 10 most frequent NN tagged words
title = 'Top {} Most Frequent Words for {} Tag'.format(size, tag)
top_noun_nominee.plot(kind='barh', figsize=(12,6), title=title)
plt.show()
# --------------
# code starts here
new_df=df[(df['winner']==1) & (df['category'].str.contains('Comedy'))]
tagged_titles_winner=new_df['nominee'].str.split().map(pos_tag)
print(tagged_titles_winner)
tagged_titles_winner_df=pd.DataFrame(tagged_titles_winner)
# Creating a vocabulary of the tags
vocab= vocab_creator(tagged_titles_winner_df)
vocab_df=
|
pd.DataFrame.from_dict(vocab, orient='index')
|
pandas.DataFrame.from_dict
|
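# A tiny, self-contained illustration of the tagging step above: nltk.pos_tag turns a tokenised
# title into (word, tag) pairs, which count_tags (defined earlier) reduces to a frequency dict.
# The title string is invented and the exact tags depend on the NLTK tagger model installed.
from nltk import pos_tag

tagged = pos_tag("The Good Wife".split())
print(tagged)              # e.g. [('The', 'DT'), ('Good', 'JJ'), ('Wife', 'NN')]
print(count_tags(tagged))  # e.g. {'DT': 1, 'JJ': 1, 'NN': 1}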
from ._split import *
import numpy as np
import pandas as pd
class Split():
def __init__(self, cv_method, n_splits=None,
train_size=None, min_train_size=None, valid_size=None, step_size=None,
random_state=None):
# assume it is preordered by orderby
# assume orderby is present
if cv_method=='OrderedKFold':
self.cv = KFold(n_splits=n_splits,
shuffle=False)
elif cv_method=='ShuffledKFold':
self.cv = KFold(n_splits=n_splits,
shuffle=True)
elif cv_method=='SlidingWindow':
self.cv = DateSlidingWindowSplit(train_size=train_size,
min_train_size=min_train_size,
valid_size=valid_size,
step_size=step_size)
elif cv_method=='ExpandingWindow':
if isinstance(valid_size, str):
self.cv = DateExpandingWindowSplit(min_train_size=min_train_size,
valid_size=valid_size,
step_size=step_size,
n_splits=n_splits)
else:
self.cv = ExtendedTimeSeriesSplit(min_train_size=min_train_size,
valid_size=valid_size,
n_splits=n_splits,
step_size=step_size)
self.cv_method = cv_method
def split(self, data, orderby=None):
if self.cv_method in ('SlidingWindow', 'ExpandingWindow'):
return self.cv.split(data, orderby)
else:
return self.cv.split(data)
def ith_split(self, i, data, orderby=None):
if self.cv_method in ('SlidingWindow', 'ExpandingWindow'):
for idx, (train, valid) in enumerate(self.split(data, orderby)):
if idx==i:
return train, valid
else:
for idx, (train, valid) in enumerate(self.split(data)):
if idx==i:
return train, valid
def get_n_splits(self, data, orderby=None):
if self.cv_method in ('SlidingWindow', 'ExpandingWindow'):
return self.cv.get_n_splits(data, orderby)
else:
return self.cv.get_n_splits(data)
def show_layout(self, data, orderby=None, display=False):
if self.cv_method=='OrderedKFold':
train_left_indices = []
valid_indices = []
train_right_indices = []
for train_index, valid_index in self.cv.split(data):
train_left_indices.append(valid_index[0] - 0)
valid_indices.append(valid_index[-1] - valid_index[0] + 1)
train_right_indices.append(len(data) - valid_index[-1] - 1)
train_left_indices = train_left_indices[::-1]
valid_indices = valid_indices[::-1]
train_right_indices = train_right_indices[::-1]
if display:
df = pd.DataFrame({'train' : train_left_indices,'validation' : valid_indices, 'placeholder': train_right_indices})
ax = df.plot.barh(stacked=True, color=['#1f77b4','#ff7f0e']);
ax.figure.set_size_inches(10, 2.5)
ax.set_title("cross validation folds layout")
ax.legend(["train", "validation"], loc='center left',bbox_to_anchor=(1.0, 0.5))
elif self.cv_method=='ExpandingWindow':
train_indices = []
valid_indices = []
for train_index, valid_index in self.cv.split(data, orderby):
train_indices.append(train_index[-1])
valid_indices.append(valid_index[-1]-valid_index[0])
train_indices = train_indices[::-1]
valid_indices = valid_indices[::-1]
if display:
df = pd.DataFrame({'train' : train_indices,'validation' : valid_indices})
ax = df.plot.barh(stacked=True, color=['#1f77b4','#ff7f0e']);
ax.figure.set_size_inches(10, 2.5)
ax.set_title("cross validation folds layout")
ax.legend(["train", "validation"], loc='center left',bbox_to_anchor=(1.0, 0.5));
elif self.cv_method=='SlidingWindow':
empty_indices = []
train_indices = []
valid_indices = []
for train_index, valid_index in self.cv.split(data, orderby):
empty_indices.append(train_index[0])
train_indices.append(train_index[-1] - train_index[0] + 1)
valid_indices.append(valid_index[-1] - valid_index[0] + 1)
empty_indices = empty_indices[::-1]
train_indices = train_indices[::-1]
valid_indices = valid_indices[::-1]
if display:
df =
|
pd.DataFrame({'empty' : empty_indices, 'train' : train_indices, 'validation': valid_indices})
|
pandas.DataFrame
|
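# A hedged usage sketch for the Split wrapper above. It assumes `from ._split import *`
# re-exports an sklearn-compatible KFold (plus the window splitters); only the plain K-fold
# paths are exercised here, on a toy frame.
import pandas as pd

data = pd.DataFrame({'y': range(10)})
splitter = Split(cv_method='ShuffledKFold', n_splits=5)
for fold, (train_idx, valid_idx) in enumerate(splitter.split(data)):
    print(fold, len(train_idx), len(valid_idx))        # 5 folds of 8 train / 2 validation rows
train_idx, valid_idx = splitter.ith_split(0, data)      # fetch a single fold directly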
import pandas as pd
import requests
import json
from pandas.io.json import json_normalize
from uri_to_url import uri_to_url
def most_used_by_type_bar(uri, instance, display_id, title, role, count):
"""
Uses a sparql query to obtain information about the most used parts (of the same type as the poi e.g. all terminators)
and format the data in such a way that a graph can be made comparing the poi (part of interest) to the most used parts
of that role type.
Requirements
-------
import pandas as pd
import requests
import json
from pandas import json_normalize
Most_Used_By_Type_Query.txt
Parameters
----------
uri : string
the unique identifier of a part, note that due to spoofing it may not be the same as the url
e.g. uri = 'https://synbiohub.org/public/igem/BBa_E0040/1' (url may be https://dev.synbiohub.org/public/igem/BBa_E0040/1)
instance : string
the synbiohub instance where information is to be retrieved from (where the sparql query is to be run)
e.g. 'https://synbiohub.org/'
display_id: string
The display id of the poi e.g. 'BBa_E0040'
title: string
The human readable name of the poi e.g. 'GFP'
role: string
The number (as a string) of the sequence ontology of the role of the poi e.g. '0000316'
count: integer
The number of times the poi is used (how often it is a subpart) e.g. 2348
Returns
-------
bar_df: pandas dataframe, shape(11,6)
columns are ['count', 'deff', 'displayId', 'roletog', 'title', 'color']
Example
--------
display_id = 'BBa_E0040'
title = 'GFP'
role = '0000316'
count = 2348
uri = 'https://synbiohub.org/public/igem/BBa_E0040/1'
instance = 'https://dev.synbiohub.org/'
bar_df = most_used_by_type_bar(uri, instance, display_id, title, role, count)
Output:
count,deff,displayId,roletog,title,color
948,'https://synbiohub.org/public/igem/BBa_E1010/1','BBa_E1010','http://identifiers.org/so/SO:0000316','mRFP1','rgba(119,157,205,1)'
830,'https://synbiohub.org/public/igem/BBa_C0051/1','BBa_C0051','http://identifiers.org/so/SO:0000316','CI lam','rgba(119,157,205,1)'
766,'https://synbiohub.org/public/igem/BBa_C0040/1','BBa_C0040','http://identifiers.org/so/SO:0000316','TetR','rgba(119,157,205,1)'
662,'https://synbiohub.org/public/igem/BBa_C0012/1','BBa_C0012','http://identifiers.org/so/SO:0000316','lacI','rgba(119,157,205,1)'
660,'https://synbiohub.org/public/igem/BBa_C0062/1','BBa_C0062','http://identifiers.org/so/SO:0000316','luxr','rgba(119,157,205,1)'
640,'https://synbiohub.org/public/igem/BBa_E0030/1','BBa_E0030','http://identifiers.org/so/SO:0000316','eyfp','rgba(119,157,205,1)'
538,'https://synbiohub.org/public/igem/BBa_C0061/1','BBa_C0061','http://identifiers.org/so/SO:0000316','luxI','rgba(119,157,205,1)'
342,'https://synbiohub.org/public/igem/BBa_E0020/1','BBa_E0020','http://identifiers.org/so/SO:0000316','ecfp','rgba(119,157,205,1)'
202,'https://synbiohub.org/public/igem/BBa_C0060/1','BBa_C0060','http://identifiers.org/so/SO:0000316','aiiA','rgba(119,157,205,1)'
186,'https://synbiohub.org/public/igem/BBa_I732006/1','BBa_I732006','http://identifiers.org/so/SO:0000316','lacZ-alpha','rgba(119,157,205,1)'
2348,'https://synbiohub.org/public/igem/BBa_E0040/1','BBa_E0040','http://identifiers.org/so/SO:0000316','GFP','rgba(119,157,205,1)'
"""
# if spoofing is happening, the uri instance is different from the instance parameter
spoofed_instance = uri[:uri.find('/', 8)+1]
#get part url from uri
part_url = uri_to_url(uri, instance, spoofed_instance)
#open the query to collect the necessary data
fl = open("Most_Used_By_Type_Query.txt", "r")
sparql_query = fl.read()
#replace the role with the relevant role
sparql_query = sparql_query.replace("0000167", role)
#perform the query
r = requests.post(instance+"sparql", data = {"query":sparql_query}, headers = {"Accept":"application/json"})
#format the data
d = json.loads(r.text)
bar_df = json_normalize(d['results']['bindings'])
#rename columns
rename_dict = {'count.datatype':'cd', 'count.type':'ct', 'count.value':'count', 'def.type':'dt', 'def.value':'deff', 'displayId.type':'dist', 'displayId.value':'displayId', 'role.type':'rt', 'role.value':'roletog', 'title.type':'tt', 'title.value':'title'}
bar_df.columns = [rename_dict[col] for col in bar_df.columns]
#drop unneeded columns
bar_df = bar_df.drop(['cd', 'ct', 'dt', 'dist', 'rt', 'tt'], axis=1)
#remove the poi if it appears in the data
bar_df = bar_df[bar_df.displayId != display_id]
# in case the poi was dropped, reset the index (needed for colours to work)
bar_df.reset_index(drop=True, inplace = True)
#make sure it still works if less than 11 parts are present in the database
robustness = min(10, len(bar_df)-1)
#only accept the top robustness parts (usually the top eleven most used parts)
bar_df = bar_df.iloc[0:robustness+1]
#replace uris with urls
bar_df['deff'] = uri_to_url(bar_df['deff'], instance, spoofed_instance)
#change the final row in the dataframe (usually row 11)
#poi row is added like this so the ordering of the columns doesn't have to match
poi_row = pd.DataFrame.from_dict({'displayId':[display_id], 'title':[title], 'count':[count],
'roletog':[f"http://identifiers.org/so/SO:{str(role)}"], 'deff':[part_url]})
bar_df.iloc[robustness] = poi_row.iloc[0]
#define what colour each role should get (other is ignored)
colormap = {
'http://identifiers.org/so/SO:0000167': 'rgba(4,187,61,1)',
'http://identifiers.org/so/SO:0000139':'rgba(149,110,219,1)',
'http://identifiers.org/so/SO:0000316':'rgba(119,157,205,1)',
'http://identifiers.org/so/SO:0000141':'rgba(202,58,32,1)'
}
#get full identifiers form of role
part_role = "http://identifiers.org/so/SO:0000167".replace("0000167", role)
try:
colours = [colormap[part_role]] #make colours based on colormap
except KeyError:
colours = ["rgba(255, 128,0,1)"] #oran geif part type is other
#ensure the length of colours is as long as the dataframe (generally 10)
colours = colours*len(bar_df.index)
#add the column colour to the dataframe
bar_df['color'] =
|
pd.Series(colours, index=bar_df.index)
|
pandas.Series
|
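# most_used_by_type_bar relies on json_normalize to flatten the SPARQL JSON bindings into the
# dotted column names that rename_dict expects. A tiny illustration with a hand-made fragment
# (the structure mirrors the real response; the values are invented):
from pandas import json_normalize

fake_bindings = [
    {'count': {'datatype': 'int', 'type': 'literal', 'value': '948'},
     'displayId': {'type': 'literal', 'value': 'BBa_E1010'}},
]
flat = json_normalize(fake_bindings)
print(flat.columns.tolist())   # ['count.datatype', 'count.type', 'count.value', 'displayId.type', 'displayId.value']
print(flat['count.value'][0])  # '948'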
"""
Convolutional Neural Network for kaggle facial keypoints detection contest.
Note: Only the labels contain missing values in all of the data.
"""
# THEANO_FLAGS=mode=FAST_RUN,device=cpu,floatX=float32 python CNN.py
from __future__ import division, print_function
import time
import theano
import lasagne
import logging
import numpy as np
import pandas as pd
import theano.tensor as T
from sklearn.cross_validation import train_test_split
def set_verbosity(verbose_level=3):
"""Set the level of verbosity of the Preprocessing."""
if not type(verbose_level) == int:
raise TypeError("verbose_level must be an int")
if verbose_level < 0 or verbose_level > 4:
raise ValueError("verbose_level must be between 0 and 4")
verbosity = [logging.CRITICAL,
logging.ERROR,
logging.WARNING,
logging.INFO,
logging.DEBUG]
logging.basicConfig(
format='%(asctime)s:\t %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
level=verbosity[verbose_level])
def imputate(frame):
"""Deal with missing values in a DataFrame."""
start_time = time.time()
frame[frame.isnull().any(axis=1)].to_csv("train_incomplete.csv",
index=False)
frame.dropna(inplace=True)
time_diff = time.time() - start_time
logging.info("Imputation completed in " + str(time_diff) + " seconds")
def parse_data(train_file="training.csv", test_file="test.csv"):
"""
Parse training and test data;
split Image and labels and convert Image column to DataFrame.
"""
start_time = time.time()
train = pd.read_csv(train_file)
imputate(train)
test =
|
pd.read_csv(test_file)
|
pandas.read_csv
|
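# parse_data's docstring says the Image column is split off and converted separately. In this
# Kaggle dataset the Image field is a space-separated string of pixel values; a hedged sketch
# of that conversion (column name taken from the contest data, 96x96 image size assumed):
import numpy as np
import pandas as pd

train = pd.read_csv("training.csv")
train.dropna(inplace=True)
X = np.vstack(train["Image"].apply(lambda s: np.array(s.split(), dtype=np.float32)))
X = X.reshape(-1, 1, 96, 96) / 255.0                      # scale pixels to [0, 1]
y = train.drop(columns=["Image"]).values.astype(np.float32)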
"""
Wrapper para a biblioteca PyDEA para permitir seu uso no código Python.
Nota: este wrapper foi feito com as funcionalidades mínimas para uso neste trabalho.
"""
import os
import tempfile
import pandas as pd
from pyDEA import main as PyDEAMain
import consts
TEMPLATE_ARQUIVO_PARAMETROS = """
<PRICE_RATIO_RESTRICTIONS> {{}}
<DATA_FILE> {{{data_file}}}
<OUTPUT_CATEGORIES> {{{output_categories}}}
<INPUT_CATEGORIES> {{{input_categories}}}
<VIRTUAL_WEIGHT_RESTRICTIONS> {{{virtual_weight_restrictions}}}
<PEEL_THE_ONION> {{}}
<MULTIPLIER_MODEL_TOLERANCE> {{0}}
<RETURN_TO_SCALE> {{VRS}}
<OUTPUT_FILE> {{}}
<DEA_FORM> {{multi}}
<ORIENTATION> {{output}}
<ABS_WEIGHT_RESTRICTIONS> {{}}
<CATEGORICAL_CATEGORY> {{}}
<MAXIMIZE_SLACKS> {{}}
<NON_DISCRETIONARY_CATEGORIES> {{}}
<USE_SUPER_EFFICIENCY> {{yes}}
<WEAKLY_DISPOSAL_CATEGORIES> {{}}
"""
class ModeloDEA:
"""
Modelo para encapsualar o instanciamento e execução da análise utilizando o PyDEA
"""
def __init__(self, input_categories, output_categories, virtual_weight_restrictions):
"""
Instancia o modelo DEA do PyDEA com os parâmetros relevantes para nossos experimentos.
"""
self.input_categories = input_categories
self.output_categories = output_categories
self.virtual_weight_restrictions = virtual_weight_restrictions
def executa(self, data_file, diretorio_saida):
"""
Executa a análise DEA e salva resultados no diretorio_saida.
Nota: a primeira coluna deve conter os nomes das DMUs.
"""
# Read the "data_file" (including its path) into a pandas DataFrame
df = pd.read_excel(data_file)
# Keep in "df" only the first column (DMU names) and the columns used in the DEA (inputs and outputs)
df = df[[df.columns[0]] + self.input_categories + self.output_categories]
# Keep only the file name part of "data_file"
arquivo_entrada_basename = os.path.basename(data_file)
# Build a temporary path for the file named "arquivo_entrada_basename"
arquivo_entrada_temp = os.path.join(tempfile.gettempdir(), arquivo_entrada_basename)
# Write the data in "df" to an "xlsx" file in the temporary directory
df.to_excel(arquivo_entrada_temp, index=False)
# Build the parameter-file contents from the "TEMPLATE_ARQUIVO_PARAMETROS" template
params_file = TEMPLATE_ARQUIVO_PARAMETROS.format(
data_file=arquivo_entrada_temp,
input_categories=';'.join(self.input_categories),
output_categories=';'.join(self.output_categories),
virtual_weight_restrictions=';'.join(self.virtual_weight_restrictions)
)
# Create a temporary file holding the model parameters
with tempfile.NamedTemporaryFile(mode='w+', delete=False, prefix='pydea_') as arquivo_parametros_tmp:
# Write the parameters to the file
arquivo_parametros_tmp.write(params_file)
# Move the file pointer back to the beginning of the file
arquivo_parametros_tmp.seek(0)
print('Training DEA model with data from spreadsheet "{}"'.format(data_file))
# Run the model
PyDEAMain.main(filename=arquivo_parametros_tmp.name, output_dir=diretorio_saida)
print('Training finished.\n\n')
def executa_por_cluster(self, data_file, diretorio_saida, coluna_cluster):
"""
Executa a análise DEA separadamente em cada subconjunto da planilha de entrada identificados pelos
valores da coluna_cluster. Ao final, os resultados para cada cluster são salvos no diretorio_saida
e uma cópia da planilha de entrada é salva com a coluna EFICIENCIA adicionada com os escores calculados
para cada DMU não normalizados.
"""
# Read the "data_file" (including its path) into a pandas DataFrame
df = pd.read_excel(data_file)
# Get a NumPy ndarray of the unique cluster numbers contained...
# in the coluna_cluster column of "df"
clusters = df[coluna_cluster].unique()
# Get the temporary directory
diretorio_temporario = tempfile.gettempdir()
# Add a column for the DEA score to df
df['EFICIENCIA'] = -1
# Iterate over each cluster number
for cluster in clusters:
print('Training DEA for cluster {cluster}...'.format(cluster=cluster))
# Get a pandas DataFrame containing only the hospital units of cluster "cluster"
df_filtrado = df[df[coluna_cluster] == cluster]
# Get a list of the column names of "df_filtrado"
lista_colunas_sem_coluna_cluster = df_filtrado.columns.to_list()
# Remove the coluna_cluster string from that list
lista_colunas_sem_coluna_cluster.remove(coluna_cluster)
# Remove the "EFICIENCIA" string from that list
lista_colunas_sem_coluna_cluster.remove('EFICIENCIA')
# Rebuild "df_filtrado" with only the columns present in "lista_colunas_sem_coluna_cluster"
df_filtrado = df_filtrado[lista_colunas_sem_coluna_cluster]
# Get the name of "data_file" without its extension
arquivo_entrada_basename = os.path.basename(os.path.splitext(data_file)[0])
# Build the "xlsx" file path for cluster "cluster" in the temporary directory
arquivo_df_cluster = os.path.join(diretorio_temporario, '{arquivo_entrada}_cluster_{cluster}.xlsx'
.format(arquivo_entrada=arquivo_entrada_basename, cluster=cluster))
# Write the "df_filtrado" data to the cluster's "xlsx" file located...
# in the temporary directory
df_filtrado.to_excel(arquivo_df_cluster)
# Call the "executa" method of this same class
self.executa(arquivo_df_cluster, diretorio_saida)
# Take the name of "arquivo_df_cluster" without its extension and append "_result.xlsx"
arquivo_resultado_basename = os.path.splitext(os.path.basename(arquivo_df_cluster))[0] + '_result.xlsx'
# Build the results "xlsx" file path for cluster "cluster" in the "diretorio_saida" directory
arquivo_resultado = os.path.join(diretorio_saida, arquivo_resultado_basename)
# Read the "xlsx" file "arquivo_resultado" into a pandas DataFrame...
# skipping the first row
df_resultado =
|
pd.read_excel(arquivo_resultado, skiprows=1)
|
pandas.read_excel
|
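# A hedged usage sketch for the ModeloDEA wrapper above; the category names, virtual weight
# restriction strings and file paths are placeholders (their exact syntax must follow what
# pyDEA expects), not values from the original work.
modelo = ModeloDEA(
    input_categories=['DOCTORS', 'BEDS'],
    output_categories=['ADMISSIONS'],
    virtual_weight_restrictions=['ADMISSIONS >= 0.2'])
modelo.executa('hospitals.xlsx', 'results/')                          # one DEA run over the whole sheet
modelo.executa_por_cluster('hospitals.xlsx', 'results/', 'CLUSTER')   # one run per cluster value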
# coding: utf-8
# In[193]:
#Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
import plotly
import plotly.plotly as py
import plotly.tools as tls
import plotly.graph_objs as go
import time
import pandas_datareader as web
# Package and modules for importing data;
import datetime
import requests
import json as js
import csv
# In[195]:
# Calling API for Microsoft stock prices
headers = {
'X-API-KEY': 'Get API key',
}
API_KEY = 'Get API key'
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=MSFT&outputsize=full&apikey='+API_KEY
api_call=requests.get(url, headers=headers)
file1 = api_call.text
file1=js.loads(api_call.text)
# In[197]:
file1['Time Series (Daily)']['2017-07-27']
# In[198]:
# To write into csv
from datetime import datetime
csv.writer(open("data.csv", "wb"), dialect="excel")
x = file1
#f = csv.writer(open("abc.csv", ""))
# Write CSV Header, If you dont need that, remove this line
#f.writerow(["pk", "model", "codename", "name", "content_type"])
temp_data = file1['Time Series (Daily)']
with open('Microsoft_stock.csv','w') as f:
writ = csv.writer(f)
label=('Date','Open','High','Low','Close','Volume')
writ.writerow(label)
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
fields =[datetime_object,
float(temp_data[temp_date]['1. open']),
float(temp_data[temp_date]['2. high']),
float(temp_data[temp_date]['3. low']),
float(temp_data[temp_date]['4. close']),
float(temp_data[temp_date]['5. volume'])]
# print (fields)
with open('Microsoft_stock.csv','a') as f:
writ = csv.writer(f)
writ.writerow(fields)
# In[199]:
# Changing time to Day Month Year format
temp_data = file1['Time Series (Daily)']
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
(datetime_object)
# In[200]:
Microsoft=pd.read_csv('Microsoft_stock.csv', parse_dates=True, index_col=0 )
Microsoft.dropna(inplace=True)
print(Microsoft.head(5))
# In[201]:
Microsoft.index.values
# In[202]:
#Cleaning the index values. Changing time to Day Month Year format
Address_M='Microsoft_stock.csv'
Microsoft=pd.read_csv(Address_M)
Microsoft['Date'] = pd.to_datetime(Microsoft['Date']).apply(lambda x: x.strftime('%Y-%m-%d')if not pd.isnull(x) else '')
# In[203]:
Microsoft[['High','Low']].plot()
plt.show()
print()
# In[204]:
a=Microsoft['Date']
b=Microsoft['High']
trace= go.Scatter(x=a,y=b)
data=[trace]
py.iplot(data, filename='Microsoft')
# In[205]:
# Calling API for Apple's stock prices
headers = {
'X-API-KEY': 'Get api key ',
}
API_KEY = 'Get api key'
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=AAPL&outputsize=full&apikey='+API_KEY
api_call=requests.get(url, headers=headers)
file2 = api_call.text
file2=js.loads(api_call.text)
# In[206]:
# To write into csv
csv.writer(open("data.csv", "wb"), dialect="excel")
x = file2
temp_data = file2['Time Series (Daily)']
with open('Apple_stock.csv','w') as f:
writ = csv.writer(f)
label=('Date','Open','High','Low','Close','Volume')
writ.writerow(label)
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
fields =[datetime_object,
float(temp_data[temp_date]['1. open']),
float(temp_data[temp_date]['2. high']),
float(temp_data[temp_date]['3. low']),
float(temp_data[temp_date]['4. close']),
float(temp_data[temp_date]['5. volume'])]
# print (fields)
with open('Apple_stock.csv','a') as f:
writ = csv.writer(f)
writ.writerow(fields)
# In[207]:
# Changing time to Day Month Year format
temp_data = file2['Time Series (Daily)']
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
(datetime_object)
# In[208]:
Apple=pd.read_csv('Apple_stock.csv', parse_dates=True, index_col=0 )
Apple.dropna(inplace=True)
# In[209]:
#Cleaning the index values. Changing time to Day Month Year format
Address_A='Apple_stock.csv'
Apple=pd.read_csv(Address_A)
Apple['Date'] = pd.to_datetime(Apple['Date']).apply(lambda x: x.strftime('%Y-%m-%d')if not pd.isnull(x) else '')
# In[210]:
a=Apple['Date']
b=Apple['High']
trace= go.Scatter(x=a,y=b)
data=[trace]
py.iplot(data, filename='Apple')
# In[211]:
# Calling API for Facebook stock prices
headers = {
'X-API-KEY': 'Get API key',
}
API_KEY = 'Get API key'
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=FB&outputsize=full&apikey='+API_KEY
api_call=requests.get(url, headers=headers)
file3 = api_call.text
file3=js.loads(api_call.text)
# In[212]:
# To write into csv
csv.writer(open("data.csv", "wb"), dialect="excel")
x = file3
temp_data = file3['Time Series (Daily)']
with open('Facebook_stock.csv','w') as f:
writ = csv.writer(f)
label=('Date','Open','High','Low','Close','Volume')
writ.writerow(label)
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
fields =[datetime_object,
float(temp_data[temp_date]['1. open']),
float(temp_data[temp_date]['2. high']),
float(temp_data[temp_date]['3. low']),
float(temp_data[temp_date]['4. close']),
float(temp_data[temp_date]['5. volume'])]
# print (fields)
with open('Facebook_stock.csv','a') as f:
writ = csv.writer(f)
writ.writerow(fields)
# In[213]:
# Changing time to Day Month Year format
temp_data = file3['Time Series (Daily)']
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
(datetime_object)
# In[214]:
Facebook = pd.read_csv('Facebook_stock.csv', parse_dates=True, index_col=0 )
Facebook.dropna(inplace=True)
# In[215]:
#Cleaning the index values. Changing time to Day Month Year format
Address_F='Facebook_stock.csv'
Facebook=pd.read_csv(Address_F)
Facebook['Date'] = pd.to_datetime(Facebook['Date']).apply(lambda x: x.strftime('%Y-%m-%d')if not pd.isnull(x) else '')
# In[216]:
a=Facebook['Date']
b=Facebook['High']
trace= go.Scatter(x=a,y=b)
data=[trace]
py.iplot(data, filename='Facebook')
# In[217]:
# Calling API for Google stock prices
headers = {
'X-API-KEY': 'Get API key',
}
API_KEY = 'Get API key'
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=GOOG&outputsize=full&apikey='+API_KEY
api_call=requests.get(url, headers=headers)
file4 = api_call.text
file4=js.loads(api_call.text)
a=file4['Time Series (Daily)']
# In[218]:
x = file4
temp_data = file4['Time Series (Daily)']
with open('Google_stock.csv','w') as f:
writ = csv.writer(f)
label=('Date','Open','High','Low','Close','Volume')
writ.writerow(label)
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
fields =[datetime_object,
float(temp_data[temp_date]['1. open']),
float(temp_data[temp_date]['2. high']),
float(temp_data[temp_date]['3. low']),
float(temp_data[temp_date]['4. close']),
float(temp_data[temp_date]['5. volume'])]
# print (fields)
with open('Google_stock.csv','a') as f:
writ = csv.writer(f)
writ.writerow(fields)
# In[219]:
# Changing time to Day Month Year format
temp_data = file4['Time Series (Daily)']
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
(datetime_object)
# In[220]:
Google = pd.read_csv('Google_stock.csv', parse_dates=True, index_col=0 )
Google.dropna(inplace=True)
# In[221]:
#Cleaning the index values. Changing time to Day Month Year format
Address_G='Google_stock.csv'
Google=pd.read_csv(Address_G)
Google['Date'] = pd.to_datetime(Google['Date']).apply(lambda x: x.strftime('%Y-%m-%d')if not pd.isnull(x) else '')
# In[222]:
a=Google['Date']
b=Google['High']
trace= go.Scatter(x=a,y=b)
data=[trace]
py.iplot(data, filename='Google')
# In[224]:
# Calling API for Disney stock prices
headers = {
'X-API-KEY': 'Get API key',
}
API_KEY = 'Get API key'
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=DIS&outputsize=full&apikey='+API_KEY
api_call=requests.get(url, headers=headers)
file5 = api_call.text
file5=js.loads(api_call.text)
# In[225]:
# To write into csv
csv.writer(open("data.csv", "wb"), dialect="excel")
x = file5
temp_data = file5['Time Series (Daily)']
with open('Disney_stock.csv','w') as f:
writ = csv.writer(f)
label=('Date','Open','High','Low','Close','Volume')
writ.writerow(label)
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
fields =[datetime_object,
float(temp_data[temp_date]['1. open']),
float(temp_data[temp_date]['2. high']),
float(temp_data[temp_date]['3. low']),
float(temp_data[temp_date]['4. close']),
float(temp_data[temp_date]['5. volume'])]
# print (fields)
with open('Disney_stock.csv','a') as f:
writ = csv.writer(f)
writ.writerow(fields)
# In[226]:
# Changing time to Day Month Year format
temp_data = file5['Time Series (Daily)']
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
(datetime_object)
# In[227]:
Disney = pd.read_csv('Disney_stock.csv', parse_dates=True, index_col=0 )
Disney.dropna(inplace=True)
# In[228]:
#Cleaning the index values. Changing time to Day Month Year format
Address_D='Disney_stock.csv'
Disney=pd.read_csv(Address_D)
Disney['Date'] =
|
pd.to_datetime(Disney['Date'])
|
pandas.to_datetime
|
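# The per-row csv loops above can also be replaced by building each frame straight from the
# 'Time Series (Daily)' dict (keys are dates, values are the '1. open' ... '5. volume' fields
# shown earlier). A hedged alternative sketch reusing file1 from above:
temp_data = file1['Time Series (Daily)']
msft = pd.DataFrame.from_dict(temp_data, orient='index').astype(float)
msft = msft.rename(columns={'1. open': 'Open', '2. high': 'High', '3. low': 'Low',
                            '4. close': 'Close', '5. volume': 'Volume'})
msft.index = pd.to_datetime(msft.index)
msft.sort_index(inplace=True)
msft.to_csv('Microsoft_stock.csv', index_label='Date')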
import numpy as np
import pandas as pd
import matplotlib.image as mpimg
from typing import Tuple
from .config import _dir
_data_dir = '%s/input' % _dir
cell_types = ['HEPG2', 'HUVEC', 'RPE', 'U2OS']
positive_control = 1108
negative_control = 1138
nsirna = 1108 # excluding 30 positive_control + 1 negative_control
plate_shape = (14, 22)
def set_data_dir(d):
global _data_dir
_data_dir = d
def load_header(set_type):
"""
Args:
set_type (str): train or test
id_code experiment plate well sirna well_type cell_type
HEPG2-01_1_B03 HEPG2-01 1 B03 513 positive_control HEPG2
"""
df =
|
pd.read_csv('%s/%s.csv' % (_data_dir, set_type))
|
pandas.read_csv
|
#!/usr/local/var/pyenv/shims/python3
# !python3
import calendar
import pandas as pd
import plotly.express as px
mapbox_public_token = '<KEY>'
px.set_mapbox_access_token(mapbox_public_token)
def abnyc_query(conn):
df = pd.read_sql_query("select * from abnyc;", conn, index_col='id')
while True:
print('1. NYC Airbnb statistics by minimum nights on map.\n'
'2. NYC Airbnb statistics by availability on map.\n'
'3. NYC Airbnb statistics by reviews on map.\n'
'q. Quit')
choice = input('Input Here: ')
size_indicator = ""
if choice == '1':
query = "SELECT compound.id, compound.latitude, compound.longitude, nbhd.neighbourhood_group, compound.minimum_nights " \
"FROM (" \
"SELECT geo.id, geo.latitude, geo.longitude, main.neighbourhood, main.minimum_nights " \
"FROM abnyc_geo AS geo " \
"INNER JOIN abnyc AS main " \
"ON geo.id = main.id) AS compound " \
"INNER JOIN (" \
"SELECT * FROM abnyc_nbhd) AS nbhd " \
"ON nbhd.neighbourhood = compound.neighbourhood;"
size_indicator = "minimum_nights"
elif choice == '2':
query = "SELECT compound.id, compound.latitude, compound.longitude, nbhd.neighbourhood_group, compound.availability_365 " \
"FROM (" \
"SELECT geo.id, geo.latitude, geo.longitude, main.neighbourhood, main.availability_365 " \
"FROM abnyc_geo AS geo " \
"INNER JOIN abnyc AS main " \
"ON geo.id = main.id) AS compound " \
"INNER JOIN (" \
"SELECT * FROM abnyc_nbhd) AS nbhd " \
"ON nbhd.neighbourhood = compound.neighbourhood;"
size_indicator = "availability_365"
elif choice == '3':
query = "SELECT compound.id, compound.latitude, compound.longitude, nbhd.neighbourhood_group, compound.number_of_reviews " \
"FROM (" \
"SELECT geo.id, geo.latitude, geo.longitude, main.neighbourhood, main.number_of_reviews " \
"FROM abnyc_geo AS geo " \
"INNER JOIN abnyc AS main " \
"ON geo.id = main.id) AS compound " \
"INNER JOIN (" \
"SELECT * FROM abnyc_nbhd) AS nbhd " \
"ON nbhd.neighbourhood = compound.neighbourhood;"
size_indicator = "number_of_reviews"
else:
break
df = pd.read_sql_query(query, conn)
fig = px.scatter_mapbox(df, lat='latitude', lon='longitude',
color='neighbourhood_group',
size=size_indicator,
opacity=0.8,
color_continuous_scale=px.colors.cyclical.IceFire, size_max=15, zoom=10)
fig.update_layout(
mapbox_style="dark",
showlegend=False,
mapbox_layers=[
{
"below": 'traces',
"sourcetype": "raster",
},
]
)
fig.update_layout(margin={"r": 0, "t": 0, "l": 0, "b": 4})
fig.show()
def liquor_query(conn):
df = pd.read_sql_query("select * from abnyc;", conn, index_col='id')
while True:
print('1. NYC liquor statistics by month on map.\n'
'2. NYC liquor statistics by year on map.\n'
'3. NYC liquor statistics overall on map.\n'
'q. Quit')
choice = input('Input Here: ')
# data_per = int(input('How many data you want to see? (Enter a integer less than 100000)\n Enter here: ')
if choice == '1':
year_month = input('Which [YEAR-MONTH] would you like to check?\nEnter here: ')
query = "SELECT compound.license_serial_number, compound.latitude, compound.longitude, compound.license_effective_date, type.license_type_name " \
"FROM (" \
"SELECT geo.license_serial_number, geo.latitude, geo.longitude, main.license_class_code, main.license_effective_date " \
"FROM liquor_geo AS geo " \
"INNER JOIN ( " \
"SELECT * " \
"FROM liquor " \
"WHERE license_effective_date >= '%(year)s-%(month)s-01' AND license_effective_date < '%(year)s-%(month)s-%(end_day)s') AS main " \
"ON geo.license_serial_number = main.license_serial_number) AS compound " \
"INNER JOIN (" \
"SELECT * FROM liquor_type) AS type " \
"ON type.license_class_code = compound.license_class_code;"
year = year_month.split("-")[0]
month = year_month.split("-")[1]
month_range = calendar.monthrange(int(year), int(month))
end_day = month_range[1]
df = pd.read_sql_query(query, conn, params={'year': int(year), 'month': int(month), 'end_day': end_day})
elif choice == '2':
year = int(input('Which [YEAR] would you like to check?\nEnter here: '))
query = "SELECT compound.license_serial_number, compound.latitude, compound.longitude, compound.license_effective_date, type.license_type_name " \
"FROM (" \
"SELECT geo.license_serial_number, geo.latitude, geo.longitude, main.license_class_code, main.license_effective_date " \
"FROM liquor_geo AS geo " \
"INNER JOIN ( " \
"SELECT * " \
"FROM liquor " \
"WHERE license_effective_date >= '%(year)s-01-01' AND license_effective_date <= '%(year)s-12-31') AS main " \
"ON geo.license_serial_number = main.license_serial_number) AS compound " \
"INNER JOIN (" \
"SELECT * FROM liquor_type) AS type " \
"ON type.license_class_code = compound.license_class_code;"
df = pd.read_sql_query(query, conn, params={'year': year})
elif choice == '3':
query = "SELECT compound.license_serial_number, compound.latitude, compound.longitude, compound.license_effective_date, type.license_type_name " \
"FROM (" \
"SELECT geo.license_serial_number, geo.latitude, geo.longitude, main.license_class_code, main.license_effective_date " \
"FROM liquor_geo AS geo " \
"INNER JOIN liquor AS main " \
"ON geo.license_serial_number = main.license_serial_number) AS compound " \
"INNER JOIN (" \
"SELECT * FROM liquor_type) AS type " \
"ON type.license_class_code = compound.license_class_code;"
# size_indicator = "number_of_reviews"
df = pd.read_sql_query(query, conn)
else:
break
# df = df.sample(data_per)
fig = px.scatter_mapbox(df, lat='latitude', lon='longitude',
# color='license_effective_date',
# size=10,
opacity=0.8,
color_continuous_scale=px.colors.cyclical.IceFire, size_max=15, zoom=10)
fig.update_layout(
mapbox_style="dark",
showlegend=False,
mapbox_layers=[
{
"below": 'traces',
"sourcetype": "raster",
},
]
)
fig.update_layout(margin={"r": 0, "t": 0, "l": 0, "b": 4})
fig.show()
def crime_query(conn):
df = pd.read_sql_query("select * from airquality_indicator;", conn, index_col='indicator_id')
while True:
print('1. Crime statistics by crime.\n'
'2. Crime statistics by months.\n'
'3. Crime statistics by hours.\n'
'4. Crime statistics by map.\n'
'q. Quit')
choice = input('Input Here: ')
if choice == '1':
query = "SELECT cd.ky_cd, ofns_desc, law_cat_cd, count(cmplnt_num) " \
"FROM crime c " \
"JOIN crime_desc cd " \
"ON (c.ky_cd=cd.ky_cd) " \
"GROUP BY cd.ky_cd, ofns_desc, law_cat_cd " \
"ORDER BY count desc;"
df = pd.read_sql_query(query,
conn)
print(df)
fig = px.bar(df, x='ofns_desc', y='count',
color='ofns_desc', barmode='relative',
hover_data=['law_cat_cd'],
labels={'pop': 'New York City Crime Data'})
fig.show()
elif choice == '2':
query = "select TO_CHAR(cmplnt_fr_dt, 'Month') as cmplnt_year, count(*) from crime group by cmplnt_year;"
df = pd.read_sql_query(query,
conn)
print(df)
fig = px.line(df, x='cmplnt_year', y='count')
fig.show()
elif choice == '3':
date_method = 'hour'
query = "select date_trunc(%(d_method)s, cmplnt_fr_tm) as cmplnt_hour, count(cmplnt_num) " \
"from crime " \
"group by cmplnt_hour;"
df = pd.read_sql_query(query,
conn,
params={'d_method': date_method})
df['cmplnt_hour'] = df['cmplnt_hour'].astype(str).str[-18:-10]
df['cmplnt_hour'] =
|
pd.to_datetime(df['cmplnt_hour'], format='%H:%M:%S')
|
pandas.to_datetime
|
import argparse
import datetime
import json
import pandas as pd
import requests
from tqdm import tqdm
import logging
import os
import sys
sys.path.append(os.path.join(os.getcwd()))
import tasking_manager_stats.data_management as dm
def get_args():
parser = argparse.ArgumentParser(description='Compute stats with ohsome')
parser.add_argument('project_id', type=int, help='Id of the HOT tasking manager project')
parser.add_argument('-project_list', type=str,
help='File containing a list of project id to compute building stats on all of them')
return parser.parse_args()
class NoDataException(Exception):
pass
class URLTooLongException(Exception):
pass
class OhsomeDataTooOldException(Exception):
pass
def get_json_request_header():
"""
Return the header for JSON request
:return:
"""
return {'Accept': 'application/json', 'Authorization': 'Token sessionTokenHere==', 'Accept-Language': 'en'}
def get_last_available_ohsome_date():
url = 'http://api.ohsome.org/v0.9/elementsFullHistory/geometry?bboxes=0,0,0,0'\
'&keys=landuse&properties=tags&showMetadata=false&time=2019-01-01,'
test_time = datetime.datetime.now() - datetime.timedelta(days=30)
status = 404
while status == 404:
r = requests.get(url + test_time.strftime('%Y-%m-%d'), headers=get_json_request_header())
status = r.status_code
test_time = test_time - datetime.timedelta(days=10)
return test_time.strftime('%Y-%m-%d')
def download_ohsome_data(area, start_time, end_time, tag, tag_type=None):
"""
Download data for the ohsome API
:param area: Download area
:param start_time: Start time of the full history OSM (format %Y-%m-%d)
:param end_time: End time of the full history OSM (format %Y-%m-%d)
:param tag: OSM tag on which data are filtered
:param tag_type: OSM type 'node', 'way' or 'relation' OR geometry 'point', 'line' or 'polygon'; default: all 3 OSM types
:return:
"""
url = 'http://api.ohsome.org/v0.9/elementsFullHistory/geometry?' + area +\
'&keys=' + tag + '&properties=tags&showMetadata=false&time=' + start_time + ',' + end_time
if tag_type is not None:
url += '&types=' + tag_type
logging.info(f'Extract {tag} data between {start_time} and {end_time}')
r = requests.get(url, headers=get_json_request_header())
if r.status_code == 414:
raise URLTooLongException()
elif r.status_code != 200:
logging.error(json.loads(r._content.decode())['message'])
r.raise_for_status()
return r.json()
def download_project_ohsome_data(area, start_date, end_date):
ohsome_max_date = get_last_available_ohsome_date()
if datetime.datetime.strptime(ohsome_max_date, '%Y-%m-%d') < datetime.datetime.strptime(end_date, '%Y-%m-%d'):
raise OhsomeDataTooOldException(f'ohsome data end {ohsome_max_date} whereas the latest project update was {end_date}')
if start_date == end_date:
end_date = (datetime.datetime.strptime(end_date, '%Y-%m-%d') + datetime.timedelta(days=1)).strftime('%Y-%m-%d')
return download_ohsome_data(area, start_date, end_date, 'building', tag_type=None)
def get_project_param(project_id):
db = dm.Database(project_id)
area = 'bboxes=' + str(db.get_perimeter_bounding_box()).replace('[', '').replace(']', '').replace(' ', '')
start_date = db.get_creation_date()
end_date = db.compute_final_validation_date()
if end_date == '1970-01-01':
logging.warning('No validation found !')
end_date = db.get_latest_update_date()
return area, start_date, end_date
def ohsome_to_df(data):
df = pd.DataFrame()
if len(data['features']) == 0:
raise NoDataException()
for feature in tqdm(data['features']):
df = pd.concat([df, pd.DataFrame(data=[(feature['properties']['@osmId'],
feature['properties']['@validFrom'],
feature['properties']['@validTo'])],
columns=['osmId', 'validFrom', 'validTo'])], axis=0, ignore_index=True)
df['validFrom'] = pd.to_datetime(df['validFrom'])
df['validTo'] = pd.to_datetime(df['validTo'])
return df
def print_ohsome_stats(project_id):
area, start_date, end_date = get_project_param(project_id)
data = download_project_ohsome_data(area, start_date, end_date)
print('Downloading building done.')
print('Process building data')
df = ohsome_to_df(data)
print('Kept without modification :')
kept = ((df['validFrom'] == start_date + ' 00:00:00') & (df['validTo'] == end_date + ' 00:00:00')).sum()
print(kept)
df2 = df.groupby('osmId').agg({'validFrom': min, 'validTo': max})
print('\nDeleted :')
print(((df2['validFrom'] == start_date + ' 00:00:00') & (df2['validTo'] < end_date + ' 00:00:00')).sum())
print('\nUpdated :')
print(((df2['validFrom'] == start_date + ' 00:00:00') & (df2['validTo'] == end_date + ' 00:00:00')).sum() - kept)
print('\nCreated :')
print(((df2['validFrom'] > start_date + ' 00:00:00') & (df2['validTo'] == end_date + ' 00:00:00')).sum())
print('\nTotal current :')
print((df2['validTo'] == end_date + ' 00:00:00').sum())
print('Download highway stats')
url = 'https://api.ohsome.org/v0.9/elements/length?' + area + \
'&keys=highway&format=json&showMetadata=false&types=way&time=' + start_date + '%2F' + end_date + '%2FP1D'
r = requests.get(url, headers=get_json_request_header())
data = r.json()
print('\nDelta highway (km):')
print(round((data['result'][-1]['value'] - data['result'][0]['value']) / 1000))
def get_building_data(project_id):
building_file = os.path.join(dm.get_data_dir(), 'buildings', f'{project_id}.csv')
if os.path.exists(building_file):
# Data already computed
logging.info('Data already computed')
return pd.read_csv(building_file)
ohsome_file = os.path.join(dm.get_data_dir(), 'ohsome', f'{project_id}_buildings.json')
if os.path.exists(ohsome_file):
# Read local ohsome data
with open(ohsome_file, 'r') as f:
data = json.load(f)
else:
# Download data from ohsome
data = download_project_ohsome_data(*get_project_param(project_id))
with open(ohsome_file, 'w') as f:
json.dump(data, f)
# Format ohsome data in dataframe
df = ohsome_to_df(data)
# Count the building number by date at 00:00:00
count_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
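# A hedged usage example for the ohsome helpers defined above; the bounding box and start date
# are placeholders, and the calls hit the live ohsome API exactly as download_ohsome_data does.
area = 'bboxes=8.625,49.375,8.735,49.465'            # minLon,minLat,maxLon,maxLat
end_date = get_last_available_ohsome_date()          # latest timestamp the API can serve
data = download_ohsome_data(area, '2019-01-01', end_date, 'building', tag_type='polygon')
df = ohsome_to_df(data)                              # one row per feature version: osmId / validFrom / validTo
print(len(df), 'building versions between 2019-01-01 and', end_date)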
"""
Multicam tracking module that consumes the data from kafka, tracks the object,
and puts it back into another kafka topic
"""
__version__ = '0.2'
import json
import logging
import time
from timeit import default_timer as timer
from datetime import datetime
import pandas as pd
from kafka import KafkaConsumer, KafkaProducer, errors
from . import constants, ioutils, mctracker, validation
DEFAULT_KAFKA_LOGS_FILE = "consumerlog.csv"
DEFAULT_TRACKER_LOGS_FILE = "mctracker_log.csv"
class McTrackerStream:
"""
The main class for streaming multicam tracker
Instance variables stored:
1. mctracker_obj {MulticamTracker} -- The multi-cam tracker object
2. in_kafkaservers {string} -- Input kafka bootstrap servers
3. in_kafkatopics {string} -- Input kafka topic
4. out_kafkaservers {string} -- Output kafka bootstrap servers
5. out_kafkatopics {string} -- Output kafka topic
6. config {dict} -- Config dictionary for MulticamTracker
7. ignore_dict{dict} -- dictionary of regions to
be ignored. It will be in the format:
key = camera name
value = list of polygons to be ignored
"""
def __init__(self, in_kafkaservers, in_kafkatopics, out_kafkaservers,
out_kafkatopics, config_file, time_prof_flag=False):
"""
Initialize Streaming MC tracker
Arguments:
in_kafkaservers {string} -- Input kafka bootstrap servers
in_kafkatopics {string} -- Input kafka topic
out_kafkaservers {string} -- Output kafka bootstrap servers
out_kafkatopics {string} -- Output kafka topic
config_file {string} -- The multicam tracker config file
time_prof_file {boolean} -- Flag (True/False) to enable/disable
time profiling
"""
self.in_kafkaservers = in_kafkaservers
self.in_kafkatopics = in_kafkatopics
self.out_kafkaservers = out_kafkaservers
self.out_kafkatopics = out_kafkatopics
self.time_prof_flag = time_prof_flag
self.config = json.load(open(config_file))
self.ignore_dict = self.config.get(
"IGNORE_DETECTION_DICT_MOVING", {})
# One time creation of polygons
self.ignore_poly_dict = ioutils.create_poly_dict(self.ignore_dict)
# Schema validation
self.schema = None
self.schema_validator = None
self.schema_file_name = self.config.get("JSON_SCHEMA_FILE", None)
if self.schema_file_name is not None:
try:
with open(self.schema_file_name) as schema_file:
self.schema = json.load(schema_file)
except IOError:
logging.error(
"ERROR: Schema file (%s) could not be opened. "
"No validation will be performed", self.schema_file_name)
except ValueError:
logging.error(
"ERROR: Schema file (%s) has invalid json. "
"No validation will be performed", self.schema_file_name)
self.sleep_time_sec = self.config.get(
"resample_time_sec", constants.RESAMPLE_TIME_IN_SEC)
self.mctracker_obj = mctracker.MulticamTracker(self.config)
# Debug related
self.reid_timings = []
# Instantiate kafka producer/consumer
self.consumer = None
self.producer = None
try:
self.consumer = KafkaConsumer(self.in_kafkatopics,
bootstrap_servers=self.in_kafkaservers,
value_deserializer=lambda m:
validation.schema_validate(m, self.schema))
except errors.NoBrokersAvailable:
err_msg = "ERROR: Consumer broker not available: {}".format(
self.in_kafkaservers)
logging.error(err_msg)
print("Cannot start streaming multitracker: {}".format(err_msg))
exit()
except Exception as exception:
err_msg = "ERROR: Consumer cannot be started. Unknown error: {}".format(
exception)
logging.error(err_msg)
print("Cannot start streaming multitracker: {}".format(err_msg))
exit()
if self.consumer is None:
err_msg = "ERROR: Consumer cannot be instantiated. Unknown error"
logging.error(err_msg)
print("Cannot start streaming multitracker: {}".format(err_msg))
exit()
try:
self.producer = KafkaProducer(bootstrap_servers=self.out_kafkaservers,
value_serializer=lambda m:
json.dumps(m).encode('utf-8'))
except errors.NoBrokersAvailable:
err_msg = "ERROR: Producer broker not available: {}".format(
self.out_kafkaservers)
logging.error(err_msg)
print("Cannot start streaming multitracker: {}".format(err_msg))
exit()
except Exception as exception:
err_msg = "ERROR: Producer cannot be started. Unknown error: {}".format(
exception)
logging.error(err_msg)
print("Cannot start streaming multitracker: {}".format(err_msg))
exit()
if self.producer is None:
err_msg = "ERROR: Producer cannot be instantiated. Unknown error"
logging.error(err_msg)
print("Cannot start streaming multitracker: {}".format(err_msg))
exit()
def start_mctracker(self):
"""
This method:
1. Continuously listens to an input kafka (given by in_kafkaservers and
in_kafkatopics)
2. Performs multicam tracking
3. Writes the tracked objects to another kafka (given by out_kafkaservers
and out_kafkatopics)
"""
iters = 0
num_msgs_received = 0
recs = []
# Debugging-related objects
start_time = tstart_time = ptime_taken = ttime_taken = None
num_iters_to_print = int(
constants.APPROX_TIME_PERIOD_TO_PRINT_INFO_IN_SEC /
float(self.sleep_time_sec))
while True:
if self.time_prof_flag:
tstart_time = time.time()
raw_messages = self.consumer.poll(
timeout_ms=self.sleep_time_sec*1000.0, max_records=5000)
start_time = time.time()
json_list = []
for _, msg_list in raw_messages.items():
num_msgs_received += len(msg_list)
for msg in msg_list:
curr_time = int(round(time.time() * 1000))
kafka_ts = msg.timestamp
recs.append({'currTime': curr_time, 'kafkaTs': kafka_ts})
json_list.append(msg.value)
if self.time_prof_flag:
pstart_time = time.time()
retval = self.track_list(json_list)
time_taken = time.time() - start_time
if self.time_prof_flag:
ptime_taken = time.time() - pstart_time
ttime_taken = time.time() - tstart_time
res = {'currTime': start_time, 'count': len(json_list),
'timeTakenMs': time_taken * 1000.0,
'reidTimeTakenMs': ptime_taken*1000.0,
'totalTimeTakenMs': ttime_taken * 1000.0,
"num_unidentified_cars":
len(self.mctracker_obj.state.unidentified_cars),
"num_prev_list":
len(self.mctracker_obj.state.prev_list),
"num_carry_over_list":
len(self.mctracker_obj.state.carry_over_list),
"num_retval":
len(retval),
"num_match_stats":
len(self.mctracker_obj.state.match_stats),
"num_possible_parked_cars":
len(self.mctracker_obj.state.possible_parked_cars)}
self.reid_timings.append(res)
iters += 1
if (iters % num_iters_to_print) == 0:
logging.info(
"Mc-Tracker Stream: %s: Num msgs received = %d", str(datetime.now()), num_msgs_received)
if retval:
self.write_to_kafka(retval)
time_taken = time.time() - start_time
tts = self.sleep_time_sec - time_taken
if tts > 0:
time.sleep(tts)
if self.time_prof_flag:
if recs:
recs_pd = pd.DataFrame(recs)
recs_pd['kafkaTsDelayMs'] = recs_pd["currTime"] - \
recs_pd["kafkaTs"]
recs_pd.to_csv(DEFAULT_KAFKA_LOGS_FILE, index=False)
logging.debug("%s", str(recs_pd.describe(percentiles=[
0.05, 0.1, 0.25, 0.5, 0.75, 0.90, 0.95])))
else:
logging.debug("No data received")
def dump_stats(self):
"""
Write all the tracking timings into the file specified by
DEFAULT_TRACKER_LOGS_FILE
"""
if self.reid_timings:
recs_pd =
|
pd.DataFrame(self.reid_timings)
|
pandas.DataFrame
|
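# A hedged sketch of wiring up the streaming tracker described in the class docstring; broker
# addresses, topic names and the config file path are placeholders.
if __name__ == '__main__':
    tracker = McTrackerStream(
        in_kafkaservers='localhost:9092', in_kafkatopics='raw-detections',
        out_kafkaservers='localhost:9092', out_kafkatopics='mc-tracks',
        config_file='mctracker_config.json', time_prof_flag=True)
    try:
        tracker.start_mctracker()        # blocks: poll -> track -> publish loop
    except KeyboardInterrupt:
        tracker.dump_stats()             # write the timing logs on shutdown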
import pandas as pd
from src.tools.config_loader import Configuration
from operator import or_ as union
from functools import reduce
import numpy as np
config = Configuration.get_instance()
io = config["IO"]
local_config = config["CostCurveConfig"]
column_map = {"id": "id",
"source": "source",
"geographical_label": "geographical_label",
"year": "year",
"production_capacity": "production_capacity",
"amount": "amount",
"cost": "cost",
"lat": "lat",
"lon": "lon"}
def create_scenario_dataframes_geco(scenario):
"""
Reads GECO dataset and creates a dataframe of the given scenario
"""
df_sc = pd.read_csv(io["scenario_geco_path"])
df_sc_europe = df_sc.loc[df_sc["Country"] == "EU28"]
df_scenario = df_sc_europe.loc[df_sc_europe["Scenario"] == scenario]
return df_scenario
def unique_scenarios():
"""
Find unique scenarios in the GECO dataset
"""
return pd.read_csv(io["scenario_geco_path"]).Scenario.unique()
def fetch_objective_value(df, fuel, year):
"""
Get specific energy production for the desired fuel in the given year
"""
if fuel in ["Natural Gas", "natural gas"]:
fuel = "Gas"
if fuel == "Fossil fuels":
return df.loc[(df.Level1 == "Fossil fuels") & (df.Year == year)].Value.sum()
elif fuel in ["Gas", "Coal", "Biomass"]:
return df.loc[(df.Level2 == fuel) & (df.Year == year)].Value.values[0]
def close_powerplants(df, objective, capacity_factor, fuel, year):
"""
Simple algorithm to close power plants based on a given objective value
"""
df = df.copy()
power = objective * 1000 / (capacity_factor * 8.6)
if fuel == "Coal" and year >= 2038:
drop_index = df.loc[(df["geographical_label"] == "DE") & (df["source"].isin(["Hard Coal", "Lignite"]))].index
df = df.drop(drop_index)
while df.production_capacity.sum() > power:
min_year = df.year.min()
drop_index_year = df.loc[(df["year"] == min_year)].index
df_year = df.loc[drop_index_year]
min_prod = df_year.production_capacity.min()
drop_index = df_year.loc[(df_year["production_capacity"] == min_prod)].index
df = df.drop(drop_index)
return df.index
def map_capacity_factor(fuel):
"""
Get capacity factor of the plant from the config file
"""
return local_config["Scenario"]["CapacityFactors"][fuel]
def close_power_plants_per_fuel(df, fuel, year, scenario_df):
"""
Apply the close power plants algorithm per fuel
"""
if fuel == "Coal":
df_cut = df.loc[df.source.isin(["Hard Coal", "Lignite"])].copy()
else:
df_cut = df.loc[df.source == fuel].copy()
if fuel in ["Lignite", "Hard Coal", "Coal", "Coals"]:
fuel_s = "Coal"
elif fuel in ["Bioenergy"]:
fuel_s = "Biomass"
elif fuel in ["Natural Gas"]:
fuel_s = "Gas"
capacity_factor = map_capacity_factor(fuel_s)
objective = fetch_objective_value(scenario_df, fuel_s, year)
index = close_powerplants(df_cut, objective, capacity_factor, fuel, year)
return index
def idx_union(mylist):
"""
Helper function that builds the union of a list of indexes
"""
idx = reduce(union, (index for index in mylist))
return idx
def create_scenario_data_by_points(data, year, scenario):
"""
Creates a scenario dataset at point resolution
"""
final_index = create_index_for_scenario_mapping(data, year, scenario)
return data.loc[final_index]
def query_scenario_data(scenario):
"""
Get the desired scenario
"""
scenario_data = create_scenario_dataframes_geco(scenario)
return scenario_data
def create_index_for_scenario_mapping(data, year, scenario):
"""
Creates an updated index to create a scenario dataset
"""
scendata = query_scenario_data(scenario)
idx_dic = {"others": data[~(data["source"].isin(["Lignite", "Hard Coal", "Natural Gas", "Bioenergy"]))].index}
for fuel in ["Coal", "Natural Gas", "Bioenergy"]:
idx_dic[fuel] = close_power_plants_per_fuel(data, fuel, year, scendata)
idx_list = list(idx_dic.values())
final_index = idx_union(idx_list)
return final_index
def create_scenario_data_by_clusters(data, year, scenario, step=50):
"""
Creates scenarios using clustered points
"""
data = data.copy()
data["amount"] = data["amount"] / data["production_capacity"]
scendata = create_scenario_dataframes_geco(scenario)
for fuel in ["Coal", "Natural Gas", "Bioenergy"]:
if fuel in ["Lignite", "Hard Coal", "Coal", "Coals"]:
fuel_s = "Coal"
values = data.loc[data.source.isin(["Lignite", "Hard Coal"]), "production_capacity"]
elif fuel in ["Bioenergy"]:
fuel_s = "Biomass"
values = data.loc[data.source == fuel, "production_capacity"]
elif fuel in ["Natural Gas"]:
fuel_s = "Gas"
values = data.loc[data.source == fuel, "production_capacity"]
objective = fetch_objective_value(scendata, fuel_s, year)
capacity_factor = map_capacity_factor(fuel_s)
new_series = calculate_production_change(values, objective, capacity_factor, step=step)
data.update(new_series)
data["amount"] = data["amount"] * data["production_capacity"]
return data
def calculate_production_change(values, objective, capacity_factor, step=50):
"""
Percentage changes of production for the cluster-based scenario
"""
new_vals = values.values
index = values.index
power = objective * 1000 / (capacity_factor * 8.6)
total_sum = np.sum(new_vals)
i = 0
s = new_vals.shape[0]
if power > total_sum:
def test(x, y):
return x > y
else:
def test(x, y):
return x < y
step = -step
while test(power, total_sum):
new_vals[i % s] = max(new_vals[i % s] + step, 0)
total_sum = np.sum(new_vals)
i += 1
return
|
pd.Series(data=new_vals, name="production_capacity", index=index)
|
pandas.Series
|
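# A small worked example for calculate_production_change above: capacities are nudged in steps
# of `step`, round-robin, until the fleet total reaches the scenario target power of
# objective * 1000 / (capacity_factor * 8.6). The numbers below are invented.
import pandas as pd

caps = pd.Series([500.0, 400.0, 300.0], name="production_capacity")
# target = 6 * 1000 / (0.5 * 8.6) ~= 1395.3, versus an initial total of 1200
new_caps = calculate_production_change(caps, objective=6, capacity_factor=0.5, step=50)
print(new_caps.sum())   # 1400.0 -- the first total that meets the ~1395 target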
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
        # mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
        # mat: 2d matrix with shape (2, 3) to input. empty - makes sized
# objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
# what is this even checking??
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
        # collections.Sequence like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
            def __len__(self):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [pd.Series(i, index=['b', 'a', 'c'], name=str(i))
for i in range(3)]
result = pd.DataFrame(series)
expected = pd.DataFrame({'b': [0, 1, 2],
'a': [0, 1, 2],
'c': [0, 1, 2]},
columns=['b', 'a', 'c'],
index=['0', '1', '2'])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with pytest.raises(ValueError, match='arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
assert result.index.is_monotonic
# ordering ambiguous, raise exception
with pytest.raises(ValueError, match='ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list('ab'))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
result = DataFrame(tuples, columns=['y', 'z'])
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_from_dict_columns_parameter(self):
# GH 18529
# Test new columns parameter for from_dict that was added to make
# from_items(..., orient='index', columns=[...]) easier to replicate
result = DataFrame.from_dict(OrderedDict([('A', [1, 2]),
('B', [4, 5])]),
orient='index', columns=['one', 'two'])
expected = DataFrame([[1, 2], [4, 5]], index=['A', 'B'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
msg = "cannot use columns parameter with orient='columns'"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
orient='columns', columns=['one', 'two'])
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
columns=['one', 'two'])
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
tm.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name='x')
df = DataFrame(s)
expected = DataFrame(dict(x=s))
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
tm.assert_frame_equal(df, expected)
pytest.raises(ValueError, DataFrame, s, columns=[1, 2])
# #2234
a = Series([], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
# series with name and w/o
s1 = Series(arr, name='x')
df = DataFrame([s1, arr]).T
expected = DataFrame({'x': s1, 'Unnamed 0': arr},
columns=['x', 'Unnamed 0'])
tm.assert_frame_equal(df, expected)
# this is a bit non-intuitive here; the series collapse down to arrays
df = DataFrame([arr, s1]).T
expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
def test_constructor_Series_named_and_columns(self):
# GH 9232 validation
s0 = Series(range(5), name=0)
s1 = Series(range(5), name=1)
# matching name and column gives standard frame
tm.assert_frame_equal(pd.DataFrame(s0, columns=[0]),
s0.to_frame())
tm.assert_frame_equal(pd.DataFrame(s1, columns=[1]),
s1.to_frame())
# non-matching produces empty frame
assert pd.DataFrame(s0, columns=[1]).empty
assert pd.DataFrame(s1, columns=[0]).empty
def test_constructor_Series_differently_indexed(self):
# name
s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
# no name
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
other_index = Index(['a', 'b'])
df1 = DataFrame(s1, index=other_index)
exp1 = DataFrame(s1.reindex(other_index))
assert df1.columns[0] == 'x'
tm.assert_frame_equal(df1, exp1)
df2 = DataFrame(s2, index=other_index)
exp2 = DataFrame(s2.reindex(other_index))
assert df2.columns[0] == 0
tm.assert_index_equal(df2.index, other_index)
tm.assert_frame_equal(df2, exp2)
def test_constructor_manager_resize(self):
index = list(self.frame.index[:5])
columns = list(self.frame.columns[:3])
result = DataFrame(self.frame._data, index=index,
columns=columns)
tm.assert_index_equal(result.index, Index(index))
tm.assert_index_equal(result.columns, Index(columns))
def test_constructor_from_items(self):
items = [(c, self.frame[c]) for c in self.frame.columns]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items)
tm.assert_frame_equal(recons, self.frame)
# pass some columns
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])
tm.assert_frame_equal(recons, self.frame.loc[:, ['C', 'B', 'A']])
# orient='index'
row_items = [(idx, self.mixed_frame.xs(idx))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert recons['A'].dtype == np.float64
msg = "Must pass columns with orient='index'"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items(row_items, orient='index')
# orient='index', but thar be tuples
arr = construct_1d_object_array_from_listlike(
[('bar', 'baz')] * len(self.mixed_frame))
self.mixed_frame['foo'] = arr
row_items = [(idx, list(self.mixed_frame.xs(idx)))
for idx in self.mixed_frame.index]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
recons = DataFrame.from_items(row_items,
columns=self.mixed_frame.columns,
orient='index')
tm.assert_frame_equal(recons, self.mixed_frame)
assert isinstance(recons['foo'][0], tuple)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
orient='index',
columns=['one', 'two', 'three'])
xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],
columns=['one', 'two', 'three'])
tm.assert_frame_equal(rs, xp)
def test_constructor_from_items_scalars(self):
# GH 17312
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 4)])
msg = (r'The value in each \(key, value\) '
'pair must be an array, Series, or dict')
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', 1), ('B', 2)], columns=['col1'],
orient='index')
def test_from_items_deprecation(self):
# GH 17320
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],
columns=['col1', 'col2', 'col3'],
orient='index')
def test_constructor_mix_series_nonseries(self):
df = DataFrame({'A': self.frame['A'],
'B': list(self.frame['B'])}, columns=['A', 'B'])
tm.assert_frame_equal(df, self.frame.loc[:, ['A', 'B']])
msg = 'does not match index length'
with pytest.raises(ValueError, match=msg):
DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})
def test_constructor_miscast_na_int_dtype(self):
df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)
expected = DataFrame([[np.nan, 1], [1, 0]])
tm.assert_frame_equal(df, expected)
def test_constructor_column_duplicates(self):
# it works! #2079
df = DataFrame([[8, 5]], columns=['a', 'a'])
edf = DataFrame([[8, 5]])
edf.columns = ['a', 'a']
tm.assert_frame_equal(df, edf)
idf = DataFrame.from_records([(8, 5)],
columns=['a', 'a'])
tm.assert_frame_equal(idf, edf)
pytest.raises(ValueError, DataFrame.from_dict,
OrderedDict([('b', 8), ('a', 5), ('a', 6)]))
def test_constructor_empty_with_string_dtype(self):
# GH 9428
expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)
df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)
tm.assert_frame_equal(df, expected)
        df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)
import pandas as pd
from sklearn.model_selection import KFold, GridSearchCV
from xgboost import XGBRegressor, DMatrix, cv
def optimize(pipeline, train_X, train_y):
"""
Optimize XGBoost Regressor hyper-parameters
:param pipeline: pipeline containing the initial model
:param train_X: training data
:param train_y: training targets
:return:
"""
parameter_space = {}
fine_tune_range = [-1, 0, 1]
fine_fine_tune_range = [-0.1, -0.05, 0.0, 0.05, 0.1]
# 1. Find the optimal number of estimators
# ----------------------------------------
    # Search for the best number of estimators within 150 to 3000 in steps of 150.
parameter_space['model__n_estimators'] = [n for n in range(150, 3001, 150)]
print("Parameter search space: ", parameter_space)
# Initializing the grid search.
folds = KFold(n_splits=5, shuffle=True, random_state=0)
grid_search = GridSearchCV(pipeline,
param_grid=parameter_space,
scoring='neg_mean_absolute_error',
cv=folds,
n_jobs=4,
verbose=1)
grid_search.fit(train_X, train_y)
print("Best found parameter values: ", grid_search.best_params_)
print("Best score: ", grid_search.best_score_)
print()
# Fix n_estimators to the best found value
parameter_space['model__n_estimators'] = [grid_search.best_params_['model__n_estimators']]
# 2.1 Find the best combination of max_depth and min_child_weight
# ---------------------------------------------------------------
# Add max_depth and min_child_weight with possible values 1, 4, 7 each to the search.
parameter_space['model__max_depth'] = [x for x in [1, 4, 7]]
parameter_space['model__min_child_weight'] = [x for x in [1, 4, 7]]
print("Parameter search space: ", parameter_space)
grid_search.fit(train_X, train_y)
print("Best found parameter values: ", grid_search.best_params_)
print("Best score: ", grid_search.best_score_)
print()
# 2.2 Fine tune the combination of max_depth and min_child_weight
# ---------------------------------------------------------------
parameter_space['model__max_depth'] = [grid_search.best_params_['model__max_depth'] + i
for i in fine_tune_range]
parameter_space['model__min_child_weight'] = [grid_search.best_params_['model__min_child_weight'] + i
for i in fine_tune_range]
print("Parameter search space: ", parameter_space)
grid_search.fit(train_X, train_y)
print("Best found parameter values: ", grid_search.best_params_)
print("Best score: ", grid_search.best_score_)
print()
# Fix max_depth and min_child_weight with the best found values
parameter_space['model__max_depth'] = [grid_search.best_params_['model__max_depth']]
parameter_space['model__min_child_weight'] = [grid_search.best_params_['model__min_child_weight']]
# 3.1 Find the best combination of subsample and colsample_bytree
# ---------------------------------------------------------------
    # Add subsample and colsample_bytree with possible values 0.3, 0.6 and 0.9 each.
parameter_space['model__subsample'] = [x for x in [0.3, 0.6, 0.9]]
parameter_space['model__colsample_bytree'] = [x for x in [0.3, 0.6, 0.9]]
print("Parameter search space: ", parameter_space)
grid_search.fit(train_X, train_y)
print("Best found parameter values: ", grid_search.best_params_)
print("Best score: ", grid_search.best_score_)
print()
# 3.2 Fine tune the combination of subsample and colsample_bytree
# ---------------------------------------------------------------
parameter_space['model__subsample'] = [grid_search.best_params_['model__subsample'] + i
for i in fine_fine_tune_range]
parameter_space['model__colsample_bytree'] = [grid_search.best_params_['model__colsample_bytree'] + i
for i in fine_fine_tune_range]
print("Parameter search space: ", parameter_space)
grid_search.fit(train_X, train_y)
parameter_space['model__subsample'] = [grid_search.best_params_['model__subsample']]
parameter_space['model__colsample_bytree'] = [grid_search.best_params_['model__colsample_bytree']]
    # 4. Find the exact optimal number of estimators using early stopping
# --------------------------------------------------------
print("Parameter search space: ", parameter_space)
# Setting up parameter dict with found optimal values
params = {
'max_depth': parameter_space['model__max_depth'][0],
'min_child_weight': parameter_space['model__min_child_weight'][0],
'eta': 0.01, # learning rate
'subsample': parameter_space['model__subsample'][0],
'colsample_bytree': parameter_space['model__colsample_bytree'][0]
}
    cv_results = cv(params,
                    DMatrix(pd.get_dummies(train_X)
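    # Hedged sketch of how such a cv call is typically completed with xgboost's
    # native API (the argument values below are assumptions, not the author's
    # settings). xgboost.cv returns a DataFrame of per-round CV scores; with
    # early stopping, its length gives the effective optimal number of rounds:
    #
    #   cv_results = cv(params,
    #                   DMatrix(pd.get_dummies(train_X), label=train_y),
    #                   num_boost_round=5000, nfold=5, metrics='mae',
    #                   early_stopping_rounds=50, seed=0)
    #   best_n_estimators = len(cv_results)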
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 3 11:30:03 2018
@author: gary.allison
These routines take the pre-processed injection results and try to match
API numbers
"""
import pandas as pd
import numpy as np
import pandas.api.types as ptypes
from processInjectionInput import processAllFiles
##### --------------------------------------------------
#### Input file definitions
##### --------------------------------------------------
# set data dirs for input files and for resulting output files
datadir = './sources/'
outdir = './out/'
indir = datadir+'OH_injection/'
### metadata sources ###
SWDfn = indir+'Copy of SWD locations - July_2018.xls'
ODNR_permit_pickle = outdir+'ODNR_permit.pkl'
ODNR_injection_pickle = outdir+'ODNR_injection.pkl'
xlatefn = 'xlateAPI.txt'
xlate_excel = 'xlateAPI.xls'
pre_proc_out = outdir+'injection_tall_pre.csv'
inj_meta = outdir+'injection_meta_list.csv'
tempf = outdir+'temp.csv'
### --------------------------------------------------------------
def getTallSet(fn=pre_proc_out):
return pd.read_csv(fn)
ppout = pd.read_csv(pre_proc_out)
def prepInjData(fn=pre_proc_out):
ppout = pd.read_csv(pre_proc_out)
ppAPI = ppout.groupby(['API10'],as_index=False)['CompanyName'].last()
assert len(ppAPI) == len(ppAPI.API10.unique())
return ppAPI
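# Illustrative sketch of the groupby-last collapse in prepInjData (API numbers and
# company names below are hypothetical): duplicate API10 rows keep only the last
# CompanyName seen, leaving exactly one row per API10.
#
#   pd.DataFrame({'API10': ['3400112345', '3400112345', '3400198765'],
#                 'CompanyName': ['Old Operator', 'New Operator', 'Acme SWD']}) \
#       .groupby(['API10'], as_index=False)['CompanyName'].last()
#   #        API10   CompanyName
#   # 0  3400112345  New Operator
#   # 1  3400198765      Acme SWD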
def prepSWD(fn=SWDfn):
    SWD_df = pd.read_excel(fn)
|
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.generic import ABCIndexClass
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_float, is_float_dtype, is_integer, is_scalar
from pandas.core.arrays import IntegerArray, integer_array
from pandas.core.arrays.integer import (
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
)
from pandas.tests.extension.base import BaseOpsUtil
def make_data():
return list(range(8)) + [np.nan] + list(range(10, 98)) + [np.nan] + [99, 100]
@pytest.fixture(
params=[
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
]
)
def dtype(request):
return request.param()
@pytest.fixture
def data(dtype):
return integer_array(make_data(), dtype=dtype)
@pytest.fixture
def data_missing(dtype):
return integer_array([np.nan, 1], dtype=dtype)
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
"""Parametrized fixture giving 'data' and 'data_missing'"""
if request.param == "data":
return data
elif request.param == "data_missing":
return data_missing
def test_dtypes(dtype):
# smoke tests on auto dtype construction
if dtype.is_signed_integer:
assert np.dtype(dtype.type).kind == "i"
else:
assert np.dtype(dtype.type).kind == "u"
assert dtype.name is not None
@pytest.mark.parametrize(
"dtype, expected",
[
(Int8Dtype(), "Int8Dtype()"),
(Int16Dtype(), "Int16Dtype()"),
(Int32Dtype(), "Int32Dtype()"),
(Int64Dtype(), "Int64Dtype()"),
(UInt8Dtype(), "UInt8Dtype()"),
(UInt16Dtype(), "UInt16Dtype()"),
(UInt32Dtype(), "UInt32Dtype()"),
(UInt64Dtype(), "UInt64Dtype()"),
],
)
def test_repr_dtype(dtype, expected):
assert repr(dtype) == expected
def test_repr_array():
result = repr(integer_array([1, None, 3]))
expected = "<IntegerArray>\n[1, <NA>, 3]\nLength: 3, dtype: Int64"
assert result == expected
def test_repr_array_long():
data = integer_array([1, 2, None] * 1000)
expected = (
"<IntegerArray>\n"
"[ 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>, 1,\n"
" ...\n"
" <NA>, 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>]\n"
"Length: 3000, dtype: Int64"
)
result = repr(data)
assert result == expected
class TestConstructors:
def test_uses_pandas_na(self):
a = pd.array([1, None], dtype=pd.Int64Dtype())
assert a[1] is pd.NA
def test_from_dtype_from_float(self, data):
# construct from our dtype & string dtype
dtype = data.dtype
# from float
expected = pd.Series(data)
result = pd.Series(
data.to_numpy(na_value=np.nan, dtype="float"), dtype=str(dtype)
)
tm.assert_series_equal(result, expected)
# from int / list
expected = pd.Series(data)
result = pd.Series(np.array(data).tolist(), dtype=str(dtype))
tm.assert_series_equal(result, expected)
# from int / array
expected = pd.Series(data).dropna().reset_index(drop=True)
dropped = np.array(data.dropna()).astype(np.dtype((dtype.type)))
result = pd.Series(dropped, dtype=str(dtype))
tm.assert_series_equal(result, expected)
class TestArithmeticOps(BaseOpsUtil):
def _check_divmod_op(self, s, op, other, exc=None):
super()._check_divmod_op(s, op, other, None)
def _check_op(self, s, op_name, other, exc=None):
op = self.get_op_from_name(op_name)
result = op(s, other)
# compute expected
mask = s.isna()
# if s is a DataFrame, squeeze to a Series
# for comparison
if isinstance(s, pd.DataFrame):
result = result.squeeze()
s = s.squeeze()
mask = mask.squeeze()
# other array is an Integer
if isinstance(other, IntegerArray):
omask = getattr(other, "mask", None)
mask = getattr(other, "data", other)
if omask is not None:
mask |= omask
# 1 ** na is na, so need to unmask those
if op_name == "__pow__":
mask = np.where(~s.isna() & (s == 1), False, mask)
elif op_name == "__rpow__":
other_is_one = other == 1
if isinstance(other_is_one, pd.Series):
other_is_one = other_is_one.fillna(False)
mask = np.where(other_is_one, False, mask)
# float result type or float op
if (
is_float_dtype(other)
or is_float(other)
or op_name in ["__rtruediv__", "__truediv__", "__rdiv__", "__div__"]
):
rs = s.astype("float")
expected = op(rs, other)
self._check_op_float(result, expected, mask, s, op_name, other)
# integer result type
else:
rs = pd.Series(s.values._data, name=s.name)
expected = op(rs, other)
self._check_op_integer(result, expected, mask, s, op_name, other)
def _check_op_float(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in float dtypes
expected[mask] = np.nan
if "floordiv" in op_name:
# Series op sets 1//0 to np.inf, which IntegerArray does not do (yet)
mask2 = np.isinf(expected) & np.isnan(result)
expected[mask2] = np.nan
tm.assert_series_equal(result, expected)
def _check_op_integer(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in integer dtypes
# to compare properly, we convert the expected
# to float, mask to nans and convert infs
# if we have uints then we process as uints
# then convert to float
# and we ultimately want to create a IntArray
# for comparisons
fill_value = 0
# mod/rmod turn floating 0 into NaN while
# integer works as expected (no nan)
if op_name in ["__mod__", "__rmod__"]:
if is_scalar(other):
if other == 0:
expected[s.values == 0] = 0
else:
expected = expected.fillna(0)
else:
expected[
(s.values == 0).fillna(False)
& ((expected == 0).fillna(False) | expected.isna())
] = 0
try:
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
except ValueError:
expected = expected.astype(float)
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
expected[mask] = pd.NA
# assert that the expected astype is ok
# (skip for unsigned as they have wrap around)
if not s.dtype.is_unsigned_integer:
original = pd.Series(original)
# we need to fill with 0's to emulate what an astype('int') does
# (truncation) for certain ops
if op_name in ["__rtruediv__", "__rdiv__"]:
mask |= original.isna()
original = original.fillna(0).astype("int")
original = original.astype("float")
original[mask] = np.nan
tm.assert_series_equal(original, expected.astype("float"))
# assert our expected result
tm.assert_series_equal(result, expected)
def test_arith_integer_array(self, data, all_arithmetic_operators):
# we operate with a rhs of an integer array
op = all_arithmetic_operators
s = pd.Series(data)
rhs = pd.Series([1] * len(data), dtype=data.dtype)
rhs.iloc[-1] = np.nan
self._check_op(s, op, rhs)
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# scalar
op = all_arithmetic_operators
s = pd.Series(data)
self._check_op(s, op, 1, exc=TypeError)
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
op = all_arithmetic_operators
df = pd.DataFrame({"A": data})
self._check_op(df, op, 1, exc=TypeError)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
# ndarray & other series
op = all_arithmetic_operators
s = pd.Series(data)
other = np.ones(len(s), dtype=s.dtype.type)
self._check_op(s, op, other, exc=TypeError)
def test_arith_coerce_scalar(self, data, all_arithmetic_operators):
op = all_arithmetic_operators
s = pd.Series(data)
other = 0.01
self._check_op(s, op, other)
@pytest.mark.parametrize("other", [1.0, np.array(1.0)])
def test_arithmetic_conversion(self, all_arithmetic_operators, other):
# if we have a float operand we should have a float result
# if that is equal to an integer
op = self.get_op_from_name(all_arithmetic_operators)
s = pd.Series([1, 2, 3], dtype="Int64")
result = op(s, other)
assert result.dtype is np.dtype("float")
def test_arith_len_mismatch(self, all_arithmetic_operators):
# operating with a list-like with non-matching length raises
op = self.get_op_from_name(all_arithmetic_operators)
other = np.array([1.0])
s = pd.Series([1, 2, 3], dtype="Int64")
with pytest.raises(ValueError, match="Lengths must match"):
op(s, other)
@pytest.mark.parametrize("other", [0, 0.5])
def test_arith_zero_dim_ndarray(self, other):
arr = integer_array([1, None, 2])
result = arr + np.array(other)
expected = arr + other
tm.assert_equal(result, expected)
def test_error(self, data, all_arithmetic_operators):
# invalid ops
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
opa = getattr(data, op)
# invalid scalars
msg = (
r"(:?can only perform ops with numeric values)"
r"|(:?IntegerArray cannot perform the operation mod)"
)
with pytest.raises(TypeError, match=msg):
ops("foo")
with pytest.raises(TypeError, match=msg):
ops(pd.Timestamp("20180101"))
# invalid array-likes
with pytest.raises(TypeError, match=msg):
ops(pd.Series("foo", index=s.index))
if op != "__rpow__":
# TODO(extension)
# rpow with a datetimelike coerces the integer array incorrectly
msg = (
"can only perform ops with numeric values|"
"cannot perform .* with this index type: DatetimeArray|"
"Addition/subtraction of integers and integer-arrays "
"with DatetimeArray is no longer supported. *"
)
with pytest.raises(TypeError, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
# 2d
result = opa(pd.DataFrame({"A": s}))
assert result is NotImplemented
msg = r"can only perform ops with 1-d structures"
with pytest.raises(NotImplementedError, match=msg):
opa(np.arange(len(s)).reshape(-1, len(s)))
@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)])
def test_divide_by_zero(self, zero, negative):
# https://github.com/pandas-dev/pandas/issues/27398
a = pd.array([0, 1, -1, None], dtype="Int64")
result = a / zero
expected = np.array([np.nan, np.inf, -np.inf, np.nan])
if negative:
expected *= -1
tm.assert_numpy_array_equal(result, expected)
def test_pow_scalar(self):
a = pd.array([-1, 0, 1, None, 2], dtype="Int64")
result = a ** 0
expected = pd.array([1, 1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** 1
expected = pd.array([-1, 0, 1, None, 2], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** pd.NA
expected = pd.array([None, None, 1, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** np.nan
expected = np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
# reversed
a = a[1:] # Can't raise integers to negative powers.
result = 0 ** a
expected = pd.array([1, 0, None, 0], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = 1 ** a
expected = pd.array([1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = pd.NA ** a
expected = pd.array([1, None, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = np.nan ** a
expected = np.array([1, np.nan, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
def test_pow_array(self):
a = integer_array([0, 0, 0, 1, 1, 1, None, None, None])
b = integer_array([0, 1, None, 0, 1, None, 0, 1, None])
result = a ** b
expected = integer_array([1, 0, None, 1, 1, 1, 1, None, None])
tm.assert_extension_array_equal(result, expected)
def test_rpow_one_to_na(self):
# https://github.com/pandas-dev/pandas/issues/22022
# https://github.com/pandas-dev/pandas/issues/29997
arr = integer_array([np.nan, np.nan])
result = np.array([1.0, 2.0]) ** arr
expected = np.array([1.0, np.nan])
tm.assert_numpy_array_equal(result, expected)
class TestComparisonOps(BaseOpsUtil):
def _compare_other(self, data, op_name, other):
op = self.get_op_from_name(op_name)
# array
result = pd.Series(op(data, other))
expected = pd.Series(op(data._data, other), dtype="boolean")
# fill the nan locations
expected[data._mask] = pd.NA
tm.assert_series_equal(result, expected)
# series
s = pd.Series(data)
result = op(s, other)
expected = op(pd.Series(data._data), other)
# fill the nan locations
expected[data._mask] = pd.NA
expected = expected.astype("boolean")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("other", [True, False, pd.NA, -1, 0, 1])
def test_scalar(self, other, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([1, 0, None], dtype="Int64")
result = op(a, other)
if other is pd.NA:
expected = pd.array([None, None, None], dtype="boolean")
else:
values = op(a._data, other)
expected = pd.arrays.BooleanArray(values, a._mask, copy=True)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(a, pd.array([1, 0, None], dtype="Int64"))
def test_array(self, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([0, 1, 2, None, None, None], dtype="Int64")
b = pd.array([0, 1, None, 0, 1, None], dtype="Int64")
result = op(a, b)
values = op(a._data, b._data)
mask = a._mask | b._mask
expected = pd.arrays.BooleanArray(values, mask)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(
a, pd.array([0, 1, 2, None, None, None], dtype="Int64")
)
tm.assert_extension_array_equal(
b, pd.array([0, 1, None, 0, 1, None], dtype="Int64")
)
def test_compare_with_booleanarray(self, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([True, False, None] * 3, dtype="boolean")
b = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype="Int64")
other = pd.array([False] * 3 + [True] * 3 + [None] * 3, dtype="boolean")
expected = op(a, other)
result = op(a, b)
tm.assert_extension_array_equal(result, expected)
def test_no_shared_mask(self, data):
result = data + 1
assert np.shares_memory(result._mask, data._mask) is False
def test_compare_to_string(self, any_nullable_int_dtype):
# GH 28930
s = pd.Series([1, None], dtype=any_nullable_int_dtype)
result = s == "a"
expected = pd.Series([False, pd.NA], dtype="boolean")
self.assert_series_equal(result, expected)
def test_compare_to_int(self, any_nullable_int_dtype, all_compare_operators):
# GH 28930
s1 = pd.Series([1, None, 3], dtype=any_nullable_int_dtype)
s2 = pd.Series([1, None, 3], dtype="float")
method = getattr(s1, all_compare_operators)
result = method(2)
method = getattr(s2, all_compare_operators)
expected = method(2).astype("boolean")
expected[s2.isna()] = pd.NA
self.assert_series_equal(result, expected)
class TestCasting:
@pytest.mark.parametrize("dropna", [True, False])
def test_construct_index(self, all_data, dropna):
# ensure that we do not coerce to Float64Index, rather
# keep as Index
all_data = all_data[:10]
if dropna:
other = np.array(all_data[~all_data.isna()])
else:
other = all_data
result = pd.Index(integer_array(other, dtype=all_data.dtype))
expected = pd.Index(other, dtype=object)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
def test_astype_index(self, all_data, dropna):
# as an int/uint index to Index
all_data = all_data[:10]
if dropna:
other = all_data[~all_data.isna()]
else:
other = all_data
dtype = all_data.dtype
idx = pd.Index(np.array(other))
assert isinstance(idx, ABCIndexClass)
result = idx.astype(dtype)
expected = idx.astype(object).astype(dtype)
tm.assert_index_equal(result, expected)
def test_astype(self, all_data):
all_data = all_data[:10]
ints = all_data[~all_data.isna()]
mixed = all_data
dtype = Int8Dtype()
# coerce to same type - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype)
expected = pd.Series(ints)
tm.assert_series_equal(result, expected)
# coerce to same other - ints
s = pd.Series(ints)
result = s.astype(dtype)
expected = pd.Series(ints, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype.numpy_dtype)
expected = pd.Series(ints._data.astype(all_data.dtype.numpy_dtype))
tm.assert_series_equal(result, expected)
# coerce to same type - mixed
s = pd.Series(mixed)
result = s.astype(all_data.dtype)
expected = pd.Series(mixed)
tm.assert_series_equal(result, expected)
# coerce to same other - mixed
s = pd.Series(mixed)
result = s.astype(dtype)
expected = pd.Series(mixed, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - mixed
s = pd.Series(mixed)
msg = r"cannot convert to .*-dtype NumPy array with missing values.*"
with pytest.raises(ValueError, match=msg):
s.astype(all_data.dtype.numpy_dtype)
# coerce to object
s = pd.Series(mixed)
result = s.astype("object")
expected = pd.Series(np.asarray(mixed))
tm.assert_series_equal(result, expected)
def test_astype_to_larger_numpy(self):
a = pd.array([1, 2], dtype="Int32")
result = a.astype("int64")
expected = np.array([1, 2], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
a = pd.array([1, 2], dtype="UInt32")
result = a.astype("uint64")
expected = np.array([1, 2], dtype="uint64")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [Int8Dtype(), "Int8", UInt32Dtype(), "UInt32"])
def test_astype_specific_casting(self, dtype):
s = pd.Series([1, 2, 3], dtype="Int64")
result = s.astype(dtype)
expected = pd.Series([1, 2, 3], dtype=dtype)
tm.assert_series_equal(result, expected)
s = pd.Series([1, 2, 3, None], dtype="Int64")
result = s.astype(dtype)
expected = pd.Series([1, 2, 3, None], dtype=dtype)
tm.assert_series_equal(result, expected)
def test_construct_cast_invalid(self, dtype):
msg = "cannot safely"
arr = [1.2, 2.3, 3.7]
with pytest.raises(TypeError, match=msg):
integer_array(arr, dtype=dtype)
with pytest.raises(TypeError, match=msg):
pd.Series(arr).astype(dtype)
arr = [1.2, 2.3, 3.7, np.nan]
with pytest.raises(TypeError, match=msg):
integer_array(arr, dtype=dtype)
with pytest.raises(TypeError, match=msg):
pd.Series(arr).astype(dtype)
@pytest.mark.parametrize("in_series", [True, False])
def test_to_numpy_na_nan(self, in_series):
a = pd.array([0, 1, None], dtype="Int64")
if in_series:
a = pd.Series(a)
result = a.to_numpy(dtype="float64", na_value=np.nan)
expected = np.array([0.0, 1.0, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
result = a.to_numpy(dtype="int64", na_value=-1)
expected = np.array([0, 1, -1], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
result = a.to_numpy(dtype="bool", na_value=False)
expected = np.array([False, True, False], dtype="bool")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("in_series", [True, False])
@pytest.mark.parametrize("dtype", ["int32", "int64", "bool"])
def test_to_numpy_dtype(self, dtype, in_series):
a = pd.array([0, 1], dtype="Int64")
if in_series:
a = pd.Series(a)
result = a.to_numpy(dtype=dtype)
expected = np.array([0, 1], dtype=dtype)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", ["float64", "int64", "bool"])
def test_to_numpy_na_raises(self, dtype):
a = pd.array([0, 1, None], dtype="Int64")
with pytest.raises(ValueError, match=dtype):
a.to_numpy(dtype=dtype)
def test_astype_str(self):
a = pd.array([1, 2, None], dtype="Int64")
expected = np.array(["1", "2", "<NA>"], dtype=object)
tm.assert_numpy_array_equal(a.astype(str), expected)
tm.assert_numpy_array_equal(a.astype("str"), expected)
def test_astype_boolean(self):
# https://github.com/pandas-dev/pandas/issues/31102
a = pd.array([1, 0, -1, 2, None], dtype="Int64")
result = a.astype("boolean")
expected = pd.array([True, False, True, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_frame_repr(data_missing):
df = pd.DataFrame({"A": data_missing})
result = repr(df)
expected = " A\n0 <NA>\n1 1"
assert result == expected
def test_conversions(data_missing):
# astype to object series
df = pd.DataFrame({"A": data_missing})
result = df["A"].astype("object")
expected = pd.Series(np.array([np.nan, 1], dtype=object), name="A")
tm.assert_series_equal(result, expected)
# convert to object ndarray
# we assert that we are exactly equal
# including type conversions of scalars
result = df["A"].astype("object").values
expected = np.array([pd.NA, 1], dtype=object)
tm.assert_numpy_array_equal(result, expected)
for r, e in zip(result, expected):
if pd.isnull(r):
assert pd.isnull(e)
elif is_integer(r):
assert r == e
assert is_integer(e)
else:
assert r == e
assert type(r) == type(e)
def test_integer_array_constructor():
values = np.array([1, 2, 3, 4], dtype="int64")
mask = np.array([False, False, False, True], dtype="bool")
result = IntegerArray(values, mask)
expected = integer_array([1, 2, 3, np.nan], dtype="int64")
tm.assert_extension_array_equal(result, expected)
msg = r".* should be .* numpy array. Use the 'integer_array' function instead"
with pytest.raises(TypeError, match=msg):
IntegerArray(values.tolist(), mask)
with pytest.raises(TypeError, match=msg):
IntegerArray(values, mask.tolist())
with pytest.raises(TypeError, match=msg):
IntegerArray(values.astype(float), mask)
msg = r"__init__\(\) missing 1 required positional argument: 'mask'"
with pytest.raises(TypeError, match=msg):
IntegerArray(values)
@pytest.mark.parametrize(
"a, b",
[
([1, None], [1, np.nan]),
([None], [np.nan]),
([None, np.nan], [np.nan, np.nan]),
([np.nan, np.nan], [np.nan, np.nan]),
],
)
def test_integer_array_constructor_none_is_nan(a, b):
result = integer_array(a)
expected = integer_array(b)
tm.assert_extension_array_equal(result, expected)
def test_integer_array_constructor_copy():
values = np.array([1, 2, 3, 4], dtype="int64")
mask = np.array([False, False, False, True], dtype="bool")
result = IntegerArray(values, mask)
assert result._data is values
assert result._mask is mask
result = IntegerArray(values, mask, copy=True)
assert result._data is not values
assert result._mask is not mask
@pytest.mark.parametrize(
"values",
[
["foo", "bar"],
["1", "2"],
"foo",
1,
1.0,
pd.date_range("20130101", periods=2),
np.array(["foo"]),
[[1, 2], [3, 4]],
[np.nan, {"a": 1}],
],
)
def test_to_integer_array_error(values):
# error in converting existing arrays to IntegerArrays
msg = (
r"(:?.* cannot be converted to an IntegerDtype)"
r"|(:?values must be a 1D list-like)"
)
with pytest.raises(TypeError, match=msg):
integer_array(values)
def test_to_integer_array_inferred_dtype():
# if values has dtype -> respect it
result = integer_array(np.array([1, 2], dtype="int8"))
assert result.dtype == Int8Dtype()
result = integer_array(np.array([1, 2], dtype="int32"))
assert result.dtype == Int32Dtype()
# if values have no dtype -> always int64
result = integer_array([1, 2])
assert result.dtype == Int64Dtype()
def test_to_integer_array_dtype_keyword():
result = integer_array([1, 2], dtype="int8")
assert result.dtype == Int8Dtype()
# if values has dtype -> override it
result = integer_array(np.array([1, 2], dtype="int8"), dtype="int32")
assert result.dtype == Int32Dtype()
def test_to_integer_array_float():
result = integer_array([1.0, 2.0])
expected = integer_array([1, 2])
tm.assert_extension_array_equal(result, expected)
with pytest.raises(TypeError, match="cannot safely cast non-equivalent"):
integer_array([1.5, 2.0])
# for float dtypes, the itemsize is not preserved
result = integer_array(np.array([1.0, 2.0], dtype="float32"))
assert result.dtype == Int64Dtype()
@pytest.mark.parametrize(
"bool_values, int_values, target_dtype, expected_dtype",
[
([False, True], [0, 1], Int64Dtype(), Int64Dtype()),
([False, True], [0, 1], "Int64", Int64Dtype()),
([False, True, np.nan], [0, 1, np.nan], Int64Dtype(), Int64Dtype()),
],
)
def test_to_integer_array_bool(bool_values, int_values, target_dtype, expected_dtype):
result = integer_array(bool_values, dtype=target_dtype)
assert result.dtype == expected_dtype
expected = integer_array(int_values, dtype=target_dtype)
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
"values, to_dtype, result_dtype",
[
(np.array([1], dtype="int64"), None, Int64Dtype),
(np.array([1, np.nan]), None, Int64Dtype),
(np.array([1, np.nan]), "int8", Int8Dtype),
],
)
def test_to_integer_array(values, to_dtype, result_dtype):
# convert existing arrays to IntegerArrays
result = integer_array(values, dtype=to_dtype)
assert result.dtype == result_dtype()
expected = integer_array(values, dtype=result_dtype())
tm.assert_extension_array_equal(result, expected)
def test_cross_type_arithmetic():
df = pd.DataFrame(
{
"A": pd.Series([1, 2, np.nan], dtype="Int64"),
"B":
|
pd.Series([1, np.nan, 3], dtype="UInt8")
|
pandas.Series
|
from typing import List
import datetime as dt
from datetime import timedelta
import requests
import os
import pickle
from typing import Dict, Tuple
import geopandas as gpd
import pandas as pd
import pytz, datetime
from shapely import wkt
from timezonefinderL import TimezoneFinder
from peaky_finders.data_acquisition.train_model import (
LoadCollector,
GEO_COORDS,
CATEGORICAL_FEATURES,
MONTH_TO_SEASON,
)
from peaky_finders.training_pipeline import MODEL_OUTPUT_DIR, MODEL_INPUT_DIR
from peaky_finders.data_acquisition.train_model import GEO_COORDS
ISO_MAP_IDS = {
56669: "MISO",
14725: "PJM",
2775: "CAISO",
13434: "ISONE",
13501: "NYISO",
}
ISO_LIST = ["NYISO", "ISONE", "PJM", "MISO", "CAISO"]
PEAK_DATA_PATH = os.path.join(os.path.dirname(__file__), "historical_peaks")
tz_finder = TimezoneFinder()
def get_iso_map():
iso_df = pd.read_csv("iso_map_final.csv")
iso_df["geometry"] = iso_df["geometry"].apply(wkt.loads)
iso_gdf = gpd.GeoDataFrame(iso_df, crs="EPSG:4326", geometry="geometry")
return iso_gdf
class Predictor:
def __init__(self, iso_name: str, start: str, end: str) -> None:
self.start = start
self.end = end
self.iso_name = iso_name
self.load_collector: LoadCollector = None
def get_load(self, start: str, end: str):
self.load_collector = LoadCollector(self.iso_name, start, end)
def featurize(self):
self.load_collector.engineer_features()
def add_future(self, load: pd.Series) -> pd.Series:
future = pd.date_range(
start=load.index[-1], end=(load.index[-1] + timedelta(days=1)), freq="H"
).to_frame(name="load_MW")
tz_finder = TimezoneFinder()
lon = float(GEO_COORDS[self.iso_name]["lon"])
lat = float(GEO_COORDS[self.iso_name]["lat"])
tz_name = tz_finder.timezone_at(lng=lon, lat=lat)
future["load_MW"] = None
future.index = future.index.tz_convert(tz_name)
return future
def prepare_predictions(self):
self.get_load(self.start, self.end)
load = self.load_collector.load
self.load_collector.engineer_features()
model_input = self.load_collector.load.copy()
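# one-hot encode each categorical feature (dropping the first level) and replace the original column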
for feature in CATEGORICAL_FEATURES:
dummies = pd.get_dummies(
model_input[feature], prefix=feature, drop_first=True
)
model_input = model_input.drop(feature, axis=1)
model_input = pd.concat([model_input, dummies], axis=1)
return model_input, load
def predict_load(self, model_input: pd.DataFrame) -> Tuple[pd.Series, pd.Series]:
model_path = os.path.join(
MODEL_OUTPUT_DIR, (f"xg_boost_{self.iso_name}_load_model.pkl")
)
xgb = pickle.load(open(model_path, "rb"))
if "holiday_True" not in model_input.columns:
model_input["holiday_True"] = 0
X = model_input.drop("load_MW", axis=1).astype(float).dropna()
weekday_cols = [f"weekday_{i + 1}" for i in range(0, 6)]
if len(set(weekday_cols) - set(X.columns)) > 0:
for col in list(set(weekday_cols) - set(X.columns)):
X[col] = 0
predictions = xgb.predict(X[xgb.get_booster().feature_names])
X["predicted_load"] = predictions
return X["predicted_load"]
def predict_load(self,):
for iso in ISO_LIST:
model_input_path = os.path.join(MODEL_INPUT_DIR, MODEL_INPUT_DATA[iso])
model_path = os.path.join(
MODEL_OUTPUT_DIR, (f"xg_boost_{self.iso_name}_load_model.pkl")
)
def predict_all(iso_list: list, start: str, end: str) -> Tuple[Dict[str, pd.DataFrame]]:
historical_vs_predicted = {}
for iso in iso_list:
predictor = Predictor(iso, start, end)
model_input, historical_load = predictor.prepare_predictions()
predictions = predictor.predict_load(model_input)
comparison_df =
|
pd.concat([model_input, predictions], axis=1)
|
pandas.concat
|
#!/usr/bin/env python
# coding: utf-8
import sys
import os
from datetime import datetime, timedelta
import urllib
import matplotlib as mpl
# mpl.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
from scipy.integrate import odeint
import scipy.signal
import pandas as pd
import seaborn as sns
sns.set_context('paper', font_scale=1.3)
red, blue, green = sns.color_palette('Set1', 3)
colors = {'red':red, 'blue':blue, 'green':green}
from click_spinner import spinner
from inference import get_last_NPI_date
from inference import get_first_NPI_date
from inference import params_bounds
from inference import get_model_class
from inference import find_start_day
from model.normal_prior_model import NormalPriorModel
from model.fixed_tau_model import FixedTauModel
from sklearn.metrics import mean_squared_error
def int_to_dt(t):
return pd.to_datetime(start_date) + timedelta(days=t)
def date_to_int(x):
dt = datetime.strptime(x + ' 2020', '%b %d %Y')
td = dt - start_date
return td.days
def date_to_date(x):
dt = datetime.strptime(x + ' 2020', '%b %d %Y')
return dt
def τ_to_string(τ, start_date):
return (
|
pd.to_datetime(start_date)
|
pandas.to_datetime
|
#Generate the February dataset for the boosting approach. This approach has some extra variables, including sums and averages of past values
import gc
gc.collect()
import pandas as pd
import seaborn as sns
import numpy as np
#%% Load the data. With the boosting dataset I did not run the tests of removing a day from March and adding it to February, due to lack of time
#February and March are taken as they come
train = pd.read_parquet(r'C:\Users\argomezja\Desktop\Data Science\MELI challenge\train_data.parquet', engine='pyarrow')
#Convert the object variables to categorical
for col in ['currency', 'listing_type', 'shipping_logistic_type', 'shipping_payment']:
train[col] = train[col].astype('category')
train['date'] = pd.to_datetime(train['date'])
train['day'] =train.date.dt.day
train['month'] = train.date.dt.month
train['listing_type'] = train['listing_type'].factorize()[0]
train['shipping_logistic_type'] = train['shipping_logistic_type'].factorize()[0]
train['shipping_payment'] = train['shipping_payment'].factorize()[0]
febrero = train.loc[train['month']==2]
marzo = train.loc[train['month']==3]
febrero.to_csv('febrero_limpio.csv.gz',index=False, compression="gzip")
marzo.to_csv('marzo_limpio.csv.gz',index=False, compression="gzip")
#%% February
febrero = pd.read_csv(r'C:\Users\argomezja\Desktop\Data Science\MELI challenge\Project MELI\Dataset_limpios\febrero_limpio.csv.gz')
#Better handling of the price: min-max normalize it within each currency
febrero = febrero.assign(current_price=febrero.groupby('currency')['current_price'].transform(lambda x: (x - x.min()) / (x.max() - x.min())))
subtest1 = febrero[['sku', 'day', 'sold_quantity']]
subtest1= subtest1.pivot_table(index = 'sku', columns= 'day', values = 'sold_quantity').add_prefix('sales')
subtest2 = febrero[['sku', 'day', 'current_price']]
subtest2= subtest2.pivot_table(index = 'sku', columns= 'day', values = 'current_price').add_prefix('price')
subtest3 = febrero[['sku', 'day', 'minutes_active']]
subtest3= subtest3.pivot_table(index = 'sku', columns= 'day', values = 'minutes_active').add_prefix('active_time')
subtest4 = febrero[['sku', 'day', 'listing_type']]
subtest4= subtest4.pivot_table(index = 'sku', columns= 'day', values = 'listing_type').add_prefix('listing_type')
subtest6 = febrero[['sku', 'day', 'shipping_logistic_type']]
subtest6= subtest6.pivot_table(index = 'sku', columns= 'day', values = 'shipping_logistic_type').add_prefix('shipping_logistic_type')
subtest7 = febrero[['sku', 'day', 'shipping_payment']]
subtest7= subtest7.pivot_table(index = 'sku', columns= 'day', values = 'shipping_payment').add_prefix('shipping_payment')
final = pd.merge(subtest1, subtest2, left_index=True, right_index=True )
final = pd.merge(final, subtest3, left_index=True, right_index=True)
final = pd.merge(final, subtest4, left_index=True, right_index=True)
final =
|
pd.merge(final, subtest6, left_index=True, right_index=True)
|
pandas.merge
|
import pandas as pd
import numpy as np
index = ['Mory', 'Ann']
columns = ['Windy', 'Sunny', 'Snowy', 'Thundery', 'Soild', 'Lighting']
data = {
'Mory': [2.0, 4.0, 6.0, 7.0, 6.0, 5.0],
'Ann': [1.0, 5.0, 1.0, 1.0, 1.0, 1.0],
}
df = pd.DataFrame(index=index, columns=columns, dtype=np.float64)
for (k, v) in data.items():
df.T[k] = v
print(df)
######## demo2
data = {
'Name': ['Mory', 'Ann', 'Jenny'],
'Dream': ['Become a leader', 'Maybe world will enlightened', 'Everyone in happiness'],
'Level': [2.0, 5.0, 2.5]
}
df_surpass = pd.DataFrame(data=data, index=[1, 2, 3])
ann = df_surpass.iloc[1]
mory = df_surpass.iloc[0]
df_surpass.loc[4] = 'Demon', 'Know myself', 3.5  # values follow the column order: Name, Dream, Level
print(df_surpass)
df_surpass.sort_values(by='Level', ascending=False)
surpass_type = pd.Series(
data=['light', 'demon', 'snow', np.nan],
index=[2, 1, 3, 4]
)
df_surpass['SType'] = surpass_type
print(df_surpass)
df_surpass['SType'].fillna('ordinary', inplace=True)
print(df_surpass)
df_surpass['Level'] = df_surpass['Level'].map(lambda x: min(5, x+1))
print(df_surpass)
# demo dummy variable
data = pd.DataFrame(columns=['weekday'])
data.weekday = [i for i in range(1, 8)] * 3
data['score'] = 1.0
# perform dummy
dummy_data = pd.get_dummies(data.weekday, prefix='weekday')
# merge two
mergedata = pd.concat([data.drop(['weekday'], axis=1), dummy_data], axis=1)
# another merge method
data.join(dummy_data)
# Excel
excel = pd.ExcelWriter('demo.xlsx')
data.to_excel(excel, 'dummy')
excel.close()
# read back
dummy =
|
pd.read_excel('demo.xlsx', 'dummy')
|
pandas.read_excel
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
if len(sys.argv) < 2:
print("No log file found")
exit(-1)
nr_graphs = 5
algo_file = sys.argv[1]
algos =
|
pd.read_csv(algo_file)
|
pandas.read_csv
|
from io import DEFAULT_BUFFER_SIZE
import json
import pandas as pd
import numpy as np
class Recommend:
def __init__(self, artists_per_cluster):
folder = 'precomp_diff_clusters/precomp_' + str(artists_per_cluster)
with open(folder + '/s_info.json') as sif:
self.s_info = json.load(sif)
with open(folder + '/t_info.json') as tif:
self.t_info = json.load(tif)
with open(folder + '/candidates_scores.json') as csf:
self.candidates_scores = json.load(csf)
def get_recs(self, artists):
all_recs = {}
for artist in artists:
all_recs[artist] = {'name': self.s_info[artist]['spotify name']}
df_recs =
|
pd.DataFrame(columns=['id', 'name', 'score'])
|
pandas.DataFrame
|
"""
Program: preprocessing.py
Takes in data files and standardises them by applying a coordinate transform and converting all files to csv.
This is an ad hoc step so is best run line by line!
@author <NAME>
"""
import pandas as pd
import pyproj
import matplotlib.pyplot as plt
# read data in
print("Reading data in...")
meas = pd.read_csv("../data/original_data/measurement.txt", sep=' ')
bore = pd.read_csv("../data/original_data/borehole.txt", sep=' ')
foss = pd.read_csv("../data/original_data/fossil.txt", sep=' ')
rock = pd.read_csv("../data/original_data/rock.txt", sep=' ')
#coordinate transformation - BNG to latlon
print("Converting to lat/lon...")
bng=pyproj.Proj("+init=EPSG:27700")
lon_bore, lat_bore = bng(bore['X'].values, bore['Y'].values, inverse=True)
lon_rock, lat_rock = bng(rock['X'].values, rock['Y'].values, inverse=True)
bore=bore.drop('X', axis=1)
bore=bore.drop('Y', axis=1)
bore['Latitude_WGS84']=pd.Series(lat_bore, index=bore.index)
bore['Longitude_WGS84']=pd.Series(lon_bore, index=bore.index)
rock=rock.drop('X', axis=1)
rock=rock.drop('Y', axis=1)
rock['Latitude_WGS84']=pd.Series(lat_rock, index=rock.index)
rock['Longitude_WGS84']=
|
pd.Series(lon_rock, index=rock.index)
|
pandas.Series
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 15 14:33:01 2018
@author: AyushRastogi
"""
# Extracting the cumulative 60, 90, 180, 365 and 730 day production for Oil, Gas and Water
import pandas as pd
import os
os.getcwd() # Get the default working directory
path = r'C:\Users\ayush\Desktop\Meetup2_All Files'
os.chdir(path)
# Reading the input file with cumulative oil/gas/water production and cum days
df = pd.read_csv(path+r'\Cumulative_Production_0619.csv')
# The process is repeated for Oil, Gas and Water
# --------------------------OIL Production------------------
# Creating the columns and filling them with 0
df['60_Interpol_OIL'] = 0
df['90_Interpol_OIL'] = 0
df['180_Interpol_OIL'] = 0
df['365_Interpol_OIL'] = 0
df['730_Interpol_OIL'] = 0
# For loop which runs through every row (until last but 1). If the cum_days value we need (60/90/180/365/730) fall in between the cell value, it uses linear
# interpolation and calculates the cumulative sum for that particular value
# y = y1 + ((y2-y1)*(x-x1)/(x2-x1)), where y = required production value, and x = 365 (Example)
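# Illustrative sketch (not part of the original script): the interpolation above written as a
# standalone helper, with made-up example numbers for the 365-day case.
def interpolate_cumulative(x, x1, x2, y1, y2):
    # y = y1 + (y2 - y1) * (x - x1) / (x2 - x1)
    return y1 + (y2 - y1) * (x - x1) / (x2 - x1)
# e.g. cum_days of 300 and 400 bracketing day 365, with cum_oil of 5000 and 6200, gives
# interpolate_cumulative(365, 300, 400, 5000, 6200) == 5780.0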
for count in range(len(df['APINO'])-1): #loop running through the entire column
if (df['cum_days'][count] < 60 and df['cum_days'][count+1] > 60):
df['60_Interpol_OIL'][count] = df['cum_oil'][count-1] + ((df['cum_oil'][count+1]) - df['cum_oil'][count-1])*(60 - df['cum_days'][count-1])/(df['cum_days'][count+1]-df['cum_days'][count-1])
elif (df['cum_days'][count] == 60): # if the required value is already present, simply copy it
df['60_Interpol_OIL'][count] = df['cum_oil'][count]
pd.to_numeric(df['60_Interpol_OIL'], errors='coerce') # Convert the column values to numbers
df['60_Interpol_OIL'] = df['60_Interpol_OIL'].apply(lambda x: '%.1f' % x).values.tolist() # Getting only 1 decimal place and adding values to a list
df[df['60_Interpol_OIL'] != '0.0'] # Getting rid of all the values which = 0.0
df['60_Interpol_OIL'].astype(float) # Convert the datatype to float (better since its a calculation)
for count in range(len(df['APINO'])-1):
if (df['cum_days'][count] < 90 and df['cum_days'][count+1] > 90):
df['90_Interpol_OIL'][count] = df['cum_oil'][count-1] + ((df['cum_oil'][count+1]) - df['cum_oil'][count-1])*(90 - df['cum_days'][count-1])/(df['cum_days'][count+1]-df['cum_days'][count-1])
elif (df['cum_days'][count] == 90):
df['90_Interpol_OIL'][count] = df['cum_oil'][count]
pd.to_numeric(df['90_Interpol_OIL'], errors='coerce')
df['90_Interpol_OIL'] = df['90_Interpol_OIL'].apply(lambda x: '%.1f' % x).values.tolist()
df[df['90_Interpol_OIL'] != '0.0']
df['90_Interpol_OIL'].astype(float)
for count in range(len(df['APINO'])-1):
if (df['cum_days'][count] < 180 and df['cum_days'][count+1] > 180):
df['180_Interpol_OIL'][count] = df['cum_oil'][count-1] + ((df['cum_oil'][count+1]) - df['cum_oil'][count-1])*(180 - df['cum_days'][count-1])/(df['cum_days'][count+1]-df['cum_days'][count-1])
elif (df['cum_days'][count] == 180):
df['180_Interpol_OIL'][count] = df['cum_oil'][count]
pd.to_numeric(df['180_Interpol_OIL'], errors='coerce')
df['180_Interpol_OIL'] = df['180_Interpol_OIL'].apply(lambda x: '%.1f' % x).values.tolist()
df[df['180_Interpol_OIL'] != '0.0']
df['180_Interpol_OIL'].astype(float)
for count in range(len(df['APINO'])-1):
if (df['cum_days'][count] < 365 and df['cum_days'][count+1] > 365):
df['365_Interpol_OIL'][count] = df['cum_oil'][count-1] + ((df['cum_oil'][count+1]) - df['cum_oil'][count-1])*(365 - df['cum_days'][count-1])/(df['cum_days'][count+1]-df['cum_days'][count-1])
elif (df['cum_days'][count] == 365):
df['365_Interpol_OIL'][count] = df['cum_oil'][count]
pd.to_numeric(df['365_Interpol_OIL'], errors='coerce')
df['365_Interpol_OIL'] = df['365_Interpol_OIL'].apply(lambda x: '%.1f' % x).values.tolist()
df[df['365_Interpol_OIL'] != '0.0']
df['365_Interpol_OIL'].astype(float)
for count in range(len(df['APINO'])-1):
if (df['cum_days'][count] < 730 and df['cum_days'][count+1] > 730):
df['730_Interpol_OIL'][count] = df['cum_oil'][count-1] + ((df['cum_oil'][count+1]) - df['cum_oil'][count-1])*(730 - df['cum_days'][count-1])/(df['cum_days'][count+1]-df['cum_days'][count-1])
elif (df['cum_days'][count] == 730):
df['730_Interpol_OIL'][count] = df['cum_oil'][count]
pd.to_numeric(df['730_Interpol_OIL'], errors='coerce')
df['730_Interpol_OIL'] = df['730_Interpol_OIL'].apply(lambda x: '%.1f' % x).values.tolist()
df[df['730_Interpol_OIL'] != '0.0']
df['730_Interpol_OIL'].astype(float)
# --------------------------GAS Production------------------
df['60_Interpol_GAS'] = 0
df['90_Interpol_GAS'] = 0
df['180_Interpol_GAS'] = 0
df['365_Interpol_GAS'] = 0
df['730_Interpol_GAS'] = 0
for count in range(len(df['APINO'])-1):
if (df['cum_days'][count] < 60 and df['cum_days'][count+1] > 60):
df['60_Interpol_GAS'][count] = df['cum_gas'][count-1] + ((df['cum_gas'][count+1]) - df['cum_gas'][count-1])*(60 - df['cum_days'][count-1])/(df['cum_days'][count+1]-df['cum_days'][count-1])
elif (df['cum_days'][count] == 60):
df['60_Interpol_GAS'][count] = df['cum_gas'][count]
|
pd.to_numeric(df['60_Interpol_GAS'], errors='coerce')
|
pandas.to_numeric
|
# Author: <NAME>
# Created: 7/23/20, 8:43 PM
import logging
from functools import reduce
import pandas as pd
from typing import *
from mg_general import Environment
from mg_general.general import fix_names
log = logging.getLogger(__name__)
def all_columns_equal(df, columns=None):
# type: (pd.DataFrame, List[str]) -> pd.Series
"""Return True/False series shown rows where all columns have the same value"""
if columns is None:
columns = df.columns.values
# create condition list
conditions = list()
for i in range(1, len(columns)):
conditions.append(f"(df[{columns[i-1]}] == df[{columns[i]}])")
return eval(" & ".join(conditions))
def create_joint_reference_from_list(df, list_reference):
# type: (pd.DataFrame, List[str]) -> str
reference = "=".join(list_reference)
reference_rows = all_columns_equal(df, [f"'5p-{r}'" for r in list_reference])
reference_values = df.loc[reference_rows, f"5p-{list_reference[0]}"]
df.loc[reference_rows, f"5p-{reference}"] = reference_values
reference_rows = all_columns_equal(df, [f"'3p-{r}'" for r in list_reference])
reference_values = df.loc[reference_rows, f"3p-{list_reference[0]}"]
df.loc[reference_rows, f"3p-{reference}"] = reference_values
list_partial = [f"'Partial3p-{r}'" for r in list_reference if f"Partial3p-{r}" in df.columns.values]
if len(list_partial) > 0:
reference_rows = all_columns_equal(df, list_partial)
reference_values = df.loc[reference_rows, f"Partial3p-{list_reference[0]}"]
df.loc[reference_rows, f"Partial3p-{reference}"] = reference_values
list_partial = [f"'Partial5p-{r}'" for r in list_reference if f"Partial5p-{r}" in df.columns.values]
if len(list_partial) > 0:
reference_rows = all_columns_equal(df, list_partial)
reference_values = df.loc[reference_rows, f"Partial5p-{list_reference[0]}"]
df.loc[reference_rows, f"Partial5p-{reference}"] = reference_values
return reference
def update_dataframe_with_stats(df, tools, reference):
# type: (pd.DataFrame, List[str], List[str]) -> pd.DataFrame
for t in tools:
tag_5p = f"5p:Match({t}={reference})"
tag_3p = f"3p:Match({t}={reference})"
# match by 5prime end
df[tag_5p] = df[f"5p-{t}"] == df[f"5p-{reference}"]
# all tools have a prediction
df[tag_3p] = df[[f"5p-{t}", f"5p-{reference}"]].notnull().all(axis=1)
df[f"Length({t})"] = df.apply(
lambda r: abs(r[f"3p-{t}"] - r[f"5p-{t}"]) + 1,
axis=1
)
df[f"Length({reference})"] = df.apply(
lambda r: abs(r[f"3p-{reference}"] - r[f"5p-{reference}"]) + 1,
axis=1
)
# remove short
if f"Partial5p-{reference}" in df.columns:
before = len(df)
df = df[~((df[f"Length({reference})"] < 90) & (
(df[f"Partial5p-{reference}"]) | (df[f"Partial3p-{reference}"]))
)]
after = len(df)
log.info(f"Filtered {before-after} short partial genes")
return df
def tidy_genome_level(env, df):
# type: (Environment, pd.DataFrame) -> pd.DataFrame
"""Creates a tidy dataframe for all metrics"""
values_to_melt = ["Match", "Number of Error", "Number of Found", "Number of Match", "Number of Predictions",
"Number of IC5p Match", "Number of IC5p Found", "Number of IC3p Match", "Number of IC3p Found",
"Number of Comp Match", "Number of Comp Found", "Precision", "Recall", "WR", "Number of Missed",
"Sensitivity", "Specificity", "Error Rate",
"IC3p Match", "IC5p Match", "Comp Match"]
df_total = list()
list_index = [x for x in ["Genome", "Clade", "Chunk Size", "Genome GC", "Number in Reference"] if x in df.columns]
for v in values_to_melt:
value_vars = [x for x in df.columns if v == x.split("(")[0].strip()]
if len(value_vars) == 0:
continue
df_curr = pd.melt(df, id_vars=list_index,
value_vars=value_vars,
var_name="Combination", value_name=v)
df_curr["Tool"] = df_curr["Combination"].apply(lambda x: x.split("(")[1].split(",")[0].upper())
df_total.append(df_curr)
return reduce(lambda df1, df2: pd.merge(df1, df2, on=list_index + ["Tool"],
how="outer"), df_total)
def _helper_df_joint_reference(df, reference):
# create joint references when possible
if len(reference) > 1:
reference = create_joint_reference_from_list(df, reference)
else:
reference = reference[0]
return reference
def check_tools_and_reference_lists(df, tools, ref_5p, ref_3p):
# type: (pd.DataFrame, Union[List[str], None], List[str], List[str]) -> List[str]
"""Verifies references exist in dataframe, and returns cleaned up tools list (without references in it)"""
# get tools list
# If not provided, extract from df
# Make sure it doesn't contain any references
all_tools = sorted(set([x.split("-")[1] for x in df.columns if "5p-" in x]))
# check that references exist
for list_ref in [ref_5p, ref_3p]:
for ref in list_ref:
if ref not in all_tools:
raise ValueError(f"Unknown reference {ref}")
if tools is None:
tools = all_tools
tools = sorted(set(tools).difference({*ref_5p}).difference({*ref_3p}))
return tools
def read_small_stats_per_gene(pf_data, parse_names=False):
# type: (pd.DataFrame, bool) -> pd.DataFrame
df =
|
pd.read_csv(pf_data)
|
pandas.read_csv
|
import pandas as pd
import boto3
import json
import sys
def load_configuration():
"""
Load configuration parameters for AWS from dwh.cfg.
"""
import configparser
config = configparser.ConfigParser()
config.read_file(open('dwh.cfg'))
config_dict = dict()
config_dict['KEY'] = config.get('AWS','KEY')
config_dict['SECRET'] = config.get('AWS','SECRET')
config_dict['CLUSTER_TYPE'] = config.get("DWH","CLUSTER_TYPE")
config_dict['NUM_NODES'] = config.get("DWH","NUM_NODES")
config_dict['NODE_TYPE'] = config.get("DWH","NODE_TYPE")
config_dict['IAM_ROLE_NAME'] = config.get("DWH", "IAM_ROLE_NAME")
config_dict['CLUSTER_IDENTIFIER'] = config.get("DWH","CLUSTER_IDENTIFIER")
config_dict['DB_NAME'] = config.get("CLUSTER","DB_NAME")
config_dict['DB_USER'] = config.get("CLUSTER","DB_USER")
config_dict['DB_PASSWORD'] = config.get("CLUSTER","DB_PASSWORD")
config_dict['DB_PORT'] = config.get("CLUSTER","DB_PORT")
config_dict['POLICY'] = config.get("IAM_ROLE", "POLICY")
config_dict['ARN'] = config.get("IAM_ROLE", "ARN")
return config_dict
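# Expected dwh.cfg layout, inferred from the keys read above (values omitted here):
#   [AWS]
#   KEY=...
#   SECRET=...
#   [DWH]
#   CLUSTER_TYPE=...
#   NUM_NODES=...
#   NODE_TYPE=...
#   IAM_ROLE_NAME=...
#   CLUSTER_IDENTIFIER=...
#   [CLUSTER]
#   DB_NAME=...
#   DB_USER=...
#   DB_PASSWORD=...
#   DB_PORT=...
#   [IAM_ROLE]
#   POLICY=...
#   ARN=...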
def create_clients(config_dict):
"""
Create clients for EC2, S3, IAM and Redshift
"""
ec2 = boto3.resource('ec2',
region_name="us-east-1",
aws_access_key_id=config_dict['KEY'],
aws_secret_access_key=config_dict['SECRET']
)
s3 = boto3.resource('s3',
region_name="us-east-1",
aws_access_key_id=config_dict['KEY'],
aws_secret_access_key=config_dict['SECRET']
)
iam = boto3.client('iam',
aws_access_key_id=config_dict['KEY'],
aws_secret_access_key=config_dict['SECRET'],
region_name='us-east-1'
)
redshift = boto3.client('redshift',
region_name="us-east-1",
aws_access_key_id=config_dict['KEY'],
aws_secret_access_key=config_dict['SECRET']
)
print('Success creating clients.')
return ec2, s3, iam, redshift
def create_iam_role(config_dict, iam):
"""
Create iam role.
"""
try:
print('Creating a new IAM Role')
dwhRole = iam.create_role(Path='/',
RoleName=config_dict['IAM_ROLE_NAME'],
Description = "Allows Redshift clusters to call AWS services on your behalf.",
AssumeRolePolicyDocument=json.dumps(
{'Statement': [{'Action': 'sts:AssumeRole',
'Effect': 'Allow',
'Principal': {'Service': 'redshift.amazonaws.com'}}],
'Version': '2012-10-17'})
)
except Exception as e:
print(e)
try:
print('Creating role policy')
iam.attach_role_policy(RoleName=config_dict['IAM_ROLE_NAME'],
PolicyArn=config_dict['ARN']
)['ResponseMetadata']['HTTPStatusCode']
except Exception as e:
print(e)
roleArn = iam.get_role(RoleName=config_dict['IAM_ROLE_NAME'])['Role']['Arn']
print('role arn: ', roleArn)
return roleArn
def open_tcp_port(myClusterProps):
try:
vpc = ec2.Vpc(id=myClusterProps['VpcId'])
defaultSg = list(vpc.security_groups.all())[-1]
print(defaultSg)
defaultSg.authorize_ingress(
GroupName= defaultSg.group_name,
CidrIp='0.0.0.0/0',
IpProtocol='TCP',
FromPort=int(config_dict['DB_PORT']),
ToPort=int(config_dict['DB_PORT'])
)
except Exception as e:
print(e)
def create_cluster(config_dict, roleArn, redshift):
try:
print('Creating redshift cluster')
response = redshift.create_cluster(ClusterType=config_dict['CLUSTER_TYPE'],
NodeType=config_dict['NODE_TYPE'],
NumberOfNodes=int(config_dict['NUM_NODES']),
DBName=config_dict['DB_NAME'],
ClusterIdentifier=config_dict['CLUSTER_IDENTIFIER'],
MasterUsername=config_dict['DB_USER'],
MasterUserPassword=config_dict['DB_PASSWORD'],
IamRoles=[roleArn]
)
myClusterProps = redshift.describe_clusters(ClusterIdentifier=config_dict['CLUSTER_IDENTIFIER'])['Clusters'][0]
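# only open the TCP port once the cluster reports itself as available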
if myClusterProps['ClusterStatus'] == 'available':
open_tcp_port(myClusterProps)
except Exception as e:
print(e)
def prettyRedshiftProps(props):
pd.set_option('display.max_colwidth', -1)
keysToShow = ["ClusterIdentifier", "NodeType", "ClusterStatus", "MasterUsername", "DBName", "Endpoint", "NumberOfNodes", 'VpcId']
x = [(k, v) for k,v in props.items() if k in keysToShow]
return
|
pd.DataFrame(data=x, columns=["Key", "Value"])
|
pandas.DataFrame
|
import pandas as pd
import os
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.dates as mdates
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
root = '/Users/Gabe/Downloads/thesis spreadies'
# sg_1k_1k = pd.read_csv(os.path.join(root,'we_depletions_sg_SWHC1000_INIDEP1000_timeseries.csv'), parse_dates=True)
# sg_600_600 = pd.read_csv(os.path.join(root, 'we_depletions_sg_SWHC600_INIDEP600_timeseries.csv'), parse_dates=True)
# sg_600_300 = pd.read_csv(os.path.join(root,'we_depletions_sg_SWHC600_INIDEP300_timeseries.csv'), parse_dates=True)
# sg_600_150 = pd.read_csv(os.path.join(root, 'we_depletions_sg_SWHC600_INIDEP150_timeseries.csv'), parse_dates=True)
#
# sg_300_300 = pd.read_csv(os.path.join(root, 'we_depletions_sg_SWHC300_INIDEP300_timeseries.csv'), parse_dates=True)
# sg_300_150 = pd.read_csv(os.path.join(root,'we_depletions_sg_SWHC300_INIDEP150_timeseries.csv'), parse_dates=True)
# sg_300_0 = pd.read_csv(os.path.join(root, 'we_depletions_sg_SWHC300_INIDEP0_timeseries.csv'), parse_dates=True)
#
# sg_150_150 = pd.read_csv(os.path.join(root, 'we_depletions_sg_SWHC150_INIDEP150_timeseries.csv'), parse_dates=True)
# sg_150_75 = pd.read_csv(os.path.join(root,'we_depletions_sg_SWHC150_INIDEP75_timeseries.csv'), parse_dates=True)
# sg_150_0 = pd.read_csv(os.path.join(root, 'we_depletions_sg_SWHC150_INIDEP0_timeseries.csv'), parse_dates=True)
#
# print sg_1k_1k.head()
#
# vcm_600_600 = pd.read_csv(os.path.join(root,'we_depletions_vcm_SWHC600_INIDEP600_timeseries.csv'), parse_dates=True)
# vcm_600_300 = pd.read_csv(os.path.join(root,'we_depletions_vcm_SWHC600_INIDEP300_timeseries.csv'), parse_dates=True)
# vcm_600_150 = pd.read_csv(os.path.join(root,'we_depletions_vcm_SWHC600_INIDEP150_timeseries.csv'), parse_dates=True)
# vcm_300_300 = pd.read_csv(os.path.join(root, 'ext_we_depletions_vcm_SWHC300_INIDEP300.csv'), parse_dates=True)
# vcm_300_150 = pd.read_csv(os.path.join(root,'ext_we_depletions_vcm_SWHC300_INIDEP150.csv'), parse_dates=True)
# vcm_300_0 = pd.read_csv(os.path.join(root, 'ext_we_depletions_vcm_SWHC300_INIDEP0.csv'), parse_dates=True)
# plt.plot([1,2,3], [3, 5,7])
# plt.show()
vcm_600_600 = pd.read_csv(os.path.join(root, 'ext_we_depletions_vcm_SWHC600_INIDEP600.csv'), parse_dates=True)
vcm_600_300 = pd.read_csv(os.path.join(root, 'ext_we_depletions_vcm_SWHC600_INIDEP300.csv'), parse_dates=True)
vcm_600_000 = pd.read_csv(os.path.join(root, 'ext_we_depletions_vcm_SWHC600_INIDEP0.csv'), parse_dates=True)
vcm_300_300 = pd.read_csv(os.path.join(root, 'ext_we_depletions_vcm_SWHC300_INIDEP300.csv'), parse_dates=True)
vcm_300_150 = pd.read_csv(os.path.join(root, 'ext_we_depletions_vcm_SWHC300_INIDEP150.csv'), parse_dates=True)
vcm_300_000 = pd.read_csv(os.path.join(root, 'ext_we_depletions_vcm_SWHC300_INIDEP0.csv'), parse_dates=True)
sg_600_600 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC600_INIDEP600.csv'), parse_dates=True)
sg_600_300 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC600_INIDEP300.csv'), parse_dates=True)
sg_600_000 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC600_INIDEP0.csv'), parse_dates=True)
sg_300_300 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC300_INIDEP300.csv'), parse_dates=True)
sg_300_150 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC300_INIDEP150.csv'), parse_dates=True)
sg_300_000 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC300_INIDEP0.csv'), parse_dates=True)
sg_150_150 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC150_INIDEP150.csv'), parse_dates=True)
sg_150_075 = pd.read_csv(os.path.join(root,'ext_we_depletions_sg_SWHC150_INIDEP75.csv'), parse_dates=True)
sg_150_000 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC150_INIDEP0.csv'), parse_dates=True)
sg_50_050 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC50_INIDEP50.csv'), parse_dates=True)
sg_50_025 = pd.read_csv(os.path.join(root,'ext_we_depletions_sg_SWHC50_INIDEP25.csv'), parse_dates=True)
sg_50_000 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC50_INIDEP0.csv'), parse_dates=True)
vcm_150_150 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC150_INIDEP150.csv'), parse_dates=True)
vcm_150_075 = pd.read_csv(os.path.join(root,'ext_we_depletions_sg_SWHC150_INIDEP75.csv'), parse_dates=True)
vcm_150_000 = pd.read_csv(os.path.join(root, 'ext_we_depletions_sg_SWHC150_INIDEP0.csv'), parse_dates=True)
# # plt.plot([1,2,3], [3, 5,7])
# # plt.show()
#
# vcm_600_600 = pd.read_csv(os.path.join(root,'we_depletions_vcm_SWHC600_INIDEP600_timeseries.csv'), parse_dates=True)
# vcm_600_300 = pd.read_csv(os.path.join(root,'we_depletions_vcm_SWHC600_INIDEP300_timeseries.csv'), parse_dates=True)
# vcm_600_150 = pd.read_csv(os.path.join(root,'we_depletions_vcm_SWHC600_INIDEP150_timeseries.csv'), parse_dates=True)
#
# vcm_300_300 = pd.read_csv(os.path.join(root, 'we_depletions_vcm_SWHC300_INIDEP300_timeseries.csv'), parse_dates=True)
# vcm_300_150 = pd.read_csv(os.path.join(root,'we_depletions_vcm_SWHC300_INIDEP150_timeseries.csv'), parse_dates=True)
# vcm_300_0 = pd.read_csv(os.path.join(root, 'we_depletions_vcm_SWHC300_INIDEP0_timeseries.csv'), parse_dates=True)
# print(sg_600_600['date'])
#
# plt.plot(sg_600_150['date'], sg_600_150['depletion'], label='sg')
# # plt.grid()
# plt.legend()
# plt.show()
# # plt.savefig(os.path.join(root, 'testfig.png'))
years = mdates.YearLocator()
months = mdates.MonthLocator()
years_fmt = mdates.DateFormatter('%Y')
### ===== SG 50 ======
fig, (ax1, ax2) = plt.subplots(nrows=2, sharey=False, sharex=True)
ax1.plot(pd.to_datetime(sg_50_000['date']), sg_50_000['depletion'], color='r', label='swhc_50_inidep_000', linewidth=5)
ax1.plot(pd.to_datetime(sg_50_025['date']), sg_50_025['depletion'], color='b', label='swhc_50_inidep_025', linewidth=3)
ax1.plot(pd.to_datetime(sg_50_050['date']), sg_50_050['depletion'], color='g', label='swhc_50_inidep_050', linewidth=1)
ax1.set_xlabel('Date')
ax1.set_ylabel('Depletion (mm)')
ax1.set_title('Depletion with Given SWHC and Initial Depletion - Sevilleta')
ax1.legend()
ax1.grid()
ax2.plot(pd.to_datetime(sg_50_000['date']), sg_50_000['recharge_ro'], color='r', label='swhc_50_inidep_000', linewidth=3)
ax2.plot(pd.to_datetime(sg_50_025['date']), sg_50_025['recharge_ro'], color='b', label='swhc_50_inidep_025', linewidth=2)
ax2.plot(pd.to_datetime(sg_50_050['date']), sg_50_050['recharge_ro'], color='g', label='swhc_50_inidep_050', linewidth=1)
ax2.set_xlabel('Date')
ax2.set_ylabel('Recharge (mm)')
ax2.legend()
ax2.grid()
ax2.set_title('Recharge with Given SWHC and Initial Depletion - Sevilleta')
plt.subplots_adjust(hspace=1)
plt.show()
### ===== vcm 150 ======
fig, (ax1, ax2) = plt.subplots(nrows=2, sharey=False, sharex=True)
ax1.plot(pd.to_datetime(vcm_150_000['date']), vcm_150_000['depletion'], color='r', label='swhc_150_inidep_000', linewidth=5)
ax1.plot(pd.to_datetime(vcm_150_075['date']), vcm_150_075['depletion'], color='b', label='swhc_150_inidep_075', linewidth=3)
ax1.plot(pd.to_datetime(vcm_150_150['date']), vcm_150_150['depletion'], color='g', label='swhc_150_inidep_150', linewidth=1)
ax1.set_title('Depletion with Given SWHC and Initial Depletion - <NAME>')
ax1.grid()
ax1.legend()
ax2.plot(pd.to_datetime(vcm_150_000['date']), vcm_150_000['recharge_ro'], color='r', label='swhc_150_inidep_000', linewidth=5)
ax2.plot(pd.to_datetime(vcm_150_075['date']), vcm_150_075['recharge_ro'], color='b', label='swhc_150_inidep_075', linewidth=3)
ax2.plot(pd.to_datetime(vcm_150_150['date']), vcm_150_150['recharge_ro'], color='g', label='swhc_150_inidep_150', linewidth=1)
ax2.set_title('Recharge with Given SWHC and Initial Depletion - <NAME>')
ax2.grid()
ax2.legend()
plt.subplots_adjust(hspace=1)
plt.show()
### ===== SG 600 ======
fig, (ax1, ax2) = plt.subplots(nrows=2, sharey=False, sharex=True)
ax1.plot(pd.to_datetime(sg_600_000['date']), sg_600_000['depletion'], color='r', label='swhc_600_inidep_000', linewidth=5)
ax1.plot(pd.to_datetime(sg_600_300['date']), sg_600_300['depletion'], color='b', label='swhc_600_inidep_300', linewidth=3)
ax1.plot(pd.to_datetime(sg_600_600['date']), sg_600_600['depletion'], color='g', label='swhc_600_inidep_600', linewidth=1)
ax1.set_xlabel('Date')
ax1.set_ylabel('Depletion (mm)')
ax1.set_title('Depletion with Given SWHC and Initial Depletion - Sevilleta')
ax1.legend()
ax1.grid()
ax2.plot(
|
pd.to_datetime(sg_600_000['date'])
|
pandas.to_datetime
|
# install imblearn package to a specific anaconda enviroment boston_house_price
# $ conda install -n boston_house_price -c conda-forge imbalanced-learn
# update imblearn package to a specific anaconda enviroment boston_house_price
# $ conda update -n boston_house_price -c glemaitre imbalanced-learn
# =============================================================
# Import libraries necessary for this project
import numpy as np
import pandas as pd
from time import time
# Set a random seed
import random
seed = 42
random.seed(seed)
# Import supplementary visualization code visuals.py
import scripts.visuals as vs
# Load the Census dataset
path = '../data/'
train_data = path + 'census.csv'
test_data = path + 'test_census.csv'
data = pd.read_csv(train_data)
print(data.head(n=1))
print(data.shape)
# get the types of columns
print(data.dtypes)
# Pandas has a helpful select_dtypes function
# which we can use to build a new dataframe containing only the object columns.
obj_data = data.select_dtypes(include=['object']).copy()
# Before going any further, we have to check if there are null values in the data that we need to clean up.
print(obj_data[obj_data.isnull().any(axis=1)])
# TODO: Total number of records
n_records = data.shape[0]
# TODO: Number of records where individual's income is more than $50,000
# TODO: Number of records where individual's income is at most $50,000
# Method1:
n_at_most_50k, n_greater_50k = data.income.value_counts()
# Method2: (optional) -->
# n2_greater_50k = data[data['income']=='>50K'].shape[0]
# n2_at_most_50k = data[data['income']=='<=50K'].shape[0]
n_aux = data.loc[(data['capital-gain'] > 0) & (data['capital-loss'] > 0)].shape
# TODO: Percentage of individuals whose income is more than $50,000
greater_percent = (100*n_greater_50k)/n_records
# Print the results
print("Total number of records: {}".format(n_records))
print("Individuals making more than $50,000: {}".format(n_greater_50k))
print("Individuals making at most $50,000: {}".format(n_at_most_50k))
print("Percentage of individuals making more than $50,000: {}%".format(greater_percent))
# Split the data into features and target label
income_raw = data['income']
features_raw = data.drop('income', axis = 1)
# Visualize skewed continuous features of original data
vs.distribution(data)
# Log-transform the skewed features
skewed = ['capital-gain', 'capital-loss']
features_log_transformed = pd.DataFrame(data = features_raw)
features_log_transformed[skewed] = features_raw[skewed].apply(lambda x: np.log(x + 1))
# Visualize the new log distributions
vs.distribution(features_log_transformed, transformed = True)
# Import sklearn.preprocessing.StandardScaler
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
# Initialize a scaler, then apply it to the features
scaler = MinMaxScaler() # default=(0, 1)
numerical = ['age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week']
features_log_minmax_transform = pd.DataFrame(data = features_log_transformed)
features_log_minmax_transform[numerical] = scaler.fit_transform(features_log_transformed[numerical])
# Show an example of a record with scaling applied
print(features_log_minmax_transform.head(n = 5))
# TODO: One-hot encode the 'features_log_minmax_transform' data using pandas.get_dummies()
features_final = pd.get_dummies(features_log_minmax_transform)
# TODO: Encode the 'income_raw' data to numerical values
# Method1:
encoder = LabelEncoder()
income = pd.Series(encoder.fit_transform(income_raw))
# Method2:(optional) -->
income1 =income_raw.map({'<=50K':0, '>50K':1})
# Method3:(optional) -->
income2 =pd.get_dummies(income_raw)['>50K']
# Print the number of features after one-hot encoding
encoded = list(features_final.columns)
print("{} total features after one-hot encoding.".format(len(encoded)))
# Uncomment the following line to see the encoded feature names
print(encoded)
#-----------------
# @Raafat: Some techniques to deal with imbalanced data:
# --> under sampling
from imblearn.under_sampling import CondensedNearestNeighbour
cnn = CondensedNearestNeighbour(random_state=42)
X_res, y_res = cnn.fit_sample(features_final[0:300], income[0:300])
print('not Resampled dataset shape {}'.format(income[0:300].value_counts()))
print('cnn Resampled dataset shape {}'.format(pd.Series(y_res).value_counts()))
from imblearn.under_sampling import RandomUnderSampler
rus = RandomUnderSampler(random_state=42)
X_res, y_res = rus.fit_sample(features_final[0:300], income[0:300])
print('rus Resampled dataset shape {}'.format(
|
pd.Series(y_res)
|
pandas.Series
|
import pandas as pd
import os
import re
import numpy as np
import xml.etree.ElementTree as ET
#The purpose of this file is to take XML files found in the `/xml` directory, and produce a csv output (`output.csv`)
#XML files that are already generated via PDFx are assumed to be in a directory "xml"
directory="xml"
impact_dict={"title":[], "paper identifier":[], "paper link":[], "impact statement":[], "impact title":[], "impact statement word count":[], "impact statement sentence count":[], "citation count":[],
"has positive":[], "has negative":[], "has opt out":[], "has NA":[], "has impact statement":[]}
#initialize citations_dict, which is a separate dictionary to be generated as a separate CSV file (citation.csv)
citation_dict={"paper title":[],"paper id":[],"citation":[]}
#loops through the directory, and appends the relevant information to impact_dict, which will be appended to the dataframe later
for filename in os.listdir(directory):
#to exclude "sample.xml"
if filename.endswith(".pdfx.xml"):
full_path = os.path.join(directory, filename)
#i need to clear impact_statement_text for each file
impact_statement_text=""
tree = ET.parse(full_path)
root = tree.getroot()
#get article title
#initialize a list of citations for this document
citation_ref = []
#signals if impact statement exists
has_impact_statement = "False"
for section in root[1][0][0]:
if section.tag=="article-title":
title = section.text
for section in root[1][1]:
citations = 0
signal = 0
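# signal == 1 means we are currently inside a Broader Impact section and should keep collecting its text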
for child in section:
if signal == 1 :
#print(section.text)
#broader_dict[filename] = section.text
#loop through any xrefs to count for citations
for xref in child:
#narrow down xref citations to bibliography references
if xref.tag == "xref" and xref.attrib['ref-type'] == "bibr":
#use "rid" as the identifier, so we come out of this with a list of references
citation_ref.append(xref.attrib['rid'])
citations +=1
#itertext will make sure that if there are any tags within the section, we still get the whole thing.
#impact_statement_text will become whatever the current value of it is, plus whatever the for loop finds so long as it is true
if child.itertext() != "" and (child.attrib["class"] == "DoCO:TextChunk" or child.attrib["class"] == "DoCO:TextBox" or child.attrib["class"] == "unknown"):
#so it captures the text so long as there is text in the section
impact_statement_text=impact_statement_text + " "+''.join(child.itertext())
else:
signal = 0
#focus on heading
if "impact" in str(child.text).lower() and child.tag == "h1":
#print("It has a Broader Impact!")
#log the title of the broader impact statement
impact_statement_title = child.text
signal=1
elif str(child.text).lower() == "broader impact" and child.tag == "h1":
impact_statement_title = child.text
signal=1
elif str(child.text).lower() == "broader impacts" and child.tag == "h1":
impact_statement_title = child.text
signal=1
#insert a new for loop here to check for the "smaller" parts. The "h2" headers
for smaller in child:
if signal == 1 :
#print(section.text)
#broader_dict[filename] = section.text
#loop through any xrefs to count for citations
for xref in smaller:
#narrow down xref citations to bibliography references
if xref.tag == "xref" and xref.attrib['ref-type'] == "bibr":
#use "rid" as the identifier, so we come out of this with a list of references
citation_ref.append(xref.attrib['rid'])
citations +=1
#itertext will make sure that if there are any tags within the section, we still get the whole thing.
#if smaller.itertext() != "" and (smaller.attrib["class"] == "DoCO:TextChunk" or smaller.attrib["class"] == "DoCO:TextBox"):
try:
if smaller.itertext() != "" and (smaller.attrib["class"] == "DoCO:TextChunk" or smaller.attrib["class"] == "DoCO:TextBox"):
impact_statement_text=impact_statement_text + " "+''.join(smaller.itertext())
elif smaller.attrib["ref-type"] == "bibr":
continue
else:
signal = 0
except KeyError:
continue
#focus on heading
if "impact" in str(smaller.text).lower() and smaller.tag == "h2":
#print("It has a Broader Impact!")
#log the title of the broader impact statement
impact_statement_title = smaller.text
signal=1
elif str(smaller.text).lower() == "broader impact" and smaller.tag == "h2":
impact_statement_title = smaller.text
signal=1
elif str(smaller.text).lower() == "broader impacts" and smaller.tag == "h2":
impact_statement_title = smaller.text
signal=1
#identify the bibliography
if section.attrib["class"] == "DoCO:Bibliography":
#loop through the bibliography section, but we really only want one part
for references in section:
if references.attrib["class"] == "DoCO:BiblioGraphicReferenceList":
#loop through all the entries in the reference list
for citation in references:
#if the bibliography spans multiple pages, some entries have no "rid"; the try/except catches that KeyError
try:
#check if the citation is in the citation_ref we established earlier
if citation.attrib["rid"] in citation_ref:
citation_dict["paper title"].append(title)
citation_dict["paper id"].append(paper_identifier[1])
citation_dict["citation"].append(citation.text)
except KeyError:
continue
#bring the variable assignment down here instead
if impact_statement_text != "":
#remove double spaces
impact_statement_text = re.sub(r'\s+', ' ', impact_statement_text)
impact_statement_number_of_words=len(impact_statement_text.split())
#count sentences using the delimiters ".", "?", and "!"
impact_statement_number_of_sentences = len(re.split(r"\.|\?|!", impact_statement_text)) - 1
#identify the paper hash from filenames of the form "86d7c8a08b4aaa1bc7c599473f5dddda-Paper.pdfx.xml"
paper_identifier = re.search(r"(\w*)(-Paper)", filename)
#check if "positive" is in the statement
has_positive = "True" if "positive" in impact_statement_text.lower() else "False"
#check if "negative" is in the statement
has_negative = "True" if "negative" in impact_statement_text.lower() else "False"
#check if it has the NeurIPS opt-out phrase
has_opt_out = "True" if "this work does not present any foreseeable societal consequence" in impact_statement_text.lower() else "False"
#check if it has "Not Applicable"
has_NA = "True" if "not applicable" in impact_statement_text.lower() else "False"
has_impact_statement = "True"
#add everything to the dictionary
impact_dict["impact title"].append(impact_statement_title)
impact_dict["impact statement"].append(impact_statement_text)
impact_dict["impact statement word count"].append(impact_statement_number_of_words)
impact_dict["impact statement sentence count"].append(impact_statement_number_of_sentences)
impact_dict["citation count"].append(citations)
impact_dict["title"].append(title)
impact_dict["paper identifier"].append(paper_identifier[1])
impact_dict["paper link"].append("https://proceedings.neurips.cc/paper/2020/file/" + paper_identifier[1] + "-Paper.pdf")
impact_dict["has positive"].append(has_positive)
impact_dict["has negative"].append(has_negative)
impact_dict["has opt out"].append(has_opt_out)
impact_dict["has NA"].append(has_NA)
impact_dict["has impact statement"].append(has_impact_statement)
if has_impact_statement == "False":
#no impact statement was found at all
#set variables
impact_statement_title = ""
impact_statement_text = ""
impact_statement_number_of_words = 0
impact_statement_number_of_sentences = 0
citations = 0
paper_identifier = re.search(r"(\w*)(-Paper)", filename)
has_positive = "False"
has_negative = "False"
has_opt_out = "False"
has_NA = "False"
#append to dictionary
impact_dict["impact title"].append(impact_statement_title)
impact_dict["impact statement"].append(impact_statement_text)
impact_dict["impact statement word count"].append(impact_statement_number_of_words)
impact_dict["impact statement sentence count"].append(impact_statement_number_of_sentences)
impact_dict["citation count"].append(citations)
impact_dict["title"].append(title)
impact_dict["paper identifier"].append(paper_identifier[1])
impact_dict["paper link"].append("https://proceedings.neurips.cc/paper/2020/file/" + paper_identifier[1] + "-Paper.pdf")
impact_dict["has positive"].append(has_positive)
impact_dict["has negative"].append(has_negative)
impact_dict["has opt out"].append(has_opt_out)
impact_dict["has NA"].append(has_NA)
impact_dict["has impact statement"].append(has_impact_statement)
#create the dataframe for the output from the dictionary
impact_statements =
|
pd.DataFrame.from_dict(impact_dict)
|
pandas.DataFrame.from_dict
|
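A minimal, self-contained sketch (separate from the snippet above) of how pandas.DataFrame.from_dict turns a dict of equal-length lists, such as impact_dict, into a DataFrame; the keys mirror the snippet, but the toy values are invented for illustration only.
import pandas as pd

# Toy stand-in for impact_dict: every key maps to a list of the same length.
toy_impact_dict = {
    "title": ["Paper A", "Paper B"],
    "impact statement word count": [120, 0],
    "has impact statement": ["True", "False"],
}

impact_statements = pd.DataFrame.from_dict(toy_impact_dict)
print(impact_statements.head())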
import pandas as pd
def convert_to_date(s: pd.Series):
"""
Convert a pandas Series to date
"""
result =
|
pd.to_datetime(s)
|
pandas.to_datetime
|
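A hedged usage sketch for the convert_to_date helper above, assuming the Series holds date-like strings; pd.to_datetime parses them into datetime64 values.
import pandas as pd

s = pd.Series(["2021-01-01", "2021-06-15"])
result = pd.to_datetime(s)
print(result.dtype)  # datetime64[ns]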
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 2 13:45:18 2017
This is my attempt to scrape live feeds from all bikeshare programs.
It is largely based on:
https://www.citybik.es
@author: changyaochen
"""
import json, requests, os, time, re
import pandas as pd
_debug = True
dname = os.path.dirname(os.path.abspath(__file__))
os.chdir(dname)
all_networks_url = 'https://api.citybik.es/v2/networks'
# get all the networks (cities)
all_networks = requests.get(all_networks_url).json()['networks']
print('total number of cities:', len(all_networks))
if not os.path.exists('all_networks'):
os.mkdir('all_networks')
os.chdir('./all_networks')
# start scraping each of the networks
round_count = 0
network_id_blk_list = [
'nextbike-offenbach-am-main'
]
while round_count < 100:
for network in all_networks:
print(network['id'])
if network['id'] in network_id_blk_list:
continue
# let's make the preambles
network_city = network['location']['city']
network_city = network_city.replace('/','')
network_city = network_city.replace(' ','')
network_country = network['location']['country']
# save network status to individual dataframe
csv_name = network_city.split(',')[0] + '_' + network_country + '.csv'
if os.path.exists(csv_name):
df_network = pd.read_csv(csv_name, encoding = 'utf-8')
else:
df_network =
|
pd.DataFrame()
|
pandas.DataFrame
|
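A minimal sketch of the read-existing-or-start-empty pattern that the completion above finishes; the CSV file name here is hypothetical.
import os
import pandas as pd

csv_name = "Example_XX.csv"  # hypothetical per-city file name
if os.path.exists(csv_name):
    df_network = pd.read_csv(csv_name, encoding="utf-8")
else:
    df_network = pd.DataFrame()  # start with an empty frame on the first scrape
print(len(df_network))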
# -*- coding: utf-8 -*-
import streamlit as st
import io
import pandas as pd
import matplotlib.pyplot as plt
import requests
from datetime import datetime as dt, timedelta, time
import pydeck as pdk
# import sys
# sys.path.append("./")
from functions4kofu import *
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
def map(data, lat=35.668, lon=138.569, zoom=12):
st.write(pdk.Deck(
# map_style="mapbox://styles/mapbox/light-v9",
map_provider="mapbox",
map_style=pdk.map_styles.SATELLITE,
initial_view_state={
"latitude": lat,
"longitude": lon,
"zoom": zoom,
"pitch": 50,
},
layers = [
pdk.Layer(
# 'HexagonLayer', # `type` positional argument is here
"HeatmapLayer",
#UK_ACCIDENTS_DATA,
data=data,
get_position=['log', 'lat'],
get_elevation='count',
auto_highlight=True,
elevation_scale=10,
pickable=True,
elevation_range=[0, 100],
radius = 50,
extruded=True,
coverage=1.0)
]
# layers=[
# pdk.Layer(
# "HexagonLayer",
# data=data,
# get_position=["lon", "lat"],
# radius=100,
# elevation_scale=4,
# elevation_range=[0, 1000],
# pickable=True,
# extruded=True,
# ),
# ]
))
@st.cache
def load_data():
url = "https://8tops.yamanashi.ac.jp/kofu/bt/getPopulation_csv.php"
sday = "20211226"
res = requests.get(url
+ "?sday=" + sday).content
kofu_df = pd.read_csv(io.StringIO(res.decode('utf-8')))
kofu_df['date'] = kofu_df['Unnamed: 0'].astype('str')
kofu_df['date'] = pd.to_datetime(kofu_df['date'], format="%Y-%m-%d %H:%M:%S")
kofu_df['day'] = kofu_df['date'].dt.strftime("%Y-%m-%d")
kofu_df['hourmin'] = kofu_df['date'].dt.strftime("%H:%M")
return kofu_df
@st.cache
def load_sensor_info():
sensors = getSensorInfo()
sensor_df =pd.DataFrame(sensors)
sensor_df = sensor_df.T.reset_index().rename(
columns={"index": "point"})
return sensor_df
kofu_df = load_data()
sensor_df = load_sensor_info()
init_day = dt(2022,1,23, 0,0)
date4show = st.slider("表示",
min_value=dt(2022,1,1,0,0),
max_value= dt(2022,1,31,0,0), # dt.now() doesn't work (return the initial state)
value=init_day,
step = timedelta(minutes=10),
format="YYYY-MM-DD hh:mm")
st.write(date4show)
day = date4show.strftime("%Y-%m-%d")
hourmin = date4show.strftime("%H:%M")
st.session_state.day = date4show
tmp_df = kofu_df[(kofu_df['day']==day) & (kofu_df['hourmin']==hourmin)].T.reset_index()
tmp_df = tmp_df.set_axis(["point", "count"], axis=1)
data4pydeck =
|
pd.merge(tmp_df, sensor_df, on="point", how="inner")
|
pandas.merge
|
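A small sketch of the inner merge the completion above performs, joining per-point counts with sensor metadata on the shared "point" column; the sample rows are invented.
import pandas as pd

tmp_df = pd.DataFrame({"point": ["A", "B", "C"], "count": [5, 2, 7]})
sensor_df = pd.DataFrame({"point": ["A", "B"], "lat": [35.66, 35.67], "lon": [138.56, 138.57]})

# An inner join keeps only points present in both frames (point "C" is dropped).
data4pydeck = pd.merge(tmp_df, sensor_df, on="point", how="inner")
print(data4pydeck)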
import numpy as np
from numba import jit
import pandas as pd
import neuroseries as nts
import sys, os
import scipy
from scipy import signal
from itertools import combinations
'''
Utility functions.
Feel free to add your own.
'''
#########################################################
# CORRELATION
#########################################################
@jit(nopython=True)
def crossCorr(t1, t2, binsize, nbins):
'''
Fast crossCorr
'''
nt1 = len(t1)
nt2 = len(t2)
if np.floor(nbins/2)*2 == nbins:
nbins = nbins+1
m = -binsize*((nbins+1)/2)
B = np.zeros(nbins)
for j in range(nbins):
B[j] = m+j*binsize
w = ((nbins/2) * binsize)
C = np.zeros(nbins)
i2 = 1
for i1 in range(nt1):
lbound = t1[i1] - w
while i2 < nt2 and t2[i2] < lbound:
i2 = i2+1
while i2 > 1 and t2[i2-1] > lbound:
i2 = i2-1
rbound = lbound
l = i2
for j in range(nbins):
k = 0
rbound = rbound+binsize
while l < nt2 and t2[l] < rbound:
l = l+1
k = k+1
C[j] += k
# for j in range(nbins):
# C[j] = C[j] / (nt1 * binsize)
C = C/(nt1 * binsize/1000)
return C
def crossCorr2(t1, t2, binsize, nbins):
'''
Slow crossCorr
'''
window = np.arange(-binsize*(nbins/2),binsize*(nbins/2)+2*binsize,binsize) - (binsize/2.)
allcount = np.zeros(nbins+1)
for e in t1:
mwind = window + e
# need to add a zero bin and an infinite bin in mwind
mwind = np.array([-1.0] + list(mwind) + [np.max([t1.max(),t2.max()])+binsize])
index = np.digitize(t2, mwind)
# index larger than 2 and lower than mwind.shape[0]-1
# count each occurrence
count = np.array([np.sum(index == i) for i in range(2,mwind.shape[0]-1)])
allcount += np.array(count)
allcount = allcount/(float(len(t1))*binsize / 1000)
return allcount
def xcrossCorr_slow(t1, t2, binsize, nbins, nbiter, jitter, confInt):
times = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2
H0 = crossCorr(t1, t2, binsize, nbins)
H1 = np.zeros((nbiter,nbins+1))
t2j = t2 + 2*jitter*(np.random.rand(nbiter, len(t2)) - 0.5)
t2j = np.sort(t2j, 1)
for i in range(nbiter):
H1[i] = crossCorr(t1, t2j[i], binsize, nbins)
Hm = H1.mean(0)
tmp = np.sort(H1, 0)
HeI = tmp[int((1-confInt)/2*nbiter),:]
HeS = tmp[int((confInt + (1-confInt)/2)*nbiter)]
Hstd = np.std(tmp, 0)
return (H0, Hm, HeI, HeS, Hstd, times)
def xcrossCorr_fast(t1, t2, binsize, nbins, nbiter, jitter, confInt):
times = np.arange(0, binsize*(nbins*2+1), binsize) - (nbins*2*binsize)/2
# need to do a cross-corr of double size to convolve after and avoid boundary effect
H0 = crossCorr(t1, t2, binsize, nbins*2)
window_size = 2*jitter//binsize
window = np.ones(window_size)*(1/window_size)
Hm = np.convolve(H0, window, 'same')
Hstd = np.sqrt(np.var(Hm))
HeI = np.nan
HeS = np.nan
return (H0, Hm, HeI, HeS, Hstd, times)
def compute_AutoCorrs(spks, ep, binsize = 5, nbins = 200):
# First let's prepare a pandas dataframe to receive the data
times = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2
autocorrs = pd.DataFrame(index = times, columns = list(spks.keys()))
firing_rates = pd.Series(index = list(spks.keys()))
# Now we can iterate over the dictionary of spikes
for i in spks:
# First we extract the time of spikes in ms during wake
spk_time = spks[i].restrict(ep).as_units('ms').index.values
# Calling the crossCorr function
autocorrs[i] = crossCorr(spk_time, spk_time, binsize, nbins)
# Computing the mean firing rate
firing_rates[i] = len(spk_time)/ep.tot_length('s')
# We can divide the autocorrs by the firing_rates
autocorrs = autocorrs / firing_rates
# And don't forget to set the value at 0 ms lag to 0
autocorrs.loc[0] = 0.0
return autocorrs, firing_rates
def compute_CrossCorrs(spks, ep, binsize=10, nbins = 2000, norm = False):
"""
"""
neurons = list(spks.keys())
times = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2
cc = pd.DataFrame(index = times, columns = list(combinations(neurons, 2)))
for i,j in cc.columns:
spk1 = spks[i].restrict(ep).as_units('ms').index.values
spk2 = spks[j].restrict(ep).as_units('ms').index.values
tmp = crossCorr(spk1, spk2, binsize, nbins)
fr = len(spk2)/ep.tot_length('s')
if norm:
cc[(i,j)] = tmp/fr
else:
cc[(i,j)] = tmp
return cc
def compute_PairCrossCorr(spks, ep, pair, binsize=10, nbins = 2000, norm = False):
"""
"""
times = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2
spk1 = spks[pair[0]].restrict(ep).as_units('ms').index.values
spk2 = spks[pair[1]].restrict(ep).as_units('ms').index.values
tmp = crossCorr(spk1, spk2, binsize, nbins)
fr = len(spk2)/ep.tot_length('s')
tmp = pd.Series(index = times, data = tmp)
if norm:
tmp = tmp/fr
else:
tmp = tmp
return tmp
def compute_EventCrossCorr(spks, evt, ep, binsize = 5, nbins = 1000, norm=False):
"""
"""
neurons = list(spks.keys())
times = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2
cc = pd.DataFrame(index = times, columns = neurons)
tsd1 = evt.restrict(ep).as_units('ms').index.values
for i in neurons:
spk2 = spks[i].restrict(ep).as_units('ms').index.values
tmp = crossCorr(tsd1, spk2, binsize, nbins)
fr = len(spk2)/ep.tot_length('s')
if norm:
cc[i] = tmp/fr
else:
cc[i] = tmp
return cc
def compute_ISI(spks, ep, maxisi, nbins, log_=False):
"""
"""
neurons = list(spks.keys())
if log_:
bins = np.linspace(np.log10(1), np.log10(maxisi), nbins)
else:
bins = np.linspace(0, maxisi, nbins)
isi = pd.DataFrame(index = bins[0:-1] + np.diff(bins)/2, columns = neurons)
for i in neurons:
tmp = []
for j in ep.index:
tmp.append(np.diff(spks[i].restrict(ep.loc[[j]]).as_units('ms').index.values))
tmp = np.hstack(tmp)
if log_:
isi[i], _ = np.histogram(np.log10(tmp), bins)
else:
isi[i], _ = np.histogram(tmp, bins)
return isi
def compute_AllPairsCrossCorrs(spks, ep, binsize=10, nbins = 2000, norm = False):
"""
"""
neurons = list(spks.keys())
times = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2
cc = pd.DataFrame(index = times, columns = list(combinations(neurons, 2)))
for i,j in cc.columns:
spk1 = spks[i].restrict(ep).as_units('ms').index.values
spk2 = spks[j].restrict(ep).as_units('ms').index.values
tmp = crossCorr(spk1, spk2, binsize, nbins)
fr = len(spk2)/ep.tot_length('s')
if norm:
cc[(i,j)] = tmp/fr
else:
cc[(i,j)] = tmp
return cc
def compute_AsyncCrossCorrs(spks, ep, binsize=10, nbins = 2000, norm = False, edge = 20):
"""
"""
neurons = list(spks.keys())
times = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2
cc = pd.DataFrame(index = times, columns = list(combinations(neurons, 2)))
for i,j in cc.columns:
spk1 = spks[i].restrict(ep).as_units('ms').index.values
spk2 = spks[j].restrict(ep).as_units('ms').index.values
spksync = []
spkasync = []
for t in spk2:
if np.sum(np.abs(t-spk1)<edge):
spksync.append(t)
else:
spkasync.append(t)
# tmp = crossCorr(spk1, spk2, binsize, nbins)
tmp = crossCorr(spk1, np.array(spkasync), binsize, nbins)
fr = len(spkasync)/ep.tot_length('s')
if norm:
cc[(i,j)] = tmp/fr
else:
cc[(i,j)] = tmp
return cc
def compute_RandomCrossCorrs(spks, ep, binsize=10, nbins = 2000, norm = False, percent = 0.5):
"""
"""
neurons = list(spks.keys())
times = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2
cc = pd.DataFrame(index = times, columns = list(combinations(neurons, 2)))
for i,j in cc.columns:
spk1 = spks[i].restrict(ep).as_units('ms').index.values
spk2 = spks[j].restrict(ep).as_units('ms').index.values
spk1_random = np.sort(np.random.choice(spk1, int(len(spk1)*percent), replace=False))
spk2_random = np.sort(np.random.choice(spk2, int(len(spk2)*percent), replace=False))
# tmp = crossCorr(spk1, spk2, binsize, nbins)
tmp = crossCorr(spk1_random, spk2_random, binsize, nbins)
fr = len(spk2_random)/ep.tot_length('s')
if norm:
cc[(i,j)] = tmp/fr
else:
cc[(i,j)] = tmp
return cc
#########################################################
# VARIOUS
#########################################################
def computeLMNAngularTuningCurves(spikes, angle, ep, nb_bins = 180, frequency = 120.0, bin_size = 100):
tmp = pd.Series(index = angle.index.values, data = np.unwrap(angle.values))
tmp2 = tmp.rolling(window=50,win_type='gaussian',center=True,min_periods=1).mean(std=10.0)
bin_size = bin_size * 1000
time_bins = np.arange(tmp.index[0], tmp.index[-1]+bin_size, bin_size) # assuming microseconds
index = np.digitize(tmp2.index.values, time_bins)
tmp3 = tmp2.groupby(index).mean()
tmp3.index = time_bins[np.unique(index)-1]+bin_size/2
tmp3 = nts.Tsd(tmp3)
tmp4 = np.diff(tmp3.values)/np.diff(tmp3.as_units('s').index.values)
newangle = nts.Tsd(t = tmp3.index.values, d = tmp3.values%(2*np.pi))
velocity = nts.Tsd(t=tmp3.index.values[1:], d = tmp4)
velocity = velocity.restrict(ep)
velo_spikes = {}
for k in spikes: velo_spikes[k] = velocity.realign(spikes[k].restrict(ep))
# bins_velocity = np.array([velocity.min(), -2*np.pi/3, -np.pi/6, np.pi/6, 2*np.pi/3, velocity.max()+0.001])
bins_velocity = np.array([velocity.min(), -np.pi/6, np.pi/6, velocity.max()+0.001])
idx_velocity = {k:np.digitize(velo_spikes[k].values, bins_velocity)-1 for k in spikes}
bins = np.linspace(0, 2*np.pi, nb_bins)
idx = bins[0:-1]+np.diff(bins)/2
tuning_curves = {i:pd.DataFrame(index = idx, columns = list(spikes.keys())) for i in range(3)}
# for i,j in zip(range(3),range(0,6,2)):
for i,j in zip(range(3),range(3)):
for k in spikes:
spks = spikes[k].restrict(ep)
spks = spks[idx_velocity[k] == j]
angle_spike = newangle.restrict(ep).realign(spks)
spike_count, bin_edges = np.histogram(angle_spike, bins)
tmp = newangle.loc[velocity.index[np.logical_and(velocity.values>bins_velocity[j], velocity.values<bins_velocity[j+1])]]
occupancy, _ = np.histogram(tmp, bins)
spike_count = spike_count/occupancy
tuning_curves[i][k] = spike_count*(1/(bin_size*1e-6))
return tuning_curves, velocity, bins_velocity
def computeAngularTuningCurves(spikes, angle, ep, nb_bins = 180, frequency = 120.0):
bins = np.linspace(0, 2*np.pi, nb_bins)
idx = bins[0:-1]+np.diff(bins)/2
tuning_curves = pd.DataFrame(index = idx, columns = list(spikes.keys()))
angle = angle.restrict(ep)
# Smoothing the angle here
tmp = pd.Series(index = angle.index.values, data = np.unwrap(angle.values))
tmp2 = tmp.rolling(window=50,win_type='gaussian',center=True,min_periods=1).mean(std=10.0)
angle = nts.Tsd(tmp2%(2*np.pi))
for k in spikes:
spks = spikes[k]
# true_ep = nts.IntervalSet(start = np.maximum(angle.index[0], spks.index[0]), end = np.minimum(angle.index[-1], spks.index[-1]))
spks = spks.restrict(ep)
angle_spike = angle.restrict(ep).realign(spks)
spike_count, bin_edges = np.histogram(angle_spike, bins)
occupancy, _ = np.histogram(angle, bins)
spike_count = spike_count/occupancy
tuning_curves[k] = spike_count*frequency
return tuning_curves
def findHDCells(tuning_curves, z = 50, p = 0.0001 , m = 1):
"""
Peak firing rate larger than 1
and Rayleigh test p<0.001 & z > 100
"""
cond1 = tuning_curves.max()>m
from pycircstat.tests import rayleigh
stat = pd.DataFrame(index = tuning_curves.columns, columns = ['pval', 'z'])
for k in tuning_curves:
stat.loc[k] = rayleigh(tuning_curves[k].index.values, tuning_curves[k].values)
cond2 = np.logical_and(stat['pval']<p,stat['z']>z)
tokeep = stat.index.values[np.where(np.logical_and(cond1, cond2))[0]]
return tokeep, stat
def decodeHD(tuning_curves, spikes, ep, bin_size = 200, px = None):
"""
See : Zhang, 1998, Interpreting Neuronal Population Activity by Reconstruction: Unified Framework With Application to Hippocampal Place Cells
tuning_curves: pd.DataFrame with angular position as index and columns as neuron
spikes : dictionary of spike times
ep : nts.IntervalSet, the epochs for decoding
bin_size : in ms (default:200ms)
px : Occupancy. If None, px is uniform
"""
if len(ep) == 1:
bins = np.arange(ep.as_units('ms').start.iloc[0], ep.as_units('ms').end.iloc[-1], bin_size)
else:
# ep2 = nts.IntervalSet(ep.copy().as_units('ms'))
# ep2 = ep2.drop_short_intervals(bin_size*2)
# bins = []
# for i in ep2.index:
# bins.append(np.arange())
# bins = np.arange(ep2.start.iloc[0], ep.end.iloc[-1], bin_size)
print("TODO")
sys.exit()
order = tuning_curves.columns.values
# TODO CHECK MATCH
# smoothing with a non-normalized gaussian
w = scipy.signal.windows.gaussian(51, 2)
spike_counts = pd.DataFrame(index = bins[0:-1]+np.diff(bins)/2, columns = order)
for n in spike_counts:
spks = spikes[n].restrict(ep).as_units('ms').index.values
tmp = np.histogram(spks, bins)
spike_counts[n] = np.convolve(tmp[0], w, mode = 'same')
# spike_counts[k] = tmp[0]
tcurves_array = tuning_curves.values
spike_counts_array = spike_counts.values
proba_angle = np.zeros((spike_counts.shape[0], tuning_curves.shape[0]))
part1 = np.exp(-(bin_size/1000)*tcurves_array.sum(1))
if px is not None:
part2 = px
else:
part2 = np.ones(tuning_curves.shape[0])
#part2 = np.histogram(position['ry'], np.linspace(0, 2*np.pi, 61), weights = np.ones_like(position['ry'])/float(len(position['ry'])))[0]
for i in range(len(proba_angle)):
part3 = np.prod(tcurves_array**spike_counts_array[i], 1)
p = part1 * part2 * part3
proba_angle[i] = p/p.sum() # Normalization process here
proba_angle =
|
pd.DataFrame(index = spike_counts.index.values, columns = tuning_curves.index.values, data= proba_angle)
|
pandas.DataFrame
|
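A minimal sketch of the pd.DataFrame construction that the completion above performs in decodeHD: wrapping a 2-D probability array with time bins as the index and angular bins as the columns. The arrays here are random stand-ins for spike_counts.index.values and tuning_curves.index.values, for illustration only.
import numpy as np
import pandas as pd

time_bins = np.array([100.0, 300.0, 500.0])       # stand-in time-bin centres (ms)
angle_bins = np.linspace(0, 2 * np.pi, 5)         # stand-in angular bin centres

proba = np.random.rand(len(time_bins), len(angle_bins))
proba = proba / proba.sum(axis=1, keepdims=True)  # normalize each row, as in decodeHD

proba_angle = pd.DataFrame(index=time_bins, columns=angle_bins, data=proba)
print(proba_angle.idxmax(axis=1))  # decoded angle per time bin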
import quandl
import pandas as pd
import numpy as np
def get_data(daysahead=20):
# import data
# USDCAD= quandl.get("FED/RXI_N_B_CA", authtoken="mmpvRYssGGBNky8<PASSWORD>")
# US overnight rates
EffFedRate = quandl.get("FED/RIFSPFF_N_D", authtoken="<PASSWORD>", start_date='1980-01-01')
FedUppTarRange= quandl.get("FRED/DFEDTARU", authtoken="<PASSWORD>")
FedLowTarRange= quandl.get("FRED/DFEDTARL", authtoken="<PASSWORD>")
FedHisTarRate=quandl.get("FRED/DFEDTAR", authtoken="<PASSWORD>", start_date='1980-01-01')
#US yield curve rates since 1990
USyields = quandl.get("USTREASURY/YIELD", authtoken="<PASSWORD>")
#net cad long/short spec and non-speculative positions
NetCAD=quandl.get("CFTC/090741_F_L_ALL")
#oil prices futures weekly - Calculate backwardation/contango
Oil4 = quandl.get("EIA/PET_RCLC4_W", authtoken="mmpvRYssG<PASSWORD>")
Oil4.columns=['Oil4']
Oil1 = quandl.get("EIA/PET_RCLC1_W", authtoken="mmpvRYssGGBN<PASSWORD>")
Oil1.columns=['Oil1']
#oil spot
Oilspot = quandl.get("FRED/DCOILWTICO", authtoken="<PASSWORD>")
Oilspot.columns=['Oilspot']
# Rig count
RigsUS = quandl.get("BKRHUGHES/COUNT_BY_TRAJECTORY", authtoken="<PASSWORD>")
RigsUS['RigsDelta']=RigsUS['Total']-RigsUS['Total'].shift()
RigsUS=RigsUS[['Total','RigsDelta']]
#US oil inventories
OilInv = quandl.get("EIA/WCESTUS1", authtoken="<PASSWORD>")
OilInv.columns=['Inv']
OilInv['InvDelta']=OilInv['Inv']-OilInv['Inv'].shift()
#USCPI
CPI = quandl.get("YALE/SP_CPI", authtoken="<PASSWORD>", start_date="1979-12-30")
CPI.columns=['CPI']
#Cad Bonds
CADBOC = pd.read_csv('C1.csv', skiprows=4, index_col=['Rates'], skipfooter=7).apply(pd.to_numeric, errors='coerce')  # CANSIM table 176-0043 CanBonds; convert_objects() has been removed from pandas
#BoC overnight rates
BOCON= pd.read_csv('C2.csv',skiprows=2, index_col=['Daily'])#CANSIM table 176-0048
BOCON.columns=['BOC fundrate']
BOCON.dropna(inplace=True)
# Employment numbers
USUnEm=quandl.get("FRED/UNRATE", authtoken="<PASSWORD>",start_date='1955-06-01')
USUnEm.columns=['Unemployment rate US']
USNonFarm=quandl.get("BLSE/CES0000000001", authtoken="<PASSWORD>",start_date='1955-06-01')
USNonFarm.columns=['1000s employed US']
employmentsituationdate=pd.DataFrame(pd.read_excel('EmploySitUS.xlsx',skiprows=35).iloc[:,0])
employmentsituationdate.columns=['date']
rest=pd.merge(USUnEm, employmentsituationdate,left_index=True, right_on='date',how='outer')
rest=rest.set_index('date')
rest = rest.sort_index(level=0)
rest=pd.merge(rest, USNonFarm,left_index=True, right_index=True,how='outer')
rest['Uunemploy']=rest['Unemployment rate US'].shift(2)
rest.fillna(method='pad',inplace=True)
rest.tail(10)
emp=pd.merge(employmentsituationdate,rest,left_on='date',right_index=True)
emp.drop(['Unemployment rate US'],axis=1, inplace=True)
emp=emp.set_index('date')
CanEm=pd.read_csv('C3.csv',skiprows=3, index_col=['Data type']) #Cansim table 282-0087
CanEm=CanEm.iloc[0:3,5:].T['Seasonally adjusted']
CanEm.columns=[['1000s employed Can','Unemployment rate Can']]
CanEm1=CanEm.shift()
CanEm1.columns=[['C1000s employed shift1','CUnemploy rate shift1']]
CanEm2=CanEm.shift(2)
CanEm2.columns=[['C1000s employed shift2','CUnemploy rate shift2']]
CanEmS=
|
pd.merge(CanEm1,CanEm2, left_index=True,right_index=True)
|
pandas.merge
|
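A hedged sketch of the index-on-index merge the completion above performs, aligning the one-period and two-period shifted employment frames on their shared date index; the sample data are invented.
import pandas as pd

idx = pd.to_datetime(["2020-01-31", "2020-02-29", "2020-03-31", "2020-04-30"])
CanEm = pd.DataFrame({"employed": [100, 102, 101, 103]}, index=idx)

CanEm1 = CanEm.shift()    # values lagged by one period
CanEm2 = CanEm.shift(2)   # values lagged by two periods
CanEm1.columns = ["employed shift1"]
CanEm2.columns = ["employed shift2"]

# Align the two lagged frames on their common date index.
CanEmS = pd.merge(CanEm1, CanEm2, left_index=True, right_index=True)
print(CanEmS)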