Dataset columns: prompt (string, 19 to 1.03M characters), completion (string, 4 to 2.12k characters), api (string, 8 to 90 characters).
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. from __future__ import division from __future__ import print_function import os import pickle import re import sys import copy import json import yaml import redis import bisect import shutil import difflib import hashlib import warnings import datetime import requests import tempfile import importlib import contextlib import collections import numpy as np import pandas as pd from pathlib import Path from typing import Dict, Union, Tuple, Any, Text, Optional, Callable from types import ModuleType from urllib.parse import urlparse from ..config import C from ..log import get_module_logger, set_log_with_config log = get_module_logger("utils") #################### Server #################### def get_redis_connection(): """get redis connection instance.""" return redis.StrictRedis(host=C.redis_host, port=C.redis_port, db=C.redis_task_db) #################### Data #################### def read_bin(file_path: Union[str, Path], start_index, end_index): file_path = Path(file_path.expanduser().resolve()) with file_path.open("rb") as f: # read start_index ref_start_index = int(np.frombuffer(f.read(4), dtype="<f")[0]) si = max(ref_start_index, start_index) if si > end_index: return pd.Series(dtype=np.float32) # calculate offset f.seek(4 * (si - ref_start_index) + 4) # read nbytes count = end_index - si + 1 data = np.frombuffer(f.read(4 * count), dtype="<f") series = pd.Series(data, index=pd.RangeIndex(si, si + len(data))) return series def np_ffill(arr: np.array): """ forward fill a 1D numpy array Parameters ---------- arr : np.array Input numpy 1D array """ mask = np.isnan(arr.astype(float)) # np.isnan only works on np.float # get fill index idx = np.where(~mask, np.arange(mask.shape[0]), 0) np.maximum.accumulate(idx, out=idx) return arr[idx] #################### Search #################### def lower_bound(data, val, level=0): """multi fields list lower bound. for single field list use `bisect.bisect_left` instead """ left = 0 right = len(data) while left < right: mid = (left + right) // 2 if val <= data[mid][level]: right = mid else: left = mid + 1 return left def upper_bound(data, val, level=0): """multi fields list upper bound. for single field list use `bisect.bisect_right` instead """ left = 0 right = len(data) while left < right: mid = (left + right) // 2 if val >= data[mid][level]: left = mid + 1 else: right = mid return left #################### HTTP #################### def requests_with_retry(url, retry=5, **kwargs): while retry > 0: retry -= 1 try: res = requests.get(url, timeout=1, **kwargs) assert res.status_code in {200, 206} return res except AssertionError: continue except Exception as e: log.warning("exception encountered {}".format(e)) continue raise Exception("ERROR: requests failed!") #################### Parse #################### def parse_config(config): # Check whether need parse, all object except str do not need to be parsed if not isinstance(config, str): return config # Check whether config is file if os.path.exists(config): with open(config, "r") as f: return yaml.safe_load(f) # Check whether the str can be parsed try: return yaml.safe_load(config) except BaseException: raise ValueError("cannot parse config!") #################### Other #################### def drop_nan_by_y_index(x, y, weight=None): # x, y, weight: DataFrame # Find index of rows which do not contain Nan in all columns from y. mask = ~y.isna().any(axis=1) # Get related rows from x, y, weight. 
x = x[mask] y = y[mask] if weight is not None: weight = weight[mask] return x, y, weight def hash_args(*args): # json.dumps will keep the dict keys always sorted. string = json.dumps(args, sort_keys=True, default=str) # frozenset return hashlib.md5(string.encode()).hexdigest() def parse_field(field): # Following patterns will be matched: # - $close -> Feature("close") # - $close5 -> Feature("close5") # - $open+$close -> Feature("open")+Feature("close") if not isinstance(field, str): field = str(field) return re.sub(r"\$(\w+)", r'Feature("\1")', re.sub(r"(\w+\s*)\(", r"Operators.\1(", field)) def get_module_by_module_path(module_path: Union[str, ModuleType]): """Load module path :param module_path: :return: """ if isinstance(module_path, ModuleType): module = module_path else: if module_path.endswith(".py"): module_name = re.sub("^[^a-zA-Z_]+", "", re.sub("[^0-9a-zA-Z_]", "", module_path[:-3].replace("/", "_"))) module_spec = importlib.util.spec_from_file_location(module_name, module_path) module = importlib.util.module_from_spec(module_spec) sys.modules[module_name] = module module_spec.loader.exec_module(module) else: module = importlib.import_module(module_path) return module def get_callable_kwargs(config: Union[dict, str], default_module: Union[str, ModuleType] = None) -> (type, dict): """ extract class/func and kwargs from config info Parameters ---------- config : [dict, str] similar to config please refer to the doc of init_instance_by_config default_module : Python module or str It should be a python module to load the class type This function will load class from the config['module_path'] first. If config['module_path'] doesn't exists, it will load the class from default_module. Returns ------- (type, dict): the class/func object and it's arguments. """ if isinstance(config, dict): if isinstance(config["class"], str): module = get_module_by_module_path(config.get("module_path", default_module)) # raise AttributeError _callable = getattr(module, config["class" if "class" in config else "func"]) else: _callable = config["class"] # the class type itself is passed in kwargs = config.get("kwargs", {}) elif isinstance(config, str): # a.b.c.ClassName *m_path, cls = config.split(".") m_path = ".".join(m_path) module = get_module_by_module_path(default_module if m_path == "" else m_path) _callable = getattr(module, cls) kwargs = {} else: raise NotImplementedError(f"This type of input is not supported") return _callable, kwargs get_cls_kwargs = get_callable_kwargs # NOTE: this is for compatibility for the previous version def init_instance_by_config( config: Union[str, dict, object], default_module=None, accept_types: Union[type, Tuple[type]] = (), try_kwargs: Dict = {}, **kwargs, ) -> Any: """ get initialized instance with config Parameters ---------- config : Union[str, dict, object] dict example. case 1) { 'class': 'ClassName', 'kwargs': dict, # It is optional. {} will be used if not given 'model_path': path, # It is optional if module is given } case 2) { 'class': <The class it self>, 'kwargs': dict, # It is optional. {} will be used if not given } str example. 1) specify a pickle object - path like 'file:///<path to pickle file>/obj.pkl' 2) specify a class name - "ClassName": getattr(module, "ClassName")() will be used. 3) specify module path with class name - "a.b.c.ClassName" getattr(<a.b.c.module>, "ClassName")() will be used. object example: instance of accept_types default_module : Python module Optional. It should be a python module. 
NOTE: the "module_path" will be override by `module` arguments This function will load class from the config['module_path'] first. If config['module_path'] doesn't exists, it will load the class from default_module. accept_types: Union[type, Tuple[type]] Optional. If the config is a instance of specific type, return the config directly. This will be passed into the second parameter of isinstance. try_kwargs: Dict Try to pass in kwargs in `try_kwargs` when initialized the instance If error occurred, it will fail back to initialization without try_kwargs. Returns ------- object: An initialized object based on the config info """ if isinstance(config, accept_types): return config if isinstance(config, str): # path like 'file:///<path to pickle file>/obj.pkl' pr = urlparse(config) if pr.scheme == "file": with open(os.path.join(pr.netloc, pr.path), "rb") as f: return pickle.load(f) klass, cls_kwargs = get_callable_kwargs(config, default_module=default_module) try: return klass(**cls_kwargs, **try_kwargs, **kwargs) except (TypeError,): # TypeError for handling errors like # 1: `XXX() got multiple values for keyword argument 'YYY'` # 2: `XXX() got an unexpected keyword argument 'YYY' return klass(**cls_kwargs, **kwargs) @contextlib.contextmanager def class_casting(obj: object, cls: type): """ Python doesn't provide the downcasting mechanism. We use the trick here to downcast the class Parameters ---------- obj : object the object to be cast cls : type the target class type """ orig_cls = obj.__class__ obj.__class__ = cls yield obj.__class__ = orig_cls def compare_dict_value(src_data: dict, dst_data: dict): """Compare dict value :param src_data: :param dst_data: :return: """ class DateEncoder(json.JSONEncoder): # FIXME: This class can only be accurate to the day. If it is a minute, # there may be a bug def default(self, o): if isinstance(o, (datetime.datetime, datetime.date)): return o.strftime("%Y-%m-%d %H:%M:%S") return json.JSONEncoder.default(self, o) src_data = json.dumps(src_data, indent=4, sort_keys=True, cls=DateEncoder) dst_data = json.dumps(dst_data, indent=4, sort_keys=True, cls=DateEncoder) diff = difflib.ndiff(src_data, dst_data) changes = [line for line in diff if line.startswith("+ ") or line.startswith("- ")] return changes def get_or_create_path(path: Optional[Text] = None, return_dir: bool = False): """Create or get a file or directory given the path and return_dir. Parameters ---------- path: a string indicates the path or None indicates creating a temporary path. return_dir: if True, create and return a directory; otherwise c&r a file. """ if path: if return_dir and not os.path.exists(path): os.makedirs(path) elif not return_dir: # return a file, thus we need to create its parent directory xpath = os.path.abspath(os.path.join(path, "..")) if not os.path.exists(xpath): os.makedirs(xpath) else: temp_dir = os.path.expanduser("~/tmp") if not os.path.exists(temp_dir): os.makedirs(temp_dir) if return_dir: _, path = tempfile.mkdtemp(dir=temp_dir) else: _, path = tempfile.mkstemp(dir=temp_dir) return path @contextlib.contextmanager def save_multiple_parts_file(filename, format="gztar"): """Save multiple parts file Implementation process: 1. get the absolute path to 'filename' 2. create a 'filename' directory 3. user does something with file_path('filename/') 4. remove 'filename' directory 5. 
make_archive 'filename' directory, and rename 'archive file' to filename :param filename: result model path :param format: archive format: one of "zip", "tar", "gztar", "bztar", or "xztar" :return: real model path Usage:: >>> # The following code will create an archive file('~/tmp/test_file') containing 'test_doc_i'(i is 0-10) files. >>> with save_multiple_parts_file('~/tmp/test_file') as filename_dir: ... for i in range(10): ... temp_path = os.path.join(filename_dir, 'test_doc_{}'.format(str(i))) ... with open(temp_path) as fp: ... fp.write(str(i)) ... """ if filename.startswith("~"): filename = os.path.expanduser(filename) file_path = os.path.abspath(filename) # Create model dir if os.path.exists(file_path): raise FileExistsError("ERROR: file exists: {}, cannot be create the directory.".format(file_path)) os.makedirs(file_path) # return model dir yield file_path # filename dir to filename.tar.gz file tar_file = shutil.make_archive(file_path, format=format, root_dir=file_path) # Remove filename dir if os.path.exists(file_path): shutil.rmtree(file_path) # filename.tar.gz rename to filename os.rename(tar_file, file_path) @contextlib.contextmanager def unpack_archive_with_buffer(buffer, format="gztar"): """Unpack archive with archive buffer After the call is finished, the archive file and directory will be deleted. Implementation process: 1. create 'tempfile' in '~/tmp/' and directory 2. 'buffer' write to 'tempfile' 3. unpack archive file('tempfile') 4. user does something with file_path('tempfile/') 5. remove 'tempfile' and 'tempfile directory' :param buffer: bytes :param format: archive format: one of "zip", "tar", "gztar", "bztar", or "xztar" :return: unpack archive directory path Usage:: >>> # The following code is to print all the file names in 'test_unpack.tar.gz' >>> with open('test_unpack.tar.gz') as fp: ... buffer = fp.read() ... >>> with unpack_archive_with_buffer(buffer) as temp_dir: ... for f_n in os.listdir(temp_dir): ... print(f_n) ... 
""" temp_dir = os.path.expanduser("~/tmp") if not os.path.exists(temp_dir): os.makedirs(temp_dir) with tempfile.NamedTemporaryFile("wb", delete=False, dir=temp_dir) as fp: fp.write(buffer) file_path = fp.name try: tar_file = file_path + ".tar.gz" os.rename(file_path, tar_file) # Create dir os.makedirs(file_path) shutil.unpack_archive(tar_file, format=format, extract_dir=file_path) # Return temp dir yield file_path except Exception as e: log.error(str(e)) finally: # Remove temp tar file if os.path.exists(tar_file): os.unlink(tar_file) # Remove temp model dir if os.path.exists(file_path): shutil.rmtree(file_path) @contextlib.contextmanager def get_tmp_file_with_buffer(buffer): temp_dir = os.path.expanduser("~/tmp") if not os.path.exists(temp_dir): os.makedirs(temp_dir) with tempfile.NamedTemporaryFile("wb", delete=True, dir=temp_dir) as fp: fp.write(buffer) file_path = fp.name yield file_path def remove_repeat_field(fields): """remove repeat field :param fields: list; features fields :return: list """ fields = copy.deepcopy(fields) _fields = set(fields) return sorted(_fields, key=fields.index) def remove_fields_space(fields: [list, str, tuple]): """remove fields space :param fields: features fields :return: list or str """ if isinstance(fields, str): return fields.replace(" ", "") return [i.replace(" ", "") for i in fields if isinstance(i, str)] def normalize_cache_fields(fields: [list, tuple]): """normalize cache fields :param fields: features fields :return: list """ return sorted(remove_repeat_field(remove_fields_space(fields))) def normalize_cache_instruments(instruments): """normalize cache instruments :return: list or dict """ if isinstance(instruments, (list, tuple, pd.Index, np.ndarray)): instruments = sorted(list(instruments)) else: # dict type stockpool if "market" in instruments: pass else: instruments = {k: sorted(v) for k, v in instruments.items()} return instruments def is_tradable_date(cur_date): """judgy whether date is a tradable date ---------- date : pandas.Timestamp current date """ from ..data import D return str(cur_date.date()) == str(D.calendar(start_time=cur_date, future=True)[0].date()) def get_date_range(trading_date, left_shift=0, right_shift=0, future=False): """get trading date range by shift Parameters ---------- trading_date: pd.Timestamp left_shift: int right_shift: int future: bool """ from ..data import D start = get_date_by_shift(trading_date, left_shift, future=future) end = get_date_by_shift(trading_date, right_shift, future=future) calendar = D.calendar(start, end, future=future) return calendar def get_date_by_shift(trading_date, shift, future=False, clip_shift=True, freq="day", align: Optional[str] = None): """get trading date with shift bias will cur_date e.g. 
: shift == 1, return next trading date shift == -1, return previous trading date ---------- trading_date : pandas.Timestamp current date shift : int clip_shift: bool align : Optional[str] When align is None, this function will raise ValueError if `trading_date` is not a trading date when align is "left"/"right", it will try to align to left/right nearest trading date before shifting when `trading_date` is not a trading date """ from qlib.data import D cal = D.calendar(future=future, freq=freq) trading_date = pd.to_datetime(trading_date) if align is None: if trading_date not in list(cal): raise ValueError("{} is not trading day!".format(str(trading_date))) _index = bisect.bisect_left(cal, trading_date) elif align == "left": _index = bisect.bisect_right(cal, trading_date) - 1 elif align == "right": _index = bisect.bisect_left(cal, trading_date) else: raise ValueError(f"align with value `{align}` is not supported") shift_index = _index + shift if shift_index < 0 or shift_index >= len(cal): if clip_shift: shift_index = np.clip(shift_index, 0, len(cal) - 1) else: raise IndexError(f"The shift_index({shift_index}) of the trading day ({trading_date}) is out of range") return cal[shift_index] def get_next_trading_date(trading_date, future=False): """get next trading date ---------- cur_date : pandas.Timestamp current date """ return get_date_by_shift(trading_date, 1, future=future) def get_pre_trading_date(trading_date, future=False): """get previous trading date ---------- date : pandas.Timestamp current date """ return get_date_by_shift(trading_date, -1, future=future) def transform_end_date(end_date=None, freq="day"): """handle the end date with various format If end_date is -1, None, or end_date is greater than the maximum trading day, the last trading date is returned. 
Otherwise, returns the end_date ---------- end_date: str end trading date date : pandas.Timestamp current date """ from ..data import D last_date = D.calendar(freq=freq)[-1] if end_date is None or (str(end_date) == "-1") or (pd.Timestamp(last_date) < pd.Timestamp(end_date)): log.warning( "\nInfo: the end_date in the configuration file is {}, " "so the default last date {} is used.".format(end_date, last_date) ) end_date = last_date return end_date def get_date_in_file_name(file_name): """Get the date(YYYY-MM-DD) written in file name Parameter file_name : str :return date : str 'YYYY-MM-DD' """ pattern = "[0-9]{4}-[0-9]{2}-[0-9]{2}" date = re.search(pattern, str(file_name)).group() return date def split_pred(pred, number=None, split_date=None): """split the score file into two part Parameter --------- pred : pd.DataFrame (index:<instrument, datetime>) A score file of stocks number: the number of dates for pred_left split_date: the last date of the pred_left Return ------- pred_left : pd.DataFrame (index:<instrument, datetime>) The first part of original score file pred_right : pd.DataFrame (index:<instrument, datetime>) The second part of original score file """ if number is None and split_date is None: raise ValueError("`number` and `split date` cannot both be None") dates = sorted(pred.index.get_level_values("datetime").unique()) dates = list(map(pd.Timestamp, dates)) if split_date is None: date_left_end = dates[number - 1] date_right_begin = dates[number] date_left_start = None else: split_date = pd.Timestamp(split_date) date_left_end = split_date date_right_begin = split_date + pd.Timedelta(days=1) if number is None: date_left_start = None else: end_idx = bisect.bisect_right(dates, split_date) date_left_start = dates[end_idx - number] pred_temp = pred.sort_index() pred_left = pred_temp.loc(axis=0)[:, date_left_start:date_left_end] pred_right = pred_temp.loc(axis=0)[:, date_right_begin:] return pred_left, pred_right def time_to_slc_point(t: Union[None, str, pd.Timestamp]) -> Union[None, pd.Timestamp]: """ Time slicing in Qlib or Pandas is a frequently-used action. However, user often input all kinds of data format to represent time. This function will help user to convert these inputs into a uniform format which is friendly to time slicing. Parameters ---------- t : Union[None, str, pd.Timestamp] original time Returns ------- Union[None, pd.Timestamp]: """ if t is None: # None represents unbounded in Qlib or Pandas(e.g. df.loc[slice(None, "20210303")]). 
return t else: return pd.Timestamp(t) def can_use_cache(): res = True r = get_redis_connection() try: r.client() except redis.exceptions.ConnectionError: res = False finally: r.close() return res def exists_qlib_data(qlib_dir): qlib_dir = Path(qlib_dir).expanduser() if not qlib_dir.exists(): return False calendars_dir = qlib_dir.joinpath("calendars") instruments_dir = qlib_dir.joinpath("instruments") features_dir = qlib_dir.joinpath("features") # check dir for _dir in [calendars_dir, instruments_dir, features_dir]: if not (_dir.exists() and list(_dir.iterdir())): return False # check calendar bin for _calendar in calendars_dir.iterdir(): if ("_future" not in _calendar.name) and ( not list(features_dir.rglob(f"*.{_calendar.name.split('.')[0]}.bin")) ): return False # check instruments code_names = set(map(lambda x: x.name.lower(), features_dir.iterdir())) _instrument = instruments_dir.joinpath("all.txt") miss_code = set(pd.read_csv(_instrument, sep="\t", header=None).loc[:, 0].apply(str.lower)) - set(code_names) if miss_code and any(map(lambda x: "sht" not in x, miss_code)): return False return True def check_qlib_data(qlib_config): inst_dir = Path(qlib_config["provider_uri"]).joinpath("instruments") for _p in inst_dir.glob("*.txt"): try: assert len(pd.read_csv(_p, sep="\t", nrows=0, header=None).columns) == 3, ( f"\nThe {str(_p.resolve())} of qlib data is not equal to 3 columns:" f"\n\tIf you are using the data provided by qlib: " f"https://qlib.readthedocs.io/en/latest/component/data.html#qlib-format-dataset" f"\n\tIf you are using your own data, please dump the data again: " f"https://qlib.readthedocs.io/en/latest/component/data.html#converting-csv-format-into-qlib-format" ) except AssertionError: raise def lazy_sort_index(df: pd.DataFrame, axis=0) -> pd.DataFrame: """ make the df index sorted df.sort_index() will take a lot of time even when `df.is_lexsorted() == True` This function could avoid such case Parameters ---------- df : pd.DataFrame Returns ------- pd.DataFrame: sorted dataframe """ idx = df.index if axis == 0 else df.columns # NOTE: MultiIndex.is_lexsorted() is a deprecated method in Pandas 1.3.0 and is suggested to be replaced by MultiIndex.is_monotonic_increasing (see discussion here: https://github.com/pandas-dev/pandas/issues/32259). However, in case older versions of Pandas is implemented, MultiIndex.is_lexsorted() is necessary to prevent certain fatal errors. if idx.is_monotonic_increasing and not (isinstance(idx, pd.MultiIndex) and not idx.is_lexsorted()): return df else: return df.sort_index(axis=axis) FLATTEN_TUPLE = "_FLATTEN_TUPLE" def flatten_dict(d, parent_key="", sep=".") -> dict: """ Flatten a nested dict. >>> flatten_dict({'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y' : 10}}, 'd': [1, 2, 3]}) >>> {'a': 1, 'c.a': 2, 'c.b.x': 5, 'd': [1, 2, 3], 'c.b.y': 10} >>> flatten_dict({'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y' : 10}}, 'd': [1, 2, 3]}, sep=FLATTEN_TUPLE) >>> {'a': 1, ('c','a'): 2, ('c','b','x'): 5, 'd': [1, 2, 3], ('c','b','y'): 10} Args: d (dict): the dict waiting for flatting parent_key (str, optional): the parent key, will be a prefix in new key. Defaults to "". sep (str, optional): the separator for string connecting. FLATTEN_TUPLE for tuple connecting. 
Returns: dict: flatten dict """ items = [] for k, v in d.items(): if sep == FLATTEN_TUPLE: new_key = (parent_key, k) if parent_key else k else: new_key = parent_key + sep + k if parent_key else k if isinstance(v, collections.abc.MutableMapping): items.extend(flatten_dict(v, new_key, sep=sep).items()) else: items.append((new_key, v)) return dict(items) #################### Wrapper ##################### class Wrapper: """Wrapper class for anything that needs to set up during qlib.init""" def __init__(self): self._provider = None def register(self, provider): self._provider = provider def __repr__(self): return "{name}(provider={provider})".format(name=self.__class__.__name__, provider=self._provider) def __getattr__(self, key): if self.__dict__.get("_provider", None) is None: raise AttributeError("Please run qlib.init() first using qlib") return getattr(self._provider, key) def register_wrapper(wrapper, cls_or_obj, module_path=None): """register_wrapper :param wrapper: A wrapper. :param cls_or_obj: A class or class name or object instance. """ if isinstance(cls_or_obj, str): module = get_module_by_module_path(module_path) cls_or_obj = getattr(module, cls_or_obj) obj = cls_or_obj() if isinstance(cls_or_obj, type) else cls_or_obj wrapper.register(obj) def load_dataset(path_or_obj, index_col=[0, 1]): """load dataset from multiple file formats""" if isinstance(path_or_obj, pd.DataFrame): return path_or_obj if not os.path.exists(path_or_obj): raise ValueError(f"file {path_or_obj} doesn't exist") _, extension = os.path.splitext(path_or_obj) if extension == ".h5": return pd.read_hdf(path_or_obj) elif extension == ".pkl": return pd.read_pickle(path_or_obj) elif extension == ".csv": return
pd.read_csv(path_or_obj, parse_dates=True, index_col=index_col)
pandas.read_csv
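A minimal, self-contained sketch (not part of the dataset row) of the pandas.read_csv call this row's completion supplies, mirroring load_dataset's CSV branch; the file name and columns are hypothetical.

import pandas as pd

# Hypothetical score file with an (instrument, datetime) two-level index, as load_dataset expects.
df = pd.DataFrame(
    {"instrument": ["SH600000", "SH600000"], "datetime": ["2020-01-02", "2020-01-03"], "score": [0.1, -0.2]}
)
df.to_csv("scores.csv", index=False)

# parse_dates=True tries to parse the index; index_col=[0, 1] rebuilds the two-level index.
loaded = pd.read_csv("scores.csv", parse_dates=True, index_col=[0, 1])
print(loaded)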
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Mon Nov 19 17:50:36 2018 @author: Sinnik """ import pickle import pandas as pd import numpy as np from sklearn.preprocessing import StandardScaler, PolynomialFeatures from sklearn.linear_model import LinearRegression from sklearn.model_selection import cross_val_score, GridSearchCV from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor with open("./clean_train.pkl", 'rb') as f: clean_train_df = pickle.load(f) # Standardize the data, so that they have the same scale scaler = StandardScaler() scaler.fit(clean_train_df) standardized_clean_train_df = scaler.transform(clean_train_df) standardized_clean_train_df = pd.DataFrame(standardized_clean_train_df, columns = clean_train_df.columns) lin_reg = LinearRegression() lin_reg.fit(self.clean_train_df.drop("load", axis=1), self.clean_train_df[["load"]]) scores_lin = cross_val_score(lin_reg, standardized_clean_train_df[["temperature"]], standardized_clean_train_df[["load"]], scoring = "r2", cv = 10) print(np.mean(scores_lin)) tree_reg = DecisionTreeRegressor() scores_tree = cross_val_score(tree_reg, standardized_clean_train_df[["temperature"]], standardized_clean_train_df[["load"]], scoring = "r2", cv = 10) print(np.mean(scores_tree)) forest_reg = RandomForestRegressor() scores_forest = cross_val_score(forest_reg, standardized_clean_train_df[["temperature"]], standardized_clean_train_df[["load"]], scoring = "r2", cv = 10) print(np.mean(scores_forest)) param_grid = [{'n_estimators': [40], 'max_features':[13]}] grid_search = GridSearchCV(forest_reg, param_grid, cv = 5, scoring = 'r2') grid_search.fit(test.drop("load", axis=1), test[["load"]]) grid_search.best_params_ grid_search.cv_results_ feature_importances = grid_search.best_estimator_.feature_importances_ sorted(zip(feature_importances, list(test.drop("load", axis=1).columns)),reverse = True) forest_reg = RandomForestRegressor(n_estimators= 40) scores_forest = cross_val_score(forest_reg, test[['temperature','Sunday', 'Monday', 'Tuesday', 'Wednesday','Thursday', 'Friday', 'Saturday', 'summer']],test[["load"]], scoring = "r2", cv = 10) print(np.mean(scores_forest)) scores_forest = cross_val_score(forest_reg, standardized_clean_train_df[["temperature"]],standardized_clean_train_df[["load"]], scoring = "r2", cv = 10) print(np.mean(scores_forest)) poly = PolynomialFeatures(2) pol_features = poly.fit_transform(clean_train_df[['temperature']]) pol_features_df = pd.DataFrame(pol_features[:,2], columns = ["temperature_squared"]) pol_features_df.index = clean_train_df.index clean_train_poly_df = clean_train_df.join(pol_features_df) lin_reg = LinearRegression() scores_lin = cross_val_score(lin_reg, clean_train_poly_df[["temperature", "temperature_squared"]], clean_train_poly_df[["load"]], scoring = "r2", cv = 10) print(np.mean(scores_lin)) scores_tree = cross_val_score(tree_reg, clean_train_poly_df[["temperature", "temperature_squared"]], clean_train_poly_df[["load"]], scoring = "r2", cv = 10) print(np.mean(scores_tree)) scores_forest = cross_val_score(forest_reg, clean_train_poly_df[["temperature", "temperature_squared"]], clean_train_poly_df[["load"]], scoring = "r2", cv = 10) print(np.mean(scores_forest)) # Using the squared temperature really didn't help. 
Will look for other features ####### Try to re-do the feature engineering with sklearn's OneHotEncoder # Feature 1 day of the week clean_train_df['day_of_week'] = clean_train_df.index.dayofweek.astype('category', copy = False) clean_train_df = pd.get_dummies(clean_train_df) '''clean_train_df = clean_train_df.drop(['day_of_week_0', 'day_of_week_1', 'day_of_week_2', 'day_of_week_3', 'day_of_week_4', 'day_of_week_5', 'day_of_week_6'], axis = 1)''' scores_lin = cross_val_score(lin_reg, clean_train_df[["temperature",'day_of_week_0', 'day_of_week_1', 'day_of_week_2', 'day_of_week_3', 'day_of_week_4', 'day_of_week_5', 'day_of_week_6']], clean_train_df[["load"]], scoring = "r2", cv = 10) print(np.mean(scores_lin)) scores_tree = cross_val_score(tree_reg, clean_train_df[["temperature", 'day_of_week_0', 'day_of_week_1', 'day_of_week_2', 'day_of_week_3', 'day_of_week_4', 'day_of_week_5', 'day_of_week_6']], clean_train_df[["load"]], scoring = "r2", cv = 10) print(np.mean(scores_tree)) scores_forest = cross_val_score(forest_reg, clean_train_df[["temperature", 'day_of_week_0', 'day_of_week_1', 'day_of_week_2', 'day_of_week_3', 'day_of_week_4', 'day_of_week_5', 'day_of_week_6']], clean_train_df[["load"]], scoring = "r2", cv = 10) print(np.mean(scores_forest)) # Feature 2 time of the day # It is assumed that gas and electricity consumption follow a similar pattern # So, from 0-5 early morning, 6-7 morning ramp, 8-19 working hours, 20-23 nighttime clean_train_df['time'] = clean_train_df.index.hour conditions = [(clean_train_df['time'] >= 0) & (clean_train_df['time'] <= 5), (clean_train_df['time'] >= 6) & (clean_train_df['time'] <= 7),(clean_train_df['time'] >= 8) & (clean_train_df['time'] <= 19),(clean_train_df['time'] >= 20) & (clean_train_df['time'] <= 23)] choices = ['early_morning', 'morning_ramp', 'working_hours', 'night_time'] clean_train_df['time_of_day'] = np.select(conditions, choices) clean_train_df =
pd.get_dummies(clean_train_df)
pandas.get_dummies
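A small illustrative sketch (not from the row above) of the pandas.get_dummies step the completion fills in, using a hypothetical hourly load/temperature frame.

import numpy as np
import pandas as pd

# Hypothetical hourly frame, mirroring the day-of-week feature-engineering step above.
idx = pd.date_range("2018-01-01", periods=168, freq="H")
df = pd.DataFrame({"load": np.random.rand(168), "temperature": np.random.rand(168)}, index=idx)

# Encode the day of week as a categorical, then one-hot it: get_dummies replaces the
# 'day_of_week' column with day_of_week_0 ... day_of_week_6 indicator columns.
df["day_of_week"] = df.index.dayofweek.astype("category")
df = pd.get_dummies(df)
print(df.columns.tolist())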
import os import pandas as pd import datetime import numpy as np from talib import abstract from .crawler import check_monthly_revenue class Data(): def __init__(self): self.date = datetime.datetime.now().date() self.warrning = False self.col2table = {} tnames = os.listdir(os.path.join('history', 'items')) for tname in tnames: path = os.path.join('history', 'items', tname) if not os.path.isdir(path): continue items = [f[:-4] for f in os.listdir(path)] for item in items: if item not in self.col2table: self.col2table[item] = [] self.col2table[item].append(tname) def get(self, name, amount=0, table=None, convert_to_numeric=True): if table is None: candidates = self.col2table[name] if len(candidates) > 1 and self.warrning: print('**WARRN there are tables have the same item', name, ':', candidates) print('** take', candidates[0]) print('** please specify the table name as an argument if you need the file from another table') for c in candidates: print('** data.get(', name, ',',amount, ', table=', c, ')') table = candidates[0] df = pd.read_pickle(os.path.join('history', 'items', table, name + '.pkl')) return df.loc[:self.date.strftime("%Y-%m-%d")].iloc[-amount:] def talib(self, func_name, amount=0, **args): func = getattr(abstract, func_name) isSeries = True if len(func.output_names) == 1 else False names = func.output_names if isSeries: dic = {} else: dics = {n:{} for n in names} close = self.get('收盤價', amount) open_ = self.get('開盤價', amount) high = self.get('最高價', amount) low = self.get('最低價', amount) volume= self.get('成交股數', amount) for key in close.columns: try: s = func({'open':open_[key].ffill(), 'high':high[key].ffill(), 'low':low[key].ffill(), 'close':close[key].ffill(), 'volume':volume[key].ffill()}, **args) except Exception as e: if "inputs are all NaN" != str(e): print('Warrning occur during calculating stock '+key+':',e) print('The indicator values are set to NaN.') if isSeries: s = pd.Series(index=close[key].index) else: s = pd.DataFrame(index=close[key].index, columns=dics.keys()) if isSeries: dic[key] = s else: for colname, si in zip(names, s): dics[colname][key] = si if isSeries: ret = pd.DataFrame(dic, index=close.index) ret = ret.apply(lambda s:pd.to_numeric(s, errors='coerce')) else: newdic = {} for key, dic in dics.items(): newdic[key] =
pd.DataFrame(dic, close.index)
pandas.DataFrame
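A minimal sketch (assumed data, not from the row) of the pd.DataFrame(dic, index) pattern the completion uses to assemble per-stock indicator Series into one wide frame.

import pandas as pd

# Hypothetical per-stock indicator Series keyed by ticker, all sharing one date index.
dates = pd.date_range("2021-01-01", periods=3)
dic = {
    "2330": pd.Series([1.0, 2.0, 3.0], index=dates),
    "2317": pd.Series([4.0, 5.0, 6.0], index=dates),
}

# Passing the index explicitly (as pd.DataFrame(dic, close.index) does above) aligns every
# column to the same calendar; dates missing from a Series become NaN.
wide = pd.DataFrame(dic, index=dates)
print(wide)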
# -*- coding: utf-8 -*-
"""
#Handling Missing Values in Pandas
* Tutorial: https://news.towardsai.net/hmv
* Github
"""

#Import Required Libraries:
import numpy as np
import pandas as pd

#Scalar arguments:
#Numerical value
pd.notna(28)

#Scalar arguments:
#String value
pd.notna("Pratik")

#Scalar arguments:
#Empty strings are not considered as NA values
pd.notna("")

#Scalar arguments:
#Infinite values are not considered as NA values
pd.notna(np.inf)

#Scalar arguments:
#NaN: Not a Number
pd.notna(np.NaN)

#Scalar arguments:
#None
pd.notna(None)

#Scalar arguments:
#NA: Not Available
pd.notna(pd.NA)

#Scalar arguments:
#NaT: Not a Timestamp
pd.notna(pd.NaT)

#nd-arrays:
arr = np.array([1, 2, "Blue"])
print(arr)
print("\n")
pd.notna(arr)

#nd-arrays:
#Empty strings are not considered as NA values
arr = np.array([[1, 2, None], [3, 4, pd.NA], [5, np.NaN, 6], ["", 7, 8], ["Blue", pd.NaT, "Red"]])
print(arr)
print("\n")
pd.notna(arr)

#For index values:
id = pd.Index([1, 2, np.NaN, "Blue"])
print(id)
print("\n")
pd.notna(id)

#For index values:
id = pd.DatetimeIndex([
pd.Timestamp("2020-10-28")
pandas.Timestamp
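A short sketch (values assumed) showing the pd.Timestamp value the completion inserts into the pd.DatetimeIndex([...]) call above, and how pd.notna treats it versus NaT.

import pandas as pd

# pd.Timestamp is a concrete (non-missing) datetime value; pd.NaT is the datetime NA marker.
idx = pd.DatetimeIndex([pd.Timestamp("2020-10-28"), pd.NaT])
print(idx)
print(pd.notna(idx))  # -> [ True False]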
from __future__ import print_function import copy import matplotlib.pyplot as plt import numba as nb import numpy as np import pandas as pd from astromodels import Model, PointSource from threeML.classicMLE.goodness_of_fit import GoodnessOfFit from threeML.classicMLE.joint_likelihood import JointLikelihood from threeML.data_list import DataList from threeML.io.logging import setup_logger from threeML.io.package_data import get_path_of_data_file from threeML.plugin_prototype import PluginPrototype from threeML.utils.statistics.likelihood_functions import ( half_chi2, poisson_log_likelihood_ideal_bkg) plt.style.use(str(get_path_of_data_file("threeml.mplstyle"))) log = setup_logger(__name__) __instrument_name = "n.a." class XYLike(PluginPrototype): def __init__( self, name, x, y, yerr=None, poisson_data=False, exposure=None, quiet=False, source_name=None, ): nuisance_parameters = {} super(XYLike, self).__init__(name, nuisance_parameters) # Make x and y always arrays so we can handle them always in the same way # even if they have only one element self._x = np.array(x, ndmin=1) self._y = np.array(y, ndmin=1) # If there are specified errors, use those (assume Gaussian statistic) # otherwise make sure that the user specified poisson_error = True and use # Poisson statistic if yerr is not None: self._yerr = np.array(yerr, ndmin=1) assert np.all(self._yerr > 0), "Errors cannot be negative or zero." log.info( "Using Gaussian statistic (equivalent to chi^2) with the provided errors." ) self._is_poisson = False self._has_errors = True elif not poisson_data: self._yerr = np.ones_like(self._y) self._is_poisson = False self._has_errors = False log.info("Using unweighted Gaussian (equivalent to chi^2) statistic.") else: log.info("Using Poisson log-likelihood") self._is_poisson = True self._yerr = None self._has_errors = True self._y = self._y.astype(np.int64) self._zeros = np.zeros_like(self._y) # sets the exposure assuming eval at center # of bin. 
this should probably be improved # with a histogram plugin if exposure is None: self._has_exposure: bool = False self._exposure = np.ones(len(self._x)) else: self._has_exposure: bool = True self._exposure = exposure # This will keep track of the simulated datasets we generate self._n_simulated_datasets = 0 # This will contain the JointLikelihood object after a call to .fit() self._joint_like_obj = None self._likelihood_model = None # currently not used by XYLike, but needed for subclasses self._mask = np.ones(self._x.shape, dtype=bool) # This is the name of the source this SED refers to (if it is a SED) self._source_name = source_name @classmethod def from_function(cls, name, function, x, yerr=None, exposure=None, **kwargs): """ Generate an XYLike plugin from an astromodels function instance :param name: name of plugin :param function: astromodels function instance :param x: where to simulate :param yerr: y errors or None for Poisson data :param kwargs: kwargs from xylike constructor :return: XYLike plugin """ y = function(x) xyl_gen = XYLike("generator", x, y, yerr=yerr, exposure=exposure, **kwargs) pts = PointSource("fake", 0.0, 0.0, function) model = Model(pts) xyl_gen.set_model(model) return xyl_gen.get_simulated_dataset(name) @classmethod def from_dataframe( cls, name, dataframe, x_column="x", y_column="y", err_column="yerr", poisson=False, ): """ Generate a XYLike instance from a Pandas.DataFrame instance :param name: the name for the XYLike instance :param dataframe: the input data frame :param x_column: name of the column to be used as x (default: 'x') :param y_column: name of the column to be used as y (default: 'y') :param err_column: name of the column to be used as error on y (default: 'yerr') :param poisson: if True, then the err_column is ignored and data are treated as Poisson distributed :return: a XYLike instance """ x = dataframe[x_column] y = dataframe[y_column] if poisson is False: yerr = dataframe[err_column] if np.all(yerr == -99): # This is a dataframe generate with the to_dataframe method, which uses -99 to indicate that the # data are Poisson return cls(name, x=x, y=y, poisson_data=True) else: # A dataset with errors return cls(name, x=x, y=y, yerr=yerr) else: return cls(name, x=x, y=y, poisson_data=True) @classmethod def from_text_file(cls, name, filename): """ Instance the plugin starting from a text file generated with the .to_txt() method. Note that a more general way of creating a XYLike instance from a text file is to read the file using pandas.DataFrame.from_csv, and then use the .from_dataframe method of the XYLike plugin: > df = pd.DataFrame.from_csv(filename, ...) > xyl = XYLike.from_dataframe("my instance", df) :param name: the name for the new instance :param filename: path to the file :return: """ df = pd.read_csv(filename, sep=" ") return cls.from_dataframe(name, df) def to_dataframe(self): """ Returns a pandas.DataFrame instance with the data in the 'x', 'y', and 'yerr' column. If the data are Poisson, the yerr column will be -99 for every entry :return: a pandas.DataFrame instance """ x_series =
pd.Series(self.x, name="x")
pandas.Series
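A hedged sketch of the named-Series pattern the completion starts (pd.Series(..., name="x")); the x/y/yerr values are hypothetical, and the concat step is one plausible way a to_dataframe-style method could finish.

import numpy as np
import pandas as pd

# Hypothetical data points in the XYLike spirit: x, y, and Gaussian errors on y.
x = np.array([1.0, 2.0, 3.0])
y = np.array([10.0, 12.0, 9.0])
yerr = np.array([0.5, 0.4, 0.6])

# Named Series become column names when concatenated along axis=1.
x_series = pd.Series(x, name="x")
y_series = pd.Series(y, name="y")
yerr_series = pd.Series(yerr, name="yerr")
print(pd.concat([x_series, y_series, yerr_series], axis=1))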
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import csv


def plot_FA(inputfiles, show=False):
    var1, var2, var3 = 'Time', 'Environment', 'Treatment'  # x, color, shade
    filtervar, filterterm = 'Cell Line', 'HT1080'
    peak_per_exp = []
    area_per_exp = []
    for inp in inputfiles:
        data =
pd.read_csv(inp)
pandas.read_csv
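A brief sketch (file name and columns are hypothetical) of the plain pd.read_csv call the completion supplies, followed by the kind of row filtering plot_FA applies with filtervar/filterterm.

import pandas as pd

# Hypothetical measurement file containing the columns plot_FA groups and filters on.
raw = pd.DataFrame({
    "Time": [0, 24, 24], "Environment": ["2D", "3D", "3D"],
    "Treatment": ["ctrl", "ctrl", "drug"], "Cell Line": ["HT1080", "HT1080", "U2OS"],
    "area": [1.2, 2.3, 0.9],
})
raw.to_csv("fa_measurements.csv", index=False)

# Read one input file, then keep only the rows matching the filter term.
data = pd.read_csv("fa_measurements.csv")
data = data[data["Cell Line"] == "HT1080"]
print(data)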
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest

column_name_list = ["class", 'cap-shape', 'cap-surface', 'cap-color', 'bruises', 'odor',
                    'gill-attachment', 'gill-spacing', 'gill-size', 'gill-color', 'stalk-shape',
                    'stalk-surface-above-ring', 'stalk-surface-below-ring', 'stalk-color-above-ring',
                    'stalk-color-below-ring', 'veil-type', 'veil-color', 'ring-number', 'ring-type',
                    'spore-print-color', 'population', 'habitat']


def readCsv(csvFile):
    return pd.read_csv(csvFile, names=column_name_list)


def preprocessData(df):
    X = df.drop(['odor'], axis=1)
    y = pd.DataFrame(df['odor'])
    converted_X =
pd.get_dummies(X)
pandas.get_dummies
import os
import shutil
import pandas as pd
import joblib, logging, sys, datetime, tarfile
import numpy as np
import email
import imaplib
from credentials import Credentials, JsonFileBackend
from joblib import Parallel, delayed

if sys.platform == 'linux':
    file_cred = '~/filemail.json'
    path_nwp = '/media/smartrue/HHD2/ECMWF'
    import pygrib
else:
    file_cred = 'D:/Dropbox/current_codes/PycharmProjects/forecasting_platform/filemail.json'
    path_nwp = 'D:/Dropbox/ECMWF'
    import cfgrib


class ecmwf_Extractor():

    def __init__(self, projects_group, pathnwp, nwp_resolution, path_nwp_group, dates_ts, area_group, njobs=1):
        self.pathnwp = pathnwp
        self.pathnwp_group = path_nwp_group
        self.nwp_resolution = nwp_resolution
        self.area = area_group
        self.projects_group = projects_group
        self.njobs = njobs
        self.create_logger()
        if isinstance(dates_ts, pd.Timestamp):
            self.dates_ts = dates_ts
        else:
            self.dates_ts = self.check_dates(dates_ts)

    def create_logger(self):
        self.logger = logging.getLogger('log_' + self.projects_group + '.log')
        self.logger.setLevel(logging.INFO)
        handler = logging.FileHandler(os.path.join(os.path.dirname(self.pathnwp_group),
                                                   'log_' + self.projects_group + '.log'), 'a')
        handler.setLevel(logging.INFO)
        # create a logging format
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        # add the handlers to the logger
        self.logger.addHandler(handler)

    def check_dates(self, dates_ts):
        start_date = pd.to_datetime(dates_ts[0].strftime('%d%m%y'), format='%d%m%y')
        end_date = pd.to_datetime(dates_ts[-1].strftime('%d%m%y'), format='%d%m%y')
        dates = pd.date_range(start_date, end_date)
        data_dates = pd.to_datetime(np.unique(dates_ts.strftime('%d%m%y')), format='%d%m%y')
        dates = [d for d in dates if d in data_dates]
        self.logger.info('Dates is checked. Number of time samples %s', str(len(dates)))
        return
pd.DatetimeIndex(dates)
pandas.DatetimeIndex
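A minimal sketch (dates assumed) of the pd.DatetimeIndex(...) value check_dates returns after intersecting the requested range with the dates actually present in the data.

import pandas as pd

# Hypothetical requested range and the dates actually present in the time series.
requested = pd.date_range("2021-06-01", "2021-06-05")
available = pd.to_datetime(["2021-06-01", "2021-06-03", "2021-06-05"])

# Keep only requested days that exist in the data, then wrap them back into a DatetimeIndex,
# which is what check_dates returns.
dates = [d for d in requested if d in available]
index = pd.DatetimeIndex(dates)
print(index)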
import streamlit as st
import pandas as pd
from pyvis.network import Network
import networkx as nx
import matplotlib.pyplot as plt
import bz2
import pickle
import _pickle as cPickle
import pydot
import math
import numpy as num

def decompress_pickle(file):
    data = bz2.BZ2File(file, 'rb')
    data = cPickle.load(data)
    return data

uploaded_files = st.sidebar.file_uploader("Choose files", accept_multiple_files=True)

# sidebar for navigating pages
page_nav = st.sidebar.selectbox("Select view:", ('Document overviews', 'Focus concepts', 'Path views',
                                                 'Active Study view', 'Study phenomena', 'Study sets'))

@st.cache
def do_this_first(uploaded_files):
    #st.write(st.__version__)
    # Load any compressed pickle file
    # for uploaded_file in uploaded_files:
    #     concepts = decompress_pickle(uploaded_file)
    #     st.write("filename:", uploaded_file.name)
    filenames = [file.name for file in uploaded_files]  # return this
    import pandas as pd
    Agg_Conceptdata =
pd.DataFrame()
pandas.DataFrame
import dash import dash_core_components as dcc import dash_html_components as html import pandas as pd from dash.dependencies import Input, Output import dash_table app=dash.Dash(__name__) titulo=html.H1("Modelo de Jerarquía Analítica AHP",style={'text-align':'center','font-family':'Arial Black','color':'blue'}) subtitulo=html.H2("Cuatro Criterios / Tres Alternativas",style={'text-align':'center','font-family':'Arial Black'}) nombre=html.H3('<NAME>',style={'font-family':'Arial'}) universidad=html.H3('Universidad Santiago de Cali',style={'font-family':'Arial'}) #Uso de Callbacks A=dcc.Input(id='criterio1',value=1,type='number') B=dcc.Input(id='criterio2',value=1,type='number') C=dcc.Input(id='criterio3',value=1,type='number') D=dcc.Input(id='criterio4',value=1,type='number') E=dcc.Input(id='criterio5',value=1,type='number') F=dcc.Input(id='criterio6',value=1,type='number') #---------------------------------------------------------------------------- G=dcc.Input(id='c1alternativa1',value=1,type='number') H=dcc.Input(id='c1alternativa2',value=1,type='number') I=dcc.Input(id='c1alternativa3',value=1,type='number') #---------------------------------------------------------------------------- J=dcc.Input(id='c2alternativa1',value=1,type='number') K=dcc.Input(id='c2alternativa2',value=1,type='number') L=dcc.Input(id='c2alternativa3',value=1,type='number') #---------------------------------------------------------------------------- M=dcc.Input(id='c3alternativa1',value=1,type='number') N=dcc.Input(id='c3alternativa2',value=1,type='number') O=dcc.Input(id='c3alternativa3',value=1,type='number') #---------------------------------------------------------------------------- P=dcc.Input(id='c4alternativa1',value=1,type='number') Q=dcc.Input(id='c4alternativa2',value=1,type='number') R=dcc.Input(id='c4alternativa3',value=1,type='number') #---------------------------------------------------------------------------- resultado1=html.H3(id='micriterio1',children='') texto1=html.H3(id='micriterio2',children='') resultadotabla1=html.Div(id='tabla1criterio',children=dash_table.DataTable()) resultadotabla2=html.Div(id='tabla2criterio',children=dash_table.DataTable()) #----------------------------------------------------------------------------- resultado2=html.H3(id='c1mialternativa1',children='') texto2=html.H3(id='c1mialternativa2',children='') resulc1alter1=html.Div(id='alternc1resul1',children=dash_table.DataTable()) resulc1alter2=html.Div(id='alternc1resul2',children=dash_table.DataTable()) #----------------------------------------------------------------------------- resultado3=html.H3(id='c2mialternativa1',children='') texto3=html.H3(id='c2mialternativa2',children='') resulc2alter1=html.Div(id='alternc2resul1',children=dash_table.DataTable()) resulc2alter2=html.Div(id='alternc2resul2',children=dash_table.DataTable()) #----------------------------------------------------------------------------- resultado4=html.H3(id='c3mialternativa1',children='') texto4=html.H3(id='c3mialternativa2',children='') resulc3alter1=html.Div(id='alternc3resul1',children=dash_table.DataTable()) resulc3alter2=html.Div(id='alternc3resul2',children=dash_table.DataTable()) #----------------------------------------------------------------------------- resultado5=html.H3(id='c4mialternativa1',children='') texto5=html.H3(id='c4mialternativa2',children='') resulc4alter1=html.Div(id='alternc4resul1',children=dash_table.DataTable()) resulc4alter2=html.Div(id='alternc4resul2',children=dash_table.DataTable()) 
#----------------------------------------------------------------------------- resulc8alter80=html.H3(id='tablitafinal0',children='') resulc8alter81=html.H3(id='tablitafinal1',children='',style={'color':'red'}) resulc8alter82=html.H3(id='tablitafinal2',children='') @app.callback([Output(component_id='micriterio1',component_property='children'), Output(component_id='micriterio2',component_property='children'), Output(component_id='tabla1criterio',component_property='children'), Output(component_id='tabla2criterio',component_property='children'), Output(component_id='c1mialternativa1',component_property='children'), Output(component_id='c1mialternativa2',component_property='children'), Output(component_id='alternc1resul1',component_property='children'), Output(component_id='alternc1resul2',component_property='children'), Output(component_id='c2mialternativa1',component_property='children'), Output(component_id='c2mialternativa2',component_property='children'), Output(component_id='alternc2resul1',component_property='children'), Output(component_id='alternc2resul2',component_property='children'), Output(component_id='c3mialternativa1',component_property='children'), Output(component_id='c3mialternativa2',component_property='children'), Output(component_id='alternc3resul1',component_property='children'), Output(component_id='alternc3resul2',component_property='children'), Output(component_id='c4mialternativa1',component_property='children'), Output(component_id='c4mialternativa2',component_property='children'), Output(component_id='alternc4resul1',component_property='children'), Output(component_id='alternc4resul2',component_property='children'), Output(component_id='tablitafinal0',component_property='children'), Output(component_id='tablitafinal1',component_property='children'), Output(component_id='tablitafinal2',component_property='children'),], [Input(component_id='criterio1',component_property='value'), Input(component_id='criterio2',component_property='value'), Input(component_id='criterio3',component_property='value'), Input(component_id='criterio4',component_property='value'), Input(component_id='criterio5',component_property='value'), Input(component_id='criterio6',component_property='value'), Input(component_id='c1alternativa1',component_property='value'), Input(component_id='c1alternativa2',component_property='value'), Input(component_id='c1alternativa3',component_property='value'), Input(component_id='c2alternativa1',component_property='value'), Input(component_id='c2alternativa2',component_property='value'), Input(component_id='c2alternativa3',component_property='value'), Input(component_id='c3alternativa1',component_property='value'), Input(component_id='c3alternativa2',component_property='value'), Input(component_id='c3alternativa3',component_property='value'), Input(component_id='c4alternativa1',component_property='value'), Input(component_id='c4alternativa2',component_property='value'), Input(component_id='c4alternativa3',component_property='value'),]) def miFuncion(dato1,dato2,dato3,dato4,dato5,dato6,c1dato1,c1dato2,c1dato3,c2dato1,c2dato2,c2dato3,c3dato1,c3dato2,c3dato3,c4dato1,c4dato2,c4dato3): #Matriz comparacion criterios por pares diag=1 C1_C2=dato1 C1_C3=dato2 C1_C4=dato3 C2_C3=dato4 C2_C4=dato5 C3_C4=dato6 C1_C2_Inv=1/C1_C2 C1_C3_Inv=1/C1_C3 C1_C4_Inv=1/C1_C4 C2_C3_Inv=1/C2_C3 C2_C4_Inv=1/C2_C4 C3_C4_Inv=1/C3_C4 matriz_inicial={ "C1":{"C1":diag,"C2":C1_C2,"C3":C1_C3,"C4":C1_C4}, "C2":{"C1":C1_C2_Inv,"C2":diag,"C3":C2_C3,"C4":C2_C4}, 
"C3":{"C1":C1_C3_Inv,"C2":C2_C3_Inv,"C3":diag,"C4":C3_C4}, "C4":{"C1":C1_C4_Inv,"C2":C2_C4_Inv,"C3":C3_C4_Inv,"C4":diag}, } #Primer Paso del AHP AHPStv_paso1=pd.DataFrame(matriz_inicial) AHPStv_paso1=AHPStv_paso1.T AHPStv_paso1_a=AHPStv_paso1.sum() #Paso1a AHPStv_paso1_a=pd.DataFrame(AHPStv_paso1_a) AHPStv_paso1_a=AHPStv_paso1_a.T AHPStv_paso1_b=AHPStv_paso1.div(AHPStv_paso1_a.iloc[0]) AHPStv_paso1_c=AHPStv_paso1_b.mean(axis=1) AHPStv_paso1_c=pd.DataFrame(AHPStv_paso1_c) AHPStv_paso1_c=AHPStv_paso1_c.T #Calculo del lambda max lambda_max=(AHPStv_paso1_a*AHPStv_paso1_c) lambda_max=lambda_max.sum(axis=1) CC=((lambda_max-4)/3)/0.900 respuesta1=html.H3(CC) tablacriterio1df=dash_table.DataTable( columns=[{'name':i,'id':i} for i in (AHPStv_paso1.columns)], data=AHPStv_paso1.to_dict('records'), ) tablacriterio2df=dash_table.DataTable( columns=[{'name':i,'id':i} for i in (AHPStv_paso1_c.columns)], data=AHPStv_paso1_c.to_dict('records'), ) #Matriz comparacion alternativas por pares diag=1 A1_A2=c1dato1 A1_A3=c1dato2 A2_A3=c1dato3 A1_A2_Inv=1/A1_A2 A1_A3_Inv=1/A1_A3 A2_A3_Inv=1/A2_A3 matriz_secundaria1={ 'A1':{'A1':diag,'A2':A1_A2,'A3':A1_A3}, 'A2':{'A1':A1_A2_Inv,'A2':diag,'A3':A2_A3}, 'A3':{'A1':A1_A3_Inv,'A2':A2_A3_Inv,'A3':diag}, } #Primer Paso del AHP AHPStv_c1paso2=pd.DataFrame(matriz_secundaria1) AHPStv_c1paso2=AHPStv_c1paso2.T AHPStv_c1paso2_a=AHPStv_c1paso2.sum() #Paso2a AHPStv_c1paso2_a=pd.DataFrame(AHPStv_c1paso2_a) AHPStv_c1paso2_a=AHPStv_c1paso2_a.T AHPStv_c1paso2_b=AHPStv_c1paso2.div(AHPStv_c1paso2_a.iloc[0]) AHPStv_c1paso2_c=AHPStv_c1paso2_b.mean(axis=1) AHPStv_c1paso2_c=pd.DataFrame(AHPStv_c1paso2_c) AHPStv_c1paso2_c=AHPStv_c1paso2_c.T #Calculo del lambda max lambda_max=(AHPStv_c1paso2_a*AHPStv_c1paso2_c) lambda_max=lambda_max.sum(axis=1) CC2=((lambda_max-3)/2)/0.58 respuesta2=html.H3(CC2) tablac1alternativa1df=dash_table.DataTable( columns=[{'name':i,'id':i} for i in (AHPStv_c1paso2.columns)], data=AHPStv_c1paso2.to_dict('records'), ) tablac1alternativa2df=dash_table.DataTable( columns=[{'name':i,'id':i} for i in (AHPStv_c1paso2_c.columns)], data=AHPStv_c1paso2_c.to_dict('records'), ) #Matriz comparacion alternativas por pares diag=1 A1_A2=c2dato1 A1_A3=c2dato2 A2_A3=c2dato3 A1_A2_Inv=1/A1_A2 A1_A3_Inv=1/A1_A3 A2_A3_Inv=1/A2_A3 matriz_secundaria2={ 'A1':{'A1':diag,'A2':A1_A2,'A3':A1_A3}, 'A2':{'A1':A1_A2_Inv,'A2':diag,'A3':A2_A3}, 'A3':{'A1':A1_A3_Inv,'A2':A2_A3_Inv,'A3':diag}, } #Primer Paso del AHP AHPStv_c2paso2=pd.DataFrame(matriz_secundaria2) AHPStv_c2paso2=AHPStv_c2paso2.T AHPStv_c2paso2_a=AHPStv_c2paso2.sum() #Paso2a AHPStv_c2paso2_a=pd.DataFrame(AHPStv_c2paso2_a) AHPStv_c2paso2_a=AHPStv_c2paso2_a.T AHPStv_c2paso2_b=AHPStv_c2paso2.div(AHPStv_c2paso2_a.iloc[0]) AHPStv_c2paso2_c=AHPStv_c2paso2_b.mean(axis=1) AHPStv_c2paso2_c=pd.DataFrame(AHPStv_c2paso2_c) AHPStv_c2paso2_c=AHPStv_c2paso2_c.T #Calculo del lambda max lambda_max=(AHPStv_c2paso2_a*AHPStv_c2paso2_c) lambda_max=lambda_max.sum(axis=1) CC3=((lambda_max-3)/2)/0.58 respuesta3=html.H3(CC3) tablac2alternativa1df=dash_table.DataTable( columns=[{'name':i,'id':i} for i in (AHPStv_c2paso2.columns)], data=AHPStv_c2paso2.to_dict('records'), ) tablac2alternativa2df=dash_table.DataTable( columns=[{'name':i,'id':i} for i in (AHPStv_c2paso2_c.columns)], data=AHPStv_c2paso2_c.to_dict('records'), ) #Matriz comparacion alternativas por pares diag=1 A1_A2=c3dato1 A1_A3=c3dato2 A2_A3=c3dato3 A1_A2_Inv=1/A1_A2 A1_A3_Inv=1/A1_A3 A2_A3_Inv=1/A2_A3 matriz_secundaria3={ 'A1':{'A1':diag,'A2':A1_A2,'A3':A1_A3}, 
'A2':{'A1':A1_A2_Inv,'A2':diag,'A3':A2_A3}, 'A3':{'A1':A1_A3_Inv,'A2':A2_A3_Inv,'A3':diag}, } #Primer Paso del AHP AHPStv_c3paso2=pd.DataFrame(matriz_secundaria3) AHPStv_c3paso2=AHPStv_c3paso2.T AHPStv_c3paso2_a=AHPStv_c3paso2.sum() #Paso2a AHPStv_c3paso2_a=pd.DataFrame(AHPStv_c3paso2_a) AHPStv_c3paso2_a=AHPStv_c3paso2_a.T AHPStv_c3paso2_b=AHPStv_c3paso2.div(AHPStv_c3paso2_a.iloc[0]) AHPStv_c3paso2_c=AHPStv_c3paso2_b.mean(axis=1) AHPStv_c3paso2_c=pd.DataFrame(AHPStv_c3paso2_c) AHPStv_c3paso2_c=AHPStv_c3paso2_c.T #Calculo del lambda max lambda_max=(AHPStv_c3paso2_a*AHPStv_c3paso2_c) lambda_max=lambda_max.sum(axis=1) CC4=((lambda_max-3)/2)/0.58 respuesta4=html.H3(CC4) tablac3alternativa1df=dash_table.DataTable( columns=[{'name':i,'id':i} for i in (AHPStv_c3paso2.columns)], data=AHPStv_c3paso2.to_dict('records'), ) tablac3alternativa2df=dash_table.DataTable( columns=[{'name':i,'id':i} for i in (AHPStv_c3paso2_c.columns)], data=AHPStv_c3paso2_c.to_dict('records'), ) #Matriz comparacion alternativas por pares diag=1 A1_A2=c4dato1 A1_A3=c4dato2 A2_A3=c4dato3 A1_A2_Inv=1/A1_A2 A1_A3_Inv=1/A1_A3 A2_A3_Inv=1/A2_A3 matriz_secundaria4={ 'A1':{'A1':diag,'A2':A1_A2,'A3':A1_A3}, 'A2':{'A1':A1_A2_Inv,'A2':diag,'A3':A2_A3}, 'A3':{'A1':A1_A3_Inv,'A2':A2_A3_Inv,'A3':diag}, } #Primer Paso del AHP AHPStv_c4paso2=pd.DataFrame(matriz_secundaria4) AHPStv_c4paso2=AHPStv_c4paso2.T AHPStv_c4paso2_a=AHPStv_c4paso2.sum() #Paso2a AHPStv_c4paso2_a=pd.DataFrame(AHPStv_c4paso2_a) AHPStv_c4paso2_a=AHPStv_c4paso2_a.T AHPStv_c4paso2_b=AHPStv_c4paso2.div(AHPStv_c4paso2_a.iloc[0]) AHPStv_c4paso2_c=AHPStv_c4paso2_b.mean(axis=1) AHPStv_c4paso2_c=
pd.DataFrame(AHPStv_c4paso2_c)
pandas.DataFrame
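The callback above repeats the same AHP priority-vector computation for each pairwise matrix; the following is a condensed sketch of that step with assumed pairwise values (not the app's inputs).

import pandas as pd

# Hypothetical 3x3 pairwise comparison matrix for alternatives A1..A3 (reciprocal by construction).
matrix = pd.DataFrame(
    [[1.0, 3.0, 5.0],
     [1 / 3, 1.0, 2.0],
     [1 / 5, 1 / 2, 1.0]],
    index=["A1", "A2", "A3"], columns=["A1", "A2", "A3"],
)

col_sums = matrix.sum()                     # column totals
normalized = matrix.div(col_sums, axis=1)   # normalize each column to sum to 1
priorities = normalized.mean(axis=1)        # row means -> priority vector
lambda_max = (col_sums * priorities).sum()  # estimate of the principal eigenvalue
cr = ((lambda_max - 3) / 2) / 0.58          # consistency ratio (random index 0.58 for n=3)
print(priorities)
print(cr)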
import calendar from ..utils import search_quote from datetime import datetime, timedelta from ..utils import process_dataframe_and_series import rich from jsonpath import jsonpath from retry import retry import pandas as pd import requests import multitasking import signal from tqdm import tqdm from typing import (Dict, List, Union) from ..shared import session from ..common import get_quote_history as get_quote_history_for_stock from ..common import get_history_bill as get_history_bill_for_stock from ..common import get_today_bill as get_today_bill_for_stock from ..common import get_realtime_quotes_by_fs from ..utils import (to_numeric, get_quote_id) from .config import EASTMONEY_STOCK_DAILY_BILL_BOARD_FIELDS, EASTMONEY_STOCK_BASE_INFO_FIELDS from ..common.config import ( FS_DICT, MARKET_NUMBER_DICT, EASTMONEY_REQUEST_HEADERS, EASTMONEY_QUOTE_FIELDS ) signal.signal(signal.SIGINT, multitasking.killall) @to_numeric def get_base_info_single(stock_code: str) -> pd.Series: """ 获取单股票基本信息 Parameters ---------- stock_code : str 股票代码 Returns ------- Series 单只股票基本信息 """ fields = ",".join(EASTMONEY_STOCK_BASE_INFO_FIELDS.keys()) secid = get_quote_id(stock_code) if not secid: return pd.Series(index=EASTMONEY_STOCK_BASE_INFO_FIELDS.values()) params = ( ('ut', 'fa5fd1943c7b386f172d6893dbfba10b'), ('invt', '2'), ('fltt', '2'), ('fields', fields), ('secid', secid), ) url = 'http://push2.eastmoney.com/api/qt/stock/get' json_response = session.get(url, headers=EASTMONEY_REQUEST_HEADERS, params=params).json() s = pd.Series(json_response['data']).rename( index=EASTMONEY_STOCK_BASE_INFO_FIELDS) return s[EASTMONEY_STOCK_BASE_INFO_FIELDS.values()] def get_base_info_muliti(stock_codes: List[str]) -> pd.DataFrame: """ 获取股票多只基本信息 Parameters ---------- stock_codes : List[str] 股票代码列表 Returns ------- DataFrame 多只股票基本信息 """ @multitasking.task @retry(tries=3, delay=1) def start(stock_code: str): s = get_base_info_single(stock_code) dfs.append(s) pbar.update() pbar.set_description(f'Processing => {stock_code}') dfs: List[pd.DataFrame] = [] pbar = tqdm(total=len(stock_codes)) for stock_code in stock_codes: start(stock_code) multitasking.wait_for_tasks() df = pd.DataFrame(dfs) df = df.dropna(subset=['股票代码']) return df @to_numeric def get_base_info(stock_codes: Union[str, List[str]]) -> Union[pd.Series, pd.DataFrame]: """ Parameters ---------- stock_codes : Union[str, List[str]] 股票代码或股票代码构成的列表 Returns ------- Union[Series, DataFrame] - ``Series`` : 包含单只股票基本信息(当 ``stock_codes`` 是字符串时) - ``DataFrane`` : 包含多只股票基本信息(当 ``stock_codes`` 是字符串列表时) Raises ------ TypeError 当 ``stock_codes`` 类型不符合要求时 Examples -------- >>> import efinance as ef >>> # 获取单只股票信息 >>> ef.stock.get_base_info('600519') 股票代码 600519 股票名称 贵州茅台 市盈率(动) 39.38 市净率 12.54 所处行业 酿酒行业 总市值 2198082348462.0 流通市值 2198082348462.0 板块编号 BK0477 ROE 8.29 净利率 54.1678 净利润 13954462085.610001 毛利率 91.6763 dtype: object >>> # 获取多只股票信息 >>> ef.stock.get_base_info(['600519','300715']) 股票代码 股票名称 市盈率(动) 市净率 所处行业 总市值 流通市值 板块编号 ROE 净利率 净利润 毛利率 0 300715 凯伦股份 42.29 3.12 水泥建材 9.160864e+09 6.397043e+09 BK0424 3.97 12.1659 5.415488e+07 32.8765 1 600519 贵州茅台 39.38 12.54 酿酒行业 2.198082e+12 2.198082e+12 BK0477 8.29 54.1678 1.395446e+10 91.6763 """ if isinstance(stock_codes, str): return get_base_info_single(stock_codes) elif hasattr(stock_codes, '__iter__'): return get_base_info_muliti(stock_codes) raise TypeError(f'所给的 {stock_codes} 不符合参数要求') def get_quote_history(stock_codes: Union[str, List[str]], beg: str = '19000101', end: str = '20500101', klt: int = 101, fqt: int = 1, **kwargs) -> 
Union[pd.DataFrame, Dict[str, pd.DataFrame]]: """ 获取股票的 K 线数据 Parameters ---------- stock_codes : Union[str,List[str]] 股票代码、名称 或者 股票代码、名称构成的列表 beg : str, optional 开始日期,默认为 ``'19000101'`` ,表示 1900年1月1日 end : str, optional 结束日期,默认为 ``'20500101'`` ,表示 2050年1月1日 klt : int, optional 行情之间的时间间隔,默认为 ``101`` ,可选示例如下 - ``1`` : 分钟 - ``5`` : 5 分钟 - ``15`` : 15 分钟 - ``30`` : 30 分钟 - ``60`` : 60 分钟 - ``101`` : 日 - ``102`` : 周 - ``103`` : 月 fqt : int, optional 复权方式,默认为 ``1`` ,可选示例如下 - ``0`` : 不复权 - ``1`` : 前复权 - ``2`` : 后复权 Returns ------- Union[DataFrame, Dict[str, DataFrame]] 股票的 K 线数据 - ``DataFrame`` : 当 ``stock_codes`` 是 ``str`` 时 - ``Dict[str, DataFrame]`` : 当 ``stock_codes`` 是 ``List[str]`` 时 Examples -------- >>> import efinance as ef >>> # 获取单只股票日 K 行情数据 >>> ef.stock.get_quote_history('600519') 股票名称 股票代码 日期 开盘 收盘 最高 最低 成交量 成交额 振幅 涨跌幅 涨跌额 换手率 0 贵州茅台 600519 2001-08-27 -89.74 -89.53 -89.08 -90.07 406318 1.410347e+09 -1.10 0.92 0.83 56.83 1 贵州茅台 600519 2001-08-28 -89.64 -89.27 -89.24 -89.72 129647 4.634630e+08 -0.54 0.29 0.26 18.13 2 贵州茅台 600519 2001-08-29 -89.24 -89.36 -89.24 -89.42 53252 1.946890e+08 -0.20 -0.10 -0.09 7.45 3 贵州茅台 600519 2001-08-30 -89.38 -89.22 -89.14 -89.44 48013 1.775580e+08 -0.34 0.16 0.14 6.72 4 贵州茅台 600519 2001-08-31 -89.21 -89.24 -89.12 -89.28 23231 8.623100e+07 -0.18 -0.02 -0.02 3.25 ... ... ... ... ... ... ... ... ... ... ... ... ... ... 4756 贵州茅台 600519 2021-07-23 1937.82 1900.00 1937.82 1895.09 47585 9.057762e+09 2.20 -2.06 -40.01 0.38 4757 贵州茅台 600519 2021-07-26 1879.00 1804.11 1879.00 1780.00 98619 1.789436e+10 5.21 -5.05 -95.89 0.79 4758 贵州茅台 600519 2021-07-27 1803.00 1712.89 1810.00 1703.00 86577 1.523081e+10 5.93 -5.06 -91.22 0.69 4759 贵州茅台 600519 2021-07-28 1703.00 1768.90 1788.20 1682.12 85369 1.479247e+10 6.19 3.27 56.01 0.68 4760 贵州茅台 600519 2021-07-29 1810.01 1749.79 1823.00 1734.34 63864 1.129957e+10 5.01 -1.08 -19.11 0.51 >>> # 获取多只股票历史行情 >>> stock_df = ef.stock.get_quote_history(['600519','300750']) >>> type(stock_df) <class 'dict'> >>> stock_df.keys() dict_keys(['300750', '600519']) >>> stock_df['600519'] 股票名称 股票代码 日期 开盘 收盘 最高 最低 成交量 成交额 振幅 涨跌幅 涨跌额 换手率 0 贵州茅台 600519 2001-08-27 -89.74 -89.53 -89.08 -90.07 406318 1.410347e+09 -1.10 0.92 0.83 56.83 1 贵州茅台 600519 2001-08-28 -89.64 -89.27 -89.24 -89.72 129647 4.634630e+08 -0.54 0.29 0.26 18.13 2 贵州茅台 600519 2001-08-29 -89.24 -89.36 -89.24 -89.42 53252 1.946890e+08 -0.20 -0.10 -0.09 7.45 3 贵州茅台 600519 2001-08-30 -89.38 -89.22 -89.14 -89.44 48013 1.775580e+08 -0.34 0.16 0.14 6.72 4 贵州茅台 600519 2001-08-31 -89.21 -89.24 -89.12 -89.28 23231 8.623100e+07 -0.18 -0.02 -0.02 3.25 ... ... ... ... ... ... ... ... ... ... ... ... ... ... 
4756 贵州茅台 600519 2021-07-23 1937.82 1900.00 1937.82 1895.09 47585 9.057762e+09 2.20 -2.06 -40.01 0.38 4757 贵州茅台 600519 2021-07-26 1879.00 1804.11 1879.00 1780.00 98619 1.789436e+10 5.21 -5.05 -95.89 0.79 4758 贵州茅台 600519 2021-07-27 1803.00 1712.89 1810.00 1703.00 86577 1.523081e+10 5.93 -5.06 -91.22 0.69 4759 贵州茅台 600519 2021-07-28 1703.00 1768.90 1788.20 1682.12 85369 1.479247e+10 6.19 3.27 56.01 0.68 4760 贵州茅台 600519 2021-07-29 1810.01 1749.79 1823.00 1734.34 63864 1.129957e+10 5.01 -1.08 -19.11 0.51 """ df = get_quote_history_for_stock( stock_codes, beg=beg, end=end, klt=klt, fqt=fqt ) if isinstance(df, pd.DataFrame): df.rename(columns={'代码': '股票代码', '名称': '股票名称' }, inplace=True) elif isinstance(df, dict): for stock_code in df.keys(): df[stock_code].rename(columns={'代码': '股票代码', '名称': '股票名称' }, inplace=True) # NOTE 扩展接口 设定此关键词即返回 DataFrame 而不是 dict if kwargs.get('return_df'): df: pd.DataFrame = pd.concat(df, axis=0, ignore_index=True) return df @process_dataframe_and_series(remove_columns_and_indexes=['市场编号']) @to_numeric def get_realtime_quotes(fs: Union[str, List[str]] = None) -> pd.DataFrame: """ 获取单个或者多个市场行情的最新状况 Parameters ---------- fs : Union[str, List[str]], optional 行情名称或者多个行情名列表 可选值及示例如下 - ``None`` 沪深京A股市场行情 - ``'沪深A股'`` 沪深A股市场行情 - ``'沪A'`` 沪市A股市场行情 - ``'深A'`` 深市A股市场行情 - ``北A`` 北证A股市场行情 - ``'可转债'`` 沪深可转债市场行情 - ``'期货'`` 期货市场行情 - ``'创业板'`` 创业板市场行情 - ``'美股'`` 美股市场行情 - ``'港股'`` 港股市场行情 - ``'中概股'`` 中国概念股市场行情 - ``'新股'`` 沪深新股市场行情 - ``'科创板'`` 科创板市场行情 - ``'沪股通'`` 沪股通市场行情 - ``'深股通'`` 深股通市场行情 - ``'行业板块'`` 行业板块市场行情 - ``'概念板块'`` 概念板块市场行情 - ``'沪深系列指数'`` 沪深系列指数市场行情 - ``'上证系列指数'`` 上证系列指数市场行情 - ``'深证系列指数'`` 深证系列指数市场行情 - ``'ETF'`` ETF 基金市场行情 - ``'LOF'`` LOF 基金市场行情 Returns ------- DataFrame 单个或者多个市场行情的最新状况 Raises ------ KeyError 当参数 ``fs`` 中含有不正确的行情类型时引发错误 Examples -------- >>> import efinance as ef >>> ef.stock.get_realtime_quotes() 股票代码 股票名称 涨跌幅 最新价 最高 最低 今开 涨跌额 换手率 量比 动态市盈率 成交量 成交额 昨日收盘 总市值 流通市值 行情ID 市场类型 0 688787 N海天 277.59 139.48 172.39 139.25 171.66 102.54 85.62 - 78.93 74519 1110318832.0 36.94 5969744000 1213908667 1.688787 沪A 1 301045 N天禄 149.34 39.42 48.95 39.2 48.95 23.61 66.66 - 37.81 163061 683878656.0 15.81 4066344240 964237089 0.301045 深A 2 300532 今天国际 20.04 12.16 12.16 10.69 10.69 2.03 8.85 3.02 -22.72 144795 171535181.0 10.13 3322510580 1989333440 0.300532 深A 3 300600 国瑞科技 20.02 13.19 13.19 11.11 11.41 2.2 18.61 2.82 218.75 423779 541164432.0 10.99 3915421427 3003665117 0.300600 深A 4 300985 致远新能 20.01 47.08 47.08 36.8 39.4 7.85 66.65 2.17 58.37 210697 897370992.0 39.23 6277336472 1488300116 0.300985 深A ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... 
4598 603186 华正新材 -10.0 43.27 44.09 43.27 43.99 -4.81 1.98 0.48 25.24 27697 120486294.0 48.08 6146300650 6063519472 1.603186 沪A 4599 688185 康希诺-U -10.11 476.4 534.94 460.13 530.0 -53.6 6.02 2.74 -2088.07 40239 1960540832.0 530.0 117885131884 31831479215 1.688185 沪A 4600 688148 芳源股份 -10.57 31.3 34.39 31.3 33.9 -3.7 26.07 0.56 220.01 188415 620632512.0 35.0 15923562000 2261706043 1.688148 沪A 4601 300034 钢研高纳 -10.96 43.12 46.81 42.88 46.5 -5.31 7.45 1.77 59.49 323226 1441101824.0 48.43 20959281094 18706911861 0.300034 深A 4602 300712 永福股份 -13.71 96.9 110.94 95.4 109.0 -15.4 6.96 1.26 511.21 126705 1265152928.0 112.3 17645877600 17645877600 0.300712 深A >>> ef.stock.get_realtime_quotes(['创业板','港股']) 股票代码 股票名称 涨跌幅 最新价 最高 最低 今开 涨跌额 换手率 量比 动态市盈率 成交量 成交额 昨日收盘 总市值 流通市值 行情ID 市场类型 0 00859 中昌国际控股 49.02 0.38 0.38 0.26 0.26 0.125 0.08 86.85 -2.83 938000 262860.0 0.255 427510287 427510287 128.00859 None 1 01058 粤海制革 41.05 1.34 1.51 0.9 0.93 0.39 8.34 1.61 249.89 44878000 57662440.0 0.95 720945460 720945460 128.01058 None 2 00713 世界(集团) 27.94 0.87 0.9 0.68 0.68 0.19 1.22 33.28 3.64 9372000 7585400.0 0.68 670785156 670785156 128.00713 None 3 08668 瀛海集团 24.65 0.177 0.179 0.145 0.145 0.035 0.0 10.0 -9.78 20000 3240.0 0.142 212400000 212400000 128.08668 None 4 08413 亚洲杂货 24.44 0.28 0.28 0.25 0.25 0.055 0.01 3.48 -20.76 160000 41300.0 0.225 325360000 325360000 128.08413 None ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... 5632 08429 冰雪集团 -16.75 0.174 0.2 0.166 0.2 -0.035 2.48 3.52 -21.58 11895000 2074645.0 0.209 83520000 83520000 128.08429 None 5633 00524 长城天下 -17.56 0.108 0.118 0.103 0.118 -0.023 0.45 15.43 -6.55 5961200 649171.0 0.131 141787800 141787800 128.00524 None 5634 08377 申酉控股 -17.71 0.395 0.46 0.39 0.46 -0.085 0.07 8.06 -5.07 290000 123200.0 0.48 161611035 161611035 128.08377 None 5635 00108 国锐地产 -19.01 1.15 1.42 1.15 1.42 -0.27 0.07 0.78 23.94 2376000 3012080.0 1.42 3679280084 3679280084 128.00108 None 5636 08237 华星控股 -25.0 0.024 0.031 0.023 0.031 -0.008 0.43 8.74 -2.01 15008000 364188.0 0.032 83760000 83760000 128.08237 None >>> ef.stock.get_realtime_quotes(['ETF']) 股票代码 股票名称 涨跌幅 最新价 最高 最低 今开 涨跌额 换手率 量比 动态市盈率 成交量 成交额 昨日收盘 总市值 流通市值 行情ID 市场类型 0 513050 中概互联网ETF 4.49 1.444 1.455 1.433 1.452 0.062 6.71 0.92 - 12961671 1870845984.0 1.382 27895816917 27895816917 1.513050 沪A 1 513360 教育ETF 4.38 0.5 0.502 0.486 0.487 0.021 16.89 1.7 - 1104254 54634387.0 0.479 326856952 326856952 1.513360 沪A 2 159766 旅游ETF 3.84 0.974 0.988 0.95 0.95 0.036 14.46 1.97 - 463730 45254947.0 0.938 312304295 312304295 0.159766 深A 3 159865 养殖ETF 3.8 0.819 0.828 0.785 0.791 0.03 12.13 0.89 - 1405871 114254714.0 0.789 949594189 949594189 0.159865 深A 4 516670 畜牧养殖ETF 3.76 0.856 0.864 0.825 0.835 0.031 24.08 0.98 - 292027 24924513.0 0.825 103803953 103803953 1.516670 沪A .. ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... 
549 513060 恒生医疗ETF -4.12 0.861 0.905 0.86 0.902 -0.037 47.96 1.57 - 1620502 141454355.0 0.898 290926128 290926128 1.513060 沪A 550 515220 煤炭ETF -4.46 2.226 2.394 2.194 2.378 -0.104 14.39 0.98 - 2178176 487720560.0 2.330 3369247992 3369247992 1.515220 沪A 551 513000 日经225ETF易方达 -4.49 1.212 1.269 1.21 1.269 -0.057 5.02 2.49 - 25819 3152848.0 1.269 62310617 62310617 1.513000 沪A 552 513880 日经225ETF -4.59 1.163 1.224 1.162 1.217 -0.056 16.93 0.94 - 71058 8336846.0 1.219 48811110 48811110 1.513880 沪A 553 513520 日经ETF -4.76 1.2 1.217 1.196 1.217 -0.06 27.7 1.79 - 146520 17645828.0 1.260 63464640 63464640 1.513520 沪A Notes ----- 无论股票、可转债、期货还是基金。第一列表头始终叫 ``股票代码`` """ fs_list: List[str] = [] if fs is None: fs_list.append(FS_DICT['stock']) if isinstance(fs, str): fs = [fs] if isinstance(fs, list): for f in fs: if not FS_DICT.get(f): raise KeyError(f'指定的行情参数 `{fs}` 不正确') fs_list.append(FS_DICT[f]) # 给空列表时 试用沪深A股行情 if not fs_list: fs_list.append(FS_DICT['stock']) fs_str = ','.join(fs_list) df = get_realtime_quotes_by_fs(fs_str) df.rename(columns={'代码': '股票代码', '名称': '股票名称' }, inplace=True) return df @to_numeric def get_history_bill(stock_code: str) -> pd.DataFrame: """ 获取单只股票历史单子流入流出数据 Parameters ---------- stock_code : str 股票代码 Returns ------- DataFrame 沪深市场单只股票历史单子流入流出数据 Examples -------- >>> import efinance as ef >>> ef.stock.get_history_bill('600519') 股票名称 股票代码 日期 主力净流入 小单净流入 中单净流入 大单净流入 超大单净流入 主力净流入占比 小单流入净占比 中单流入净占比 大单流入净占比 超大单流入净占比 收盘价 涨跌幅 0 贵州茅台 600519 2021-03-04 -3.670272e+06 -2282056.0 5.952143e+06 1.461528e+09 -1.465199e+09 -0.03 -0.02 0.04 10.99 -11.02 2013.71 -5.05 1 贵州茅台 600519 2021-03-05 -1.514880e+07 -1319066.0 1.646793e+07 -2.528896e+07 1.014016e+07 -0.12 -0.01 0.13 -0.19 0.08 2040.82 1.35 2 贵州茅台 600519 2021-03-08 -8.001702e+08 -877074.0 8.010473e+08 5.670671e+08 -1.367237e+09 -6.29 -0.01 6.30 4.46 -10.75 1940.71 -4.91 3 贵州茅台 600519 2021-03-09 -2.237770e+08 -6391767.0 2.301686e+08 -1.795013e+08 -4.427571e+07 -1.39 -0.04 1.43 -1.11 -0.27 1917.70 -1.19 4 贵州茅台 600519 2021-03-10 -2.044173e+08 -1551798.0 2.059690e+08 -2.378506e+08 3.343331e+07 -2.02 -0.02 2.03 -2.35 0.33 1950.72 1.72 .. ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... 
97 贵州茅台 600519 2021-07-26 -1.564233e+09 13142211.0 1.551091e+09 -1.270400e+08 -1.437193e+09 -8.74 0.07 8.67 -0.71 -8.03 1804.11 -5.05 98 贵州茅台 600519 2021-07-27 -7.803296e+08 -10424715.0 7.907544e+08 6.725104e+07 -8.475807e+08 -5.12 -0.07 5.19 0.44 -5.56 1712.89 -5.06 99 贵州茅台 600519 2021-07-28 3.997645e+08 2603511.0 -4.023677e+08 2.315648e+08 1.681997e+08 2.70 0.02 -2.72 1.57 1.14 1768.90 3.27 100 贵州茅台 600519 2021-07-29 -9.209842e+08 -2312235.0 9.232964e+08 -3.959741e+08 -5.250101e+08 -8.15 -0.02 8.17 -3.50 -4.65 1749.79 -1.08 101 贵州茅台 600519 2021-07-30 -1.524740e+09 -6020099.0 1.530761e+09 1.147248e+08 -1.639465e+09 -11.63 -0.05 11.68 0.88 -12.51 1678.99 -4.05 """ df = get_history_bill_for_stock(stock_code) df.rename(columns={ '代码': '股票代码', '名称': '股票名称' }, inplace=True) return df @to_numeric def get_today_bill(stock_code: str) -> pd.DataFrame: """ 获取单只股票最新交易日的日内分钟级单子流入流出数据 Parameters ---------- stock_code : str 股票代码 Returns ------- DataFrame 单只股票最新交易日的日内分钟级单子流入流出数据 Examples -------- >>> import efinance as ef >>> ef.stock.get_today_bill('600519') 股票代码 时间 主力净流入 小单净流入 中单净流入 大单净流入 超大单净流入 0 600519 2021-07-29 09:31 -3261705.0 -389320.0 3651025.0 -12529658.0 9267953.0 1 600519 2021-07-29 09:32 6437999.0 -606994.0 -5831006.0 -42615994.0 49053993.0 2 600519 2021-07-29 09:33 13179707.0 -606994.0 -12572715.0 -85059118.0 98238825.0 3 600519 2021-07-29 09:34 15385244.0 -970615.0 -14414632.0 -86865209.0 102250453.0 4 600519 2021-07-29 09:35 7853716.0 -970615.0 -6883104.0 -75692436.0 83546152.0 .. ... ... ... ... ... ... ... 235 600519 2021-07-29 14:56 -918956019.0 -1299630.0 920255661.0 -397127393.0 -521828626.0 236 600519 2021-07-29 14:57 -920977761.0 -2319213.0 923296987.0 -397014702.0 -523963059.0 237 600519 2021-07-29 14:58 -920984196.0 -2312233.0 923296442.0 -395974137.0 -525010059.0 238 600519 2021-07-29 14:59 -920984196.0 -2312233.0 923296442.0 -395974137.0 -525010059.0 239 600519 2021-07-29 15:00 -920984196.0 -2312233.0 923296442.0 -395974137.0 -525010059.0 """ df = get_today_bill_for_stock(stock_code) df.rename(columns={ '代码': '股票代码', '名称': '股票名称' }, inplace=True) return df @to_numeric def get_latest_quote(stock_codes: List[str]) -> pd.DataFrame: """ 获取沪深市场多只股票的实时涨幅情况 Parameters ---------- stock_codes : List[str] 多只股票代码列表 Returns ------- DataFrame 沪深市场、港股、美股多只股票的实时涨幅情况 Examples -------- >>> import efinance as ef >>> ef.stock.get_latest_quote(['600519','300750']) 股票代码 股票名称 涨跌幅 最新价 最高 最低 今开 涨跌额 换手率 量比 动态市盈率 成交量 成交额 昨日收盘 总市值 流通市值 市场类型 0 600519 贵州茅台 0.59 1700.04 1713.0 1679.0 1690.0 10.04 0.30 0.72 43.31 37905 6.418413e+09 1690.0 2135586507912 2135586507912 沪A 1 300750 宁德时代 0.01 502.05 529.9 480.0 480.0 0.05 1.37 1.75 149.57 277258 1.408545e+10 502.0 1169278366994 1019031580505 深A Notes ----- 当需要获取多只沪深 A 股 的实时涨跌情况时,最好使用 ``efinance.stock.get_realtime_quptes`` """ if isinstance(stock_codes, str): stock_codes = [stock_codes] secids: List[str] = [get_quote_id(stock_code) for stock_code in stock_codes] columns = EASTMONEY_QUOTE_FIELDS fields = ",".join(columns.keys()) params = ( ('OSVersion', '14.3'), ('appVersion', '6.3.8'), ('fields', fields), ('fltt', '2'), ('plat', 'Iphone'), ('product', 'EFund'), ('secids', ",".join(secids)), ('serverVersion', '6.3.6'), ('version', '6.3.8'), ) url = 'https://push2.eastmoney.com/api/qt/ulist.np/get' json_response = session.get(url, headers=EASTMONEY_REQUEST_HEADERS, params=params).json() rows = jsonpath(json_response, '$..diff[:]') if rows is None: return pd.DataFrame(columns=columns.values()).rename({ '市场编号': '市场类型' }) df = 
pd.DataFrame(rows)[columns.keys()].rename(columns=columns) df['市场类型'] = df['市场编号'].apply(lambda x: MARKET_NUMBER_DICT.get(str(x))) del df['市场编号'] return df @to_numeric def get_top10_stock_holder_info(stock_code: str, top: int = 4) -> pd.DataFrame: """ 获取沪深市场指定股票前十大股东信息 Parameters ---------- stock_code : str 股票代码 top : int, optional 最新 top 个前 10 大流通股东公开信息, 默认为 ``4`` Returns ------- DataFrame 个股持仓占比前 10 的股东的一些信息 Examples -------- >>> import efinance as ef >>> ef.stock.get_top10_stock_holder_info('600519',top = 1) 股票代码 更新日期 股东代码 股东名称 持股数 持股比例 增减 变动率 0 600519 2021-03-31 80010298 中国贵州茅台酒厂(集团)有限责任公司 6.783亿 54.00% 不变 -- 1 600519 2021-03-31 80637337 香港中央结算有限公司 9594万 7.64% -841.1万 -8.06% 2 600519 2021-03-31 80732941 贵州省国有资本运营有限责任公司 5700万 4.54% -182.7万 -3.11% 3 600519 2021-03-31 80010302 贵州茅台酒厂集团技术开发公司 2781万 2.21% 不变 -- 4 600519 2021-03-31 80475097 中央汇金资产管理有限责任公司 1079万 0.86% 不变 -- 5 600519 2021-03-31 80188285 中国证券金融股份有限公司 803.9万 0.64% -91 0.00% 6 600519 2021-03-31 78043999 深圳市金汇荣盛财富管理有限公司-金汇荣盛三号私募证券投资基金 502.1万 0.40% 不变 -- 7 600519 2021-03-31 70400207 中国人寿保险股份有限公司-传统-普通保险产品-005L-CT001沪 434.1万 0.35% 44.72万 11.48% 8 600519 2021-03-31 005827 中国银行股份有限公司-易方达蓝筹精选混合型证券投资基金 432万 0.34% 新进 -- 9 600519 2021-03-31 78083830 珠海市瑞丰汇邦资产管理有限公司-瑞丰汇邦三号私募证券投资基金 416.1万 0.33% 不变 -- """ def gen_fc(stock_code: str) -> str: """ Parameters ---------- stock_code : str 股票代码 Returns ------- str 指定格式的字符串 """ _type, stock_code = get_quote_id(stock_code).split('.') _type = int(_type) # 深市 if _type == 0: return f'{stock_code}02' # 沪市 return f'{stock_code}01' def get_public_dates(stock_code: str) -> List[str]: """ 获取指定股票公开股东信息的日期 Parameters ---------- stock_code : str 股票代码 Returns ------- List[str] 公开日期列表 """ quote_id = get_quote_id(stock_code) stock_code = quote_id.split('.')[-1] fc = gen_fc(stock_code) data = {"fc": fc} url = 'https://emh5.eastmoney.com/api/GuBenGuDong/GetFirstRequest2Data' json_response = requests.post( url, json=data).json() dates = jsonpath(json_response, f'$..BaoGaoQi') if not dates: return [] return dates fields = { 'GuDongDaiMa': '股东代码', 'GuDongMingCheng': '股东名称', 'ChiGuShu': '持股数', 'ChiGuBiLi': '持股比例', 'ZengJian': '增减', 'BianDongBiLi': '变动率', } quote_id = get_quote_id(stock_code) stock_code = quote_id.split('.')[-1] fc = gen_fc(stock_code) dates = get_public_dates(stock_code) dfs: List[pd.DataFrame] = [] empty_df = pd.DataFrame(columns=['股票代码', '日期']+list(fields.values())) for date in dates[:top]: data = {"fc": fc, "BaoGaoQi": date} url = 'https://emh5.eastmoney.com/api/GuBenGuDong/GetShiDaLiuTongGuDong' response = requests.post(url, json=data) response.encoding = 'utf-8' items: List[dict] = jsonpath( response.json(), f'$..ShiDaLiuTongGuDongList[:]') if not items: continue df =
pd.DataFrame(items)
pandas.DataFrame
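The row above ends by building a DataFrame straight from the list of JSON records and renaming raw field codes to readable column names. A minimal standalone sketch of that list-of-records-then-rename pattern is shown below; the field codes, tickers, and values are invented stand-ins, not the real EASTMONEY_STOCK_BASE_INFO_FIELDS mapping.

import pandas as pd

# Hypothetical raw records, standing in for the JSON items returned by the quote API.
items = [
    {"f57": "600519", "f58": "Kweichow Moutai", "f162": 39.38},
    {"f57": "300750", "f58": "CATL", "f162": 149.57},
]

# Hypothetical mapping of raw field codes to readable column names.
fields = {"f57": "code", "f58": "name", "f162": "pe_ratio"}

# Build the frame from the records, fix the column order, then rename.
df = pd.DataFrame(items)[list(fields.keys())].rename(columns=fields)
print(df)

Selecting list(fields.keys()) before renaming pins the column order to the mapping, which is the same effect the snippet gets by indexing with the mapping's values at the end.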
import numpy as np
import pandas as pd
from sklearn.preprocessing import RobustScaler, MinMaxScaler, StandardScaler


def scale_data(train, validate, test, scale_type=None, to_scale=None):
    '''
    Returns scaled data of the specified type as DataFrames.
    '''
    train_copy = train.copy()
    validate_copy = validate.copy()
    test_copy = test.copy()

    if to_scale == None:
        return train_copy, validate_copy, test_copy
    else:
        X_train = train_copy[to_scale]
        X_validate = validate_copy[to_scale]
        X_test = test_copy[to_scale]

        min_max_scaler = MinMaxScaler()
        robust_scaler = RobustScaler()
        standard_scaler = StandardScaler()

        min_max_scaler.fit(X_train)
        robust_scaler.fit(X_train)
        standard_scaler.fit(X_train)

        mmX_train_scaled = min_max_scaler.transform(X_train)
        rX_train_scaled = robust_scaler.transform(X_train)
        sX_train_scaled = standard_scaler.transform(X_train)

        mmX_validate_scaled = min_max_scaler.transform(X_validate)
        rX_validate_scaled = robust_scaler.transform(X_validate)
        sX_validate_scaled = standard_scaler.transform(X_validate)

        mmX_test_scaled = min_max_scaler.transform(X_test)
        rX_test_scaled = robust_scaler.transform(X_test)
        sX_test_scaled = standard_scaler.transform(X_test)

        mmX_train_scaled = pd.DataFrame(mmX_train_scaled, columns=X_train.columns)
        mmX_validate_scaled = pd.DataFrame(mmX_validate_scaled, columns=X_validate.columns)
        mmX_test_scaled = pd.DataFrame(mmX_test_scaled, columns=X_test.columns)

        rX_train_scaled = pd.DataFrame(rX_train_scaled, columns=X_train.columns)
        rX_validate_scaled = pd.DataFrame(rX_validate_scaled, columns=X_validate.columns)
        rX_test_scaled = pd.DataFrame(rX_test_scaled, columns=X_test.columns)

        sX_train_scaled = pd.DataFrame(sX_train_scaled, columns=X_train.columns)
        sX_validate_scaled =
pd.DataFrame(sX_validate_scaled, columns=X_validate.columns)
pandas.DataFrame
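The scale_data prompt fits each scaler on the training split only and then wraps the scaled arrays back into DataFrames. Here is a minimal sketch of that fit-on-train / transform-all pattern with a single MinMaxScaler; the toy frames are invented for illustration.

import pandas as pd
from sklearn.preprocessing import MinMaxScaler

# Toy splits; in practice these come from an earlier train/validate/test split.
train = pd.DataFrame({"x": [1.0, 2.0, 3.0], "y": [10.0, 20.0, 30.0]})
validate = pd.DataFrame({"x": [1.5], "y": [15.0]})
test = pd.DataFrame({"x": [2.5], "y": [25.0]})

scaler = MinMaxScaler()
scaler.fit(train)  # fit on the training split only, so validate/test statistics never leak in

train_scaled, validate_scaled, test_scaled = (
    pd.DataFrame(scaler.transform(split), columns=split.columns, index=split.index)
    for split in (train, validate, test)
)

Passing index=split.index when rebuilding each DataFrame keeps row alignment with the unscaled splits; the original snippet relies on default RangeIndexes instead.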
# Airspeed Velocity Benchmarks for pandera
import pandas as pd
from pandera import (
    Column, DataFrameSchema, Bool, Category, Check, DateTime, Float, Int,
    Object, String, Timedelta, check_input, check_output)


class Validate:
    """
    Benchmarking schema.validate
    """
    def setup(self):
        self.schema = DataFrameSchema(
            {
                "a": Column(Int),
                "b": Column(Float),
                "c": Column(String),
                "d": Column(Bool),
                "e": Column(Category),
                "f": Column(Object),
                "g": Column(DateTime),
                "i": Column(Timedelta),
            },
        )
        self.df = pd.DataFrame(
            {
                "a": [1, 2, 3],
                "b": [1.1, 2.5, 9.9],
                "c": ["z", "y", "x"],
                "d": [True, True, False],
                "e": pd.Series(["c2", "c1", "c3"], dtype="category"),
                "f": [(3,), (2,), (1,)],
                "g": [pd.Timestamp("2015-02-01"),
                      pd.Timestamp("2015-02-02"),
                      pd.Timestamp("2015-02-03")],
                "i": [pd.Timedelta(1, unit="D"),
                      pd.Timedelta(5, unit="D"),
                      pd.Timedelta(9, unit="D")]
            })

    def time_df_schema(self):
        self.schema.validate(self.df)

    def mem_df_schema(self):
        self.schema.validate(self.df)

    def peakmem_df_schema(self):
        self.schema.validate(self.df)


class Decorators:
    """
    Benchmarking input and output decorator performance.
    """
    def transformer(df):
        return df.assign(column2=[1, 2, 3])

    def setup(self):
        self.in_schema = DataFrameSchema({"column1": Column(String)})
        self.out_schema = DataFrameSchema({"column2": Column(Int)})
        self.df =
pd.DataFrame({"column1": ["a", "b", "c"]})
pandas.DataFrame
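The benchmark above only calls schema.validate(df). A minimal sketch of defining and running such a schema follows, assuming the same 0.x-era pandera imports the benchmark itself uses; the column names and check are invented.

import pandas as pd
from pandera import Check, Column, DataFrameSchema, Int, String

schema = DataFrameSchema({
    "column1": Column(String),
    "column2": Column(Int, Check(lambda s: s >= 0)),  # vectorised check over the whole Series
})

df = pd.DataFrame({"column1": ["a", "b", "c"], "column2": [1, 2, 3]})
validated = schema.validate(df)  # returns the DataFrame on success, raises a SchemaError otherwise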
# -*- coding:utf-8 -*- import joblib import logging import numpy as np import pandas as pd import scipy.sparse as sp from itertools import combinations from sklearn.preprocessing import LabelEncoder from scipy.sparse import coo_matrix from lightfm import LightFM data_path = '../data/' result_path = '../result/' logs_path = '../logs/' date_columns = ['expiration_date', 'registration_init_time'] not_categorical_columns = ['target', 'song_length', 'registration_init_time', 'expiration_date', 'time', 'bd'] def logger_fn(name, file, level=logging.INFO): logger = logging.getLogger(name) logger.setLevel(level) fh = logging.FileHandler(file, mode='w') ch = logging.StreamHandler() ch.setLevel(level) formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') fh.setFormatter(formatter) ch.setFormatter(formatter) logger.addHandler(fh) logger.addHandler(ch) return logger def preprocess(): train = pd.read_csv(data_path + "train.csv") test = pd.read_csv(data_path + "test.csv", index_col=0) song_data = pd.read_csv(data_path + "songs.csv") user_data = pd.read_csv(data_path + "members.csv", parse_dates=date_columns) all_data = pd.concat([train, test]) all_data = all_data.merge(song_data, on='song_id', how='left') all_data = all_data.merge(user_data, on='msno', how='left') enc = LabelEncoder() for col in [ 'msno', 'song_id', 'source_screen_name', 'source_system_tab', 'source_type', 'genre_ids', 'artist_name', 'composer', 'lyricist', 'gender' ]: all_data[col] = enc.fit_transform(all_data[col].fillna('nan')) for col in ['language', 'city', 'registered_via']: all_data[col] = enc.fit_transform(all_data[col].fillna(-2)) all_data['time'] = all_data.index / len(all_data) n = len(train) train_data = all_data[:n] test_data = all_data[n:] train_data.to_hdf(data_path + 'train.hdf', key='wsdm') test_data.to_hdf(data_path + 'test.hdf', key='wsdm') def create_features(): train_data = pd.read_hdf(data_path + 'train.hdf', parse_dates=date_columns) test_data = pd.read_hdf(data_path + 'test.hdf', parse_dates=date_columns) all_data = pd.concat([train_data, test_data]) df_test = test_data df_history_test = train_data df_trains = [] df_history_trains = [] n = len(test_data) shift = int(0.05 * len(train_data)) for i in range(2): m = - (i * shift) if m == 0: m = None df_trains.append(train_data[-(n + i * shift):m]) df_history_trains.append(train_data[:-(n + i * shift)]) categorical_columns = all_data.columns.difference(not_categorical_columns) orders = {} for col in categorical_columns: orders[col] = 10 ** (int(np.log(all_data[col].max() + 1) / np.log(10)) + 1) print('Orders computing finished...') # ================================================= def get_group(df, cols): group = df[cols[0]].copy() for col in cols[1:]: group = group * orders[col] + df[col] return group def mean(df, df_history, cols): group = get_group(df, cols) group_history = get_group(df_history, cols) mean_map = df_history.groupby(group_history)['target'].mean() return group.map(mean_map).fillna(-1) def count(all_data, df, cols): group = get_group(df, cols) group_all = get_group(all_data, cols) count_map = group_all.value_counts() return group.map(count_map).fillna(0) def regression(df, df_history, cols): group = get_group(df, cols) group_history = get_group(df_history, cols) targets = {} times = {} for (y, t), u in zip(df_history[['target', 'time']].values, group_history): if u not in targets: targets[u] = [y] times[u] = [t] else: targets[u].append(y) times[u].append(t) linal_user = {} for u in times: if len(times[u]) > 1: A = 
np.vstack([times[u], np.ones(len(times[u]))]).T linal_user[u] = np.linalg.inv(A.T.dot(A)).dot(A.T).dot(targets[u]) result = [] for t, u in zip(df['time'], group): if u not in times: result.append(0.5) else: if len(times[u]) < 2: result.append(0.5) else: result.append(linal_user[u].dot([t, 1])) return result def time_from_prev_heard(df, df_history, cols): group = get_group(df, cols) group_history = get_group(df_history, cols) last_heard = df_history.groupby(group_history)['time'].last().to_dict() result = [] for t, g in zip(df.time, group): if g in last_heard: result.append(t - last_heard[g]) else: result.append(-1) last_heard[g] = t return result def time_to_next_heard(df, df_history, cols): result = [] df_reverse = df.sort_index(ascending=False) group = get_group(df_reverse, cols) next_heard = {} for g, t in zip(group, df_reverse['time']): if g in next_heard: result.append(t - next_heard[g]) else: result.append(-1) next_heard[g] = t result.reverse() return result def count_from_future(df, df_history, cols): result = [] df_reverse = df.sort_index(ascending=False) group = get_group(df_reverse, cols) count = {} for g in group.values: if g in count: result.append(count[g]) count[g] += 1 else: result.append(0) count[g] = 1 result.reverse() return result def last_time_diff(df, df_history, cols): group = get_group(df, cols) last_time = df.groupby(group)['time'].last() return group.map(last_time) - df.time def count_from_past(df, df_history, cols): group = get_group(df, cols) count = {} result = [] for g in group.values: if g not in count: count[g] = 0 else: count[g] += 1 result.append(count[g]) return result def part_of_unique_song(df): group = get_group(all_data, ['msno', 'artist_name']) group_df = get_group(df, ['msno', 'artist_name']) num_song_by_artist = all_data.groupby('artist_name')['song_id'].nunique() num_song_by_user_artist = all_data.groupby(group)['song_id'].nunique() s1 = df['artist_name'].map(num_song_by_artist) s2 = group_df.map(num_song_by_user_artist) return s2 / s1 def matrix_factorization(df, df_history): cols = ['msno', 'source_type'] group = get_group(df, cols) group_history = get_group(df_history, cols) encoder = LabelEncoder() encoder.fit(
pd.concat([group, group_history])
pandas.concat
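The completion fits a LabelEncoder on pd.concat([group, group_history]) so that current and historical group keys share one label space. A small self-contained sketch of that idea, with invented id strings:

import pandas as pd
from sklearn.preprocessing import LabelEncoder

# Invented id Series standing in for `group` and `group_history`.
group = pd.Series(["u1_web", "u2_app", "u3_web"])
group_history = pd.Series(["u1_web", "u4_app"])

encoder = LabelEncoder()
# Fit on the union of both Series so ids seen only in the history still get a code.
encoder.fit(pd.concat([group, group_history]))

group_codes = encoder.transform(group)
history_codes = encoder.transform(group_history)

Fitting on the concatenation is what keeps transform from failing on labels that appear in only one of the two Series.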
import pandas as pd
from os import listdir

# Change the path to where you have your data
path = 'C:\\Users\\<NAME>\\BookSamples\\BookModels\\data\\random_stocks\\'

"""
The ingest function needs to have this exact signature,
meaning these arguments passed, as shown below.
"""
def random_stock_data(environ,
                      asset_db_writer,
                      minute_bar_writer,
                      daily_bar_writer,
                      adjustment_writer,
                      calendar,
                      start_session,
                      end_session,
                      cache,
                      show_progress,
                      output_dir):

    # Get list of files from path
    # Slicing off the last part
    # 'example.csv'[:-4] = 'example'
    symbols = [f[:-4] for f in listdir(path)]
    if not symbols:
        raise ValueError("No symbols found in folder.")

    # Prepare an empty DataFrame for dividends
    divs = pd.DataFrame(columns=['sid', 'amount', 'ex_date', 'record_date',
                                 'declared_date', 'pay_date'])

    # Prepare an empty DataFrame for splits
    splits = pd.DataFrame(columns=['sid', 'ratio', 'effective_date'])

    # Prepare an empty DataFrame for metadata
    metadata = pd.DataFrame(columns=('start_date', 'end_date', 'auto_close_date',
                                     'symbol', 'exchange'))

    # Check valid trading dates, according to the selected exchange calendar
    sessions = calendar.sessions_in_range(start_session, end_session)

    # Get data for all stocks and write to Zipline
    daily_bar_writer.write(
        process_stocks(symbols, sessions, metadata, divs)
    )

    # Write the metadata
    asset_db_writer.write(equities=metadata)

    # Write splits and dividends
    adjustment_writer.write(splits=splits, dividends=divs)


"""
Generator function to iterate stocks, build historical data,
metadata and dividend data
"""
def process_stocks(symbols, sessions, metadata, divs):
    # Loop the stocks, setting a unique Security ID (SID)
    for sid, symbol in enumerate(symbols):
        print('Loading {}...'.format(symbol))

        # Read the stock data from csv file.
        df = pd.read_csv('{}/{}.csv'.format(path, symbol), index_col=[0], parse_dates=[0])

        # Check first and last date.
        start_date = df.index[0]
        end_date = df.index[-1]

        # Synch to the official exchange calendar
        df = df.reindex(sessions.tz_localize(None))[start_date:end_date]

        # Forward fill missing data
        df.fillna(method='ffill', inplace=True)

        # Drop remaining NaN
        df.dropna(inplace=True)

        # The auto_close date is the day after the last trade.
        ac_date = end_date +
pd.Timedelta(days=1)
pandas.Timedelta
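The completed line offsets the last traded date by one day with pd.Timedelta. A tiny sketch of that arithmetic, using an invented date:

import pandas as pd

# Invented last-trade date for a symbol.
end_date = pd.Timestamp("2020-12-31")

# The auto-close date is the day after the last trade.
ac_date = end_date + pd.Timedelta(days=1)
print(ac_date)  # 2021-01-01 00:00:00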
"""Miscellaneous internal PyJanitor helper functions.""" import fnmatch import functools import os import re import socket import sys import warnings from collections.abc import Callable as dispatch_callable from itertools import chain, combinations from typing import ( Callable, Dict, Iterable, List, NamedTuple, Optional, Pattern, Tuple, Union, ) import numpy as np import pandas as pd from pandas.api.types import ( CategoricalDtype, is_extension_array_dtype, is_list_like, is_scalar, ) from pandas.core.common import apply_if_callable from .errors import JanitorError def check(varname: str, value, expected_types: list): """ One-liner syntactic sugar for checking types. It can also check callables. Should be used like this:: check('x', x, [int, float]) :param varname: The name of the variable (for diagnostic error message). :param value: The value of the varname. :param expected_types: The types we expect the item to be. :raises TypeError: if data is not the expected type. """ is_expected_type: bool = False for t in expected_types: if t is callable: is_expected_type = t(value) else: is_expected_type = isinstance(value, t) if is_expected_type: break if not is_expected_type: raise TypeError( "{varname} should be one of {expected_types}".format( varname=varname, expected_types=expected_types ) ) def _clean_accounting_column(x: str) -> float: """ Perform the logic for the `cleaning_style == "accounting"` attribute. This is a private function, not intended to be used outside of ``currency_column_to_numeric``. It is intended to be used in a pandas `apply` method. :returns: An object with a cleaned column. """ y = x.strip() y = y.replace(",", "") y = y.replace(")", "") y = y.replace("(", "-") if y == "-": return 0.00 return float(y) def _currency_column_to_numeric(x, cast_non_numeric=None) -> str: """ Perform logic for changing cell values. This is a private function intended to be used only in ``currency_column_to_numeric``. It is intended to be used in a pandas `apply` method, after being passed through `partial`. """ acceptable_currency_characters = { "-", ".", "1", "2", "3", "4", "5", "6", "7", "8", "9", "0", } if len(x) == 0: return "ORIGINAL_NA" if cast_non_numeric: if x in cast_non_numeric.keys(): check( "{%r: %r}" % (x, str(cast_non_numeric[x])), cast_non_numeric[x], [int, float], ) return cast_non_numeric[x] return "".join(i for i in x if i in acceptable_currency_characters) return "".join(i for i in x if i in acceptable_currency_characters) def _replace_empty_string_with_none(column_series): column_series.loc[column_series == ""] = None return column_series def _replace_original_empty_string_with_none(column_series): column_series.loc[column_series == "ORIGINAL_NA"] = None return column_series def _strip_underscores( df: pd.DataFrame, strip_underscores: Union[str, bool] = None ) -> pd.DataFrame: """ Strip underscores from DataFrames column names. Underscores can be stripped from the beginning, end or both. .. code-block:: python df = _strip_underscores(df, strip_underscores='left') :param df: The pandas DataFrame object. :param strip_underscores: (optional) Removes the outer underscores from all column names. Default None keeps outer underscores. Values can be either 'left', 'right' or 'both' or the respective shorthand 'l', 'r' and True. :returns: A pandas DataFrame with underscores removed. 
""" df = df.rename( columns=lambda x: _strip_underscores_func(x, strip_underscores) ) return df def _strip_underscores_func( col: str, strip_underscores: Union[str, bool] = None ) -> pd.DataFrame: """Strip underscores from a string.""" underscore_options = [None, "left", "right", "both", "l", "r", True] if strip_underscores not in underscore_options: raise JanitorError( f"strip_underscores must be one of: {underscore_options}" ) if strip_underscores in ["left", "l"]: col = col.lstrip("_") elif strip_underscores in ["right", "r"]: col = col.rstrip("_") elif strip_underscores == "both" or strip_underscores is True: col = col.strip("_") return col def import_message( submodule: str, package: str, conda_channel: str = None, pip_install: bool = False, ): """ Return warning if package is not found. Generic message for indicating to the user when a function relies on an optional module / package that is not currently installed. Includes installation instructions. Used in `chemistry.py` and `biology.py`. :param submodule: pyjanitor submodule that needs an external dependency. :param package: External package this submodule relies on. :param conda_channel: Conda channel package can be installed from, if at all. :param pip_install: Whether package can be installed via pip. """ is_conda = os.path.exists(os.path.join(sys.prefix, "conda-meta")) installable = True if is_conda: if conda_channel is None: installable = False installation = f"{package} cannot be installed via conda" else: installation = f"conda install -c {conda_channel} {package}" else: if pip_install: installation = f"pip install {package}" else: installable = False installation = f"{package} cannot be installed via pip" print( f"To use the janitor submodule {submodule}, you need to install " f"{package}." ) print() if installable: print("To do so, use the following command:") print() print(f" {installation}") else: print(f"{installation}") def idempotent(func: Callable, df: pd.DataFrame, *args, **kwargs): """ Raises error if a function operating on a `DataFrame` is not idempotent, that is, `func(func(df)) = func(df)` is not true for all `df`. :param func: A python method. :param df: A pandas `DataFrame`. :param args: Positional arguments supplied to the method. :param kwargs: Keyword arguments supplied to the method. :raises ValueError: If `func` is found to not be idempotent for the given `DataFrame` `df`. """ if not func(df, *args, **kwargs) == func( func(df, *args, **kwargs), *args, **kwargs ): raise ValueError( "Supplied function is not idempotent for the given " "DataFrame." ) def deprecated_alias(**aliases) -> Callable: """ Used as a decorator when deprecating old function argument names, while keeping backwards compatibility. Implementation is inspired from `StackOverflow`_. .. _StackOverflow: https://stackoverflow.com/questions/49802412/how-to-implement-deprecation-in-python-with-argument-alias Functional usage example: .. code-block:: python @deprecated_alias(a='alpha', b='beta') def simple_sum(alpha, beta): return alpha + beta :param aliases: Dictionary of aliases for a function's arguments. :return: Your original function wrapped with the kwarg redirection function. """ # noqa: E501 def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): rename_kwargs(func.__name__, kwargs, aliases) return func(*args, **kwargs) return wrapper return decorator def refactored_function(message: str) -> Callable: """Used as a decorator when refactoring functions Implementation is inspired from `Hacker Noon`_. .. 
Hacker Noon: https://hackernoon.com/why-refactoring-how-to-restructure-python-package-51b89aa91987 Functional usage example: .. code-block:: python @refactored_function( message="simple_sum() has been refactored. Use hard_sum() instead." ) def simple_sum(alpha, beta): return alpha + beta :param message: Message to use in warning user about refactoring. :return: Your original function wrapped with the kwarg redirection function. """ # noqa: E501 def decorator(func): def emit_warning(*args, **kwargs): warnings.warn(message, FutureWarning) return func(*args, **kwargs) return emit_warning return decorator def rename_kwargs(func_name: str, kwargs: Dict, aliases: Dict): """ Used to update deprecated argument names with new names. Throws a TypeError if both arguments are provided, and warns if old alias is used. Nothing is returned as the passed ``kwargs`` are modified directly. Implementation is inspired from `StackOverflow`_. .. _StackOverflow: https://stackoverflow.com/questions/49802412/how-to-implement-deprecation-in-python-with-argument-alias :param func_name: name of decorated function. :param kwargs: Arguments supplied to the method. :param aliases: Dictionary of aliases for a function's arguments. :raises TypeError: if both arguments are provided. """ # noqa: E501 for old_alias, new_alias in aliases.items(): if old_alias in kwargs: if new_alias in kwargs: raise TypeError( f"{func_name} received both {old_alias} and {new_alias}" ) warnings.warn( f"{old_alias} is deprecated; use {new_alias}", DeprecationWarning, ) kwargs[new_alias] = kwargs.pop(old_alias) def check_column( df: pd.DataFrame, column_names: Union[Iterable, str], present: bool = True ): """ One-liner syntactic sugar for checking the presence or absence of columns. Should be used like this:: check(df, ['a', 'b'], present=True) This will check whether columns "a" and "b" are present in df's columns. One can also guarantee that "a" and "b" are not present by switching to ``present = False``. :param df: The name of the variable. :param column_names: A list of column names we want to check to see if present (or absent) in df. :param present: If True (default), checks to see if all of column_names are in df.columns. If False, checks that none of column_names are in df.columns. :raises ValueError: if data is not the expected type. """ if isinstance(column_names, str) or not isinstance(column_names, Iterable): column_names = [column_names] for column_name in column_names: if present and column_name not in df.columns: # skipcq: PYL-R1720 raise ValueError( f"{column_name} not present in dataframe columns!" ) elif not present and column_name in df.columns: raise ValueError( f"{column_name} already present in dataframe columns!" ) def skipna(f: Callable) -> Callable: """ Decorator for escaping np.nan and None in a function Should be used like this:: df[column].apply(skipna(transform)) or:: @skipna def transform(x): pass :param f: the function to be wrapped :returns: _wrapped, the wrapped function """ def _wrapped(x, *args, **kwargs): if (type(x) is float and np.isnan(x)) or x is None: return np.nan return f(x, *args, **kwargs) return _wrapped def skiperror( f: Callable, return_x: bool = False, return_val=np.nan ) -> Callable: """ Decorator for escaping any error in a function. 
Should be used like this:: df[column].apply( skiperror(transform, return_val=3, return_x=False)) or:: @skiperror(return_val=3, return_x=False) def transform(x): pass :param f: the function to be wrapped :param return_x: whether or not the original value that caused error should be returned :param return_val: the value to be returned when an error hits. Ignored if return_x is True :returns: _wrapped, the wrapped function """ def _wrapped(x, *args, **kwargs): try: return f(x, *args, **kwargs) except Exception: # skipcq: PYL-W0703 if return_x: return x return return_val return _wrapped def _computations_expand_grid(others: dict) -> pd.DataFrame: """ Creates a cartesian product of all the inputs in `others`. Combines Numpy's `mgrid`, with the `take` method in numpy/Pandas, to expand each input to the length of the cumulative product of all inputs in `others`. There is a performance penalty for small entries (length less than 10) in using this method, instead of `itertools.product`; however, there is significant performance benefits as the size of the data increases. Another benefit of this approach, in addition to the significant performance gains, is the preservation of data types. This is particularly relevant for Pandas' extension arrays dtypes (categoricals, nullable integers, ...). A dataframe of all possible combinations is returned. """ for key, _ in others.items(): check("key", key, [str]) grid = {} for key, value in others.items(): if is_scalar(value): grid[key] = pd.Series([value]) elif is_extension_array_dtype(value) and not ( isinstance(value, pd.Series) ): grid[key] = pd.Series(value) elif is_list_like(value): if not isinstance( value, (pd.DataFrame, pd.Series, np.ndarray, list, pd.Index) ): grid[key] = list(value) else: grid[key] = value others = None mgrid_values = [slice(len(value)) for _, value in grid.items()] mgrid_values = np.mgrid[mgrid_values] mgrid_values = map(np.ravel, mgrid_values) grid = zip([*grid.items()], mgrid_values) grid = ((*left, right) for left, right in grid) grid = ( _expand_grid(value, key, mgrid_values) for key, value, mgrid_values in grid ) grid = pd.concat(grid, axis="columns", sort=False) return grid @functools.singledispatch def _expand_grid(value, key, mgrid_values, mode="expand_grid"): """ Base function for dispatch of `_expand_grid`. `mode` parameter is added, to make the function reusable in the `_computations_complete` function. Also, allowing `key` as None enables reuse in the `_computations_complete` function. """ raise TypeError( f"{type(value).__name__} data type is not supported in `expand_grid`." ) @_expand_grid.register(list) # noqa: F811 def _sub_expand_grid(value, key, mgrid_values): # noqa: F811 """ Expands the list object based on `mgrid_values`. Converts to an array and passes it to the `_expand_grid` function for arrays. `mode` parameter is added, to make the function reusable in the `_computations_complete` function. Also, allowing `key` as None enables reuse in the `_computations_complete` function. Returns Series with name if 1-Dimensional array or DataFrame if 2-Dimensional array with column names. """ if not value: raise ValueError("""list object cannot be empty.""") value = np.array(value) return _expand_grid(value, key, mgrid_values) @_expand_grid.register(np.ndarray) def _sub_expand_grid( # noqa: F811 value, key, mgrid_values, mode="expand_grid" ): """ Expands the numpy array based on `mgrid_values`. Ensures array dimension is either 1 or 2. 
`mode` parameter is added, to make the function reusable in the `_computations_complete` function. Also, allowing `key` as None enables reuse in the `_computations_complete` function. Returns Series with name if 1-Dimensional array or DataFrame if 2-Dimensional array with column names. The names are derived from the `key` parameter. """ if not (value.size > 0): raise ValueError("""array cannot be empty.""") if value.ndim > 2: raise ValueError("""expand_grid works only on 1D and 2D structures.""") value = value.take(mgrid_values, axis=0) if value.ndim == 1: value = pd.Series(value) # a tiny bit faster than chaining with `rename` value.name = key else: value = pd.DataFrame(value) # a tiny bit faster than using `add_prefix` value.columns = value.columns.map(lambda column: f"{key}_{column}") return value @_expand_grid.register(pd.Series) def _sub_expand_grid( # noqa: F811 value, key, mgrid_values, mode="expand_grid" ): """ Expands the Series based on `mgrid_values`. `mode` parameter is added, to make the function reusable in the `_computations_complete` function. Also, allowing `key` as None enables reuse in the `_computations_complete` function. Checks for empty Series and returns modified keys. Returns Series with new Series name. """ if value.empty: raise ValueError("""Series cannot be empty.""") value = value.take(mgrid_values) value.index = np.arange(len(value)) if mode != "expand_grid": return value if value.name: value.name = f"{key}_{value.name}" else: value.name = key return value @_expand_grid.register(pd.DataFrame) def _sub_expand_grid( # noqa: F811 value, key, mgrid_values, mode="expand_grid" ): """ Expands the DataFrame based on `mgrid_values`. `mode` parameter is added, to make the function reusable in the `_computations_complete` function. Also, allowing `key` as None enables reuse in the `_computations_complete` function. Checks for empty dataframe and returns modified keys. Returns a DataFrame with new column names. """ if value.empty: raise ValueError("""DataFrame cannot be empty.""") value = value.take(mgrid_values) value.index = np.arange(len(value)) if mode != "expand_grid": return value if isinstance(value.columns, pd.MultiIndex): value.columns = [f"{key}_{num}" for num, _ in enumerate(value.columns)] else: value.columns = value.columns.map(lambda column: f"{key}_{column}") return value @_expand_grid.register(pd.Index) def _sub_expand_grid( # noqa: F811 value, key, mgrid_values, mode="expand_grid" ): """ Expands the Index based on `mgrid_values`. `mode` parameter is added, to make the function reusable in the `_computations_complete` function. Also, allowing `key` as None enables reuse in the `_computations_complete` function. Checks for empty Index and returns modified keys. Returns a DataFrame (if MultiIndex) with new column names, or a Series with a new name. """ if value.empty: raise ValueError("""Index cannot be empty.""") value = value.take(mgrid_values) if mode != "expand_grid": return value if isinstance(value, pd.MultiIndex): value = value.to_frame(index=False) value.columns = value.columns.map(lambda column: f"{key}_{column}") else: value = value.to_series(index=np.arange(len(value))) if value.name: value.name = f"{key}_{value.name}" else: value.name = key return value def _data_checks_complete( df: pd.DataFrame, columns: List[Union[List, Tuple, Dict, str]], by: Optional[Union[list, str]] = None, ): """ Function to check parameters in the `complete` function. Checks the type of the `columns` parameter, as well as the types within the `columns` parameter. 
Check is conducted to ensure that column names are not repeated. Also checks that the names in `columns` actually exist in `df`. Returns `df`, `columns`, `column_checker`, and `by` if all checks pass. """ # TODO: get `complete` to work on MultiIndex columns, # if there is sufficient interest with use cases if isinstance(df.columns, pd.MultiIndex): raise ValueError( """ `complete` does not support MultiIndex columns. """ ) check("columns", columns, [list]) columns = [ list(grouping) if isinstance(grouping, tuple) else grouping for grouping in columns ] column_checker = [] for grouping in columns: check("grouping", grouping, [list, dict, str]) if not grouping: raise ValueError("grouping cannot be empty") if isinstance(grouping, str): column_checker.append(grouping) else: column_checker.extend(grouping) # columns should not be duplicated across groups column_checker_no_duplicates = set() for column in column_checker: if column in column_checker_no_duplicates: raise ValueError( f"""{column} column should be in only one group.""" ) column_checker_no_duplicates.add(column) # noqa: PD005 check_column(df, column_checker) column_checker_no_duplicates = None if by is not None: if isinstance(by, str): by = [by] check("by", by, [list]) return df, columns, column_checker, by def _computations_complete( df: pd.DataFrame, columns: List[Union[List, Tuple, Dict, str]], by: Optional[Union[list, str]] = None, ) -> pd.DataFrame: """ This function computes the final output for the `complete` function. If `by` is present, then groupby apply is used. For some cases, the `stack/unstack` combination is preferred; it is more efficient than `reindex`, as the size of the data grows. It is only applicable if all the entries in `columns` are strings, there are no nulls(stacking implicitly removes nulls in columns), the length of `columns` is greater than 1, and the index has no duplicates. If there is a dictionary in `columns`, it is possible that all the values of a key, or keys, may not be in the existing column with the same key(s); as such, a union of the current index and the generated index is executed, to ensure that all combinations are in the final dataframe. A dataframe, with rows of missing values, if any, is returned. """ df, columns, column_checker, by = _data_checks_complete(df, columns, by) dict_present = any((isinstance(entry, dict) for entry in columns)) all_strings = all(isinstance(column, str) for column in columns) df = df.set_index(column_checker) df_index = df.index df_names = df_index.names any_nulls = any( df_index.get_level_values(name).hasnans for name in df_names ) if not by: df = _base_complete(df, columns, all_strings, any_nulls, dict_present) # a better (and faster) way would be to create a dataframe # from the groupby ... 
# solution here got me thinking # https://stackoverflow.com/a/66667034/7175713 # still thinking on how to improve speed of groupby apply else: df = df.groupby(by).apply( _base_complete, columns, all_strings, any_nulls, dict_present, ) df = df.drop(columns=by) df = df.reset_index() return df def _base_complete( df: pd.DataFrame, columns: List[Union[List, Tuple, Dict, str]], all_strings: bool, any_nulls: bool, dict_present: bool, ) -> pd.DataFrame: df_empty = df.empty df_index = df.index unique_index = df_index.is_unique columns_to_stack = None if all_strings and (not any_nulls) and (len(columns) > 1) and unique_index: if df_empty: df["dummy"] = 1 columns_to_stack = columns[1:] df = df.unstack(columns_to_stack) # noqa: PD010 df = df.stack(columns_to_stack, dropna=False) # noqa: PD013 if df_empty: df = df.drop(columns="dummy") columns_to_stack = None return df indexer = _create_indexer_for_complete(df_index, columns) if unique_index: if dict_present: indexer = df_index.union(indexer, sort=None) df = df.reindex(indexer) else: df = df.join(pd.DataFrame([], index=indexer), how="outer") return df def _create_indexer_for_complete( df_index: pd.Index, columns: List[Union[List, Dict, str]], ) -> pd.DataFrame: """ This creates the index that will be used to expand the dataframe in the `complete` function. A pandas Index is returned. """ complete_columns = ( _complete_column(column, df_index) for column in columns ) complete_columns = ( (entry,) if not isinstance(entry, list) else entry for entry in complete_columns ) complete_columns = chain.from_iterable(complete_columns) indexer = [*complete_columns] if len(indexer) > 1: indexer = _complete_indexer_expand_grid(indexer) else: indexer = indexer[0] return indexer def _complete_indexer_expand_grid(indexer): """ Generate indices to expose explicitly missing values, using the `expand_grid` function. Returns a pandas Index. """ indexers = [] mgrid_values = [slice(len(value)) for value in indexer] mgrid_values = np.mgrid[mgrid_values] mgrid_values = map(np.ravel, mgrid_values) indexer = zip(indexer, mgrid_values) indexer = ( _expand_grid(value, None, mgrid_values, mode=None) for value, mgrid_values in indexer ) for entry in indexer: if isinstance(entry, pd.MultiIndex): names = entry.names val = (entry.get_level_values(name) for name in names) indexers.extend(val) else: indexers.append(entry) indexer = pd.MultiIndex.from_arrays(indexers) indexers = None return indexer @functools.singledispatch def _complete_column(column, index): """ This function processes the `columns` argument, to create a pandas Index or a list. Args: column : str/list/dict index: pandas Index A unique pandas Index or a list of unique pandas Indices is returned. """ raise TypeError( """This type is not supported in the `complete` function.""" ) @_complete_column.register(str) # noqa: F811 def _sub_complete_column(column, index): # noqa: F811 """ This function processes the `columns` argument, to create a pandas Index. Args: column : str index: pandas Index Returns: pd.Index: A pandas Index with a single level """ arr = index.get_level_values(column) if not arr.is_unique: arr = arr.drop_duplicates() return arr @_complete_column.register(list) # noqa: F811 def _sub_complete_column(column, index): # noqa: F811 """ This function processes the `columns` argument, to create a pandas Index. 
Args: column : list index: pandas Index Returns: pd.MultiIndex """ level_to_drop = [name for name in index.names if name not in column] arr = index.droplevel(level_to_drop) if not arr.is_unique: return arr.drop_duplicates() return arr @_complete_column.register(dict) # noqa: F811 def _sub_complete_column(column, index): # noqa: F811 """ This function processes the `columns` argument, to create a pandas Index or a list. Args: column : dict index: pandas Index Returns: list: A list of unique pandas Indices. """ collection = [] for key, value in column.items(): arr = apply_if_callable(value, index.get_level_values(key)) if not is_list_like(arr): raise ValueError( """ Input in the supplied dictionary must be list-like. """ ) if ( not isinstance( arr, (pd.DataFrame, pd.Series, np.ndarray, pd.Index) ) ) and (not is_extension_array_dtype(arr)): arr = pd.Index([*arr], name=key) if arr.ndim != 1: raise ValueError( """ It seems the supplied pair in the supplied dictionary cannot be converted to a 1-dimensional Pandas object. Kindly provide data that can be converted to a 1-dimensional Pandas object. """ ) if isinstance(arr, pd.MultiIndex): raise ValueError( """ MultiIndex object not acceptable in the supplied dictionary. """ ) if not isinstance(arr, pd.Index): arr = pd.Index(arr, name=key) if arr.empty: raise ValueError( """ Input in the supplied dictionary cannot be empty. """ ) if not arr.is_unique: arr = arr.drop_duplicates() if arr.name is None: arr.name = key collection.append(arr) return collection def _data_checks_pivot_longer( df, index, column_names, names_to, values_to, column_level, names_sep, names_pattern, sort_by_appearance, ignore_index, ): """ This function raises errors if the arguments have the wrong python type, or if an unneeded argument is provided. It also raises errors for some other scenarios(e.g if there are no matches returned for the regular expression in `names_pattern`, or if the dataframe has MultiIndex columns and `names_sep` or `names_pattern` is provided). This function is executed before proceeding to the computation phase. Type annotations are not provided because this function is where type checking happens. """ if column_level is not None: check("column_level", column_level, [int, str]) df.columns = df.columns.get_level_values(column_level) if index is not None: if is_list_like(index) and (not isinstance(index, tuple)): index = list(index) index = _select_columns(index, df) if column_names is not None: if is_list_like(column_names) and ( not isinstance(column_names, tuple) ): column_names = list(column_names) column_names = _select_columns(column_names, df) if isinstance(names_to, str): names_to = [names_to] elif isinstance(names_to, tuple): names_to = list(names_to) check("names_to", names_to, [list]) if not all((isinstance(word, str) for word in names_to)): raise TypeError("All entries in `names_to` argument must be strings.") if len(names_to) > 1: if all((names_pattern, names_sep)): raise ValueError( """ Only one of `names_pattern` or `names_sep` should be provided. """ ) if (".value" in names_to) and (names_to.count(".value") > 1): raise ValueError("There can be only one `.value` in `names_to`.") # names_sep creates more than one column # whereas regex with names_pattern can be limited to one column if (len(names_to) == 1) and (names_sep is not None): raise ValueError( """ For a single `names_to` value, `names_sep` is not required. 
""" ) if names_pattern is not None: check("names_pattern", names_pattern, [str, Pattern, List, Tuple]) if isinstance(names_pattern, (list, tuple)): if not all( isinstance(word, (str, Pattern)) for word in names_pattern ): raise TypeError( """ All entries in the ``names_pattern`` argument must be regular expressions. """ ) if len(names_pattern) != len(names_to): raise ValueError( """ Length of ``names_to`` does not match number of patterns. """ ) if ".value" in names_to: raise ValueError( """ ``.value`` is not accepted if ``names_pattern`` is a list/tuple. """ ) if names_sep is not None: check("names_sep", names_sep, [str, Pattern]) check("values_to", values_to, [str]) if (values_to in df.columns) and not any( ( ".value" in names_to, isinstance(names_pattern, (list, tuple)), ) ): # copied from pandas' melt source code # with a minor tweak raise ValueError( """ This dataframe has a column name that matches the 'values_to' column name of the resulting Dataframe. Kindly set the 'values_to' parameter to a unique name. """ ) if any((names_sep, names_pattern)) and ( isinstance(df.columns, pd.MultiIndex) ): raise ValueError( """ Unpivoting a MultiIndex column dataframe when `names_sep` or `names_pattern` is supplied is not supported. """ ) if all((names_sep is None, names_pattern is None)): # adapted from pandas' melt source code if ( (index is not None) and isinstance(df.columns, pd.MultiIndex) and (not isinstance(index, list)) ): raise ValueError( """ index must be a list of tuples when columns are a MultiIndex. """ ) if ( (column_names is not None) and isinstance(df.columns, pd.MultiIndex) and (not isinstance(column_names, list)) ): raise ValueError( """ column_names must be a list of tuples when columns are a MultiIndex. """ ) check("sort_by_appearance", sort_by_appearance, [bool]) check("ignore_index", ignore_index, [bool]) return ( df, index, column_names, names_to, values_to, column_level, names_sep, names_pattern, sort_by_appearance, ignore_index, ) def _sort_by_appearance_for_melt( df: pd.DataFrame, ignore_index: bool, len_index: int ) -> pd.DataFrame: """ This function sorts the resulting dataframe by appearance, via the `sort_by_appearance` parameter in `computations_pivot_longer`. An example for `sort_by_appearance`: Say data looks like this : id, a1, a2, a3, A1, A2, A3 1, a, b, c, A, B, C when unpivoted into long form, it will look like this : id instance a A 0 1 1 a A 1 1 2 b B 2 1 3 c C where the column `a` comes before `A`, as it was in the source data, and in column `a`, `a > b > c`, also as it was in the source data. A dataframe that is sorted by appearance is returned. """ index_sorter = None # if the height of the new dataframe # is the same as the height of the original dataframe, # then there is no need to sort by appearance length_check = any((len_index == 1, len_index == len(df))) if not length_check: index_sorter = np.reshape(np.arange(len(df)), (-1, len_index)).ravel( order="F" ) df = df.take(index_sorter) if ignore_index: df.index = np.arange(len(df)) return df def _pivot_longer_extractions( df: pd.DataFrame, index: Optional[Union[List, Tuple]] = None, column_names: Optional[Union[List, Tuple]] = None, names_to: Optional[List] = None, names_sep: Optional[Union[str, Pattern]] = None, names_pattern: Optional[ Union[ List[Union[str, Pattern]], Tuple[Union[str, Pattern]], str, Pattern ] ] = None, ) -> Tuple: """ This is where the labels within the column names are separated into new columns, and is executed if `names_sep` or `names_pattern` is not None. 
A dataframe is returned. """ if any((names_sep, names_pattern)): if index: df = df.set_index(index, append=True) if column_names: df = df.loc[:, column_names] mapping = None if names_sep: mapping = df.columns.str.split(names_sep, expand=True) if len(mapping.names) != len(names_to): raise ValueError( """ The length of ``names_to`` does not match the number of columns extracted. """ ) mapping.names = names_to elif isinstance(names_pattern, str): mapping = df.columns.str.extract(names_pattern, expand=True) if mapping.isna().all(axis=None): raise ValueError( """ No labels in the columns matched the regular expression in ``names_pattern``. Kindly provide a regular expression that matches all labels in the columns. """ ) if mapping.isna().any(axis=None): raise ValueError( """ Not all labels in the columns matched the regular expression in ``names_pattern``. Kindly provide a regular expression that matches all labels in the columns. """ ) if len(names_to) != len(mapping.columns): raise ValueError( """ The length of ``names_to`` does not match the number of columns extracted. """ ) if len(mapping.columns) == 1: mapping = pd.Index(mapping.iloc[:, 0], name=names_to[0]) else: mapping = pd.MultiIndex.from_frame(mapping, names=names_to) elif isinstance(names_pattern, (list, tuple)): mapping = [ df.columns.str.contains(regex, na=False) for regex in names_pattern ] if not np.any(mapping): raise ValueError( """ Not all labels in the columns matched the regular expression in ``names_pattern``. Kindly provide a regular expression that matches all labels in the columns. """ ) mapping = np.select(mapping, names_to, None) mapping = pd.Index(mapping, name=".value") if np.any(mapping.isna()): raise ValueError( """ The regular expressions in ``names_pattern`` did not return all matches. Kindly provide a regular expression that captures all patterns. """ ) outcome = None single_index_mapping = not isinstance(mapping, pd.MultiIndex) if single_index_mapping: outcome = pd.Series(mapping) outcome = outcome.groupby(outcome).cumcount() mapping = pd.MultiIndex.from_arrays([mapping, outcome]) outcome = None df.columns = mapping dot_value = any( ((".value" in names_to), isinstance(names_pattern, (list, tuple))) ) first = None last = None complete_index = None dtypes = None cumcount = None if dot_value: if not mapping.is_unique: cumcount = pd.factorize(mapping)[0] cumcount =
pd.Series(cumcount)
pandas.Series
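In the branch above, duplicate column labels produced by the extraction are paired with a running occurrence count so the rebuilt index becomes unique. A minimal sketch of that groupby/cumcount idea, separate from pyjanitor's actual implementation; the labels below are made up.

import pandas as pd

# duplicate labels, as .value extraction often produces
labels = pd.Series(["a", "b", "a", "b", "a"])
# running count of each label's occurrences: 0, 0, 1, 1, 2
occurrence = labels.groupby(labels).cumcount()
# pairing label with occurrence makes every entry distinct
unique_columns = pd.MultiIndex.from_arrays([labels, occurrence], names=[".value", None])
print(unique_columns.is_unique)  # True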
import concurrent import pickle import re from concurrent.futures import ThreadPoolExecutor from configparser import ConfigParser, ExtendedInterpolation from http.client import HTTPException from json import JSONDecodeError import numpy as np import pandas as pd import requests from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import cosine_similarity def _get_courses(): response = requests.get(OPENDATA + '/course/catalog/filter/*/*/*', auth=(OPENDATA_USER, OPENDATA_KEY)) response = response.json() catalog = pd.DataFrame(response) catalog['subject'] = catalog['subject'] + catalog['catalog'] catalog['title'] = catalog['title'].apply(lambda x: x.title()) catalog.index = catalog['ID'] del catalog['ID'] del catalog['catalog'] del catalog['crosslisted'] catalog.drop_duplicates('title', inplace=True) return catalog def _get_descriptions(): response = requests.get(OPENDATA + '/course/description/filter/*', auth=(OPENDATA_USER, OPENDATA_KEY)) response = response.json() description = pd.DataFrame(response) description.index = description['ID'] del description['ID'] return description def _delete_duds(data: pd.DataFrame): patterns = [ "(?:Please|See) .*alendar\.*|(?:PLEASE )?SEE .*DAR(?:\.)?", "\*\*\*(?:PLESE)*", "\*VID\*\n*\*KEYB\*\n|\*VID\*\n|\*(?:CNT|APP)\*", "\"|\*|\n|\r|^ |~*|<.*>", "(?:IMPORTANT )?NOTES?[:-].*|Notes?[:-].*", "Prerequisite.*?[\.!\?](?:\s|$)", "(?i:...).*Students who have taken.*", "(\t|\r\n|\n)", "\\\\", "(?:Tutorial|Lectures|Laboratory):[^.]*[.]", ] patterns = '|'.join([f'(?:{x})' for x in patterns]) patterns = rf'{patterns}' patterns = re.compile(patterns) for _, row in data.iterrows(): row['description'] = patterns.sub('', row['description']) def _get_keywords(data: pd.DataFrame): params = {'text': None, 'confidence': '0.35', 'support': 0, 'spotter': 'Default', 'policy': 'whitelist'} headers = {'accept': 'application/json'} url = 'https://api.dbpedia-spotlight.org/en/annotate' def run(chunk: list): for item in chunk: _params = params.copy() _params['text'] = item['description'] response = requests.get(url, params=_params, headers=headers) try: if response.status_code != 200: raise HTTPException(f'Cannot find text or url for {item}') response = response.json() if 'Resources' in response: response = response['Resources'] item['keywords'] = list(set([x['@URI'] for x in response])) else: item['keywords'] = [] except JSONDecodeError: item['keywords'] = [] return True data_dict = data.to_dict('records') futures = [] course_chunks = np.array_split(data_dict, 20) executor = ThreadPoolExecutor(max_workers=20) for chunk in course_chunks: futures.append(executor.submit(run, chunk)) for _ in concurrent.futures.as_completed(futures): pass executor.shutdown() return pd.DataFrame().from_records(data_dict) def _get_abstracts(data: pd.DataFrame): url = 'https://dbpedia.org/sparql/' params = {'query': '', 'format': 'application/sparql-results+json', 'timeout': '0', 'signal_void': 'on', 'signal_unconnected': 'on', 'default-graph-uri': 'http://dbpedia.org' } def run(chunk: list): try: params_ = params.copy() for item in chunk: result = [] for keyword in item['keywords']: params_['query'] = """ select ?info where { <%s> dbo:abstract ?info filter (lang(?info) = 'en') } """ % keyword response = requests.get(url, params=params_) response.encoding = response.apparent_encoding response = response.json() text = response['results']['bindings'] if text: text = text[0]['info']['value'] result.append(text) item['keyword description'] = '\n'.join(result) except 
Exception: print('why') return True data_dict = data.to_dict('records') futures = [] course_chunks = np.array_split(data_dict, 20) executor = ThreadPoolExecutor(max_workers=20) for chunk in course_chunks: futures.append(executor.submit(run, chunk)) for _ in concurrent.futures.as_completed(futures): pass executor.shutdown() return
pd.DataFrame()
pandas.DataFrame
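The scraper above uses the same round trip in both helpers: to_dict('records'), worker threads mutating the shared dicts in place, then a fresh DataFrame built from those records. A stripped-down sketch of that pattern with a stand-in enrich step instead of the real DBpedia calls; since from_records is a classmethod, it can be called on pd.DataFrame directly rather than on an empty frame.

from concurrent.futures import ThreadPoolExecutor
import numpy as np
import pandas as pd

def enrich(chunk):
    # stand-in for the real annotation request; mutates the shared dicts
    for item in chunk:
        item["keywords"] = []
    return True

data = pd.DataFrame({"ID": ["CS101", "CS102"], "description": ["intro", "data"]})
records = data.to_dict("records")
chunks = np.array_split(records, 2)          # object arrays holding the same dict references
with ThreadPoolExecutor(max_workers=2) as executor:
    list(executor.map(enrich, chunks))
result = pd.DataFrame.from_records(records)  # rebuilt from the mutated records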
from __future__ import division # if you'll ever need integer division: c = a // b import os import pandas as pd import matplotlib.pyplot as plt import numpy as np import itertools import matplotlib import matplotlib.cm as cm from matplotlib.ticker import FuncFormatter #import seaborn as sns def data_import(filename = 'inp.csv'): dataframe = pd.read_csv(filename)[['yy', 'value']] dataframe['decade'] = dataframe['yy'].apply(lambda yy: yy//10) # genera la decade dataframe['up-down'] = 1 dataframe['up-down'] = dataframe['up-down'] * (dataframe['value'] > dataframe['value'].shift(1)) # definisce l'up-down return dataframe def to_percent(y, position): # Ignore the passed in position. This has the effect of scaling the default # tick locations. s = str(100 * y) # The percent symbol needs escaping in latex if matplotlib.rcParams['text.usetex'] is True: return s + r'$\%$' else: return s + '%' def generate_list_of_tuples(df, k): # crea un array di arrays, shiftati di valore di volta in volta list_of_values = [df['up-down'][i:].tolist() for i in range(k)] # poi li zippa sulla base della loro posizione, creando in questo modo # una lista di tuple che rappresenta tutte le sequenze di valori che avvengono # nella lista di up and down list_of_tuples = list(zip(*list_of_values)) return list_of_tuples def count_unique_values_sum(df, k): list_of_tuples = generate_list_of_tuples(df, k) list_of_tuples = map(sum, list_of_tuples) # conta i valori univoci negli array counts = pd.value_counts(list_of_tuples) # ritorna lo zip corrispondente ai risultati return zip(counts.index, counts.values) def count_unique_values_combinations(df, k): list_of_tuples = generate_list_of_tuples(df, k) # conta i valori univoci di combinazioni counts = pd.value_counts(list_of_tuples) # ritorna lo zip corrispondente ai risultati return zip(counts.index, counts.values) def columns_to_percent(df): for column in df.columns.values: df[column] = df[column]/df[column].sum() return df def count_only_up(dataframe): results_sum = {} decades = dataframe['decade'].unique() for k in range(1, 8): # da 1 a 7 giorni di combinazioni possibili results_sum[k] = pd.DataFrame(columns = decades, index = range(k)) for decade in decades: # per ogni decade df = dataframe[dataframe['decade'] == decade] counts = count_unique_values_sum(df, k) for count in counts: results_sum[k].ix[count[0], decade] = count[1] # to percent for key, value in results_sum.iteritems(): results_sum[key] = columns_to_percent(results_sum[key]) return results_sum def count_up_down(dataframe): combinations = [] results = {} decades = dataframe['decade'].unique() for k in range(1, 8): # da 1 a 7 giorni di combinazioni possibili r_for_k = [item for item in itertools.product(range(2), repeat=k)] combinations.extend(r_for_k) results[k] = pd.DataFrame(columns = decades, index = r_for_k) df_counts_all = pd.DataFrame(columns = decades, index = combinations) df_counts_all = df_counts_all.fillna(0) for k in range(1, 8): for decade in decades: # per ogni decade df = dataframe[dataframe['decade'] == decade] counts = count_unique_values_combinations(df, k) for count in counts: df_counts_all.ix[[count[0]], decade] = count[1] results[k].ix[[count[0]], decade] = count[1] # Converte i valori da assoluti a percentuali for key, value in results.iteritems(): results[key] = columns_to_percent(results[key]) results['all'] = df_counts_all return results def forecast_dominant_event(dataframe): df = count_up_down(dataframe)['all'] df = df.sum(axis=1) dominant_patterns = {} for index, combination in 
enumerate(df.index.values): if (index != 0) and (combination[:-1] == df.index.values[index-1][:-1]): first, second = combination, df.index.values[index-1] dominant = first if (df.ix[[first]].values[0] > df.ix[[second]].values[0]) else second dominant_patterns[combination[:-1]] = dominant[-1] events_for_k = {} for k in range(1,8): array_k = generate_list_of_tuples(dataframe, k) del array_k[-1] for i in range(k): array_k.insert(i, None) events_for_k[k] = array_k actual_events =
pd.DataFrame.from_dict(events_for_k)
pandas.DataFrame.from_dict
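The snippet above leans on idioms that current Python and pandas no longer provide: dict.iteritems() is Python 2 only, the .ix indexer has been removed, and the top-level pd.value_counts function is deprecated in recent pandas in favour of Series.value_counts. A small sketch of the modern equivalents, using toy data rather than the original decade counts.

import pandas as pd

counts = pd.DataFrame({2010: [3, 5]}, index=["up", "down"])
counts.loc[["up"], 2010] = 7                # instead of counts.ix[["up"], 2010]

results = {1: counts}
for key, value in results.items():          # instead of results.iteritems()
    results[key] = value / value.sum()      # same effect as columns_to_percent

sequence = [(1, 0), (1, 0), (0, 1)]
pattern_counts = pd.Series(sequence).value_counts()  # instead of pd.value_counts(sequence)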
# First Pre-process the image import torch from torch import nn, optim from torchvision import datasets, transforms, models import matplotlib.pyplot as plt import pandas as pd import numpy as np from PIL import Image import time import json from collections import OrderedDict import seaborn as sns import argparse import load import img parser = argparse.ArgumentParser(description='This is predict function') parser.add_argument('inputDirectory_image',help=' Enter directory path of the image',action='store',default = '/home/workspace/ImageClassifier/flowers/test/1/image_06743.jpg') parser.add_argument('checkpoint_path',help='Enter the checkpoint path', action='store',default='/home/workspace/ImageClassifier/checkpoint.pth') parser.add_argument('--category_names',dest='category',default = 'cat_to_name.json') parser.add_argument('--top_k', dest = 'topk', type=int,default = 5) parser.add_argument('--gpu',dest='gpu_use',action = 'store_true') args = parser.parse_args() checkpoint = args.checkpoint_path inputdir = args.inputDirectory_image category_name = args.category topk_value = args.topk gpu_u = args.gpu_use def imshow(image, ax=None, title=None): """Imshow for Tensor.""" if ax is None: fig, ax = plt.subplots() # PyTorch tensors assume the color channel is the first dimension # but matplotlib assumes is the third dimension image = image.numpy().transpose((1, 2, 0)) # Undo preprocessing mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) image = std * image + mean # Image needs to be clipped between 0 and 1 or it looks like noise when displayed image = np.clip(image, 0, 1) ax.imshow(image) return ax # PREDICTION TIME ! with open(category_name) as f: flower_to_name = json.load(f) def predict(image, pretrained_model,flower_to_name, topk_value): processed_image = img.process_image(image) pretrained_model.to('cpu') processed_image.unsqueeze_(0) probs = torch.exp(pretrained_model.forward(processed_image)) top_probs, top_labs = probs.topk(topk_value) top_probs = top_probs.detach().numpy().tolist() top_labs = top_labs.tolist() labels = pd.DataFrame({'class':
pd.Series(pretrained_model.class_to_idx)
pandas.Series
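The predict function above stops right where the topk indices need to be mapped back to class labels. A hedged sketch of the usual final step, not the original script: invert class_to_idx, look the indices up, and translate through the category-names JSON. All values below are invented.

import pandas as pd

class_to_idx = {"1": 0, "10": 1, "100": 2}   # hypothetical mapping from the checkpoint
flower_to_name = {"1": "pink primrose", "10": "globe thistle", "100": "blanket flower"}
top_labs = [2, 0]                            # indices returned by probs.topk()
top_probs = [0.7, 0.2]

idx_to_class = {v: k for k, v in class_to_idx.items()}   # invert the mapping
classes = [idx_to_class[i] for i in top_labs]
names = [flower_to_name[c] for c in classes]
result = pd.DataFrame({"class": classes, "name": names, "prob": top_probs})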
import datetime import inspect import numpy.testing as npt import os.path import pandas as pd import pkgutil import sys from tabulate import tabulate import unittest try: from StringIO import StringIO except ImportError: from io import StringIO, BytesIO # #find parent directory and import model # parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)) # sys.path.append(parentddir) from ..screenip_exe import Screenip, ScreenipOutputs #print(sys.path) #print(os.path) # load transposed qaqc data for inputs and expected outputs # this works for both local nosetests and travis deploy #input details try: if __package__ is not None: csv_data = pkgutil.get_data(__package__, 'screenip_qaqc_in_transpose.csv') data_inputs = BytesIO(csv_data) pd_obj_inputs = pd.read_csv(data_inputs, index_col=0, engine='python') else: # csv_transpose_path_in = "./screenip_qaqc_in_transpose.csv" csv_transpose_path_in = os.path.join(os.path.dirname(__file__), "screenip_qaqc_in_transpose.csv") #print(csv_transpose_path_in) pd_obj_inputs = pd.read_csv(csv_transpose_path_in, index_col=0, engine='python') pd_obj_inputs['csrfmiddlewaretoken'] = 'test' #with open('./screenip_qaqc_in_transpose.csv') as f: #csv_data = csv.reader(f) finally: pass #print('screenip inputs') #print('screenip input dimensions ' + str(pd_obj_inputs.shape)) #print('screenip input keys ' + str(pd_obj_inputs.columns.values.tolist())) #print(pd_obj_inputs) #expected output details try: if __package__ is not None: data_exp_outputs = BytesIO(pkgutil.get_data(__package__, 'screenip_qaqc_exp_transpose.csv')) pd_obj_exp = pd.read_csv(data_exp_outputs, index_col=0, engine= 'python') #print("screenip expected outputs") #print('screenip expected output dimensions ' + str(pd_obj_exp.shape)) #print('screenip expected output keys ' + str(pd_obj_exp.columns.values.tolist())) else: #csv_transpose_path_exp = "./screenip_qaqc_exp_transpose.csv" csv_transpose_path_exp = os.path.join(os.path.dirname(__file__), "screenip_qaqc_exp_transpose.csv") #print(csv_transpose_path_exp) pd_obj_exp = pd.read_csv(csv_transpose_path_exp, index_col=0, engine='python') finally: pass #print('screenip expected') #generate output screenip_output_empty = ScreenipOutputs() screenip_calc = Screenip(pd_obj_inputs, pd_obj_exp) screenip_calc.execute_model() inputs_json, outputs_json, exp_out_json = screenip_calc.get_dict_rep() #print("screenip output") #print(inputs_json) # #print(tabulate(pd_obj_inputs.iloc[:,0:5], headers='keys', tablefmt='fancy_grid')) #print(tabulate(pd_obj_inputs.iloc[:,6:11], headers='keys', tablefmt='fancy_grid')) #print(tabulate(pd_obj_inputs.iloc[:,12:17], headers='keys', tablefmt='fancy_grid')) # #print(tabulate(pd_obj_exp.iloc[:,0:1], headers='keys', tablefmt='fancy_grid')) test = {} class TestScreenip(unittest.TestCase): """ Integration tests for screenip. """ print("screenip integration tests conducted at " + str(datetime.datetime.today())) def __init__(self, *args, **kwargs): """ Constructor for screenip integration tests :param args: :param kwargs: :return: """ #adding to TestCase constructor so super super(TestScreenip, self).__init__(*args, **kwargs) self.ncases = len(pd_obj_inputs) def setUp(self): """ Setup routine for screenip integration tests :return: """ pass # screenip2 = screenip_model.screenip(0, pd_obj_inputs, pd_obj_exp_out) # setup the test as needed # e.g. 
pandas to open screenip qaqc csv # Read qaqc csv and create pandas DataFrames for inputs and expected outputs def tearDown(self): """ Teardown routine for screenip integration tests :return: """ pass # teardown called after each test # e.g. maybe write test results to some text file def test_assert_output_series(self): """ Verify that each output variable is a pd.Series """ try: num_variables = len(screenip_calc.pd_obj_out.columns) result = pd.Series(False, index=list(range(num_variables)), dtype='bool') expected = pd.Series(True, index=list(range(num_variables)), dtype='bool') for i in range(num_variables): column_name = screenip_calc.pd_obj_out.columns[i] output = getattr(screenip_calc, column_name) if isinstance(output, pd.Series): result[i] = True tab = pd.concat([result,expected], axis=1) print('model output properties as pandas series') print(tabulate(tab, headers='keys', tablefmt='fancy_grid')) npt.assert_array_equal(result, expected) finally: pass return def test_assert_output_series_dtypes(self): """ Verify that each output variable is the correct dtype """ try: num_variables = len(screenip_calc.pd_obj_out.columns) #get the string of the type that is expected and the type that has resulted result = pd.Series(False, index=list(range(num_variables)), dtype='bool') expected = pd.Series(True, index=list(range(num_variables)), dtype='bool') for i in range(num_variables): column_name = screenip_calc.pd_obj_out.columns[i] output_result = getattr(screenip_calc, column_name) column_dtype_result = output_result.dtype.name output_expected = getattr(screenip_output_empty, column_name) output_expected2 = getattr(screenip_calc.pd_obj_out, column_name) column_dtype_expected = output_expected.dtype.name if column_dtype_result == column_dtype_expected: result[i] = True #tab = pd.concat([result,expected], axis=1) if(result[i] != expected[i]): print(i) print(column_name) print(str(result[i]) + "/" + str(expected[i])) print(column_dtype_result + "/" + column_dtype_expected) print('result') print(output_result) print('expected') print(output_expected2) #print(tabulate(tab, headers='keys', tablefmt='fancy_grid')) npt.assert_array_equal(result, expected) finally: pass return def test_screenip_integration_dose_bird(self): """ Integration test for output screenip.dose_bird :return: """ func_name = inspect.currentframe().f_code.co_name try: self.blackbox_method_int('dose_bird', func_name) finally: pass return def test_screenip_integration_dose_mamm(self): """ Integration test for output screenip.dose_mamm :return: """ func_name = inspect.currentframe().f_code.co_name try: self.blackbox_method_int('dose_mamm', func_name) finally: pass return def test_screenip_integration_at_bird(self): """ Integration test for output screenip.at_bird :return: """ func_name = inspect.currentframe().f_code.co_name try: self.blackbox_method_int('at_bird', func_name) finally: pass return def test_screenip_integration_at_mamm(self): """ Integration test for output screenip.at_mamm :return: """ func_name = inspect.currentframe().f_code.co_name try: self.blackbox_method_int('at_mamm', func_name) finally: pass return # def test_screenip_integration_fi_bird(self): # """ # Integration test for output screenip.fi_bird # :return: # """ # func_name = inspect.currentframe().f_code.co_name # try: # self.blackbox_method_int('fi_bird', func_name) # pass # finally: # pass # return def test_screenip_integration_det(self): """ Integration test for output screenip.det :return: """ func_name = inspect.currentframe().f_code.co_name try: 
self.blackbox_method_int('det', func_name) finally: pass return def test_screenip_integration_act(self): """ Integration test for output screenip.act :return: """ func_name = inspect.currentframe().f_code.co_name try: self.blackbox_method_int('act', func_name) finally: pass return def test_screenip_integration_acute_bird(self): """ Integration test for output screenip.acute_bird :return: """ func_name = inspect.currentframe().f_code.co_name try: self.blackbox_method_int('acute_bird', func_name) finally: pass return def test_screenip_integration_acuconb(self): """ Integration test for output screenip.acuconb :return: """ func_name = inspect.currentframe().f_code.co_name try: self.blackbox_method_str('acuconb', func_name) finally: pass return def test_screenip_integration_acute_mamm(self): """ Integration test for output screenip.acute_mamm :return: """ func_name = inspect.currentframe().f_code.co_name try: self.blackbox_method_int('acute_mamm', func_name) finally: pass return def test_screenip_integration_acuconm(self): """ Integration test for output screenip.acuconm :return: """ func_name = inspect.currentframe().f_code.co_name try: self.blackbox_method_str('acuconm', func_name) finally: pass return def test_screenip_integration_chron_bird(self): """ Integration test for output screenip.chron_bird :return: """ func_name = inspect.currentframe().f_code.co_name try: self.blackbox_method_int('chron_bird', func_name) finally: pass return def test_screenip_integration_chronconb(self): """ Integration test for output screenip.chronconb :return: """ func_name = inspect.currentframe().f_code.co_name try: self.blackbox_method_str('chronconb', func_name) finally: pass return def test_screenip_integration_chron_mamm(self): """ integration test for output screenip.chron_mamm :return: """ func_name = inspect.currentframe().f_code.co_name try: self.blackbox_method_int('chron_mamm', func_name) finally: pass return def test_screenip_integration_chronconm(self): """ integration test for output screenip.chronconm :return: """ func_name = inspect.currentframe().f_code.co_name try: self.blackbox_method_str('chronconm', func_name) finally: pass return def blackbox_method_int(self, output, func_name): """ Helper method to reuse code for testing numpy array outputs from screenip model :param output: String; Pandas Series name (e.g. column name) without '_out' :return: """ try: pd.set_option('display.float_format','{:.4E}'.format) # display model output in scientific notation result = screenip_calc.pd_obj_out["out_" + output] expected = screenip_calc.pd_obj_exp["exp_" + output] tab = pd.concat([result, expected], axis=1) #print(" ") #print(tabulate(tab, headers='keys', tablefmt='fancy_grid')) # npt.assert_array_almost_equal(result, expected, 4, '', True) rtol = 1e-5 npt.assert_allclose(result, expected, rtol, 0, '', True) finally: tab =
pd.concat([result, expected], axis=1)
pandas.concat
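A minimal sketch of the comparison pattern inside blackbox_method_int: model output and expected values are lined up column-wise for inspection, then checked element-wise. The keyword form of assert_allclose is equivalent to the positional call above (rtol, 0, '', True) but easier to read; the series names here are invented.

import numpy.testing as npt
import pandas as pd

result = pd.Series([1.0000, 2.5000], name="out_dose_bird")
expected = pd.Series([1.0000001, 2.4999999], name="exp_dose_bird")

tab = pd.concat([result, expected], axis=1)  # two-column table for side-by-side inspection
print(tab)
npt.assert_allclose(result, expected, rtol=1e-5, atol=0)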
import pandas as pd import yfinance as yf import datetime as dt from darts.models import* from darts import TimeSeries from darts.utils.missing_values import fill_missing_values from darts.metrics import mape, mase import logging import warnings from warnings import filterwarnings #here the magic operates def oracle(portfolio, start_date, weights=None, prediction_days=None, based_on='Adj Close'): print("Collecting datas...") #define weights if weights == None: weights = [1.0 / len(portfolio)] * len(portfolio) #define today today = dt.datetime.today().strftime('%Y-%m-%d') #clean output from warning logger = logging.getLogger() warnings.simplefilter(action='ignore', category=FutureWarning) filterwarnings('ignore') logging.disable(logging.INFO) mape_df = pd.DataFrame() mape_df = mape_df.append({'Exponential smoothing' : 0, 'Prophet' : 0, 'Auto-ARIMA' : 0, 'Theta(2)':0, 'ARIMA' : 0, 'FFT' : 0, 'FourTheta' : 0, 'NaiveDrift':0, 'NaiveMean' : 0, 'NaiveSeasonal':0 }, ignore_index = True) final_df = pd.DataFrame() final_df = final_df.append({'Exponential smoothing' : 0, 'Prophet' : 0, 'Auto-ARIMA' : 0, 'Theta(2)':0, 'ARIMA' : 0, 'FFT' : 0, 'FourTheta' : 0, 'NaiveDrift':0, 'NaiveMean' : 0, 'NaiveSeasonal':0 }, ignore_index = True) for asset in portfolio: result = pd.DataFrame() df = yf.download(asset, start=start_date, end=today, progress=False)["Adj Close"] df = pd.DataFrame(df) df.reset_index(level=0, inplace=True) if prediction_days==None: x = 1 while x/(len(df)+x) < 0.3: x+=1 prediction_days = x def eval_model(model): model.fit(train) forecast = model.predict(len(val)) result[model] = [mape(val, forecast)] prediction = pd.DataFrame() def predict(model): model.fit(train) forecast = model.predict(len(val)) pred = model.predict(prediction_days) b = [str(pred[-1][0][0])][0] b = b.split("array([[[") c = b[1].split("]]])") d = c[0][ : -3] b = float(d) prediction[model] = [str(round(((b-start_value)/start_value)*100,3))+' %'] series = TimeSeries.from_dataframe(df, 'Date', based_on, freq='D') series = fill_missing_values(series) train_index = round(len(df.index)*0.7) train_date = df.loc[[train_index]]['Date'].values date = str(train_date[0])[:10] date = date.replace('-', '') timestamp = date+'000000' train, val = series.split_before(pd.Timestamp(timestamp)) print("Evaluating the models for "+str(asset)+"...") eval_model(ExponentialSmoothing()) eval_model(Prophet()) eval_model(AutoARIMA()) eval_model(Theta()) eval_model(ARIMA()) eval_model(FFT()) eval_model(FourTheta()) eval_model(NaiveDrift()) eval_model(NaiveMean()) eval_model(NaiveSeasonal()) print("Models evaluated!") result.columns = ['Exponential smoothing','Prophet', 'Auto-ARIMA', 'Theta(2)', 'ARIMA', 'FFT','FourTheta','NaiveDrift','NaiveMean', 'NaiveSeasonal'] result.index = [asset] mape_df = pd.concat([result, mape_df]) start_pred = str(df["Date"].iloc[-1])[:10] start_value = df[based_on].iloc[-1] start_pred = start_pred.replace('-', '') timestamp = start_pred+'000000' train, val = series.split_before(pd.Timestamp(timestamp)) print("Making the predictions for "+str(asset)+"...") predict(ExponentialSmoothing()) predict(Prophet()) predict(AutoARIMA()) predict(Theta()) predict(ARIMA()) predict(FFT()) predict(FourTheta()) predict(NaiveDrift()) predict(NaiveMean()) predict(NaiveSeasonal()) print("Predictions generated!") prediction.columns = ['Exponential smoothing','Prophet', 'Auto-ARIMA', 'Theta(2)', 'ARIMA', 'FFT','FourTheta','NaiveDrift','NaiveMean', 'NaiveSeasonal'] prediction.index = [asset] final_df = pd.concat([prediction, final_df]) 
print("\n") print("Assets MAPE (accuracy score)") with pd.option_context('display.max_rows', None, 'display.max_columns', None) and pd.option_context('expand_frame_repr', False): print(mape_df.iloc[:-1,:]) mape_df = pd.DataFrame(mape_df.iloc[:-1,:]) print("\n") print("Assets returns prediction for the next "+str(prediction_days)+" days") with pd.option_context('display.max_rows', None, 'display.max_columns', None) and
pd.option_context('expand_frame_repr', False)
pandas.option_context
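In the display block above, "with pd.option_context(...) and pd.option_context(...)" only enters the second context manager, because the and expression evaluates to its second operand; the max_rows/max_columns settings are therefore never applied. option_context accepts any number of option/value pairs, so one context manager covers all of them, as in the sketch below. Separately, the DataFrame.append calls earlier in the same function were removed in pandas 2.0, with pd.concat as the replacement.

import pandas as pd

with pd.option_context('display.max_rows', None,
                       'display.max_columns', None,
                       'expand_frame_repr', False):
    print(pd.DataFrame({"a": range(3)}))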
"""Compute statistics about missing values on a databse.""" import os from os.path import join import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd from custom.const import get_fig_folder, get_tab_folder from database import _load_feature_types, dbs from database.constants import (BINARY, CATEGORICAL, CONTINUE_R, is_categorical, is_continue, is_continuous, is_ordinal) from joblib import Memory from prediction.tasks import tasks from tqdm import tqdm from .common import filepaths from .plot_statistics import (figure1, figure2, figure2bis, figure3, plot_feature_types, plot_feature_wise_v2) from .tests import tasks_to_drop memory = Memory('joblib_cache') plt.rcParams.update({ 'text.usetex': True, 'mathtext.fontset': 'stix', 'font.family': 'STIXGeneral', 'axes.labelsize': 15, 'legend.fontsize': 11, 'figure.figsize': (8, 4.8), # 'figure.dpi': 600, }) task_tags = [ 'TB/death_pvals', 'TB/hemo', 'TB/hemo_pvals', # 'TB/platelet', 'TB/platelet_pvals', 'TB/septic_pvals', 'UKBB/breast_25', 'UKBB/breast_pvals', 'UKBB/fluid_pvals', 'UKBB/parkinson_pvals', 'UKBB/skin_pvals', 'MIMIC/hemo_pvals', 'MIMIC/septic_pvals', # 'NHIS/bmi_pvals', 'NHIS/income_pvals', ] db_order = [ 'TB', 'UKBB', 'MIMIC', 'NHIS', ] db_rename = { 'TB': 'Traumabase', } def get_indicators_mv(df_mv): """Compute indicators about missing values. Used for plotting figures.""" # 1: Statistics on the full database n_rows, n_cols = df_mv.shape n_values = n_rows*n_cols df_mv1 = df_mv == 1 df_mv2 = df_mv == 2 df_mv_bool = df_mv != 0 # Number of missing values in the DB n_mv1 = df_mv1.values.sum() n_mv2 = df_mv2.values.sum() n_mv = n_mv1 + n_mv2 n_not_mv = n_values - n_mv # Frequencies of missing values in the DB f_mv1 = 100*n_mv1/n_values f_mv2 = 100*n_mv2/n_values f_mv = 100*n_mv/n_values f_not_mv = 100*n_not_mv/n_values # Store the indicators in a df df_1 = pd.DataFrame({ 'n_rows': [n_rows], 'n_cols': [n_cols], 'n_values': [n_values], 'n_mv': [n_mv], 'n_mv1': [n_mv1], 'n_mv2': [n_mv2], 'n_not_mv': [n_not_mv], 'f_mv': [f_mv], 'f_mv1': [f_mv1], 'f_mv2': [f_mv2], 'f_not_mv': [f_not_mv], }) # 2: Number of features with missing values # For each feature, tells if it contains MV of type 1 df_f_w_mv1 = df_mv1.any().rename('MV1') # For each feature, tells if it contains MV of type 2 df_f_w_mv2 = df_mv2.any().rename('MV2') # Concat previous series df_f_w_mv = pd.concat([df_f_w_mv1, df_f_w_mv2], axis=1) # Add columns for logical combination of the two series df_f_w_mv['MV'] = df_f_w_mv['MV1'] | df_f_w_mv['MV2'] # MV1 or MV2 df_f_w_mv['MV1a2'] = df_f_w_mv['MV1'] & df_f_w_mv['MV2'] # MV1 and MV2 df_f_w_mv['MV1o'] = df_f_w_mv['MV1'] & ~df_f_w_mv['MV2'] # MV1 only df_f_w_mv['MV2o'] = ~df_f_w_mv['MV1'] & df_f_w_mv['MV2'] # MV2 only # By summing, derive the number of features with MV of a given type df_n_f_w_mv = df_f_w_mv.sum() # Numbers of features with missing values n_f_w_mv = df_n_f_w_mv['MV'] # MV1 or MV2 n_f_w_mv1_o = df_n_f_w_mv['MV1o'] # MV1 only n_f_w_mv2_o = df_n_f_w_mv['MV2o'] # MV2 only n_f_w_mv_1a2 = df_n_f_w_mv['MV1a2'] # MV1 and MV2 n_f_wo_mv = n_cols - df_n_f_w_mv['MV'] # Without MV # Frequencies of features with missing values f_f_w_mv1_o = 100*n_f_w_mv1_o/n_cols f_f_w_mv2_o = 100*n_f_w_mv2_o/n_cols f_f_w_mv = 100*n_f_w_mv/n_cols f_f_w_mv_1a2 = 100*n_f_w_mv_1a2/n_cols f_f_wo_mv = 100*n_f_wo_mv/n_cols # Store the indicators in a df df_2 = pd.DataFrame({ 'n_f_w_mv': [n_f_w_mv], 'n_f_w_mv1_o': [n_f_w_mv1_o], 'n_f_w_mv2_o': [n_f_w_mv2_o], 'n_f_w_mv_1a2': [n_f_w_mv_1a2], 'n_f_wo_mv': [n_f_wo_mv], 'f_f_w_mv': 
[f_f_w_mv], 'f_f_w_mv1_o': [f_f_w_mv1_o], 'f_f_w_mv2_o': [f_f_w_mv2_o], 'f_f_w_mv_1a2': [f_f_w_mv_1a2], 'f_f_wo_mv': [f_f_wo_mv], }) # 3: Statistics feature-wise n_mv1_fw = df_mv1.sum().to_frame('N MV1') # Number of MV 1 by feature n_mv2_fw = df_mv2.sum().to_frame('N MV2') # Number of MV 2 by feature n_mv_fw = pd.concat([n_mv1_fw, n_mv2_fw], axis=1) n_mv_fw['N MV'] = n_mv_fw['N MV1'] + n_mv_fw['N MV2'] n_mv_fw['N V'] = n_rows n_mv_fw['N NMV'] = n_mv_fw['N V'] - n_mv_fw['N MV'] n_mv_fw['F MV1'] = 100*n_mv_fw['N MV1']/n_rows n_mv_fw['F MV2'] = 100*n_mv_fw['N MV2']/n_rows n_mv_fw['F MV'] = 100*n_mv_fw['N MV']/n_rows n_mv_fw['id'] = np.arange(0, n_mv_fw.shape[0]) # Sort by number of missing values n_mv_fw.sort_values('N MV', ascending=False, inplace=True) # Store the indicators in a df df_3 = n_mv_fw # 4: Rows without missing values # For each row, tells if it contains MV of type 1 df_r_w_mv1 = df_mv1.any(axis=1).rename('MV1') # For each row, tells if it contains MV of type 2 df_r_w_mv2 = df_mv2.any(axis=1).rename('MV2') # Concat previous series df_r_w_mv = pd.concat([df_r_w_mv1, df_r_w_mv2], axis=1) # Add columns for logical combination of the two series df_r_w_mv['MV'] = df_r_w_mv['MV1'] | df_r_w_mv['MV2'] # MV1 or MV2 df_r_w_mv['MV1a2'] = df_r_w_mv['MV1'] & df_r_w_mv['MV2'] # MV1 and MV2 df_r_w_mv['MV1o'] = df_r_w_mv['MV1'] & ~df_r_w_mv['MV2'] # MV1 only df_r_w_mv['MV2o'] = ~df_r_w_mv['MV1'] & df_r_w_mv['MV2'] # MV2 only # By summing, derive the number of rows with MV of a given type df_n_r_w_mv = df_r_w_mv.sum() # Numbers of rows with missing values n_r_w_mv = df_n_r_w_mv['MV'] # MV1 or MV2 n_r_w_mv1_o = df_n_r_w_mv['MV1o'] # MV1 only n_r_w_mv2_o = df_n_r_w_mv['MV2o'] # MV2 only n_r_w_mv_1a2 = df_n_r_w_mv['MV1a2'] # MV1 and MV2 n_r_wo_mv = n_rows - df_n_r_w_mv['MV'] # Without MV # Frequencies of rows with missing values f_r_w_mv1_o = 100*n_r_w_mv1_o/n_rows f_r_w_mv2_o = 100*n_r_w_mv2_o/n_rows f_r_w_mv = 100*n_r_w_mv/n_rows f_r_w_mv_1a2 = 100*n_r_w_mv_1a2/n_rows f_r_wo_mv = 100*n_r_wo_mv/n_rows # Store the indicators in a df df_4 = pd.DataFrame({ 'n_r_w_mv': [n_r_w_mv], 'n_r_w_mv1_o': [n_r_w_mv1_o], 'n_r_w_mv2_o': [n_r_w_mv2_o], 'n_r_w_mv_1a2': [n_r_w_mv_1a2], 'n_r_wo_mv': [n_r_wo_mv], 'f_r_w_mv': [f_r_w_mv], 'f_r_w_mv1_o': [f_r_w_mv1_o], 'f_r_w_mv2_o': [f_r_w_mv2_o], 'f_r_w_mv_1a2': [f_r_w_mv_1a2], 'f_r_wo_mv': [f_r_wo_mv], }) # 5: Number of rows affected if we remove features with MV df_f_w_mv1 = df_f_w_mv['MV1'] # Series of features having MV1 df_f_w_mv2 = df_f_w_mv['MV2'] # Series of features having MV2 df_f_w_mv_1o2 = df_f_w_mv['MV'] # Series of features having MV1 or MV2 df_f_w_mv1_o = df_f_w_mv['MV1o'] # Series of features having MV1 only df_f_w_mv2_o = df_f_w_mv['MV2o'] # Series of features having MV2 only df_f_w_mv_1a2 = df_f_w_mv['MV1a2'] # Series of features having MV1 and MV2 df_features = pd.Series(True, index=df_f_w_mv.index) features_to_drop_mv1 = df_features.loc[~df_f_w_mv1].index features_to_drop_mv2 = df_features.loc[~df_f_w_mv2].index features_to_drop_mv_1o2 = df_features.loc[~df_f_w_mv_1o2].index features_to_drop_mv1_o = df_features.loc[~df_f_w_mv1_o].index features_to_drop_mv2_o = df_features.loc[~df_f_w_mv2_o].index features_to_drop_mv_1a2 = df_features.loc[~df_f_w_mv_1a2].index df_mv1_dropped = df_mv_bool.drop(features_to_drop_mv1, 1) df_mv2_dropped = df_mv_bool.drop(features_to_drop_mv2, 1) df_mv_1o2_dropped = df_mv_bool.drop(features_to_drop_mv_1o2, 1) df_mv1_o_dropped = df_mv_bool.drop(features_to_drop_mv1_o, 1) df_mv2_o_dropped = 
df_mv_bool.drop(features_to_drop_mv2_o, 1) df_mv_1a2_dropped = df_mv_bool.drop(features_to_drop_mv_1a2, 1) # Number of rows affected if we remove feature having MV of type: n_r_a_rm_mv1 = (~df_mv1_dropped).any(axis=1).sum() # MV1 n_r_a_rm_mv2 = (~df_mv2_dropped).any(axis=1).sum() # MV2 n_r_a_rm_mv_1o2 = (~df_mv_1o2_dropped).any(axis=1).sum() # MV1 or MV2 n_r_a_rm_mv1_o = (~df_mv1_o_dropped).any(axis=1).sum() # MV1 only n_r_a_rm_mv2_o = (~df_mv2_o_dropped).any(axis=1).sum() # MV2 only n_r_a_rm_mv_1a2 = (~df_mv_1a2_dropped).any(axis=1).sum() # MV1 and MV2 # Frequencies of rows affected if we remove feature having MV of type: f_r_a_rm_mv1 = 100*n_r_a_rm_mv1/n_rows # MV1 f_r_a_rm_mv2 = 100*n_r_a_rm_mv2/n_rows # MV2 f_r_a_rm_mv_1o2 = 100*n_r_a_rm_mv_1o2/n_rows # MV1 or MV2 f_r_a_rm_mv1_o = 100*n_r_a_rm_mv1_o/n_rows # MV1 only f_r_a_rm_mv2_o = 100*n_r_a_rm_mv2_o/n_rows # MV2 only f_r_a_rm_mv_1a2 = 100*n_r_a_rm_mv_1a2/n_rows # MV1 and MV2 # Store the indicators in a df df_5 = pd.DataFrame({ 'n_r_a_rm_mv1': [n_r_a_rm_mv1], 'n_r_a_rm_mv2': [n_r_a_rm_mv2], 'n_r_a_rm_mv_1o2': [n_r_a_rm_mv_1o2], 'n_r_a_rm_mv1_o': [n_r_a_rm_mv1_o], 'n_r_a_rm_mv2_o': [n_r_a_rm_mv2_o], 'n_r_a_rm_mv_1a2': [n_r_a_rm_mv_1a2], 'f_r_a_rm_mv1': [f_r_a_rm_mv1], 'f_r_a_rm_mv2': [f_r_a_rm_mv2], 'f_r_a_rm_mv_1o2': [f_r_a_rm_mv_1o2], 'f_r_a_rm_mv1_o': [f_r_a_rm_mv1_o], 'f_r_a_rm_mv2_o': [f_r_a_rm_mv2_o], 'f_r_a_rm_mv_1a2': [f_r_a_rm_mv_1a2], }) # 6: Proportion of information lost when removing features with MV # Number n_v_lost_mv1 = (~df_mv1_dropped).sum().sum() n_v_lost_mv2 = (~df_mv2_dropped).sum().sum() n_v_lost_mv_1o2 = (~df_mv_1o2_dropped).sum().sum() n_v_lost_mv1_o = (~df_mv1_o_dropped).sum().sum() n_v_lost_mv2_o = (~df_mv2_o_dropped).sum().sum() n_v_lost_mv_1a2 = (~df_mv_1a2_dropped).sum().sum() # Frequencies f_v_lost_mv1 = 100*n_v_lost_mv1/n_values f_v_lost_mv2 = 100*n_v_lost_mv2/n_values f_v_lost_mv_1o2 = 100*n_v_lost_mv_1o2/n_values f_v_lost_mv1_o = 100*n_v_lost_mv1_o/n_values f_v_lost_mv2_o = 100*n_v_lost_mv2_o/n_values f_v_lost_mv_1a2 = 100*n_v_lost_mv_1a2/n_values # Store the indicators in a df df_6 = pd.DataFrame({ 'n_v_lost_mv1': [n_v_lost_mv1], 'n_v_lost_mv2': [n_v_lost_mv2], 'n_v_lost_mv_1o2': [n_v_lost_mv_1o2], 'n_v_lost_mv1_o': [n_v_lost_mv1_o], 'n_v_lost_mv2_o': [n_v_lost_mv2_o], 'n_v_lost_mv_1a2': [n_v_lost_mv_1a2], 'f_v_lost_mv1': [f_v_lost_mv1], 'f_v_lost_mv2': [f_v_lost_mv2], 'f_v_lost_mv_1o2': [f_v_lost_mv_1o2], 'f_v_lost_mv1_o': [f_v_lost_mv1_o], 'f_v_lost_mv2_o': [f_v_lost_mv2_o], 'f_v_lost_mv_1a2': [f_v_lost_mv_1a2], }) return { 'global': df_1, 'features': df_2, 'feature-wise': df_3, 'rows': df_4, 'rm_rows': df_5, 'rm_features': df_6, } def every_mv_distribution(): matplotlib.rcParams.update({ 'font.size': 14, 'axes.titlesize': 10, 'axes.labelsize': 8, # 'xtick.labelsize': 13, # 'ytick.labelsize': 13, }) fig, axes = plt.subplots(6, 3, figsize=(6, 9)) L1 = ['TB/death_pvals', 'TB/hemo', 'TB/hemo_pvals'] L2 = ['TB/platelet_pvals', 'TB/septic_pvals', None] # L2 = [None, None, None] # L3 = [None, None, None] # L4 = [None, None, None] # L5 = [None, None, None] # L6 = [None, None, None] L3 = ['UKBB/breast_25', 'UKBB/breast_pvals', 'UKBB/fluid_pvals'] L4 = ['UKBB/parkinson_pvals', 'UKBB/skin_pvals', None] L5 = ['MIMIC/hemo_pvals', 'MIMIC/septic_pvals', None] L6 = ['NHIS/income_pvals', None, None] L = [L1, L2, L3, L4, L5, L6] colors = { 'TB': 'tab:blue', 'UKBB': 'tab:orange', 'MIMIC': 'tab:green', 'NHIS': 'tab:red', # 'TB': 'blue', # 'UKBB': 'orange', # 'MIMIC': 'green', # 'NHIS': 'red', } handles_dict = {} for 
i, row in enumerate(tqdm(L)): for j, tag in enumerate(row): ax = axes[i][j] if tag is None: ax.axis('off') continue db, task = tag.split('/') indicators = cached_indicators(tag, encode_features=False) _, _, handles = plot_feature_wise_v2(indicators, ax=ax, plot=True, color=colors[db]) db = db.replace('TB', 'Traumabase') handles_dict[db] = handles[1] task = task.replace('_', '\\_') task = task.replace('pvals', 'screening') ax.set_title(task) # axes[-1, -1].legend(handles_dict.values(), handles_dict.keys(), # fancybox=True, shadow=True, loc='center', title='Missing values') # p_dummy, = plt.plot([0], marker='None', linestyle='None', label='dummy-tophead') # handles_dict[''] = handles[0] # handles = [p_dummy]*5+list(handles_dict.values()) # labels = ['Missing'] + ['']*3 + ['Not missing'] + list(handles_dict.keys()) # axes[-1, -1].legend(handles, labels, ncol=2, # fancybox=True, shadow=True, loc='center',) fig.tight_layout() db_titles = { 0: 'Traumabase', 2: 'UKBB', 4: 'MIMIC', 5: 'NHIS', } db_titles2 = { 1: 'Traumabase', 3: 'UKBB', 4: 'MIMIC', 5: 'NHIS', } ax = axes[0] fs = 14 lw = 1.3 dh = 1./9 l_tail = 0.03 pos_arrow = -0.45 for i, db in db_titles2.items(): # Here is the label and arrow code of interest # axes[i, 0].annotate(db, xy=(-0.4, 0.5), xycoords='axes fraction', # fontsize=fs, ha='center', va='center', # bbox=None,#dict(boxstyle='square', fc='white'), # # arrowprops=dict(arrowstyle=f'-[, widthB={70/fs}, lengthB=0.5', lw=lw), # rotation=90, # ) axes[i, -1].annotate(db, xy=(0.5, 0.5), xycoords='axes fraction', fontsize=fs, ha='center', va='center', bbox=dict(boxstyle='square', fc='white'), # arrowprops=dict(arrowstyle=f'-[, widthB={70/fs}, lengthB=0.5', lw=lw), rotation=0, ) # x, y = np.array([[0, 1], ]) dh = 1./6 # for i in range(0,7): for i in [1.05, 2.02, 3.96]: y = i*dh line = matplotlib.lines.Line2D([0, 1], [y, y], lw=1, ls='-', color='silver', alpha=1, transform=fig.transFigure) # axes[1, 0].add_line(line) fig.add_artist(line) axes[-1, 0].set_xlabel('Features') for ax in axes[:, 0]: ax.set_ylabel('Proportion') # axes[-1, 0].set_ylabel('Proportion') return fig, axes @memory.cache def cached_indicators(task_tag, encode_features=False): task = tasks[task_tag] if not encode_features and 'pvals' in task_tag: task.meta.encode_select = None task.meta.encode_transform = None mv = task.mv indicators = get_indicators_mv(mv) return indicators def run_mv(args, graphics_folder): """Show some statistics on the given df.""" if args.tag is None: every_mv_distribution() fig_folder = get_fig_folder(graphics_folder) fig_name = 'mv_distribution' plt.savefig(join(fig_folder, f'{fig_name}.pdf'), bbox_inches='tight') plt.tight_layout() return task_tag = args.tag plot = not args.hide indicators = cached_indicators(task_tag) task = tasks[task_tag] db_name = task.meta.db df_name = task_tag fig1, fig2, fig2b, fig3 = args.fig1, args.fig2, args.fig2b, args.fig3 if not any((fig1, fig2, fig2b, fig3)): fig1, fig2, fig2b, fig3 = True, True, True, True # Plot all the indicators if fig1: figure1(indicators, plot=plot, db_name=db_name, table=df_name) if fig2: figure2(indicators, plot=plot, db_name=db_name, table=df_name) if fig2b: figure2bis(indicators, plot=plot, db_name=db_name, table=df_name) if fig3: figure3(indicators, plot=plot, db_name=db_name, table=df_name) fig_folder = get_fig_folder(graphics_folder) os.makedirs(join(fig_folder, db_name), exist_ok=True) plt.savefig(join(fig_folder, f'{df_name}.pdf'), bbox_inches='tight') plt.tight_layout() plt.show() @memory.cache def cached_types(task_tag, 
encode_features=False, T=0): task = tasks.get(task_tag, T=T) db_name = task.meta.db db = dbs[db_name] df_name = task.meta.df_name # Load types of all inital features of the database db_types = _load_feature_types(db, df_name, anonymized=False) L = list(task.X.columns) L.sort() L = [f.split('_')[0] for f in L] L = list(set(L)) if db_name == 'TB': task_types = pd.Series(CONTINUE_R, index=L) elif db_name == 'UKBB': task_types =
pd.Series(BINARY, index=L)
pandas.Series
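The feature-dropping block above passes axis positionally, as in drop(features_to_drop_mv1, 1); recent pandas releases have moved everything after the labels to keyword-only, and drop(columns=...) states the intent directly. A small sketch with toy data.

import pandas as pd

df_mv_bool = pd.DataFrame({"age": [True, False], "sex": [False, False]})
features_to_drop = pd.Index(["sex"])

dropped = df_mv_bool.drop(columns=features_to_drop)  # instead of df_mv_bool.drop(features_to_drop, 1)
n_rows_flagged = dropped.any(axis=1).sum()           # rows with at least one True remaining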
import matplotlib.pyplot as plt import numpy as np import pandas as pd import cantera as ct import copy from textwrap import wrap import scipy.stats as stats import math from scipy.stats import multivariate_normal from matplotlib import style from mpl_toolkits.mplot3d import Axes3D from scipy.stats import norm import matplotlib.mlab as mlab import matplotlib.cm as cm import MSI.master_equation.master_equation as meq import re import os import MSI.simulations.instruments.ignition_delay as ig import MSI.cti_core.cti_processor as pr import MSI.simulations.instruments.jsr_steadystate as jsr class Plotting(object): def __init__(self,S_matrix, s_matrix, Y_matrix, y_matrix, z_matrix, X, sigma, covarience, original_covariance, S_matrix_original, exp_dict_list_optimized, exp_dict_list_original, parsed_yaml_list, Ydf, target_value_rate_constant_csv='', target_value_rate_constant_csv_extra_values = '', k_target_value_S_matrix = None, k_target_values='Off', working_directory='', sigma_uncertainty_weighted_sensitivity_csv='', simulation_run=None, shock_tube_instance = None, cheby_sensitivity_dict = None, mapped_to_alpha_full_simulation=None, optimized_cti_file='', original_cti_file='', sigma_ones=False, T_min=200, T_max=3000, P_min=1013.25, P_max=1.013e+6): self.S_matrix = S_matrix self.s_matrix = s_matrix self.Y_matrix = Y_matrix self.y_matrix = y_matrix self.z_matrix = z_matrix self.X = X self.sigma = sigma #self.sigma = sigma self.covarience=covarience self.original_covariance=original_covariance #original self.S_matrix_original=S_matrix_original self.exp_dict_list_optimized = exp_dict_list_optimized self.exp_dict_list_original = exp_dict_list_original self.parsed_yaml_list = parsed_yaml_list self.target_value_rate_constant_csv = target_value_rate_constant_csv self.k_target_value_S_matrix = k_target_value_S_matrix self.Ydf = Ydf self.k_target_values=k_target_values self.target_value_rate_constant_csv_extra_values = target_value_rate_constant_csv_extra_values self.working_directory = working_directory self.sigma_uncertainty_weighted_sensitivity_csv = sigma_uncertainty_weighted_sensitivity_csv self.simulation_run = simulation_run self.shock_tube_instance = shock_tube_instance self.cheby_sensitivity_dict=cheby_sensitivity_dict self.mapped_to_alpha_full_simulation = mapped_to_alpha_full_simulation, self.new_cti=optimized_cti_file self.nominal_cti=original_cti_file self.sigma_ones = sigma_ones self.T_min = T_min self.T_max = T_max self.P_min = P_min self.P_max = P_max def lengths_of_experimental_data(self): simulation_lengths_of_experimental_data = [] for i,exp in enumerate(self.exp_dict_list_optimized): length_of_experimental_data=[] observable_counter=0 for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables'] + exp['ignition_delay_observables']): if observable == None: continue if observable in exp['mole_fraction_observables']: if re.match('[Ss]hock [Tt]ube',exp['simulation_type']): length_of_experimental_data.append(exp['experimental_data'][observable_counter]['Time'].shape[0]) observable_counter+=1 elif re.match('[Jj][Ss][Rr]',exp['simulation_type']): length_of_experimental_data.append(exp['experimental_data'][observable_counter]['Temperature'].shape[0]) observable_counter+=1 elif re.match('[Ss]pecies[- ][Pp]rofile',exp['experiment_type']) and re.match('[Ff]low[ -][Rr]eactor',exp['simulation_type']): length_of_experimental_data.append(exp['experimental_data'][observable_counter]['Temperature'].shape[0]) observable_counter+=1 if observable in 
exp['concentration_observables']: if re.match('[Ss]hock [Tt]ube',exp['simulation_type']): length_of_experimental_data.append(exp['experimental_data'][observable_counter]['Time'].shape[0]) observable_counter+=1 elif re.match('[Jj][Ss][Rr]',exp['simulation_type']): length_of_experimental_data.append(exp['experimental_data'][observable_counter]['Temperature'].shape[0]) observable_counter+=1 elif re.match('[Ss]pecies[- ][Pp]rofile',exp['experiment_type']) and re.match('[Ff]low[ -][Rr]eactor',exp['simulation_type']): length_of_experimental_data.append(exp['experimental_data'][observable_counter]['Temperature'].shape[0]) observable_counter+=1 if observable in exp['ignition_delay_observables']: if re.match('[Ss]hock [Tt]ube',exp['simulation_type']) and re.match('[iI]gnition[- ][Dd]elay',exp['experiment_type']): if 'temperature' in list(exp['experimental_data'][observable_counter].columns): length_of_experimental_data.append(exp['experimental_data'][observable_counter]['temperature'].shape[0]) observable_counter+=1 elif 'pressure' in list(exp['experimental_data'][observable_counter].columns): length_of_experimental_data.append(exp['experimental_data'][observable_counter]['pressure'].shape[0]) observable_counter+=1 else: length_of_experimental_data.append(exp['experimental_data'][observable_counter].shape[0]) observable_counter+=1 elif re.match('[Rr][Cc][Mm]',exp['simulation_type']) and re.match('[iI]gnition[- ][Dd]elay',exp['experiment_type']): if 'temperature' in list(exp['experimental_data'][observable_counter].columns): length_of_experimental_data.append(exp['experimental_data'][observable_counter]['temperature'].shape[0]) observable_counter+=1 elif 'pressure' in list(exp['experimental_data'][observable_counter].columns): length_of_experimental_data.append(exp['experimental_data'][observable_counter]['pressure'].shape[0]) observable_counter+=1 else: length_of_experimental_data.append(exp['experimental_data'][observable_counter].shape[0]) observable_counter+=1 if 'perturbed_coef' in exp.keys(): wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths'] absorbance_wl=0 for k,wl in enumerate(wavelengths): length_of_experimental_data.append(exp['absorbance_experimental_data'][k]['time'].shape[0]) absorbance_wl+=1 else: absorbance_wl=0 simulation_lengths_of_experimental_data.append(length_of_experimental_data) self.simulation_lengths_of_experimental_data=simulation_lengths_of_experimental_data return observable_counter+absorbance_wl,length_of_experimental_data def calculating_sigmas(self,S_matrix,covarience): sigmas =[[] for x in range(len(self.simulation_lengths_of_experimental_data))] counter=0 for x in range(len(self.simulation_lengths_of_experimental_data)): for y in range(len(self.simulation_lengths_of_experimental_data[x])): temp=[] for z in np.arange(counter,(self.simulation_lengths_of_experimental_data[x][y]+counter)): SC = np.dot(S_matrix[z,:],covarience) sigma = np.dot(SC,np.transpose(S_matrix[z,:])) test = sigma sigma = np.sqrt(sigma) temp.append(sigma) temp = np.array(temp) sigmas[x].append(temp) counter = counter + self.simulation_lengths_of_experimental_data[x][y] return sigmas, test def run_ignition_delay(self,exp,cti,n_of_data_points=10): p=pr.Processor(cti) if 'volumeTraceCsv' not in exp['simulation'].fullParsedYamlFile.keys(): if len(exp['simulation'].fullParsedYamlFile['temperatures'])>1: tempmin=np.min(exp['simulation'].fullParsedYamlFile['temperatures']) print(tempmin , 'THis is the min temp') tempmax=np.max(exp['simulation'].fullParsedYamlFile['temperatures']) 
print(tempmax,'This is the max temp') total_range=tempmax-tempmin tempmax=tempmax+0.1*total_range tempmin=tempmin-0.1*total_range temprange=np.linspace(tempmin,tempmax,n_of_data_points) pressures=exp['simulation'].fullParsedYamlFile['pressures'] print(pressures,'These are the pressures') conds=exp['simulation'].fullParsedYamlFile['conditions_to_run'] print(conds,'These are the conditions') elif len(exp['simulation'].fullParsedYamlFile['pressures'])>1: pmin = exp['simulation'].fullParsedYamlFile['pressures']*0.9 pmax = exp['simulation'].fullParsedYamlFile['pressures']*1.1 total_range=pmax-pmin pmax=pmax+0.1*total_range pmin=pmin-0.1*total_range pressures = np.linspace(pmin,pmax,n_of_data_points) temprange = exp['simulation'].fullParsedYamlFile['temperatures'] conds = exp['simulation'].fullParsedYamlFile['conditions_to_run'] elif len(exp['simulation'].fullParsedYamlFile['conditions_to_run'])>1: print('Plotting for conditions depedendent ignition delay not yet installed') ig_delay=ig.ignition_delay_wrapper(pressures=pressures, temperatures=temprange, observables=exp['simulation'].fullParsedYamlFile['observables'], kineticSens=0, physicalSens=0, conditions=conds, thermalBoundary=exp['simulation'].fullParsedYamlFile['thermalBoundary'], mechanicalBoundary=exp['simulation'].fullParsedYamlFile['mechanicalBoundary'], processor=p, cti_path="", save_physSensHistories=0, fullParsedYamlFile=exp['simulation'].fullParsedYamlFile, save_timeHistories=0, log_file=True, log_name='log.txt', timeshift=exp['simulation'].fullParsedYamlFile['time_shift'], initialTime=exp['simulation'].fullParsedYamlFile['initialTime'], finalTime=exp['simulation'].fullParsedYamlFile['finalTime'], target=exp['simulation'].fullParsedYamlFile['target'], target_type=exp['simulation'].fullParsedYamlFile['target_type'], n_processors=2) soln,temp=ig_delay.run() elif 'volumeTraceCsv' in exp['simulation'].fullParsedYamlFile.keys(): if len(exp['simulation'].fullParsedYamlFile['temperatures'])>1: tempmin=np.min(exp['simulation'].fullParsedYamlFile['temperatures']) tempmax=np.max(exp['simulation'].fullParsedYamlFile['temperatures']) total_range=tempmax-tempmin tempmax=tempmax+0.1*total_range tempmin=tempmin-0.1*total_range temprange=np.linspace(tempmin,tempmax,n_of_data_points) pressures=exp['simulation'].fullParsedYamlFile['pressures'] conds=exp['simulation'].fullParsedYamlFile['conditions_to_run'] volumeTrace = exp['simulation'].fullParsedYamlFile['volumeTraceCsv'] elif len(exp['simulation'].fullParsedYamlFile['pressures'])>1: pmin = exp['simulation'].fullParsedYamlFile['pressures']*0.9 pmax = exp['simulation'].fullParsedYamlFile['pressures']*1.1 total_range=pmax-pmin pmax=pmax+0.1*total_range pmin=pmin-0.1*total_range pressures = np.linspace(pmin,pmax,n_of_data_points) temprange = exp['simulation'].fullParsedYamlFile['temperatures'] conds = exp['simulation'].fullParsedYamlFile['conditions_to_run'] volumeTrace = exp['simulation'].fullParsedYamlFile['volumeTraceCsv'] elif len(exp['simulation'].fullParsedYamlFile['conditions_to_run'])>1: print('Plotting for conditions depedendent ignition delay not yet installed') ig_delay=ig.ignition_delay_wrapper(pressures=pressures, temperatures=temprange, observables=exp['simulation'].fullParsedYamlFile['observables'], kineticSens=0, physicalSens=0, conditions=conds, thermalBoundary=exp['simulation'].fullParsedYamlFile['thermalBoundary'], mechanicalBoundary=exp['simulation'].fullParsedYamlFile['mechanicalBoundary'], processor=p, cti_path="", save_physSensHistories=0, 
fullParsedYamlFile=exp['simulation'].fullParsedYamlFile, save_timeHistories=0, log_file=True, log_name='log.txt', timeshift=exp['simulation'].fullParsedYamlFile['time_shift'], initialTime=exp['simulation'].fullParsedYamlFile['initialTime'], finalTime=exp['simulation'].fullParsedYamlFile['finalTime'], target=exp['simulation'].fullParsedYamlFile['target'], target_type=exp['simulation'].fullParsedYamlFile['target_type'], n_processors=2, volumeTrace=volumeTrace) soln,temp=ig_delay.run() #print(soln) return soln def run_jsr(self,exp,cti,n_of_data_points=100): p=pr.Processor(cti) tempmin=np.min(exp['simulation'].fullParsedYamlFile['temperatures']) print('Tempmin: '+str(tempmin)) tempmax=np.max(exp['simulation'].fullParsedYamlFile['temperatures']) print('Tempmax: '+str(tempmax)) if tempmax!=tempmin: total_range=tempmax-tempmin tempmax=tempmax+0.1*total_range tempmin=tempmin-0.1*total_range elif tempmax==tempmin: tempmax=tempmax*1.1 tempmin=tempmin*0.9 temprange=np.linspace(tempmin,tempmax,n_of_data_points) print(temprange) pressures=exp['simulation'].fullParsedYamlFile['pressure'] conds=exp['simulation'].fullParsedYamlFile['conditions'] jsr1=jsr.JSR_multiTemp_steadystate(volume=exp['simulation'].fullParsedYamlFile['volume'], pressure=pressures, temperatures=temprange, observables=exp['simulation'].fullParsedYamlFile['observables'], kineticSens=0, physicalSens=0, conditions=conds, thermalBoundary=exp['simulation'].fullParsedYamlFile['thermalBoundary'], mechanicalBoundary=exp['simulation'].fullParsedYamlFile['mechanicalBoundary'], processor=p, save_physSensHistories=0, save_timeHistories=0, residence_time=exp['simulation'].fullParsedYamlFile['residence_time'], moleFractionObservables = exp['simulation'].fullParsedYamlFile['moleFractionObservables'], concentrationObservables = exp['simulation'].fullParsedYamlFile['concentrationObservables'], fullParsedYamlFile = exp['simulation'].fullParsedYamlFile) soln,temp=jsr1.run() #print(soln) return soln def plotting_observables(self,sigmas_original=[],sigmas_optimized=[],file_identifier='',filetype='.png'): for i,exp in enumerate(self.exp_dict_list_optimized): observable_counter=0 for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables'] + exp['ignition_delay_observables']): if observable == None: continue plt.figure() if observable in exp['mole_fraction_observables']: if re.match('[Ss]hock [Tt]ube',exp['simulation_type']) and re.match('[Ss]pecies[ -][Pp]rofile',exp['experiment_type']): plt.plot(exp['simulation'].timeHistories[0]['time']*1e3,exp['simulation'].timeHistories[0][observable],'b',label='MSI') plt.plot(self.exp_dict_list_original[i]['simulation'].timeHistories[0]['time']*1e3,self.exp_dict_list_original[i]['simulation'].timeHistories[0][observable],'r',label= "$\it{A priori}$ model") plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,exp['experimental_data'][observable_counter][observable],'o',color='black',label='Experimental Data') plt.xlabel('Time (ms)') plt.ylabel('Mole Fraction '+''+str(observable)) plt.title('Experiment_'+str(i+1)) if bool(sigmas_optimized) == True: high_error_optimized = np.exp(sigmas_optimized[i][observable_counter]) high_error_optimized = np.multiply(high_error_optimized,exp['simulation'].timeHistoryInterpToExperiment[observable].dropna().values) low_error_optimized = np.exp(sigmas_optimized[i][observable_counter]*-1) low_error_optimized = np.multiply(low_error_optimized,exp['simulation'].timeHistoryInterpToExperiment[observable].dropna().values) 
plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3, high_error_optimized,'b--') plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,low_error_optimized,'b--') # high_error_original = np.exp(sigmas_original[i][observable_counter]) # high_error_original = np.multiply(high_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values) # low_error_original = np.exp(sigmas_original[i][observable_counter]*-1) # low_error_original = np.multiply(low_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values) # plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3, high_error_original,'r--') # plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,low_error_original,'r--') #stub plt.plot([],'w' ,label= 'T:'+ str(self.exp_dict_list_original[i]['simulation'].temperature)) #plt.plot([],'w', label= 'P:'+ str(self.exp_dict_list_original[i]['simulation'].pressure)) key_list = [] for key in self.exp_dict_list_original[i]['simulation'].conditions.keys(): plt.plot([],'w',label= key+': '+str(self.exp_dict_list_original[i]['simulation'].conditions[key])) key_list.append(key) #plt.legend(handlelength=3) plt.legend(ncol=2) sp = '_'.join(key_list) #print(sp) #plt.savefig(self.working_directory+'/'+'Experiment_'+str(i+1)+'_'+str(observable)+'_'+str(self.exp_dict_list_original[i]['simulation'].temperature)+'K'+'_'+str(self.exp_dict_list_original[i]['simulation'].pressure)+'_'+sp+'_'+'.pdf', bbox_inches='tight') #stub plt.savefig(self.working_directory+'/'+'Exp_'+str(i+1)+'_'+str(observable)+'_'+str(self.exp_dict_list_original[i]['simulation'].temperature)+'K_'+sp+'.pdf', bbox_inches='tight') #plt.savefig(self.working_directory+'/'+'Exp_'+str(i+1)+'_'+str(observable)+'_'+str(self.exp_dict_list_original[i]['simulation'].temperature)+'K_'+sp+'.svg', bbox_inches='tight',transparent=True) observable_counter+=1 elif re.match('[Jj][Ss][Rr]',exp['simulation_type']): nominal=self.run_jsr(self.exp_dict_list_original[i],self.nominal_cti) MSI_model=self.run_jsr(exp,self.new_cti) plt.plot(MSI_model['temperature'],MSI_model[observable],'b',label='MSI') plt.plot(nominal['temperature'],nominal[observable],'r',label= "$\it{A priori}$ model") plt.plot(exp['experimental_data'][observable_counter]['Temperature'],exp['experimental_data'][observable_counter][observable],'o',color='black',label='Experimental Data') plt.xlabel('Temperature (K)') plt.ylabel('Mole Fraction '+''+str(observable)) plt.title('Experiment_'+str(i+1)) if bool(sigmas_optimized) == True: high_error_optimized = np.exp(sigmas_optimized[i][observable_counter]) print(high_error_optimized) high_error_optimized = np.multiply(high_error_optimized,exp['simulation'].timeHistories[0][observable].dropna().values) low_error_optimized = np.exp(sigmas_optimized[i][observable_counter]*-1) low_error_optimized = np.multiply(low_error_optimized,exp['simulation'].timeHistories[0][observable].dropna().values) #plt.figure() if len(high_error_optimized)>1 and len(low_error_optimized) > 1: plt.plot(exp['experimental_data'][observable_counter]['Temperature'], high_error_optimized,'b--') plt.plot(exp['experimental_data'][observable_counter]['Temperature'],low_error_optimized,'b--') else: print(high_error_optimized,observable,exp['simulation'].timeHistories[0][observable].dropna().values) plt.plot(exp['experimental_data'][observable_counter]['Temperature'], high_error_optimized,'rX') 
                                plt.plot(exp['experimental_data'][observable_counter]['Temperature'],low_error_optimized,'bX')
                                #high_error_original = np.exp(sigmas_original[i][observable_counter])
                                # high_error_original = np.multiply(high_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values)
                                #low_error_original = np.exp(sigmas_original[i][observable_counter]*-1)
                                #low_error_original = np.multiply(low_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values)
                                #plt.figure()
                                # plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3, high_error_original,'r--')
                                #plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,low_error_original,'r--')
                        plt.savefig(os.path.join(self.working_directory,'Experiment_'+str(i+1)+'_'+str(observable)+file_identifier+filetype), bbox_inches='tight',dpi=500)
                        observable_counter+=1
                    elif re.match('[Ss]pecies[- ][Pp]rofile',exp['experiment_type']) and re.match('[Ff]low[ -][Rr]eactor',exp['simulation_type']):
                        plt.plot(exp['simulation'].timeHistories[0]['temperature'],exp['simulation'].timeHistories[0][observable],'b',label='MSI')
                        plt.plot(self.exp_dict_list_original[i]['simulation'].timeHistories[0]['temperature'],self.exp_dict_list_original[i]['simulation'].timeHistories[0][observable],'r',label= "$\it{A priori}$ model")
                        plt.plot(exp['experimental_data'][observable_counter]['Temperature'],exp['experimental_data'][observable_counter][observable],'o',color='black',label='Experimental Data')
                        plt.xlabel('Temperature (K)')
                        plt.ylabel('Mole Fraction '+''+str(observable))
                        plt.title('Experiment_'+str(i+1))
                        if bool(sigmas_optimized) == True:
                            high_error_optimized = np.exp(sigmas_optimized[i][observable_counter])
                            high_error_optimized = np.multiply(high_error_optimized,exp['simulation'].timeHistories[0][observable].dropna().values)
                            low_error_optimized = np.exp(sigmas_optimized[i][observable_counter]*-1)
                            low_error_optimized = np.multiply(low_error_optimized,exp['simulation'].timeHistories[0][observable].dropna().values)
                            plt.plot(exp['experimental_data'][observable_counter]['Temperature'], high_error_optimized,'b--')
                            plt.plot(exp['experimental_data'][observable_counter]['Temperature'],low_error_optimized,'b--')
                            #high_error_original = np.exp(sigmas_original[i][observable_counter])
                            #high_error_original = np.multiply(high_error_original,self.exp_dict_list_original[i]['simulation'].timeHistories[0][observable].dropna().values)
                            #low_error_original = np.exp(sigmas_original[i][observable_counter]*-1)
                            #low_error_original = np.multiply(low_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values)
                            #plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3, high_error_original,'r--')
                            #plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,low_error_original,'r--')
                        plt.savefig(self.working_directory+'/'+'Experiment_'+str(i+1)+'_'+str(observable)+'.pdf', bbox_inches='tight',dpi=1000)

                if observable in exp['concentration_observables']:
                    if re.match('[Ss]hock [Tt]ube',exp['simulation_type']) and re.match('[Ss]pecies[ -][Pp]rofile',exp['experiment_type']):
                        if observable+'_ppm' in exp['experimental_data'][observable_counter].columns:
                            plt.plot(exp['simulation'].timeHistories[0]['time']*1e3,exp['simulation'].timeHistories[0][observable]*1e6,'b',label='MSI')
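# --- Illustrative sketch (not part of the original module) ------------------
# The concentration branch dispatches on the column naming convention of the
# experimental CSV: '<observable>_ppm' means the data are in ppm (model mole
# fraction * 1e6), while '<observable>_mol/cm^3' means molar concentration.
# A minimal version of that lookup, assuming pandas; the helper name
# experimental_units is hypothetical.
import pandas as pd

def experimental_units(exp_df, observable):
    """Return 'ppm', 'mol/cm^3', or 'mole_fraction' based on the CSV columns."""
    if observable + '_ppm' in exp_df.columns:
        return 'ppm'
    if observable + '_mol/cm^3' in exp_df.columns:
        return 'mol/cm^3'
    return 'mole_fraction'

# Example:
#   df = pd.DataFrame({'Time': [0.0, 1.0e-3], 'H2O_ppm': [120.0, 150.0]})
#   experimental_units(df, 'H2O')   # -> 'ppm'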
plt.plot(self.exp_dict_list_original[i]['simulation'].timeHistories[0]['time']*1e3,self.exp_dict_list_original[i]['simulation'].timeHistories[0][observable]*1e6,'r',label= "$\it{A priori}$ model") plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,exp['experimental_data'][observable_counter][observable+'_ppm'],'o',color='black',label='Experimental Data') plt.xlabel('Time (ms)') plt.ylabel(str(observable)+ ' '+ 'ppm') plt.title('Experiment_'+str(i+1)) if bool(sigmas_optimized)==True: high_error_optimized = np.exp(sigmas_optimized[i][observable_counter]) high_error_optimized = np.multiply(high_error_optimized,exp['simulation'].timeHistoryInterpToExperiment[observable].dropna().values*1e6) low_error_optimized = np.exp(np.array(sigmas_optimized[i][observable_counter])*-1) low_error_optimized = np.multiply(low_error_optimized,exp['simulation'].timeHistoryInterpToExperiment[observable].dropna().values*1e6) plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3, high_error_optimized,'b--') plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,low_error_optimized,'b--') #high_error_original = np.exp(sigmas_original[i][observable_counter]) #high_error_original = np.multiply(high_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values*1e6) #low_error_original = np.exp(np.array(sigmas_original[i][observable_counter])*-1) #low_error_original = np.multiply(low_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values*1e6) #plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3, high_error_original,'r--') #plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,low_error_original,'r--') elif observable+'_mol/cm^3' in exp['experimental_data'][observable_counter].columns: concentration_optimized = np.true_divide(1,exp['simulation'].timeHistories[0]['temperature'].to_numpy())*exp['simulation'].timeHistories[0]['pressure'].to_numpy() concentration_optimized *= (1/(8.314e6))*exp['simulation'].timeHistories[0][observable].dropna().to_numpy() concentration_original = np.true_divide(1,self.exp_dict_list_original[i]['simulation'].timeHistories[0]['temperature'].to_numpy())*self.exp_dict_list_original[i]['simulation'].timeHistories[0]['pressure'].to_numpy() concentration_original *= (1/(8.314e6))*self.exp_dict_list_original[i]['simulation'].timeHistories[0][observable].dropna().to_numpy() plt.plot(exp['simulation'].timeHistories[0]['time']*1e3,concentration_optimized,'b',label='MSI') plt.plot(self.exp_dict_list_original[i]['simulation'].timeHistories[0]['time']*1e3,concentration_original,'r',label= "$\it{A priori}$ model") plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,exp['experimental_data'][observable_counter][observable+'_mol/cm^3'],'o',color='black',label='Experimental Data') plt.xlabel('Time (ms)') plt.ylabel(r'$\frac{mol}{cm^3}$'+''+str(observable)) plt.title('Experiment_'+str(i+1)) if bool(sigmas_optimized)==True: concentration_sig = np.true_divide(1,exp['simulation'].pressureAndTemperatureToExperiment[observable_counter]['temperature'].to_numpy())*exp['simulation'].pressureAndTemperatureToExperiment[observable_counter]['pressure'].to_numpy() concentration_sig *= (1/(8.314e6))*exp['simulation'].timeHistoryInterpToExperiment[observable].dropna().to_numpy() high_error_optimized = np.exp(sigmas_optimized[i][observable_counter]) high_error_optimized = np.multiply(high_error_optimized,concentration_sig) 
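# --- Illustrative sketch (not part of the original module) ------------------
# The '_mol/cm^3' branch above applies the ideal-gas relation
#     C = X * P / (R * T)
# with R written as 8.314e6 Pa*cm^3/(mol*K), so a pressure in Pa and a
# temperature in K give the concentration directly in mol/cm^3.  A standalone
# version of that conversion; mole_fraction_to_concentration is an
# illustrative name.
import numpy as np

R_PA_CM3 = 8.314e6  # Pa*cm^3/(mol*K); 8.314 J/(mol*K) expressed per cm^3

def mole_fraction_to_concentration(mole_fraction, temperature_K, pressure_Pa):
    """Convert mole fraction to molar concentration in mol/cm^3."""
    X = np.asarray(mole_fraction, dtype=float)
    T = np.asarray(temperature_K, dtype=float)
    P = np.asarray(pressure_Pa, dtype=float)
    return X * P / (R_PA_CM3 * T)

# Sanity check: a pure species (X=1) at 101325 Pa and 298 K gives
#   101325 / (8.314e6 * 298) ~ 4.09e-5 mol/cm^3  (i.e. ~1 mol per 24.5 L).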
low_error_optimized = np.exp(np.array(sigmas_optimized[i][observable_counter])*-1) low_error_optimized = np.multiply(low_error_optimized,concentration_sig) plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3, high_error_optimized,'b--') plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,low_error_optimized,'b--') plt.plot([],'w' ,label= 'T:'+ str(self.exp_dict_list_original[i]['simulation'].temperature)) #plt.plot([],'w', label= 'P:'+ str(self.exp_dict_list_original[i]['simulation'].pressure)) key_list = [] for key in self.exp_dict_list_original[i]['simulation'].conditions.keys(): plt.plot([],'w',label= key+': '+str(self.exp_dict_list_original[i]['simulation'].conditions[key])) key_list.append(key) #plt.legend(handlelength=3) plt.legend(ncol=2) sp = '_'.join(key_list) #print(sp) #plt.savefig(self.working_directory+'/'+'Experiment_'+str(i+1)+'_'+str(observable)+'_'+str(self.exp_dict_list_original[i]['simulation'].temperature)+'K'+'_'+str(self.exp_dict_list_original[i]['simulation'].pressure)+'_'+sp+'_'+'.pdf', bbox_inches='tight') #stub plt.savefig(self.working_directory+'/'+'Exp_'+str(i+1)+'_'+str(observable)+'_'+str(self.exp_dict_list_original[i]['simulation'].temperature)+'K_'+sp+'.pdf', bbox_inches='tight') #plt.savefig(self.working_directory+'/'+'Exp_'+str(i+1)+'_'+str(observable)+'_'+str(self.exp_dict_list_original[i]['simulation'].temperature)+'K_'+sp+'.svg', bbox_inches='tight',transparent=True) observable_counter+=1 elif re.match('[Ff]low [Rr]eactor',exp['simulation_type']) and re.match('[Ss]pecies[ -][Pp]rofile',exp['experiment_type']): plt.plot(exp['simulation'].timeHistories[0]['initial_temperature'],exp['simulation'].timeHistories[0][observable]*1e6,'b',label='MSI') plt.plot(self.exp_dict_list_original[i]['simulation'].timeHistories[0]['initial_temperature'],self.exp_dict_list_original[i]['simulation'].timeHistories[0][observable]*1e6,'r',label= "$\it{A priori}$ model") plt.plot(exp['experimental_data'][observable_counter]['Temperature'],exp['experimental_data'][observable_counter][observable+'_ppm'],'o',color='black',label='Experimental Data') plt.xlabel('Temperature (K)') plt.ylabel('ppm '+''+str(observable)) plt.title('Experiment_'+str(i+1)) if bool(sigmas_optimized) == True: #stub high_error_optimized = np.exp(sigmas_optimized[i][observable_counter]) high_error_optimized = np.multiply(high_error_optimized,exp['simulation'].timeHistories[0][observable].dropna().values*1e6) low_error_optimized = np.exp(sigmas_optimized[i][observable_counter]*-1) low_error_optimized = np.multiply(low_error_optimized,exp['simulation'].timeHistories[0][observable].dropna().values*1e6) plt.plot(exp['experimental_data'][observable_counter]['Temperature'], high_error_optimized,'b--') plt.plot(exp['experimental_data'][observable_counter]['Temperature'],low_error_optimized,'b--') #high_error_original = np.exp(sigmas_original[i][observable_counter]) #high_error_original = np.multiply(high_error_original,self.exp_dict_list_original[i]['simulation'].timeHistories[0][observable].dropna().values) #low_error_original = np.exp(sigmas_original[i][observable_counter]*-1) #low_error_original = np.multiply(low_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values) #plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3, high_error_original,'r--') #plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,low_error_original,'r--') 
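# --- Illustrative sketch (not part of the original module) ------------------
# The plt.plot([], 'w', label=...) calls above add invisible (white, empty)
# handles whose labels carry the temperature and mixture composition, so the
# legend doubles as a small table of experimental conditions.  A standalone
# example of that trick; add_conditions_to_legend is an illustrative name.
import matplotlib.pyplot as plt

def add_conditions_to_legend(conditions, temperature=None, ncol=2):
    """Append invisible legend entries listing the experiment conditions."""
    if temperature is not None:
        plt.plot([], 'w', label='T: ' + str(temperature))
    for species, value in conditions.items():
        plt.plot([], 'w', label=species + ': ' + str(value))
    plt.legend(ncol=ncol)

# Example:
#   plt.plot([0, 1], [0, 1], 'b', label='MSI')
#   add_conditions_to_legend({'H2': 2, 'O2': 1, 'Ar': 4}, temperature=1100)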
plt.savefig(self.working_directory+'/'+'Experiment_'+str(i+1)+'_'+str(observable)+'.png', bbox_inches='tight',dpi=1000) observable_counter+=1 elif re.match('[Jj][Ss][Rr]',exp['simulation_type']): nominal=self.run_jsr(self.exp_dict_list_original[i],self.nominal_cti) MSI_model=self.run_jsr(exp,self.new_cti) plt.plot(MSI_model['temperature'],MSI_model[observable]*1e6,'b',label='MSI') plt.plot(nominal['temperature'],nominal[observable]*1e6,'r',label= "$\it{A priori}$ model") plt.plot(exp['experimental_data'][observable_counter]['Temperature'],exp['experimental_data'][observable_counter][observable+'_ppm'],'o',color='black',label='Experimental Data') plt.xlabel('Temperature (K)') plt.ylabel('ppm '+''+str(observable)) plt.title('Experiment_'+str(i+1)) if bool(sigmas_optimized) == True: high_error_optimized = np.exp(sigmas_optimized[i][observable_counter]) print(high_error_optimized) high_error_optimized = np.multiply(high_error_optimized,exp['simulation'].timeHistories[0][observable].dropna().values) low_error_optimized = np.exp(sigmas_optimized[i][observable_counter]*-1) low_error_optimized = np.multiply(low_error_optimized,exp['simulation'].timeHistories[0][observable].dropna().values) #plt.figure() if len(high_error_optimized)>1 and len(low_error_optimized) > 1: plt.plot(exp['experimental_data'][observable_counter]['Temperature'],high_error_optimized*1e6,'b--') plt.plot(exp['experimental_data'][observable_counter]['Temperature'],low_error_optimized*1e6,'b--') else: print(high_error_optimized,observable,exp['simulation'].timeHistories[0][observable].dropna().values) plt.plot(exp['experimental_data'][observable_counter]['Temperature'], high_error_optimized,'rX') plt.plot(exp['experimental_data'][observable_counter]['Temperature'],low_error_optimized,'bX') #high_error_original = np.exp(sigmas_original[i][observable_counter]) # high_error_original = np.multiply(high_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values) #low_error_original = np.exp(sigmas_original[i][observable_counter]*-1) #low_error_original = np.multiply(low_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values) #plt.figure() # plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3, high_error_original,'r--') #plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,low_error_original,'r--') plt.savefig(os.path.join(self.working_directory,'Experiment_'+str(i+1)+'_'+str(observable)+file_identifier+filetype), bbox_inches='tight',dpi=100) observable_counter+=1 if observable in exp['ignition_delay_observables']: if re.match('[Ss]hock [Tt]ube',exp['simulation_type']): if len(exp['simulation'].temperatures)>1: nominal=self.run_ignition_delay(self.exp_dict_list_original[i], self.nominal_cti) MSI_model=self.run_ignition_delay(exp, self.new_cti) #plt.semilogy(1000/MSI_model['temperature'],MSI_model['delay'],'b',label='MSI') #changed to plotting at nominal temperature #plt.semilogy(1000/MSI_model['temperature'],MSI_model['delay'],'b',label='MSI') #a, b = zip(*sorted(zip(1000/exp['experimental_data'][observable_counter]['temperature'],exp['simulation'].timeHistories[0]['delay'].dropna().values))) a, b = zip(*sorted(zip(1000/np.array(exp['simulation'].temperatures),exp['simulation'].timeHistories[0]['delay'].dropna().values))) plt.semilogy(a,b,'b',label='MSI') plt.semilogy(1000/nominal['temperature'],nominal['delay'],'r',label= "$\it{A priori}$ model") 
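# --- Illustrative sketch (not part of the original module) ------------------
# Ignition delays are plotted on Arrhenius-style axes (1000/T versus delay on
# a log scale), and the zip(*sorted(zip(...))) idiom above orders the pairs so
# the semilogy line is monotonic in x even when the temperature list is
# unordered.  An equivalent numpy formulation; sorted_arrhenius_pairs is an
# illustrative name.
import numpy as np

def sorted_arrhenius_pairs(temperatures_K, delays_s):
    """Return (1000/T, delay) sorted by increasing 1000/T."""
    inv_t = 1000.0 / np.asarray(temperatures_K, dtype=float)
    delays = np.asarray(delays_s, dtype=float)
    order = np.argsort(inv_t)
    return inv_t[order], delays[order]

# Usage mirroring the 'b' MSI curve above:
#   x, y = sorted_arrhenius_pairs(exp['simulation'].temperatures,
#                                 exp['simulation'].timeHistories[0]['delay'].dropna().values)
#   plt.semilogy(x, y, 'b', label='MSI')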
#plt.semilogy(1000/exp['simulation'].timeHistories[0]['temperature'],exp['simulation'].timeHistories[0]['delay'],'b',label='MSI') #plt.semilogy(1000/self.exp_dict_list_original[i]['simulation'].timeHistories[0]['temperature'],self.exp_dict_list_original[i]['simulation'].timeHistories[0]['delay'],'r',label= "$\it{A priori}$ model") plt.semilogy(1000/exp['experimental_data'][observable_counter]['temperature'],exp['experimental_data'][observable_counter][observable+'_s'],'o',color='black',label='Experimental Data') plt.xlabel('1000/T') plt.ylabel('Time (s)') plt.title('Experiment_'+str(i+1)) if bool(sigmas_optimized) == True: high_error_optimized = np.exp(sigmas_optimized[i][observable_counter]) high_error_optimized = np.multiply(high_error_optimized,exp['simulation'].timeHistories[0]['delay'].dropna().values) low_error_optimized = np.exp(sigmas_optimized[i][observable_counter]*-1) low_error_optimized = np.multiply(low_error_optimized,exp['simulation'].timeHistories[0]['delay'].dropna().values) #plt.figure() #print(exp['simulation'].timeHistories[0]['delay'].dropna().values,'THIS IS IN THE PLOTTER') #a, b = zip(*sorted(zip(1000/exp['experimental_data'][observable_counter]['temperature'],high_error_optimized))) a, b = zip(*sorted(zip(1000/np.array(exp['simulation'].temperatures),high_error_optimized))) plt.semilogy(a,b,'b--') #a, b = zip(*sorted(zip(1000/exp['experimental_data'][observable_counter]['temperature'],low_error_optimized))) a, b = zip(*sorted(zip(1000/np.array(exp['simulation'].temperatures),low_error_optimized))) plt.semilogy(a,b,'b--') #plt.plot(1000/exp['experimental_data'][observable_counter]['temperature'],exp['simulation'].timeHistories[0]['delay'].dropna().values,'x') #plt.plot(1000/np.array(exp['simulation'].temperatures),exp['simulation'].timeHistories[0]['delay'].dropna().values,'o') #high_error_original = np.exp(sigmas_original[i][observable_counter]) # high_error_original = np.multiply(high_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values) #low_error_original = np.exp(sigmas_original[i][observable_counter]*-1) #low_error_original = np.multiply(low_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values) #plt.figure() # plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3, high_error_original,'r--') #plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,low_error_original,'r--') #plt.plot([],'w', label= 'P:'+ str(self.exp_dict_list_original[i]['simulation'].pressures)) key_list = [] for key in self.exp_dict_list_original[i]['simulation'].fullParsedYamlFile['conditions_to_run'][0].keys(): # ['simulation'].fullParsedYamlFile['conditions_to_run'] plt.plot([],'w',label= key+': '+str(self.exp_dict_list_original[i]['simulation'].fullParsedYamlFile['conditions_to_run'][0][key])) key_list.append(key) #plt.legend(handlelength=3) plt.legend(ncol=2) sp = '_'.join(key_list) plt.savefig(os.path.join(self.working_directory,'Experiment_'+str(i+1)+'_'+str(observable)+'.pdf'), bbox_inches='tight',dpi=1000) plt.savefig(os.path.join(self.working_directory,'Experiment_'+str(i+1)+'_'+str(observable)+'.svg'), bbox_inches='tight',dpi=1000) observable_counter+=1 elif re.match('[Rr][Cc][Mm]',exp['simulation_type']): if len(exp['simulation'].temperatures)>1: 
plt.semilogy(1000/exp['simulation'].timeHistories[0]['ignition_temperature'],exp['simulation'].timeHistories[0]['delay']-exp['simulation'].timeHistories[0]['end_of_compression_time'],'b',label='MSI') plt.semilogy(1000/self.exp_dict_list_original[i]['simulation'].timeHistories[0]['ignition_temperature'],self.exp_dict_list_original[i]['simulation'].timeHistories[0]['delay']-self.exp_dict_list_original[i]['simulation'].timeHistories[0]['end_of_compression_time'],'r',label= "$\it{A priori}$ model") #plt.semilogy(1000/exp['simulation'].timeHistories[0]['temperature'],exp['simulation'].timeHistories[0]['delay'],'b',label='MSI') #plt.semilogy(1000/self.exp_dict_list_original[i]['simulation'].timeHistories[0]['temperature'],self.exp_dict_list_original[i]['simulation'].timeHistories[0]['delay'],'r',label= "$\it{A priori}$ model") plt.semilogy(1000/exp['experimental_data'][observable_counter]['temperature'],exp['experimental_data'][observable_counter][observable+'_s'],'o',color='black',label='Experimental Data') plt.xlabel('1000/T (1000/K)') plt.ylabel('Time (ms)') plt.title('Experiment_'+str(i+1)) if bool(sigmas_optimized) == True: high_error_optimized = np.exp(sigmas_optimized[i][observable_counter]) high_error_optimized = np.multiply(high_error_optimized,(exp['simulation'].timeHistories[0]['delay']-exp['simulation'].timeHistories[0]['end_of_compression_time']).dropna().values) low_error_optimized = np.exp(sigmas_optimized[i][observable_counter]*-1) low_error_optimized = np.multiply(low_error_optimized,(exp['simulation'].timeHistories[0]['delay']-exp['simulation'].timeHistories[0]['end_of_compression_time']).dropna().values) #plt.figure() a, b = zip(*sorted(zip(1000/exp['experimental_data'][observable_counter]['ignition_temperature'],high_error_optimized))) plt.semilogy(a,b,'b--') #plt.plot(1000/exp['experimental_data'][observable_counter]['temperature'],low_error_optimized,'b--') a, b = zip(*sorted(zip(1000/exp['experimental_data'][observable_counter]['ignition_temperature'],low_error_optimized))) plt.semilogy(a,b,'b--') #high_error_original = np.exp(sigmas_original[i][observable_counter]) # high_error_original = np.multiply(high_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values) #low_error_original = np.exp(sigmas_original[i][observable_counter]*-1) #low_error_original = np.multiply(low_error_original,self.exp_dict_list_original[i]['simulation'].timeHistoryInterpToExperiment[observable].dropna().values) #plt.figure() # plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3, high_error_original,'r--') #plt.plot(exp['experimental_data'][observable_counter]['Time']*1e3,low_error_original,'r--') plt.savefig(os.path.join(self.working_directory,'Experiment_'+str(i+1)+'_'+str(observable)+'.pdf'), bbox_inches='tight',dpi=1000) observable_counter+=1 if 'perturbed_coef' in exp.keys(): wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths'] plt.figure() for k,wl in enumerate(wavelengths): plt.plot(exp['absorbance_experimental_data'][k]['time']*1e3,exp['absorbance_experimental_data'][k]['Absorbance_'+str(wl)],'o',color='black',label='Experimental Data') plt.plot(exp['simulation'].timeHistories[0]['time']*1e3,exp['absorbance_calculated_from_model'][wl],'b',label='MSI') plt.plot(self.exp_dict_list_original[i]['simulation'].timeHistories[0]['time']*1e3,self.exp_dict_list_original[i]['absorbance_calculated_from_model'][wl],'r',label= "$\it{A priori}$ model") 
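# --- Illustrative sketch (not part of the original module) ------------------
# For RCM experiments the quantity plotted above is the ignition delay
# measured from the end of compression,
#     tau_eff = delay - end_of_compression_time,
# shown against 1000/ignition_temperature.  A compact pandas version of that
# bookkeeping; rcm_effective_delay is an illustrative name.
import pandas as pd

def rcm_effective_delay(time_history):
    """Return 1000/T_ign and the post-compression ignition delay, sorted in x."""
    out = pd.DataFrame()
    out['inv_T'] = 1000.0 / time_history['ignition_temperature']
    out['tau_eff'] = time_history['delay'] - time_history['end_of_compression_time']
    return out.sort_values('inv_T')

# Usage:
#   curve = rcm_effective_delay(exp['simulation'].timeHistories[0])
#   plt.semilogy(curve['inv_T'], curve['tau_eff'], 'b', label='MSI')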
#plt.plot(exp['absorbance_experimental_data'][k]['time']*1e3,exp['absorbance_experimental_data'][k]['Absorbance_'+str(wl)],'o',color='black',label='Experimental Data') plt.xlabel('Time (ms)') plt.ylabel('Absorbance'+''+str(wl)) plt.title('Experiment_'+str(i+1)) if bool(sigmas_optimized)==True: high_error_optimized = np.exp(sigmas_optimized[i][observable_counter]) high_error_optimized = np.multiply(high_error_optimized,exp['absorbance_model_data'][wl]) low_error_optimized = np.exp(sigmas_optimized[i][observable_counter]*-1) low_error_optimized = np.multiply(low_error_optimized,exp['absorbance_model_data'][wl]) plt.plot(exp['absorbance_experimental_data'][k]['time']*1e3,high_error_optimized,'b--') plt.plot(exp['absorbance_experimental_data'][k]['time']*1e3,low_error_optimized,'b--') high_error_original = np.exp(sigmas_original[i][observable_counter]) high_error_original = np.multiply(high_error_original,self.exp_dict_list_original[i]['absorbance_model_data'][wl]) low_error_original = np.exp(sigmas_original[i][observable_counter]*-1) low_error_original = np.multiply(low_error_original,self.exp_dict_list_original[i]['absorbance_model_data'][wl]) #plt.plot(exp['absorbance_experimental_data'][k]['time']*1e3,high_error_original,'r--') #plt.plot(exp['absorbance_experimental_data'][k]['time']*1e3,low_error_original,'r--') # if bool(sigmas_optimized)==True and i+1 == 11: # plt.ylim(top=.35) #start here key_list=[] plt.plot([],'w' ,label= 'T:'+ str(self.exp_dict_list_original[i]['simulation'].temperature)) #plt.plot([],'w', label= 'P:'+ str(self.exp_dict_list_original[i]['simulation'].pressure)) for key in self.exp_dict_list_original[i]['simulation'].conditions.keys(): plt.plot([],'w',label= key+': '+str(self.exp_dict_list_original[i]['simulation'].conditions[key])) key_list.append(key) #plt.legend(handlelength=3) plt.legend(ncol=2) #plt.savefig(self.working_directory+'/'+'Exp_'+str(i+1)+'_'+str(observable)+'_'+str(self.exp_dict_list_original[i]['simulation'].temperature)+'K_'+sp+'.pdf', bbox_inches='tight') sp = '_'.join(key_list) plt.savefig(self.working_directory+'/'+'Exp_'+str(i+1)+' '+'Absorb at'+'_'+str(wl)+'_'+str(self.exp_dict_list_original[i]['simulation'].temperature)+'K_'+sp+'.pdf', bbox_inches='tight') plt.savefig(self.working_directory+'/'+'Exp_'+str(i+1)+' '+'Absorb at'+'_'+str(wl)+'_'+str(self.exp_dict_list_original[i]['simulation'].temperature)+'K_'+sp+'.svg', bbox_inches='tight',transparent=True) # make function to plot rate constants def plotting_rate_constants(self,optimized_cti_file='', original_cti_file='', initial_temperature=250, final_temperature=2500, master_equation_reactions=[]): gas_optimized = ct.Solution(optimized_cti_file) gas_original = ct.Solution(original_cti_file) def unique_list(seq): checked = [] for e in seq: if e not in checked: checked.append(e) return checked def target_values_for_S(target_value_csv, exp_dict_list, S_matrix, master_equation_reaction_list = [], master_equation_sensitivites = {}): target_value_csv = pd.read_csv(target_value_csv) target_reactions = target_value_csv['Reaction'] target_temp = target_value_csv['temperature'] target_press = target_value_csv['pressure'] target_k = target_value_csv['k'] reactions_in_cti_file = exp_dict_list[0]['simulation'].processor.solution.reaction_equations() number_of_reactions_in_cti = len(reactions_in_cti_file) As = [] Ns = [] Eas = [] def create_empty_nested_reaction_list(): nested_reaction_list = [[] for x in range(len(master_equation_reaction_list))] for reaction in master_equation_reaction_list: for i,MP in 
enumerate(master_equation_sensitivites[reaction]): nested_reaction_list[master_equation_reaction_list.index(reaction)].append(0) return nested_reaction_list def create_tuple_list(array_of_sensitivities): #stub tuple_list = [] for ix,iy in np.ndindex(array_of_sensitivities.shape): tuple_list.append((ix,iy)) return tuple_list MP_stack = [] target_values_to_stack = [] master_equation_cheby_instance = meq.Master_Equation(T_min=self.T_min,T_max=self.T_max,P_min=self.P_min,P_max=self.P_max) for i,reaction in enumerate(target_reactions): if reaction in master_equation_reaction_list: nested_reaction_list = create_empty_nested_reaction_list() for j, MP_array in enumerate(master_equation_sensitivites[reaction]): tuple_list = create_tuple_list(MP_array) temp = [] counter = 0 for sensitivity in np.nditer(MP_array,order='C'): k = tuple_list[counter][0] l= tuple_list[counter][1] counter +=1 #need to add reduced p and t, and check these units were using to map #these might not work #t_alpha= meq.Master_Equation.chebyshev_specific_poly(self,k,meq.Master_Equation.calc_reduced_T(self,target_temp[i])) t_alpha= master_equation_cheby_instance.chebyshev_specific_poly(k,master_equation_cheby_instance.calc_reduced_T(target_temp[i])) if target_press[i] ==0: target_press_new = 1e-9 else: target_press_new=target_press[i] #p_alpha = meq.Master_Equation.chebyshev_specific_poly(self,l,meq.Master_Equation.calc_reduced_P(self,target_press_new*101325)) p_alpha = master_equation_cheby_instance.chebyshev_specific_poly(l,master_equation_cheby_instance.calc_reduced_P(target_press_new*101325)) #these might nowt work single_alpha_map = t_alpha*p_alpha*sensitivity temp.append(single_alpha_map) temp =sum(temp) #should there be an = temp here #nested_reaction_list[master_equation_reaction_list.index(reaction)][j]=temp nested_reaction_list[master_equation_reaction_list.index(reaction)][j]=temp temp2 = nested_reaction_list flat_list = [item for sublist in temp2 for item in sublist] #print(flat_list) MP_stack.append(nested_reaction_list) flat_list = np.array(flat_list) flat_list = flat_list.reshape((1,flat_list.shape[0])) target_values_to_stack.append(flat_list) else: #this will need to get fixed if we want to handle all reactions as chevy A_temp = np.zeros((1,number_of_reactions_in_cti-len(master_equation_reaction_list))) N_temp = np.zeros((1,number_of_reactions_in_cti-len(master_equation_reaction_list))) Ea_temp = np.zeros((1,number_of_reactions_in_cti-len(master_equation_reaction_list))) #decide if this mapping is correct A_temp[0,reactions_in_cti_file.index(reaction)] = 1 N_temp [0,reactions_in_cti_file.index(reaction)] = np.log(target_temp[i]) Ea_temp[0,reactions_in_cti_file.index(reaction)] = (-1/target_temp[i]) As.append(A_temp) Ns.append(N_temp) Eas.append(Ea_temp) A_temp = A_temp.reshape((1,A_temp.shape[1])) N_temp = N_temp.reshape((1,N_temp.shape[1])) Ea_temp = Ea_temp.reshape((1,Ea_temp.shape[1])) target_values_to_stack.append(np.hstack((A_temp,N_temp,Ea_temp))) # might need to edit this to pass in s? 
and S_matrix = S_matrix shape_s = S_matrix.shape S_target_values = [] for i,row in enumerate(target_values_to_stack): if target_reactions[i] in master_equation_reaction_list: zero_to_append_infront = np.zeros((1,((number_of_reactions_in_cti-len(master_equation_reaction_list))*3))) zero_to_append_behind = np.zeros((1, shape_s[1] - ((number_of_reactions_in_cti-len(master_equation_reaction_list))*3) - np.shape(row)[1] )) temp_array = np.hstack((zero_to_append_infront,row,zero_to_append_behind)) S_target_values.append(temp_array) else: zero_to_append_behind = np.zeros((1,shape_s[1]-np.shape(row)[1])) temp_array = np.hstack((row,zero_to_append_behind)) S_target_values.append(temp_array) S_target_values = np.vstack((S_target_values)) return S_target_values def sort_rate_constant_target_values(parsed_csv,unique_reactions,gas): reaction_list_from_mechanism = gas.reaction_equations() target_value_ks = [[] for reaction in range(len(unique_reactions))] target_value_temps = [[] for reaction in range(len(unique_reactions))] reaction_list_from_mechanism = gas.reaction_equations() for i,reaction in enumerate(parsed_csv['Reaction']): idx = reaction_list_from_mechanism.index(reaction) target_value_ks[unique_reactions.index(idx)].append(parsed_csv['k'][i]) target_value_temps[unique_reactions.index(idx)].append(parsed_csv['temperature'][i]) return target_value_temps,target_value_ks def rate_constant_over_temperature_range_from_cantera(reaction_number, gas, initial_temperature=250, final_temperature=2500, pressure=1, conditions = {'H2':2,'O2':1,'N2':4}): Temp = [] k = [] for temperature in np.arange(initial_temperature,final_temperature,1): gas.TPX = temperature,pressure*101325,conditions Temp.append(temperature) k.append(gas.forward_rate_constants[reaction_number]*1000) return Temp,k def calculate_sigmas_for_rate_constants(k_target_value_S_matrix,k_target_values_parsed_csv,unique_reactions,gas,covarience): reaction_list_from_mechanism = gas.reaction_equations() sigma_list_for_target_ks = [[] for reaction in range(len(unique_reactions))] shape = k_target_value_S_matrix.shape for row in range(shape[0]): #print(row) SC = np.dot(k_target_value_S_matrix[row,:],covarience) sigma_k = np.dot(SC,np.transpose(k_target_value_S_matrix[row,:])) sigma_k = np.sqrt(sigma_k) #print(row) #print(k_target_values_parsed_csv['Reaction'][row]) indx = reaction_list_from_mechanism.index(k_target_values_parsed_csv['Reaction'][row]) sigma_list_for_target_ks[unique_reactions.index(indx)].append(sigma_k) return sigma_list_for_target_ks def calculating_target_value_ks_from_cantera_for_sigmas(k_target_values_parsed_csv,gas,unique_reactions): target_value_ks = [[] for reaction in range(len(unique_reactions))] target_reactions = k_target_values_parsed_csv['Reaction'] target_temp = k_target_values_parsed_csv['temperature'] target_press = k_target_values_parsed_csv['pressure'] reactions_in_cti_file = gas.reaction_equations() #print(reactions_in_cti_file) for i,reaction in enumerate(target_reactions): if target_press[i] == 0: pressure = 1e-9 else: pressure = target_press[i] gas.TPX = target_temp[i],pressure*101325,{'H2O2':0.003094,'O2':0.000556,'H2O':0.001113,'Ar':0.995237} reaction_number_in_cti = reactions_in_cti_file.index(reaction) k = gas.forward_rate_constants[reaction_number_in_cti] indx = reactions_in_cti_file.index(reaction) target_value_ks[unique_reactions.index(indx)].append(k*1000) return target_value_ks if bool(self.target_value_rate_constant_csv) and self.k_target_values=='On': S_matrix_k_target_values_extra = 
target_values_for_S(self.target_value_rate_constant_csv_extra_values, self.exp_dict_list_optimized, self.S_matrix, master_equation_reaction_list = master_equation_reactions, master_equation_sensitivites=self.cheby_sensitivity_dict) #paste here unique_reactions_optimized=[] unique_reactions_original = [] reaction_list_from_mechanism_original = gas_original.reaction_equations() reaction_list_from_mechanism = gas_optimized.reaction_equations() k_target_value_csv_extra = pd.read_csv(self.target_value_rate_constant_csv_extra_values) k_target_value_csv = pd.read_csv(self.target_value_rate_constant_csv) for row in range(k_target_value_csv_extra.shape[0]): unique_reactions_optimized.append(reaction_list_from_mechanism.index(k_target_value_csv_extra['Reaction'][row])) unique_reactions_original.append(reaction_list_from_mechanism_original.index(k_target_value_csv_extra['Reaction'][row])) unique_reactions_optimized = unique_list(unique_reactions_optimized) unique_reactions_original = unique_list(unique_reactions_original) sigma_list_for_target_ks_optimized = calculate_sigmas_for_rate_constants(S_matrix_k_target_values_extra,k_target_value_csv_extra,unique_reactions_optimized,gas_optimized,self.covarience) self.sigma_list_for_target_ks_optimized = sigma_list_for_target_ks_optimized sigma_list_for_target_ks_original = calculate_sigmas_for_rate_constants(S_matrix_k_target_values_extra,k_target_value_csv_extra,unique_reactions_original,gas_original,self.original_covariance) self.sigma_list_for_target_ks_original = sigma_list_for_target_ks_original ###################### target_value_temps_optimized,target_value_ks_optimized = sort_rate_constant_target_values(k_target_value_csv_extra,unique_reactions_optimized,gas_optimized) target_value_temps_original,target_value_ks_original = sort_rate_constant_target_values(k_target_value_csv_extra,unique_reactions_original,gas_original) ############################################# unique_reactions_optimized_for_plotting=[] unique_reactions_original_for_plotting = [] for row in range(k_target_value_csv.shape[0]): unique_reactions_optimized_for_plotting.append(reaction_list_from_mechanism.index(k_target_value_csv['Reaction'][row])) unique_reactions_original_for_plotting.append(reaction_list_from_mechanism_original.index(k_target_value_csv['Reaction'][row])) unique_reactions_optimized_for_plotting = unique_list(unique_reactions_optimized) unique_reactions_original_for_plotting = unique_list(unique_reactions_original) target_value_temps_optimized_for_plotting,target_value_ks_optimized_for_plotting = sort_rate_constant_target_values(k_target_value_csv,unique_reactions_optimized_for_plotting,gas_optimized) target_value_temps_original_for_plotting,target_value_ks_original_for_plotting = sort_rate_constant_target_values(k_target_value_csv,unique_reactions_original_for_plotting,gas_original) ############################################# target_value_ks_calculated_with_cantera_optimized = calculating_target_value_ks_from_cantera_for_sigmas(k_target_value_csv_extra,gas_optimized,unique_reactions_optimized) target_value_ks_calculated_with_cantera_original = calculating_target_value_ks_from_cantera_for_sigmas(k_target_value_csv_extra,gas_original,unique_reactions_original) for i,reaction in enumerate(unique_reactions_optimized): plt.figure() #stub Temp_optimized,k_optimized = rate_constant_over_temperature_range_from_cantera(reaction, gas_optimized, initial_temperature=initial_temperature, final_temperature=final_temperature, pressure=1, conditions={'H2':2,'O2':1,'Ar':4}) 
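# --- Illustrative sketch (not part of the original module) ------------------
# rate_constant_over_temperature_range_from_cantera() above sets gas.TPX on a
# 1 K grid and reads gas.forward_rate_constants.  Cantera reports rate
# constants in kmol/m^3-based units; the factor of 1000 maps a bimolecular k
# from m^3/(kmol*s) to cm^3/(mol*s), since 1 m^3/kmol = 1000 cm^3/mol.  A
# trimmed standalone sweep, assuming the same Cantera API already used in this
# module; k_versus_temperature and the file name are illustrative.
import numpy as np
import cantera as ct

def k_versus_temperature(gas, reaction_index, T_grid, pressure_atm=1.0,
                         composition=None):
    """Forward rate constant of one reaction over a temperature grid."""
    if composition is None:
        composition = {'H2': 2, 'O2': 1, 'Ar': 4}
    k_values = []
    for T in T_grid:
        gas.TPX = T, pressure_atm * 101325.0, composition
        k_values.append(gas.forward_rate_constants[reaction_index] * 1000.0)
    return np.array(k_values)

# Usage:
#   gas = ct.Solution('mech_optimized.cti')      # hypothetical mechanism file
#   T_grid = np.arange(250, 2500, 1)
#   plt.semilogy(T_grid, k_versus_temperature(gas, 5, T_grid), 'b')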
plt.semilogy(Temp_optimized,k_optimized,'b') #calculate sigmas high_error_optimized = np.exp(np.array(sigma_list_for_target_ks_optimized[i])) high_error_optimized = np.multiply(high_error_optimized,target_value_ks_calculated_with_cantera_optimized[i]) low_error_optimized = np.exp(np.array(sigma_list_for_target_ks_optimized[i])*-1) low_error_optimized = np.multiply(low_error_optimized,target_value_ks_calculated_with_cantera_optimized[i]) #plt.semilogy(target_value_temps_optimized[i],high_error_optimized,'b--') a, b = zip(*sorted(zip(target_value_temps_optimized[i],high_error_optimized))) plt.semilogy(a,b,'b--') # print(a,b) a, b = zip(*sorted(zip(target_value_temps_optimized[i],low_error_optimized))) plt.semilogy(a,b,'b--') #stubb Temp_original,k_original = rate_constant_over_temperature_range_from_cantera(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]), gas_original, initial_temperature=initial_temperature, final_temperature=final_temperature, pressure=1, conditions={'H2':2,'O2':1,'Ar':4}) plt.semilogy(Temp_original,k_original,'r') high_error_original = np.exp(sigma_list_for_target_ks_original[unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))]) high_error_original = np.multiply(high_error_original,target_value_ks_calculated_with_cantera_original[unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))]) low_error_original = np.exp(np.array(sigma_list_for_target_ks_original[unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))])*-1) low_error_original = np.multiply(low_error_original,target_value_ks_calculated_with_cantera_original[unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))]) a, b = zip(*sorted(zip(target_value_temps_original[unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))],high_error_original))) plt.semilogy(a,b,'r--') a, b = zip(*sorted(zip(target_value_temps_original[unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))],low_error_original))) plt.semilogy(a,b,'r--') #plt.semilogy(target_value_temps_optimized[i],target_value_ks_optimized[i],'o',color='black') plt.semilogy(target_value_temps_optimized_for_plotting[i],target_value_ks_optimized_for_plotting[i],'o',color='black') plt.xlabel('Temperature (K)') plt.ylabel('Kmol/m^3-s') plt.title(reaction_list_from_mechanism[reaction]) plt.savefig(self.working_directory+'/'+reaction_list_from_mechanism[reaction]+'.pdf', bbox_inches='tight') plt.savefig(self.working_directory+'/'+reaction_list_from_mechanism[reaction]+'.svg', bbox_inches='tight') elif bool(self.target_value_rate_constant_csv) and self.k_target_values=='Off': unique_reactions_optimized=[] unique_reactions_original = [] reaction_list_from_mechanism_original = gas_original.reaction_equations() reaction_list_from_mechanism = gas_optimized.reaction_equations() k_target_value_csv = pd.read_csv(self.target_value_rate_constant_csv) for row in range(k_target_value_csv.shape[0]): unique_reactions_optimized.append(reaction_list_from_mechanism.index(k_target_value_csv['Reaction'][row])) unique_reactions_original.append(reaction_list_from_mechanism_original.index(k_target_value_csv['Reaction'][row])) unique_reactions_optimized = unique_list(unique_reactions_optimized) 
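# --- Illustrative sketch (not part of the original module) ------------------
# calculate_sigmas_for_rate_constants() above propagates the covariance matrix
# through each rate-constant target's sensitivity row,
#     sigma_k = sqrt( s . C . s^T ),
# giving one ln-space uncertainty per target.  A vectorized numpy sketch of
# that propagation; propagate_rate_constant_sigmas is an illustrative name.
import numpy as np

def propagate_rate_constant_sigmas(S_target_rows, covariance):
    """Return one ln-space sigma per row of the k-target S matrix."""
    S = np.atleast_2d(np.asarray(S_target_rows, dtype=float))
    C = np.asarray(covariance, dtype=float)
    variances = np.einsum('ij,jk,ik->i', S, C, S)
    return np.sqrt(variances)

# Tiny check with a diagonal covariance:
#   S = np.array([[1.0, 2.0]]); C = np.diag([0.04, 0.01])
#   propagate_rate_constant_sigmas(S, C)   # -> sqrt(0.04 + 4*0.01) = 0.2828...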
unique_reactions_original = unique_list(unique_reactions_original) ###################### target_value_temps_optimized,target_value_ks_optimized = sort_rate_constant_target_values(k_target_value_csv,unique_reactions_optimized,gas_optimized) target_value_temps_original,target_value_ks_original = sort_rate_constant_target_values(k_target_value_csv,unique_reactions_original,gas_original) ############################################# target_value_ks_calculated_with_cantera_optimized = calculating_target_value_ks_from_cantera_for_sigmas(k_target_value_csv,gas_optimized,unique_reactions_optimized) target_value_ks_calculated_with_cantera_original = calculating_target_value_ks_from_cantera_for_sigmas(k_target_value_csv,gas_original,unique_reactions_original) for i,reaction in enumerate(unique_reactions_optimized): plt.figure() Temp_optimized,k_optimized = rate_constant_over_temperature_range_from_cantera(reaction, gas_optimized, initial_temperature=250, final_temperature=2500, pressure=1, conditions={'H2':2,'O2':1,'Ar':4}) plt.semilogy(Temp_optimized,k_optimized,'b') Temp_original,k_original = rate_constant_over_temperature_range_from_cantera(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]), gas_original, initial_temperature=250, final_temperature=2500, pressure=1, conditions={'H2':2,'O2':1,'Ar':4}) plt.semilogy(Temp_original,k_original,'r') plt.semilogy(target_value_temps_optimized[i],target_value_ks_optimized[i],'o',color='black') plt.xlabel('Temperature (K)') plt.ylabel('Kmol/m^3-s') plt.title(reaction_list_from_mechanism[reaction]) plt.savefig(self.working_directory+'/'+reaction_list_from_mechanism[reaction]+'.pdf', bbox_inches='tight') plt.savefig(self.working_directory+'/'+reaction_list_from_mechanism[reaction]+'.svg', bbox_inches='tight') def plotting_X_itterations(self,list_of_X_values_to_plot = [], list_of_X_array=[],number_of_iterations=None): for value in list_of_X_values_to_plot: temp = [] for array in list_of_X_array: temp.append(array[value][0]) plt.figure() plt.plot(np.arange(0,number_of_iterations,1),temp) return def getting_matrix_diag(self,cov_matrix): diag = cov_matrix.diagonal() return diag def Y_matrix_plotter(self,Y_matrix,exp_dict_list_optimized,y_matrix,sigma): #sigmas =[[] for x in range(len(self.simulation_lengths_of_experimental_data))] counter=0 for x in range(len(self.simulation_lengths_of_experimental_data)): observable_counter = 0 for y in range(len(self.simulation_lengths_of_experimental_data[x])): #for z in np.arange(counter,(self.simulation_lengths_of_experimental_data[x][y]+counter)): plt.figure() Y_values_to_plot = list(Y_matrix[counter:self.simulation_lengths_of_experimental_data[x][y]+counter,:]) y_values_to_plot = list(y_matrix[counter:self.simulation_lengths_of_experimental_data[x][y]+counter,:]) sigmas_to_plot = list(sigma[counter:self.simulation_lengths_of_experimental_data[x][y]+counter,:]) if 'perturbed_coef' in exp_dict_list_optimized[x].keys(): wavelengths = self.parsed_yaml_list[x]['absorbanceCsvWavelengths'][0] time = exp_dict_list_optimized[x]['absorbance_experimental_data'][0]['time'] plt.subplot(4, 1, 1) plt.title('Experiment_'+str(x+1)+'_Wavelength_'+str(wavelengths)) plt.plot(time*1e3,Y_values_to_plot) plt.tick_params(labelbottom=False) plt.ylabel('Y_matrix') plt.subplot(plt.subplot(4, 1, 2)) plt.plot(time*1e3,y_values_to_plot) plt.tick_params(labelbottom=False) plt.ylabel('y_matrix') plt.subplot(plt.subplot(4, 1, 3)) plt.plot(time*1e3,sigmas_to_plot) plt.tick_params(labelbottom=False) plt.ylabel('sigma') 
plt.subplot(plt.subplot(4, 1, 4)) plt.plot(time*1e3,np.array(Y_values_to_plot)/np.array(sigmas_to_plot)) plt.ylabel('Y/sigma') plt.xlabel('time') plt.savefig(self.working_directory+'/'+'Experiment_'+str(x+1)+' '+'Absorbance at'+'_'+str(wavelengths)+'.pdf', bbox_inches='tight') else: time = exp_dict_list_optimized[x]['experimental_data'][y]['Time'] plt.subplot(4, 1, 1) plt.plot(time*1e3,Y_values_to_plot) plt.tick_params(labelbottom=False) plt.title('Experiment_'+str(x+1)+'_observable_'+exp_dict_list_optimized[0]['observables'][observable_counter]) plt.ylabel('Y_matrix') plt.subplot(plt.subplot(4, 1, 2)) plt.plot(time*1e3,y_values_to_plot) plt.tick_params(labelbottom=False) plt.ylabel('y_matrix') plt.subplot(plt.subplot(4, 1, 3)) plt.plot(time*1e3,sigmas_to_plot) plt.tick_params(labelbottom=False) plt.ylabel('sigma') plt.subplot(plt.subplot(4, 1, 4)) plt.plot(time*1e3,np.array(Y_values_to_plot)/np.array(sigmas_to_plot)) plt.ylabel('Y/sigma') plt.xlabel('time') plt.savefig('Experiment_'+str(x+1)+'_observable_'+exp_dict_list_optimized[0]['observables'][observable_counter]+'.pdf', bbox_inches='tight') observable_counter+=1 counter = counter + self.simulation_lengths_of_experimental_data[x][y] return def shorten_sigma(self): flat_list = [item for sublist in self.simulation_lengths_of_experimental_data for item in sublist] length = sum(flat_list) observables_list = self.Ydf['value'].tolist()[length:] short_sigma = list(self.sigma)[length:] #print(flat_list) if bool(self.target_value_rate_constant_csv) and self.k_target_values=='On': k_target_value_csv = pd.read_csv(self.target_value_rate_constant_csv) shape = k_target_value_csv.shape[0] slc = len(observables_list) - shape observables_list = observables_list[:slc] short_sigma = short_sigma[:slc] short_sigma = np.array(short_sigma) self.short_sigma = short_sigma return def sort_top_uncertainty_weighted_sens(self,top_sensitivity=10): S_matrix_copy = copy.deepcopy(self.S_matrix) S_matrix_copy = copy.deepcopy(self.S_matrix_original) self.shorten_sigma() sigma_csv = self.sigma_uncertainty_weighted_sensitivity_csv if bool(sigma_csv): df = pd.read_csv(sigma_csv) Sig = np.array(df['Sigma']) Sig = Sig.reshape((Sig.shape[0],1)) elif self.sigma_ones==True: shape = len(self.short_sigma) Sig = np.ones((shape,1)) else: Sig = self.short_sigma #Sig = self.sigma for pp in range(np.shape(S_matrix_copy)[1]): S_matrix_copy[:,pp] *=Sig[pp] sensitivitys =[[] for x in range(len(self.simulation_lengths_of_experimental_data))] topSensitivities = [[] for x in range(len(self.simulation_lengths_of_experimental_data))] start=0 stop = 0 for x in range(len(self.simulation_lengths_of_experimental_data)): for y in range(len(self.simulation_lengths_of_experimental_data[x])): stop = self.simulation_lengths_of_experimental_data[x][y] + start temp = S_matrix_copy[start:stop,:] sort_s= pd.DataFrame(temp).reindex(pd.DataFrame(temp).abs().max().sort_values(ascending=False).index, axis=1) cc=pd.DataFrame(sort_s).iloc[:,:top_sensitivity] top_five_reactions=cc.columns.values.tolist() topSensitivities[x].append(top_five_reactions) #ccn=pd.DataFrame(cc).as_matrix() ccn=pd.DataFrame(cc).to_numpy() sensitivitys[x].append(ccn) start = start + self.simulation_lengths_of_experimental_data[x][y] return sensitivitys,topSensitivities def getting_time_profiles_for_experiments(self, exp_dict_list_optimized): time_profiles =[[] for x in range(len(self.simulation_lengths_of_experimental_data))] observables = [[] for x in range(len(self.simulation_lengths_of_experimental_data))] for i,exp in 
enumerate(self.exp_dict_list_optimized): observable_counter=0 for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables'] + exp['ignition_delay_observables']): if observable == None: continue if observable in exp['mole_fraction_observables']: if re.match('[Ss]hock [Tt]ube',exp['simulation_type']): time_profiles[i].append(exp['experimental_data'][observable_counter]['Time']*1e3) observables[i].append(observable) observable_counter+=1 elif re.match('[Jj][Ss][Rr]',exp['simulation_type']): time_profiles[i].append(exp['experimental_data'][observable_counter]['Temperature']) observables[i].append(observable) observable_counter+=1 elif re.match('[Ff]low[ -][Rr][eactor]',exp['simulation_type']): time_profiles[i].append(exp['experimental_data'][observable_counter]['Temperature']) observables[i].append(observable) observable_counter+=1 elif observable in exp['concentration_observables']: if re.match('[Ss]hock [Tt]ube',exp['simulation_type']): time_profiles[i].append(exp['experimental_data'][observable_counter]['Time']*1e3) observables[i].append(observable) observable_counter+=1 elif re.match('[Jj][Ss][Rr]',exp['simulation_type']): time_profiles[i].append(exp['experimental_data'][observable_counter]['Temperature']) observables[i].append(observable) observable_counter+=1 elif re.match('[Ff]low[ -][Rr][eactor]',exp['simulation_type']): time_profiles[i].append(exp['experimental_data'][observable_counter]['Temperature']) observables[i].append(observable) observable_counter+=1 elif observable in exp['ignition_delay_observables']: if re.match('[Ss]hock [Tt]ube',exp['simulation_type']): time_profiles[i].append(exp['experimental_data'][observable_counter]['temperature']) observables[i].append(observable) observable_counter+=1 if 'perturbed_coef' in exp.keys(): wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths'] for k,wl in enumerate(wavelengths): time_profiles[i].append(exp['absorbance_experimental_data'][k]['time']*1e3) observables[i].append('Absorbance_'+str(wl)) self.time_profiles = time_profiles self.observable_list = observables return time_profiles def get_observables_list(self): #use this function to return observable list and uncertainty pass in csv and get unc and csv sigma_csv = self.sigma_uncertainty_weighted_sensitivity_csv gas = ct.Solution(self.new_cti) reaction_equations = gas.reaction_equations() if bool(sigma_csv): df = pd.read_csv(sigma_csv) Sig = df['Sigma'].values Sig = np.array(Sig) Sig = Sig.reshape((Sig.shape[0],1)) observable_list = df['Observable'].tolist() self.sigma_list = Sig #print(self.sigma_list) return observable_list else: flat_list = [item for sublist in self.simulation_lengths_of_experimental_data for item in sublist] length = sum(flat_list) observables_list = self.Ydf['value'].tolist()[length:] if bool(self.target_value_rate_constant_csv) and self.k_target_values=='On': k_target_value_csv = pd.read_csv(self.target_value_rate_constant_csv) shape = k_target_value_csv.shape[0] slc = len(observables_list) - shape observables_list = observables_list[:slc] #transform observable list observable_list_transformed = [] for obs in observables_list: lst = obs.split('_') if lst[0] =='A': reaction_indx = int(lst[1]) reaction = reaction_equations[reaction_indx] observable_list_transformed.append('A_'+reaction) elif lst[0] =='n': reaction_indx = int(lst[1]) reaction = reaction_equations[reaction_indx] observable_list_transformed.append('n_'+reaction) elif lst[0] =='Ea': reaction_indx = int(lst[1]) reaction = 
reaction_equations[reaction_indx] observable_list_transformed.append('Ea_'+reaction) else: observable_list_transformed.append(obs) return observable_list_transformed def plotting_uncertainty_weighted_sens(self): sensitivities,top_sensitivities = self.sort_top_uncertainty_weighted_sens() observables_list = self.get_observables_list() if bool(self.sigma_uncertainty_weighted_sensitivity_csv): sigma_list = self.sigma_list else: sigma_list = list(self.short_sigma) #start here time_profiles = self.getting_time_profiles_for_experiments(self.exp_dict_list_optimized) list_of_experiment_observables = self.observable_list def subplot_function(number_of_observables_in_simulation,time_profiles,sensitivities,top_sensitivity_single_exp,observables_list,list_of_experiment_observables,experiment_number): #plt.figure(figsize=(2,6)) #stub plt.figure() for plot_number in range(number_of_observables_in_simulation): for c,top_columns in enumerate(top_sensitivity_single_exp[plot_number]): plt.subplot(number_of_observables_in_simulation,1,plot_number+1) if plot_number==0: plt.title('Experiment_'+str(experiment_number+1)) plt.plot(time_profiles[plot_number],sensitivities[plot_number][:,c],label = observables_list[top_columns] +'_'+str(sigma_list[top_columns])) plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=1.5) plt.ylabel(list_of_experiment_observables[plot_number]) top,bottom = plt.ylim() left,right = plt.xlim() plt.legend(ncol=5, loc='upper left',bbox_to_anchor=(-.5,-.3)) #plt.legend(ncol=3, loc='upper left',bbox_to_anchor=(1.2,2),fontsize=2) if self.simulation_run==None: plt.savefig(self.working_directory+'/'+'Experiment_'+str(experiment_number+1)+'.pdf', bbox_inches='tight') else: plt.title('Experiment_'+str(self.simulation_run)) plt.savefig(self.working_directory+'/'+'Experiment_'+str(self.simulation_run)+'.pdf', bbox_inches='tight') for x in range(len(sensitivities)): number_of_observables_in_simulation = len(sensitivities[x]) subplot_function(number_of_observables_in_simulation,time_profiles[x],sensitivities[x],top_sensitivities[x],observables_list,list_of_experiment_observables[x],x) return def plotting_rate_constants_six_paramter_fit(self,optimized_cti_file='', original_cti_file='', initial_temperature=250, final_temperature=2500, master_equation_reactions = [], six_parameter_fit_dict_optimized = {}, six_parameter_fit_dict_nominal = {}, six_parameter_fit_sensitivity_dict = {}): gas_optimized = ct.Solution(optimized_cti_file) gas_original = ct.Solution(original_cti_file) def unique_list(seq): checked = [] for e in seq: if e not in checked: checked.append(e) return checked ################################################################################ def target_values_for_S_six_parameter_fit(target_value_csv, exp_dict_list, S_matrix, master_equation_reaction_list = [], six_parameter_fit_sensitivity_dict = {}): target_value_csv = pd.read_csv(target_value_csv) target_reactions = target_value_csv['Reaction'] target_temp = target_value_csv['temperature'] target_press = target_value_csv['pressure'] target_k = target_value_csv['k'] reactions_in_cti_file = exp_dict_list[0]['simulation'].processor.solution.reaction_equations() number_of_reactions_in_cti = len(reactions_in_cti_file) As = [] Ns = [] Eas = [] Number_of_MP = [] #nested_reaction_list = [[] for x in range(len(master_equation_reaction_list))] #print(six_parameter_fit_sensitivity_dict.keys()) def create_empty_nested_reaction_list(): nested_reaction_list = [[] for x in range(len(master_equation_reaction_list))] for 
reaction in master_equation_reaction_list: for i,MP in enumerate(six_parameter_fit_sensitivity_dict[reaction]['A']): nested_reaction_list[master_equation_reaction_list.index(reaction)].append(0) #copy.deepcopy(nested_reaction_list) #don't think i need this return nested_reaction_list MP_stack = [] target_values_to_stack = [] for i,reaction in enumerate(target_reactions): #temp_array = np.zeros((1,Number_of_MP)) if reaction in master_equation_reaction_list: nested_reaction_list = create_empty_nested_reaction_list() for s,sensitivity in enumerate(six_parameter_fit_sensitivity_dict[reaction]['A']): #stub #start here tomorrow nested_reaction_list[master_equation_reaction_list.index(reaction)][s] = 1*six_parameter_fit_sensitivity_dict[reaction]['A'][s] + np.log(target_temp[i])*six_parameter_fit_sensitivity_dict[reaction]['n'][s] + (-1000/target_temp[i])*six_parameter_fit_sensitivity_dict[reaction]['Ea'][s] + (-(1000/target_temp[i])**3)*six_parameter_fit_sensitivity_dict[reaction]['c'][s]+ (-(1000/target_temp[i])**-1)*six_parameter_fit_sensitivity_dict[reaction]['d'][s] + (-(1000/target_temp[i])**-3)*six_parameter_fit_sensitivity_dict[reaction]['f'][s] #nested_reaction_list[master_equation_reaction_list.index(reaction)][s] = 1*six_parameter_fit_sensitivity_dict[reaction]['A'][s] + np.log(target_temp[i])*six_parameter_fit_sensitivity_dict[reaction]['n'][s] + (-1/target_temp[i])*six_parameter_fit_sensitivity_dict[reaction]['Ea'][s] + (-(1000/target_temp[i])**3)*six_parameter_fit_sensitivity_dict[reaction]['c'][s]+ (-(1000/target_temp[i])**-1)*six_parameter_fit_sensitivity_dict[reaction]['d'][s]*(1000*4.184)**-1 + (-(1/target_temp[i])**-3)*six_parameter_fit_sensitivity_dict[reaction]['f'][s]*(1000*4.184)**-3 temp = nested_reaction_list flat_list = [item for sublist in temp for item in sublist] MP_stack.append(nested_reaction_list) flat_list = np.array(flat_list) flat_list = flat_list.reshape((1,flat_list.shape[0])) target_values_to_stack.append(flat_list) else: A_temp = np.zeros((1,number_of_reactions_in_cti-len(master_equation_reaction_list))) N_temp = np.zeros((1,number_of_reactions_in_cti-len(master_equation_reaction_list))) Ea_temp = np.zeros((1,number_of_reactions_in_cti-len(master_equation_reaction_list))) #decide if this mapping is correct A_temp[0,reactions_in_cti_file.index(reaction)] = 1 N_temp [0,reactions_in_cti_file.index(reaction)] = np.log(target_temp[i]) Ea_temp[0,reactions_in_cti_file.index(reaction)] = (-1/target_temp[i]) As.append(A_temp) Ns.append(N_temp) Eas.append(Ea_temp) A_temp = A_temp.reshape((1,A_temp.shape[1])) N_temp = N_temp.reshape((1,N_temp.shape[1])) Ea_temp = Ea_temp.reshape((1,Ea_temp.shape[1])) target_values_to_stack.append(np.hstack((A_temp,N_temp,Ea_temp))) # might need to edit this to pass in s? 
and S_matrix = S_matrix shape_s = S_matrix.shape S_target_values = [] for i,row in enumerate(target_values_to_stack): if target_reactions[i] in master_equation_reaction_list: zero_to_append_infront = np.zeros((1,((number_of_reactions_in_cti-len(master_equation_reaction_list))*3))) zero_to_append_behind = np.zeros((1, shape_s[1] - ((number_of_reactions_in_cti-len(master_equation_reaction_list))*3) - np.shape(row)[1] )) temp_array = np.hstack((zero_to_append_infront,row,zero_to_append_behind)) S_target_values.append(temp_array) else: zero_to_append_behind = np.zeros((1,shape_s[1]-np.shape(row)[1])) temp_array = np.hstack((row,zero_to_append_behind)) S_target_values.append(temp_array) S_target_values = np.vstack((S_target_values)) return S_target_values ################################################################################ def calculate_six_parameter_fit(reaction,dictonary,temperature): #finish editing this #calc Ea,c,d,F seprately A = dictonary[reaction]['A'] n = dictonary[reaction]['n'] Ea_temp = dictonary[reaction]['Ea']/(1.987*temperature) c_temp = dictonary[reaction]['c']/((1.987*temperature)**3) d_temp = dictonary[reaction]['d']*(1.987*temperature) f_temp = dictonary[reaction]['f']* ((1.987*temperature)**3) k = A*(temperature**n)*np.exp(-Ea_temp-c_temp-d_temp-f_temp) return k def sort_rate_constant_target_values(parsed_csv,unique_reactions,gas): reaction_list_from_mechanism = gas.reaction_equations() target_value_ks = [[] for reaction in range(len(unique_reactions))] target_value_temps = [[] for reaction in range(len(unique_reactions))] reaction_list_from_mechanism = gas.reaction_equations() for i,reaction in enumerate(parsed_csv['Reaction']): idx = reaction_list_from_mechanism.index(reaction) target_value_ks[unique_reactions.index(idx)].append(parsed_csv['k'][i]) target_value_temps[unique_reactions.index(idx)].append(parsed_csv['temperature'][i]) return target_value_temps,target_value_ks def rate_constant_over_temperature_range_from_cantera(reaction_number, gas, initial_temperature=250, final_temperature=2500, pressure=1, conditions = {'H2':2,'O2':1,'N2':4}, dictonary={}, master_equation_reactions=[]): Temp = [] k = [] reaction_string = gas.reaction_equations()[reaction_number] for temperature in np.arange(initial_temperature,final_temperature,1): if reaction_string in master_equation_reactions: k.append(calculate_six_parameter_fit(reaction_string,dictonary,temperature)) Temp.append(temperature) #start editing here else: gas.TPX = temperature,pressure*101325,conditions Temp.append(temperature) k.append(gas.forward_rate_constants[reaction_number]*1000) return Temp,k def calculate_sigmas_for_rate_constants(k_target_value_S_matrix,k_target_values_parsed_csv,unique_reactions,gas,covarience): reaction_list_from_mechanism = gas.reaction_equations() sigma_list_for_target_ks = [[] for reaction in range(len(unique_reactions))] shape = k_target_value_S_matrix.shape for row in range(shape[0]): #print(row) SC = np.dot(k_target_value_S_matrix[row,:],covarience) sigma_k = np.dot(SC,np.transpose(k_target_value_S_matrix[row,:])) sigma_k = np.sqrt(sigma_k) #print(row) #print(k_target_values_parsed_csv['Reaction'][row]) indx = reaction_list_from_mechanism.index(k_target_values_parsed_csv['Reaction'][row]) sigma_list_for_target_ks[unique_reactions.index(indx)].append(sigma_k) return sigma_list_for_target_ks def calculating_target_value_ks_from_cantera_for_sigmas(k_target_values_parsed_csv,gas,unique_reactions,six_parameter_fit_dictonary,master_equation_reactions): target_value_ks = [[] for 
reaction in range(len(unique_reactions))] target_reactions = k_target_values_parsed_csv['Reaction'] target_temp = k_target_values_parsed_csv['temperature'] target_press = k_target_values_parsed_csv['pressure'] reactions_in_cti_file = gas.reaction_equations() #print(reactions_in_cti_file) for i,reaction in enumerate(target_reactions): if reaction in master_equation_reactions: k = calculate_six_parameter_fit(reaction,six_parameter_fit_dictonary,target_temp[i]) indx = reactions_in_cti_file.index(reaction) target_value_ks[unique_reactions.index(indx)].append(k) else: if target_press[i] == 0: pressure = 1e-9 else: pressure = target_press[i] gas.TPX = target_temp[i],pressure*101325,{'H2O2':0.003094,'O2':0.000556,'H2O':0.001113,'Ar':0.995237} reaction_number_in_cti = reactions_in_cti_file.index(reaction) k = gas.forward_rate_constants[reaction_number_in_cti] indx = reactions_in_cti_file.index(reaction) target_value_ks[unique_reactions.index(indx)].append(k*1000) return target_value_ks if bool(self.target_value_rate_constant_csv) and self.k_target_values=='On': ### make new s matrix with the new csv file, and make sure we are plotting the old one S_matrix_k_target_values_extra = target_values_for_S_six_parameter_fit(self.target_value_rate_constant_csv_extra_values, self.exp_dict_list_optimized, self.S_matrix, master_equation_reaction_list = master_equation_reactions, six_parameter_fit_sensitivity_dict = six_parameter_fit_sensitivity_dict) #make two unique unique_reactions_optimized=[] unique_reactions_original = [] reaction_list_from_mechanism_original = gas_original.reaction_equations() reaction_list_from_mechanism = gas_optimized.reaction_equations() k_target_value_csv_extra = pd.read_csv(self.target_value_rate_constant_csv_extra_values) k_target_value_csv = pd.read_csv(self.target_value_rate_constant_csv) for row in range(k_target_value_csv_extra.shape[0]): unique_reactions_optimized.append(reaction_list_from_mechanism.index(k_target_value_csv_extra['Reaction'][row])) unique_reactions_original.append(reaction_list_from_mechanism_original.index(k_target_value_csv_extra['Reaction'][row])) unique_reactions_optimized = unique_list(unique_reactions_optimized) unique_reactions_original = unique_list(unique_reactions_original) sigma_list_for_target_ks_optimized = calculate_sigmas_for_rate_constants(S_matrix_k_target_values_extra,k_target_value_csv_extra,unique_reactions_optimized,gas_optimized,self.covarience) self.sigma_list_for_target_ks_optimized = sigma_list_for_target_ks_optimized sigma_list_for_target_ks_original = calculate_sigmas_for_rate_constants(S_matrix_k_target_values_extra,k_target_value_csv_extra,unique_reactions_original,gas_original,self.original_covariance) self.sigma_list_for_target_ks_original = sigma_list_for_target_ks_original ###################### target_value_temps_optimized,target_value_ks_optimized = sort_rate_constant_target_values(k_target_value_csv_extra,unique_reactions_optimized,gas_optimized) target_value_temps_original,target_value_ks_original = sort_rate_constant_target_values(k_target_value_csv_extra,unique_reactions_original,gas_original) ############################################# unique_reactions_optimized_for_plotting=[] unique_reactions_original_for_plotting = [] for row in range(k_target_value_csv.shape[0]): unique_reactions_optimized_for_plotting.append(reaction_list_from_mechanism.index(k_target_value_csv['Reaction'][row])) unique_reactions_original_for_plotting.append(reaction_list_from_mechanism_original.index(k_target_value_csv['Reaction'][row])) 
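# --- Illustrative sketch (not part of the routines above; helper name and
# coefficient values are made up) ---------------------------------------------
# calculating_target_value_ks_from_cantera_for_sigmas() evaluates master-equation
# reactions with calculate_six_parameter_fit(), i.e. the extended Arrhenius form
#   k(T) = A * T**n * exp( -Ea/(R*T) - c/(R*T)**3 - d*(R*T) - f*(R*T)**3 )
# with R = 1.987 cal/(mol*K).  The snippet below only demonstrates that
# functional form over the 250-2500 K range used in the plots.
def _example_six_parameter_fit_curve():
    import numpy as np
    R = 1.987
    coeffs = {'A': 1.0e13, 'n': 0.5, 'Ea': 2000.0, 'c': 0.0, 'd': 1.0e-5, 'f': 0.0}
    T = np.arange(250, 2500, 1.0)
    k = (coeffs['A'] * T**coeffs['n']
         * np.exp(-coeffs['Ea']/(R*T)
                  - coeffs['c']/((R*T)**3)
                  - coeffs['d']*(R*T)
                  - coeffs['f']*((R*T)**3)))
    return T, k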
unique_reactions_optimized_for_plotting = unique_list(unique_reactions_optimized) unique_reactions_original_for_plotting = unique_list(unique_reactions_original) target_value_temps_optimized_for_plotting,target_value_ks_optimized_for_plotting = sort_rate_constant_target_values(k_target_value_csv,unique_reactions_optimized_for_plotting,gas_optimized) target_value_temps_original_for_plotting,target_value_ks_original_for_plotting = sort_rate_constant_target_values(k_target_value_csv,unique_reactions_original_for_plotting,gas_original) ############################################# target_value_ks_calculated_with_cantera_optimized = calculating_target_value_ks_from_cantera_for_sigmas(k_target_value_csv_extra,gas_optimized,unique_reactions_optimized,six_parameter_fit_dict_optimized,master_equation_reactions) target_value_ks_calculated_with_cantera_original = calculating_target_value_ks_from_cantera_for_sigmas(k_target_value_csv_extra,gas_original,unique_reactions_original,six_parameter_fit_dict_nominal,master_equation_reactions) #print(target_value_ks_calculated_with_cantera_original) for i,reaction in enumerate(unique_reactions_optimized): plt.figure() Temp_optimized,k_optimized = rate_constant_over_temperature_range_from_cantera(reaction, gas_optimized, initial_temperature=250, final_temperature=2500, pressure=1.635, conditions={'H2O2':0.003094,'O2':0.000556,'H2O':0.001113,'Ar':0.995237}, dictonary = six_parameter_fit_dict_optimized, master_equation_reactions = master_equation_reactions) plt.semilogy(Temp_optimized,k_optimized,'b') #calculate sigmas #print(sigma_list_for_target_ks_optimized[i]) high_error_optimized = np.exp(np.array(sigma_list_for_target_ks_optimized[i])) #print(high_error_optimized) high_error_optimized = np.multiply(high_error_optimized,target_value_ks_calculated_with_cantera_optimized[i]) low_error_optimized = np.exp(np.array(sigma_list_for_target_ks_optimized[i])*-1) low_error_optimized = np.multiply(low_error_optimized,target_value_ks_calculated_with_cantera_optimized[i]) # plt.semilogy(target_value_temps_optimized[i],high_error_optimized,'b--') a, b = zip(*sorted(zip(target_value_temps_optimized[i],high_error_optimized))) #plt.scatter(a,b,color='blue') plt.semilogy(a,b,'b--') a, b = zip(*sorted(zip(target_value_temps_optimized[i],low_error_optimized))) plt.semilogy(a,b,'b--') #plt.scatter(a,b,color='blue') # print(a,b) Temp_original,k_original = rate_constant_over_temperature_range_from_cantera(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]), gas_original, initial_temperature=250, final_temperature=2500, pressure=1.635, conditions={'H2O2':0.003094,'O2':0.000556,'H2O':0.001113,'Ar':0.995237}, dictonary = six_parameter_fit_dict_nominal, master_equation_reactions = master_equation_reactions) plt.semilogy(Temp_original,k_original,'r') # plt.xlim((0,3000)) #plt.ylim((10**9,10**15)) #print(unique_reactions_original) # print(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction])) #print(unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))) high_error_original = np.exp(sigma_list_for_target_ks_original[unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))]) high_error_original = np.multiply(high_error_original,target_value_ks_calculated_with_cantera_original[unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))]) 
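# --- Illustrative sketch (not part of the loop above; arrays are made up) -----
# The statements around this point turn the propagated uncertainty
# sigma = sqrt(s @ C @ s.T) for each ln(k) target into multiplicative bands
# k*exp(+sigma) and k*exp(-sigma).  This standalone helper shows that step in
# isolation with a random symmetric positive-definite stand-in covariance.
def _example_sigma_band():
    import numpy as np
    rng = np.random.default_rng(0)
    n_params = 5
    A = rng.normal(size=(n_params, n_params))
    C = A @ A.T                          # stand-in covariance matrix
    s = rng.normal(size=(1, n_params))   # stand-in sensitivity row for one target
    sigma_ln_k = np.sqrt(s @ C @ s.T).item()
    k_nominal = 1.0e13                   # stand-in rate constant
    return k_nominal*np.exp(-sigma_ln_k), k_nominal*np.exp(sigma_ln_k)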
low_error_original = np.exp(np.array(sigma_list_for_target_ks_original[unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))])*-1) low_error_original = np.multiply(low_error_original,target_value_ks_calculated_with_cantera_original[unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))]) a, b = zip(*sorted(zip(target_value_temps_original[unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))],high_error_original))) plt.semilogy(a,b,'r--') #plt.scatter(a,b,color='red') a, b = zip(*sorted(zip(target_value_temps_original[unique_reactions_original.index(reaction_list_from_mechanism_original.index(reaction_list_from_mechanism[reaction]))],low_error_original))) plt.semilogy(a,b,'r--') #plt.scatter(a,b,color='red') plt.semilogy(target_value_temps_optimized_for_plotting[i],target_value_ks_optimized_for_plotting[i],'o',color='black') plt.xlabel('Temperature [K]') #plt.ylabel('Kmol/m^3-s') plt.ylabel(r'k [$\frac{cm^3}{{mol s}}$]') plt.title(reaction_list_from_mechanism[reaction]) plt.tick_params(axis ='both', direction ='in') plt.tick_params(axis ='both', direction ='in',which='minor') plt.savefig(os.path.join(self.working_directory,reaction_list_from_mechanism[reaction]+'.pdf'), bbox_inches='tight') plt.savefig(os.path.join(self.working_directory,reaction_list_from_mechanism[reaction]+'.svg'), bbox_inches='tight') elif bool(self.target_value_rate_constant_csv) and self.k_target_values=='Off': unique_reactions_optimized=[] unique_reactions_original = [] reaction_list_from_mechanism_original = gas_original.reaction_equations() reaction_list_from_mechanism = gas_optimized.reaction_equations() k_target_value_csv = pd.read_csv(self.target_value_rate_constant_csv) for row in range(k_target_value_csv.shape[0]): unique_reactions_optimized.append(reaction_list_from_mechanism.index(k_target_value_csv['Reaction'][row])) unique_reactions_original.append(reaction_list_from_mechanism_original.index(k_target_value_csv['Reaction'][row])) unique_reactions_optimized = unique_list(unique_reactions_optimized) unique_reactions_original = unique_list(unique_reactions_original) ###################### target_value_temps_optimized,target_value_ks_optimized = sort_rate_constant_target_values(k_target_value_csv,unique_reactions_optimized,gas_optimized) target_value_temps_original,target_value_ks_original = sort_rate_constant_target_values(k_target_value_csv,unique_reactions_original,gas_original) ############################################# target_value_ks_calculated_with_cantera_optimized = calculating_target_value_ks_from_cantera_for_sigmas(k_target_value_csv,gas_optimized,unique_reactions_optimized) target_value_ks_calculated_with_cantera_original = calculating_target_value_ks_from_cantera_for_sigmas(k_target_value_csv,gas_original,unique_reactions_original) for i,reaction in enumerate(unique_reactions_optimized): plt.figure() Temp_optimized,k_optimized = rate_constant_over_temperature_range_from_cantera(reaction, gas_optimized, initial_temperature=250, final_temperature=2500, pressure=1, conditions={'H2':2,'O2':1,'Ar':4}) plt.semilogy(Temp_optimized,k_optimized,'b') Temp_original,k_original = rate_constant_over_temperature_range_from_cantera(unique_reactions_original[unique_reactions_original.index(reaction)], gas_original, initial_temperature=250, final_temperature=2500, pressure=1, conditions={'H2':2,'O2':1,'Ar':4}) 
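# --- Unit note (illustrative helper, not called above; value is made up) ------
# The rate constants plotted in these branches come from
# gas.forward_rate_constants multiplied by 1000: for a bimolecular reaction
# Cantera reports k in m^3/(kmol*s), and 1 m^3/kmol = 1e6 cm^3 / 1e3 mol
# = 1e3 cm^3/mol, which gives the cm^3/(mol*s) units used on the y-axis.
def _example_unit_conversion(k_m3_per_kmol_s=2.5e10):
    return k_m3_per_kmol_s * 1000.0   # -> cm^3/(mol*s)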
plt.semilogy(Temp_original,k_original,'r') plt.semilogy(target_value_temps_optimized[i],target_value_ks_optimized[i],'o',color='black') plt.xlabel('Temperature (K)') plt.ylabel('Kmol/m^3-s') plt.title(reaction_list_from_mechanism[reaction]) return S_matrix_k_target_values_extra def plotting_normal_distributions(self, paramter_list, optimized_cti_file='', pdf_distribution_file='', shock_tube_instance=None): all_parameters = shock_tube_instance.posterior_diag_df['parameter'].tolist() df = shock_tube_instance.posterior_diag_df gas_optimized = ct.Solution(optimized_cti_file) for parameter in paramter_list: indx = all_parameters.index(parameter) variance = df['value'][indx] if parameter[0]=='A' or parameter[0]=='n' or parameter[0]=='E': letter,number = parameter.split('_') number = int(number) if 'ElementaryReaction' in str(type(gas_optimized.reaction(number))): A=gas_optimized.reaction(number).rate.pre_exponential_factor n=gas_optimized.reaction(number).rate.temperature_exponent Ea=gas_optimized.reaction(number).rate.activation_energy if 'FalloffReaction' in str(type(gas_optimized.reaction(number))): A=gas_optimized.reaction(number).high_rate.pre_exponential_factor n=gas_optimized.reaction(number).high_rate.temperature_exponent Ea=gas_optimized.reaction(number).high_rate.activation_energy if 'ThreeBodyReaction' in str(type(gas_optimized.reaction(number))): A=gas_optimized.reaction(number).rate.pre_exponential_factor n=gas_optimized.reaction(number).rate.temperature_exponent Ea=gas_optimized.reaction(number).rate.activation_energy else: letter = None if letter =='A': mu = np.log(A*1000) sigma = math.sqrt(variance) sigma = sigma elif letter == 'n': mu = n sigma = math.sqrt(variance) #sigma = sigma/2 elif letter == 'Ea': mu=Ea/1000/4.184 sigma = math.sqrt(variance) sigma = sigma*ct.gas_constant/(1000*4.184) #sigma = sigma/2 else: mu= 0 sigma = math.sqrt(variance) x = np.linspace(mu - 3*sigma, mu + 3*sigma, 100) plt.figure() plt.plot(x, stats.norm.pdf(x, mu, sigma)) plt.xlabel(parameter) plt.ylabel('pdf') plt.savefig(self.working_directory+'/'+parameter+'_distribution'+'_.pdf',bbox_inches='tight') if bool(pdf_distribution_file): df2 = pd.read_csv(pdf_distribution_file) #temp = np.log(np.exp(df2[parameter].values)/9.33e13) #plt.plot(temp,df2['pdf_'+parameter]) plt.plot(df2[parameter],df2['pdf_'+parameter]) plt.savefig(self.working_directory+'/'+parameter+'_distribution'+'_.pdf',bbox_inches='tight') def plotting_joint_normal_distributions(self, coupled_parameters, optimized_cti_file='', joint_data_csv=''): all_parameters = self.shock_tube_instance.posterior_diag_df['parameter'].tolist() df = self.shock_tube_instance.posterior_diag_df gas_optimized = ct.Solution(optimized_cti_file) for couple in coupled_parameters: indx1 = all_parameters.index(couple[0]) indx2 = all_parameters.index(couple[1]) variance1 = df['value'][indx1] variance2 = df['value'][indx2] if couple[0][0]=='A' or couple[0][0]=='n' or couple[0][0]=='E': letter1,number1 = couple[0].split('_') number1 = int(number1) number1_covariance = number1 if letter1=='n': number1_covariance = number1+len(gas_optimized.reaction_equations()) if letter1=='Ea': number1_covariance = number1+len(gas_optimized.reaction_equations())*2 if 'ElementaryReaction' in str(type(gas_optimized.reaction(number1))): A1=gas_optimized.reaction(number1).rate.pre_exponential_factor n1=gas_optimized.reaction(number1).rate.temperature_exponent Ea1=gas_optimized.reaction(number1).rate.activation_energy if 'FalloffReaction' in str(type(gas_optimized.reaction(number1))): 
A1=gas_optimized.reaction(number1).high_rate.pre_exponential_factor n1=gas_optimized.reaction(number1).high_rate.temperature_exponent Ea1=gas_optimized.reaction(number1).high_rate.activation_energy if 'ThreeBodyReaction' in str(type(gas_optimized.reaction(number1))): A1=gas_optimized.reaction(number1).rate.pre_exponential_factor n1=gas_optimized.reaction(number1).rate.temperature_exponent Ea1=gas_optimized.reaction(number1).rate.activation_energy else: letter1 = None mu1=0 mu_x=0 sigma1= math.sqrt(variance1) number1_covariance = indx1 variance_x = variance1 if couple[1][0]=='A' or couple[1][0]=='n' or couple[1][0]=='E': letter2,number2 = couple[1].split('_') number2 = int(number2) number2_covariance = number2 if letter2=='n': number2_covariance = number2+len(gas_optimized.reaction_equations()) if letter2 == 'Ea': number2_covariance = number2+len(gas_optimized.reaction_equations())*2 if 'ElementaryReaction' in str(type(gas_optimized.reaction(number2))): A2=gas_optimized.reaction(number2).rate.pre_exponential_factor n2=gas_optimized.reaction(number2).rate.temperature_exponent Ea2=gas_optimized.reaction(number2).rate.activation_energy if 'FalloffReaction' in str(type(gas_optimized.reaction(number2))): A2=gas_optimized.reaction(number2).high_rate.pre_exponential_factor n2=gas_optimized.reaction(number2).high_rate.temperature_exponent Ea2=gas_optimized.reaction(number2).high_rate.activation_energy if 'ThreeBodyReaction' in str(type(gas_optimized.reaction(number2))): A2=gas_optimized.reaction(number2).rate.pre_exponential_factor n2=gas_optimized.reaction(number2).rate.temperature_exponent Ea2=gas_optimized.reaction(number2).rate.activation_energy else: mu_y=0 mu2=0 letter2=None variance_y = variance2 sigma = math.sqrt(variance2) number2_covariance = indx2 covariance_couple = self.covarience[number1_covariance,number2_covariance] # print(number1_covariance,number2_covariance) #covariance_couple = .00760122 if letter1 =='A': mu1 = np.log(A1*1000) mu_x = mu1 variance_x = variance1 sigma = np.sqrt(variance_x) #sigma = np.exp(sigma) #sigma = sigma*1000 #sigma = np.log(sigma) #sigma = sigma/2 variance_x = sigma**2 #convert to chemkin units if letter1 == 'n': mu1 = n1 mu_x = mu1 variance_x = variance1 sigma = np.sqrt(variance_x) #sigma = sigma/2 variance_x = sigma**2 if letter1 == 'Ea': mu1=Ea1/1000/4.184 mu_x = mu1 variance_x = variance1 sigma = math.sqrt(variance_x) sigma = sigma*ct.gas_constant/(1000*4.184) #sigma = sigma/2 variance_x = sigma**2 if letter2 =='A': mu2 = np.log(A2*1000) mu_y = mu2 variance_y = variance2 sigma = np.sqrt(variance_y) sigma = sigma #sigma = np.exp(sigma) #sigma = sigma*1000 #sigma = np.log(sigma) #sigma = sigma/2 variance_y = sigma**2 #convert to chemkin units if letter2 == 'n': mu2 = n2 mu_y = mu2 variance_y = variance2 sigma = np.sqrt(variance_y) #sigma = sigma/2 variance_y = sigma**2 if letter2 == 'Ea': mu2 = Ea2/1000/4.184 mu_y = mu2 variance_y = variance2 sigma = math.sqrt(variance_y) sigma = sigma*ct.gas_constant/(1000*4.184) #sigma = sigma/2 variance_y = sigma**2 if letter2 =='Ea' or letter1 == 'Ea': covariance_couple = covariance_couple*ct.gas_constant/(1000*4.184) if letter2=='Ea' and letter1=='Ea': covariance_couple = np.sqrt(covariance_couple) covariance_couple = covariance_couple*ct.gas_constant/(1000*4.184) covariance_couple = covariance_couple**2 #if letter1=='A' or letter2=='A': #covariance_couple = np.exp(covariance_couple) #covariance_couple = covariance_couple/2 #covariance_couple = np.log(covariance_couple) x = np.linspace(mu1 - 3*np.sqrt(variance_x), 
mu1 + 3*np.sqrt(variance_x),1000) y = np.linspace(mu2 - 3*np.sqrt(variance_y), mu2 + 3*np.sqrt(variance_y),1000) #x = np.linspace(mu1 - 2*np.sqrt(variance_x), mu1 + 2*np.sqrt(variance_x),1000) #y = np.linspace(mu2 - 2*np.sqrt(variance_y), mu2 + 2*np.sqrt(variance_y),1000) #TEST X,Y = np.meshgrid(x,y) #X, Y = np.meshgrid(x,y) pos = np.empty(X.shape + (2,)) pos[:, :, 0] = X; pos[:, :, 1] = Y rv = multivariate_normal([mu_x, mu_y], [[variance_x, covariance_couple], [covariance_couple, variance_y]]) print(couple,[mu_x, mu_y], [[variance_x, covariance_couple], [covariance_couple, variance_y]]) fig = plt.figure() ax = fig.gca(projection='3d') ax.plot_surface(X, Y, rv.pdf(pos),cmap='viridis',linewidth=0) ax.set_xlabel(couple[0]) ax.set_ylabel(couple[1]) ax.set_zlabel('Z axis') plt.show() additional_dictionary = {'A_5':{'reaction':'H2O2 + M = 2OH + M','our_value':np.log(4.99999e8),'hong_value':np.log(5.60e8)}, 'A_6':{'reaction':'OH + H2O2 = H2O + HO2','our_value':np.log(5624842396127.52),'hong_value':np.log(6.93e12)}, 'A_7':{'reaction': 'OH + HO2 = H2O + O2' , 'our_value':np.log(16646221572429.6),'hong_value':np.log(1.82e13)}, 'A_8':{'reaction':'2HO2 = H2O2 + O2','our_value':np.log(806831822530.157),'hong_value':np.log(3.17e12)}, 'A_11':{'reaction':'2OH = H2O + O','our_value':np.log(1730749579423.63),'hong_value':np.log(2.355e12)}, 'Sigma_1':{'reaction':'sigma H2O2','our_value':-.03846,'hong_value':0}, 'Sigma_2':{'reaction':'sigma_HO2','our_value':.0721,'hong_value':0}} additional_dictionary = {'A_5':{'reaction':'H2O2 + M = 2OH + M','our_value':np.log(4.99999e8),'hong_value':np.log(5.60e8)}, 'A_6':{'reaction':'OH + H2O2 = H2O + HO2','our_value':np.log(5917630773605.197),'hong_value':np.log(6.93e12)}, 'A_7':{'reaction': 'OH + HO2 = H2O + O2' , 'our_value':np.log(18236369573049.9),'hong_value':np.log(1.82e13)}, 'A_8':{'reaction':'2HO2 = H2O2 + O2','our_value':np.log(863643827140.3533),'hong_value':np.log(3.17e12)}, 'A_11':{'reaction':'2OH = H2O + O','our_value':np.log(1734217478483.0261),'hong_value':np.log(2.355e12)}, 'Sigma_1':{'reaction':'sigma H2O2','our_value':-.03846,'hong_value':0}, 'Sigma_2':{'reaction':'sigma_HO2','our_value':.0721,'hong_value':0}} error_dictonary = {'A_5':{'reaction':'H2O2 + M = 2OH + M','our_value':None,'hong_value':0}, 'A_6':{'reaction':'OH + H2O2 = H2O + HO2','our_value':np.log(5624842396127.52),'hong_value':0}, 'A_7':{'reaction': 'OH + HO2 = H2O + O2' , 'our_value':np.log(16646221572429.6),'hong_value':0}, 'A_8':{'reaction':'2HO2 = H2O2 + O2','our_value':np.log(806831822530.157),'hong_value':0}, 'A_11':{'reaction':'2OH = H2O + O','our_value':np.log(1730749579423.63),'hong_value':0}, 'Sigma_1':{'reaction':'sigma H2O2','our_value':-.03846,'hong_value':0}, 'Sigma_2':{'reaction':'sigma_HO2','our_value':.0721,'hong_value':0}} Z = rv.pdf(pos) plt.figure() levels = [.65,.95,.99] #contour = plt.contour(X, Y, Z, levels, colors='k') #plt.clabel(contour, colors = 'k', fmt = '%2.1f', fontsize=12) # plt.colorbar(contour_filled) plt.contour(X,Y,Z) plt.xlabel(couple[0]) plt.ylabel(couple[1]) # # plt.figure() # # Z_test = mlab.bivariate_normal(X, Y,np.sqrt(covariance_couple),np.sqrt(covariance_couple),mu_x,mu_y) # z1 = mlab.bivariate_normal(0, 1 * np.sqrt(covariance_couple), np.sqrt(covariance_couple), np.sqrt(covariance_couple),mu_x,mu_y) # z2 = mlab.bivariate_normal(0, 2 * np.sqrt(covariance_couple), np.sqrt(covariance_couple), np.sqrt(covariance_couple),mu_x,mu_y) # z3 = mlab.bivariate_normal(0, 3 * np.sqrt(covariance_couple), np.sqrt(covariance_couple), 
np.sqrt(covariance_couple),mu_x,mu_y) # ##plot Gaussian: # im = plt.imshow(Z_test,interpolation='bilinear', origin='lower', # extent=(-50,50,-50,50),cmap=cm.gray) ##Plot contours at whatever z values we want: # CS = plt.contour(Z_test, [z1, z2, z3], origin='lower', extent=(-50,50,-50,50),colors='red') if bool(additional_dictionary): plt.xlabel(additional_dictionary[couple[0]]['reaction']) plt.ylabel(additional_dictionary[couple[1]]['reaction']) x_error = (additional_dictionary[couple[0]]['hong_value'])*(error_dictonary[couple[0]]['hong_value']) print(x_error,'this is the x error') y_error = (additional_dictionary[couple[1]]['hong_value'])*(error_dictonary[couple[1]]['hong_value']) print(y_error,'this is the y error') plt.errorbar(additional_dictionary[couple[0]]['hong_value'],additional_dictionary[couple[1]]['hong_value'],xerr=x_error,yerr=y_error) plt.scatter(additional_dictionary[couple[0]]['hong_value'],additional_dictionary[couple[1]]['hong_value'],zorder=4,label='Hong Values From Table') plt.scatter(additional_dictionary[couple[0]]['our_value'],additional_dictionary[couple[1]]['our_value'],zorder=4,marker='x',label='MSI Values') plt.legend() if bool(joint_data_csv): df2 = pd.read_csv(joint_data_csv) #plt.figure() plt.scatter(df2[couple[0]], df2[couple[1]]) plt.savefig(self.working_directory+'/'+couple[0]+'_'+couple[1]+'_distribution'+'_.pdf',bbox_inches='tight') def plotting_physical_model_parameter_distributions(self, paramter_list, shock_tube_instance, optimized_X, original_experimental_conditions, T_uncertainty=.005, P_uncertainty=.01, X_uncertainty=.025, directory_to_save_images='', experiments_want_to_plot_data_from=[]): if bool(experiments_want_to_plot_data_from)==False: experiments_want_to_plot_data_from = np.arange(0,len(self.exp_dict_list_optimized)) try: all_parameters = shock_tube_instance.posterior_diag_df['parameter'].tolist() except: all_parameters = shock_tube_instance.prior_diag_df['parameter'].tolist() parameter_groups = ['T','P','Time'] #print(all_parameters) list_of_species = [] for parameter in all_parameters: if parameter[0] == 'X': list_of_species.append(parameter.split('_')[1]) output = [] for x in list_of_species: if x not in output: output.append(x) parameter_groups = parameter_groups + output for parameter in parameter_groups: temp_list = [] parameter_counter = 0 for i,p in enumerate(all_parameters): if parameter == 'T': if p[0] == 'T' and p[1] != 'i': yaml_file = int(p.split('_')[2]) if parameter_counter in experiments_want_to_plot_data_from: temp_list.append(optimized_X[i][0]) prior_sigma=T_uncertainty parameter_counter+=1 elif parameter == 'Time': if p[0] == 'T' and p[1] == 'i': yaml_file = int(p.split('_')[3]) if parameter_counter in experiments_want_to_plot_data_from: temp_list.append(optimized_X[i][0]) prior_sigma=T_uncertainty parameter_counter+=1 elif parameter == 'P': if p[0] == 'P': yaml_file = int(p.split('_')[2]) pressure_original = original_experimental_conditions[yaml_file]['pressure'] #temp_list.append(temp_original*np.exp(optimized_X[i]) - temp_original) if parameter_counter in experiments_want_to_plot_data_from: temp_list.append(optimized_X[i][0]) prior_sigma = P_uncertainty parameter_counter+=1 elif parameter =='H2O': if p[0] == 'X' and p[2:5] == 'H2O' and p[5]== '_': yaml_file = int(p.split('_')[3]) specie_original = original_experimental_conditions[yaml_file]['conditions']['H2O'] if parameter_counter in experiments_want_to_plot_data_from: temp_list.append(optimized_X[i][0]) prior_sigma=X_uncertainty parameter_counter+=1 elif parameter 
=='H2O2': if p[0] == 'X' and p[2:6] == 'H2O2' and p[6]== '_': yaml_file = int(p.split('_')[3]) specie_original = original_experimental_conditions[yaml_file]['conditions']['H2O2'] if parameter_counter in experiments_want_to_plot_data_from: temp_list.append(optimized_X[i][0]) prior_sigma=X_uncertainty parameter_counter+=1 elif parameter =='O2': if p[0] == 'X' and p[2:4] == 'O2' and p[4]== '_': yaml_file = int(p.split('_')[3]) specie_original = original_experimental_conditions[yaml_file]['conditions']['O2'] if parameter_counter in experiments_want_to_plot_data_from: temp_list.append(optimized_X[i][0]) prior_sigma=X_uncertainty parameter_counter+=1 elif parameter =='H': if p[0] == 'X' and p[2:3] == 'H' and p[3]== '_': yaml_file = int(p.split('_')[3]) specie_original = original_experimental_conditions[yaml_file]['conditions']['H'] if parameter_counter in experiments_want_to_plot_data_from: temp_list.append(optimized_X[i][0]) prior_sigma=X_uncertainty parameter_counter+=1 elif parameter =='CH4': if p[0] == 'X' and p[2:5] == 'CH4' and p[5]== '_': yaml_file = int(p.split('_')[3]) specie_original = original_experimental_conditions[yaml_file]['conditions']['CH4'] if parameter_counter in experiments_want_to_plot_data_from: temp_list.append(optimized_X[i][0]) prior_sigma=X_uncertainty parameter_counter+=1 else: parameter_counter+=1 plt.figure() mu2=0 sigma2=prior_sigma n, bins, patches=plt.hist(temp_list,bins='auto',density=True,color='g') (mu, sigma) = norm.fit(temp_list) #y = mlab.normpdf( bins, mu, sigma) y = norm.pdf(bins,mu,sigma) x = np.linspace(mu - 3*sigma, mu + 3*sigma, 100) l = plt.plot(bins, y, 'b--', linewidth=2) plt.plot(x, stats.norm.pdf(x, mu, sigma),'b') x2 = np.linspace(mu2 - 3*sigma2, mu2 + 3*sigma2, 100) plt.plot(x2, stats.norm.pdf(x2, mu2, sigma2),'r') #plot plt.xlabel(parameter) #plt.ylabel('Probability') plt.title(r'$\mathrm{Histogram\ of\ physical\ model\ parameter:}\ \mu=%.3f,\ \sigma=%.3f$' %(mu, sigma)) plt.grid(True) #plt.savefig(directory_to_save_images+'/'+'Including Experiments_'+ str(experiments_want_to_plot_data_from)+parameter+'_.pdf',dpi=1000,bbox_inches='tight') def difference_plotter(self, paramter_list, optimized_cti_file='', pdf_distribution_file=''): all_parameters = self.shock_tube_instance.posterior_diag_df['parameter'].tolist() df = self.shock_tube_instance.posterior_diag_df gas_optimized = ct.Solution(optimized_cti_file) for parameter in paramter_list: indx = all_parameters.index(parameter) variance = df['value'][indx] letter,number = parameter.split('_') number = int(number) A=gas_optimized.reaction(number).rate.pre_exponential_factor n=gas_optimized.reaction(number).rate.temperature_exponent Ea=gas_optimized.reaction(number).rate.activation_energy if letter =='A': mu = np.log(A*1000) sigma = math.sqrt(variance) sigma = sigma if letter == 'n': mu = n sigma = math.sqrt(variance) #sigma = sigma/2 if letter == 'Ea': mu=Ea/1000/4.184 sigma = math.sqrt(variance) sigma = sigma*ct.gas_constant/(1000*4.184) #sigma = sigma/2 x = np.linspace(mu - 6*sigma, mu + 6*sigma, 100) #plt.figure() #plt.plot(x, stats.norm.pdf(x, mu, sigma)) # plt.xlabel(parameter) # plt.ylabel('pdf') # plt.savefig(self.working_directory+'/'+parameter+'_distribution'+'_.pdf',bbox_inches='tight') if bool(pdf_distribution_file): df2 = pd.read_csv(pdf_distribution_file) #temp = np.log(np.exp(df2[parameter].values)/9.33e13) #plt.plot(temp,df2['pdf_'+parameter]) interp_y = np.interp(df2[parameter],x,stats.norm.pdf(x, mu, sigma)) plt.figure() plt.plot(df2[parameter],interp_y) 
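# --- Illustrative sketch (not part of difference_plotter; the "external" pdf
# below is generated on the spot, whereas the real routine reads it from
# pdf_distribution_file) --------------------------------------------------------
# difference_plotter interpolates the analytic normal pdf onto the x-grid of an
# externally supplied distribution with np.interp and then reports relative
# differences; this standalone helper shows only that comparison step.
def _example_pdf_comparison():
    import numpy as np
    from scipy import stats
    mu, sigma = 0.0, 1.0
    x_ref = np.linspace(-3.0, 3.0, 50)           # stand-in for df2[parameter]
    pdf_ref = stats.norm.pdf(x_ref, 0.05, 1.02)  # stand-in external pdf
    x = np.linspace(mu - 6*sigma, mu + 6*sigma, 100)
    interp_y = np.interp(x_ref, x, stats.norm.pdf(x, mu, sigma))
    y_shift = (pdf_ref - interp_y)/pdf_ref       # relative difference in pdf
    return x_ref, y_shift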
plt.plot(df2[parameter],df2['pdf_'+parameter]) interp_x = np.interp(df2['pdf_'+parameter],stats.norm.pdf(x,mu,sigma),x) y_shift = np.divide((df2['pdf_'+parameter] - interp_y),df2['pdf_'+parameter]) x_shift = np.divide((df2[parameter] - interp_x),df2[parameter]) plt.figure() plt.title('Percent Difference In Y') plt.plot(y_shift) plt.xlabel(parameter) plt.figure() plt.plot(x_shift) plt.title('Percent Difference In X') plt.xlabel(parameter) def plotting_histograms_of_MSI_simulations(self,experiments_want_to_plot_data_from=[],bins='auto',directory_to_save_images=''): s_shape = self.S_matrix.shape[1] if self.k_target_value_S_matrix.any(): target_values_for_s = self.k_target_value_S_matrix s_shape = s_shape+target_values_for_s.shape[0] y_shape = self.y_matrix.shape[0] difference = y_shape-s_shape y_values = self.y_matrix[0:difference,0] Y_values = self.Y_matrix[0:difference,0] self.lengths_of_experimental_data() #plotting_Y Histagrams if bool(experiments_want_to_plot_data_from): y_values = [] Y_values = [] start = 0 stop = 0 for x in range(len(self.simulation_lengths_of_experimental_data)): for y in range(len(self.simulation_lengths_of_experimental_data[x])): stop = self.simulation_lengths_of_experimental_data[x][y] + start if x in experiments_want_to_plot_data_from: temp = self.Y_matrix[start:stop,:] Y_values.append(temp) temp2 = self.y_matrix[start:stop,:] y_values.append(temp2) start = start + self.simulation_lengths_of_experimental_data[x][y] else: start = start + self.simulation_lengths_of_experimental_data[x][y] Y_values = np.vstack((Y_values)) y_values = np.vstack((y_values)) plt.figure() plt.subplot(2,2,1) n, bins2, patches = plt.hist(Y_values,bins=bins ,align='mid') min_value = min(Y_values) max_value=max(Y_values) plt.xlim([min_value,max_value]) plt.xlabel('Y') plt.suptitle('Including Experiments_'+ str(experiments_want_to_plot_data_from), fontsize=10) plt.subplot(2,2,2) plt.hist(y_values,bins=bins,align='mid') plt.xlabel('y') plt.subplot(2,2,3) plt.hist(Y_values,bins=bins,density=True,align='mid') plt.xlabel('Y') plt.ylabel('normalized') plt.subplot(2,2,4) plt.hist(y_values,bins=bins,density=True,align='mid') plt.xlabel('y') plt.ylabel('normalized') plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5) plt.savefig(directory_to_save_images+'/'+'Including Experiments_'+ str(experiments_want_to_plot_data_from)+'_Yy_hist_4.pdf',dpi=1000,bbox_inches='tight') #plotting two fold plots plt.figure() plt.subplot(2,1,1) plt.title('Including Experiments_'+ str(experiments_want_to_plot_data_from)) n, bins2, patches = plt.hist(Y_values,bins=bins ,align='mid') plt.xlabel('Y') #plt.xlim([-1,1]) plt.subplot(2,1,2) plt.hist(y_values,bins=bins,align='mid') plt.xlabel('y') plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5) plt.savefig(directory_to_save_images+'/'+'Including Experiments_'+ str(experiments_want_to_plot_data_from)+'_Yy_hist_2.pdf',dpi=1000,bbox_inches='tight') #plotting normalized values plt.figure() plt.subplot(2,1,1) n, bins2, patches = plt.hist(Y_values,bins=bins ,align='mid',density=True) plt.xlabel('Y') plt.title('Including Experiments_'+ str(experiments_want_to_plot_data_from)) plt.ylabel('normalized') plt.subplot(2,1,2) plt.hist(y_values,bins=bins,align='mid',density=True) plt.xlabel('y') plt.ylabel('normalized') plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5) plt.savefig(directory_to_save_images+'/'+'Including Experiments_'+ 
str(experiments_want_to_plot_data_from)+'_Yy_hist_2_normalized.pdf',dpi=1000,bbox_inches='tight') else: plt.figure() plt.subplot(2,2,1) min_value = min(Y_values) max_value=max(Y_values) plt.xlim([min_value,max_value]) n, bins2, patches = plt.hist(Y_values,bins=bins ,align='mid') #plt.xlim([min_value,max_value]) plt.xlabel('Y') plt.suptitle("Including All Experiments", fontsize=10) plt.subplot(2,2,2) plt.hist(y_values,bins=bins,align='mid') plt.xlabel('y') plt.subplot(2,2,3) plt.hist(Y_values,bins=bins,density=True,align='mid') plt.xlabel('Y') plt.ylabel('normalized') plt.subplot(2,2,4) plt.hist(y_values,bins=bins,density=True,align='mid') plt.xlabel('y') plt.ylabel('normalized') plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5) plt.savefig(directory_to_save_images+'/'+'Including all Experiments'+'_Yy_hist_4.pdf',dpi=1000,bbox_inches='tight') #plotting two fold plots plt.figure() plt.subplot(2,1,1) min_value = np.min(Y_values) max_value = np.max(Y_values) plt.title('Including all Experiments') n, bins2, patches = plt.hist(Y_values,bins=bins ,align='mid') plt.xlabel('Y') #plt.xlim([-1,1]) plt.subplot(2,1,2) plt.hist(y_values,bins=bins,align='mid') plt.xlabel('y') plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5) plt.savefig(directory_to_save_images+'/'+'Including all Experiments'+'_Yy_hist_2.pdf',dpi=1000,bbox_inches='tight') #plotting normalized values plt.figure() plt.subplot(2,1,1) min_value = np.min(Y_values) max_value = np.max(Y_values) n, bins2, patches = plt.hist(Y_values,bins=bins ,align='mid',density=True) plt.xlabel('Y') plt.title('Including all Experiments') plt.ylabel('normalized') plt.subplot(2,1,2) plt.hist(y_values,bins=bins,align='mid',density=True) plt.xlabel('y') plt.ylabel('normalized') plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5) plt.savefig(directory_to_save_images+'/'+'Including all Experiments'+'_Yy_hist_2_normalized.pdf',dpi=1000,bbox_inches='tight') def plotting_T_and_time_full_simulation(self,experiments_want_to_plot_data_from=[],directory_to_save_images=''): init_temperature_list = [] for exp in self.exp_dict_list_original: init_temperature_list.append(exp['simulation'].temperature) tottal_times = [] temperature_list_full_simulation = [] for i,exp in enumerate(self.exp_dict_list_optimized): single_exp_dict = [] temp_list_single_experiment = [] observable_counter=0 for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables']): if observable == None: continue if observable in exp['mole_fraction_observables']: single_exp_dict.append(exp['experimental_data'][observable_counter]['Time']*1e3) interploated_temp = np.interp(exp['experimental_data'][observable_counter]['Time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature']) temp_list_single_experiment.append(interploated_temp) observable_counter+=1 if observable in exp['concentration_observables']: single_exp_dict.append(exp['experimental_data'][observable_counter]['Time']*1e3) interploated_temp = np.interp(exp['experimental_data'][observable_counter]['Time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature']) temp_list_single_experiment.append(interploated_temp) #print(interploated_temp.shape ,exp['experimental_data'][observable_counter]['Time'].shape ) observable_counter+=1 if 'perturbed_coef' in exp.keys(): wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths'] for k,wl 
in enumerate(wavelengths): single_exp_dict.append(exp['absorbance_experimental_data'][k]['time']*1e3) interploated_temp = np.interp(exp['absorbance_experimental_data'][k]['time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature']) temp_list_single_experiment.append(interploated_temp) #print(interploated_temp.shape, exp['absorbance_experimental_data'][k]['time'].shape ) tottal_times.append(single_exp_dict) temperature_list_full_simulation.append(temp_list_single_experiment) if bool(experiments_want_to_plot_data_from)==False: experiments_want_to_plot_data_from = np.arange(0,len(self.exp_dict_list_optimized)) else: experiments_want_to_plot_data_from = experiments_want_to_plot_data_from y_values = [] Y_values = [] temperature_values_list = [] time_values_list = [] full_temperature_range_list = [] start = 0 stop = 0 for x in range(len(self.simulation_lengths_of_experimental_data)): single_experiment_Y =[] single_experiment_y =[] single_experiment_temperature_values_list=[] single_experiment_time_values_list=[] single_experiment_full_temp_range=[] for y in range(len(self.simulation_lengths_of_experimental_data[x])): stop = self.simulation_lengths_of_experimental_data[x][y] + start if x in experiments_want_to_plot_data_from: temp = self.Y_matrix[start:stop,:] single_experiment_Y.append(temp) temp2 = self.y_matrix[start:stop,:] single_experiment_y.append(temp2) intial_temp = np.array(([init_temperature_list[x]]*temp.shape[0])) intial_temp = intial_temp.reshape((intial_temp.shape[0],1)) single_experiment_temperature_values_list.append(intial_temp) time_values = tottal_times[x][y].values time_values = time_values.reshape((time_values.shape[0],1)) single_experiment_time_values_list.append(time_values) temperature_full = temperature_list_full_simulation[x][y] temperature_full = temperature_full.reshape((temperature_full.shape[0],1)) single_experiment_full_temp_range.append(temperature_full) start = start + self.simulation_lengths_of_experimental_data[x][y] else: start = start + self.simulation_lengths_of_experimental_data[x][y] Y_values.append(single_experiment_Y) y_values.append(single_experiment_y) temperature_values_list.append(single_experiment_temperature_values_list) time_values_list.append(single_experiment_time_values_list) full_temperature_range_list.append(single_experiment_full_temp_range) x = np.arange(10) ys = [i+x+(i*x)**2 for i in range(10)] colors=cm.rainbow(np.linspace(0,1,30)) #colors = cm.rainbow(np.linspace(0, 1, len(ys))) plt.figure() for x,simulation_list in enumerate(Y_values): for y,lst in enumerate(Y_values[x]): plt.subplot(2,1,1) plt.xlabel('Y') plt.ylabel('Time') plt.scatter(Y_values[x][y],time_values_list[x][y],label='Experiment_'+str(x)+'_observable_'+str(y),color=colors[x]) plt.legend(ncol=2,bbox_to_anchor=(1, 0.5)) plt.subplot(2,1,2) plt.scatter(y_values[x][y],time_values_list[x][y],color=colors[x]) plt.xlabel('y') plt.ylabel('Time') plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5) plt.savefig(directory_to_save_images+'/'+'Including Experiments_'+ str(experiments_want_to_plot_data_from)+'_Yy_vs_time.pdf',dpi=1000,bbox_inches='tight') plt.figure() for x,simulation_list in enumerate(Y_values): for y,lst in enumerate(Y_values[x]): plt.subplot(2,1,1) plt.scatter(Y_values[x][y],temperature_values_list[x][y],label='Experiment_'+str(x)+'_observable_'+str(y),color=colors[x]) plt.legend(ncol=2,bbox_to_anchor=(1, 0.5)) plt.xlabel('Y') plt.ylabel('Initial Simulation Temp') plt.subplot(2,1,2) 
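# --- Illustrative sketch (not part of the plotting loop above; arrays are
# made up) ----------------------------------------------------------------------
# The loops in this method map each experimental time axis onto the
# simulation's temperature history with np.interp before plotting residuals
# against temperature; this helper shows that interpolation step in isolation.
def _example_interp_temperature():
    import numpy as np
    sim_time = np.linspace(0.0, 2.0e-3, 200)             # s
    sim_temperature = 1000.0 + 150.0*(sim_time/2.0e-3)   # fabricated history
    exp_time = np.array([1.0e-4, 5.0e-4, 1.5e-3])        # experimental samples
    return np.interp(exp_time, sim_time, sim_temperature)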
plt.scatter(y_values[x][y],temperature_values_list[x][y],color=colors[x]) plt.xlabel('y') plt.ylabel('Initial Simulation Temp') plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5) plt.savefig(directory_to_save_images+'/'+'Including Experiments_'+ str(experiments_want_to_plot_data_from)+'_Yy_vs_init_temp.pdf',dpi=1000,bbox_inches='tight') plt.figure() for x,simulation_list in enumerate(Y_values): for y,lst in enumerate(Y_values[x]): plt.subplot(2,1,1) plt.scatter(Y_values[x][y],full_temperature_range_list[x][y],label='Experiment_'+str(x)+'_observable_'+str(y),color=colors[x]) plt.legend(ncol=2,bbox_to_anchor=(1, 0.5)) plt.xlabel('Y') plt.ylabel('Temperature') plt.subplot(2,1,2) plt.scatter(y_values[x][y],full_temperature_range_list[x][y],color=colors[x]) plt.xlabel('y') plt.ylabel('Temperature') plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5) plt.savefig(directory_to_save_images+'/'+'Including Experiments_'+ str(experiments_want_to_plot_data_from)+'_Yy_vs_temperature.pdf',dpi=1000,bbox_inches='tight') return #working here def plotting_histograms_of_individual_observables(self,experiments_want_to_plot_data_from,bins='auto',directory_to_save_images='',csv=''): s_shape = self.S_matrix.shape[1] if self.k_target_value_S_matrix.any(): target_values_for_s = self.k_target_value_S_matrix s_shape = s_shape+target_values_for_s.shape[0] y_shape = self.y_matrix.shape[0] difference = y_shape-s_shape y_values = self.y_matrix[0:difference,0] Y_values = self.Y_matrix[0:difference,0] self.lengths_of_experimental_data() #plotting_Y Histagrams #obserervable_list = [] observables_tottal = [] for i,exp in enumerate(self.exp_dict_list_optimized): observable_counter=0 single_experiment = [] for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables']): if observable == None: continue if observable in exp['mole_fraction_observables']: single_experiment.append(observable) observable_counter+=1 if observable in exp['concentration_observables']: single_experiment.append(observable) observable_counter+=1 if 'perturbed_coef' in exp.keys(): wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths'] for k,wl in enumerate(wavelengths): single_experiment.append(wl) observables_tottal.append(single_experiment) observables_flatten = [item for sublist in observables_tottal for item in sublist] from collections import OrderedDict observables_unique = list(OrderedDict.fromkeys(observables_flatten)) empty_nested_observable_list_Y = [[] for x in range(len(observables_unique))] empty_nested_observable_list_y = [[] for x in range(len(observables_unique))] if bool(experiments_want_to_plot_data_from): y_values = [] Y_values = [] start = 0 stop = 0 for x in range(len(self.simulation_lengths_of_experimental_data)): for y in range(len(self.simulation_lengths_of_experimental_data[x])): current_observable = observables_tottal[x][y] stop = self.simulation_lengths_of_experimental_data[x][y] + start if x in experiments_want_to_plot_data_from: temp = self.Y_matrix[start:stop,:] empty_nested_observable_list_Y[observables_unique.index(current_observable)].append(temp) temp2 = self.y_matrix[start:stop,:] empty_nested_observable_list_y[observables_unique.index(current_observable)].append(temp2) start = start + self.simulation_lengths_of_experimental_data[x][y] else: start = start + self.simulation_lengths_of_experimental_data[x][y] for i,observable in enumerate(empty_nested_observable_list_Y): if bool(observable): Y_values = 
np.vstack((observable)) y_values = np.vstack((empty_nested_observable_list_y[i])) plt.figure() plt.subplot(2,2,1) n, bins2, patches = plt.hist(Y_values,bins=bins ,align='mid') min_value = min(Y_values) max_value=max(Y_values) plt.xlim([min_value,max_value]) plt.xlabel('Y') plt.suptitle(str(observables_unique[i])+'_Including Experiments_'+ str(experiments_want_to_plot_data_from), fontsize=10) plt.subplot(2,2,2) plt.hist(y_values,bins=bins,align='mid') plt.xlabel('y') plt.subplot(2,2,3) plt.hist(Y_values,bins=bins,density=True,align='mid') plt.xlabel('Y') plt.ylabel('normalized') plt.subplot(2,2,4) plt.hist(y_values,bins=bins,density=True,align='mid') plt.xlabel('y') plt.ylabel('normalized') plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5) #plt.savefig(directory_to_save_images+'/'+str(observables_unique[i])+'_Including Experiments_'+ str(experiments_want_to_plot_data_from)+'_Yy_hist_4.pdf',dpi=1000,bbox_inches='tight') #plotting two fold plots plt.figure() plt.subplot(2,1,1) plt.title(str(observables_unique[i])+'_Including Experiments_'+ str(experiments_want_to_plot_data_from)) n, bins2, patches = plt.hist(Y_values,bins=bins ,align='mid') plt.xlabel('Y') #plt.xlim([-1,1]) plt.subplot(2,1,2) plt.hist(y_values,bins=bins,align='mid') plt.xlabel('y') plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5) #plt.savefig(directory_to_save_images+'/'+str(observables_unique[i])+'_Including Experiments_'+ str(experiments_want_to_plot_data_from)+'_Yy_hist_2.pdf',dpi=1000,bbox_inches='tight') #plotting normalized values plt.figure() plt.subplot(2,1,1) n, bins2, patches = plt.hist(Y_values,bins=bins ,align='mid',density=True) plt.xlabel('Y') plt.title(str(observables_unique[i])+'_Including Experiments_'+ str(experiments_want_to_plot_data_from)) plt.ylabel('normalized') plt.subplot(2,1,2) plt.hist(y_values,bins=bins,align='mid',density=True) plt.xlabel('y') plt.ylabel('normalized') plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5) #plotting two fold plots plt.figure() plt.subplot(2,1,1) plt.title(str(observables_unique[i])+'_Including Experiments_'+ str(experiments_want_to_plot_data_from)) n, bins2, patches = plt.hist(Y_values,bins=bins ,align='mid') plt.xlabel('Y') #plt.xlim([-1,1]) plt.subplot(2,1,2) plt.hist(y_values,bins=bins,align='mid') plt.xlabel('y') plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5) #plt.savefig(directory_to_save_images+'/'+str(observables_unique[i])+'_Including Experiments_'+ str(experiments_want_to_plot_data_from)+'_Yy_hist_2.pdf',dpi=1000,bbox_inches='tight') #plotting normalized values plt.figure() plt.subplot(2,1,1) n, bins2, patches = plt.hist(Y_values,bins=bins ,align='mid',density=True) plt.xlabel('Y') plt.title(str(observables_unique[i])+'_Including Experiments_'+ str(experiments_want_to_plot_data_from)) plt.ylabel('normalized') plt.subplot(2,1,2) plt.hist(y_values,bins=bins,align='mid',density=True) plt.xlabel('y') plt.ylabel('normalized') plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5) # plt.savefig(directory_to_save_images+'/'+str(observables_unique[i])+'_Including Experiments_'+ str(experiments_want_to_plot_data_from)+'_Yy_hist_2_normalized.pdf',dpi=1000,bbox_inches='tight') def plotting_histograms_of_individual_observables_for_paper_2(self,experiments_want_to_plot_data_from,experiments_want_to_plot_data_from_2=[],bins='auto',directory_to_save_images='',csv=''): s_shape = 
self.S_matrix.shape[1] if self.k_target_value_S_matrix.any(): target_values_for_s = self.k_target_value_S_matrix s_shape = s_shape+target_values_for_s.shape[0] y_shape = self.y_matrix.shape[0] difference = y_shape-s_shape y_values = self.y_matrix[0:difference,0] Y_values = self.Y_matrix[0:difference,0] self.lengths_of_experimental_data() #plotting_Y Histagrams #edit this part #obserervable_list = [] observables_tottal = [] for i,exp in enumerate(self.exp_dict_list_optimized): observable_counter=0 single_experiment = [] for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables']): if observable == None: continue if observable in exp['mole_fraction_observables']: single_experiment.append(observable) observable_counter+=1 if observable in exp['concentration_observables']: single_experiment.append(observable) observable_counter+=1 if 'perturbed_coef' in exp.keys(): wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths'] for k,wl in enumerate(wavelengths): single_experiment.append(wl) observables_tottal.append(single_experiment) observables_flatten = [item for sublist in observables_tottal for item in sublist] from collections import OrderedDict observables_unique = list(OrderedDict.fromkeys(observables_flatten)) empty_nested_observable_list_Y = [[] for x in range(len(observables_unique))] empty_nested_observable_list_y = [[] for x in range(len(observables_unique))] empty_nested_observable_list_Z = [[] for x in range(len(observables_unique))] empty_nested_observable_list_Y_2 = [[] for x in range(len(observables_unique))] empty_nested_observable_list_y_2 = [[] for x in range(len(observables_unique))] empty_nested_observable_list_Z_2 = [[] for x in range(len(observables_unique))] if bool(experiments_want_to_plot_data_from): print('inside here') y_values = [] Y_values = [] start = 0 stop = 0 for x in range(len(self.simulation_lengths_of_experimental_data)): for y in range(len(self.simulation_lengths_of_experimental_data[x])): current_observable = observables_tottal[x][y] stop = self.simulation_lengths_of_experimental_data[x][y] + start if x in experiments_want_to_plot_data_from: temp = self.Y_matrix[start:stop,:] empty_nested_observable_list_Y[observables_unique.index(current_observable)].append(temp) temp2 = self.y_matrix[start:stop,:] empty_nested_observable_list_y[observables_unique.index(current_observable)].append(temp2) temp3 = self.z_matrix[start:stop,:] empty_nested_observable_list_Z[observables_unique.index(current_observable)].append(temp3) start = start + self.simulation_lengths_of_experimental_data[x][y] else: start = start + self.simulation_lengths_of_experimental_data[x][y] if bool(experiments_want_to_plot_data_from_2): start = 0 stop = 0 for x in range(len(self.simulation_lengths_of_experimental_data)): for y in range(len(self.simulation_lengths_of_experimental_data[x])): current_observable = observables_tottal[x][y] stop = self.simulation_lengths_of_experimental_data[x][y] + start if x in experiments_want_to_plot_data_from_2: temp = self.Y_matrix[start:stop,:] empty_nested_observable_list_Y_2[observables_unique.index(current_observable)].append(temp) temp2 = self.y_matrix[start:stop,:] empty_nested_observable_list_y_2[observables_unique.index(current_observable)].append(temp2) temp3 = self.z_matrix[start:stop,:] empty_nested_observable_list_Z_2[observables_unique.index(current_observable)].append(temp3) start = start + self.simulation_lengths_of_experimental_data[x][y] else: start = start + 
self.simulation_lengths_of_experimental_data[x][y] import matplotlib.gridspec as gridspec fig = plt.figure(figsize=(6,7)) gs = gridspec.GridSpec(3, 1,height_ratios=[3,3,3],wspace=0.1,hspace=0.1) gs.update(wspace=0, hspace=0.7) ax1=plt.subplot(gs[0]) ax2=plt.subplot(gs[1]) ax3=plt.subplot(gs[2]) for i,observable in enumerate(empty_nested_observable_list_Y): new_Y_test_2 =[] if bool(observable): Y_values = np.vstack((observable)) y_values = np.vstack((empty_nested_observable_list_y[i])) z_values = np.vstack((empty_nested_observable_list_Z[i])) indecies = np.argwhere(z_values > 100) new_y_test = copy.deepcopy(Y_values) new_y_test = np.delete(new_y_test,indecies) # print(indecies.shape) # print(indecies) # print(i) if bool(experiments_want_to_plot_data_from_2) and bool(empty_nested_observable_list_y_2[i]): Y_values_2 = np.vstack((empty_nested_observable_list_Y_2[i])) y_values_2 = np.vstack((empty_nested_observable_list_y_2[i])) z_values_2 = np.vstack((empty_nested_observable_list_Z_2[i])) indecies_2 = np.argwhere(z_values_2 > 100) new_Y_test_2 = copy.deepcopy(Y_values_2) new_Y_test_2 = np.delete(new_Y_test_2,indecies_2) #plt.figure() #plt.subplot(1,1,1) #plt.subplots(3,1,1) #n, bins2, patches = plt.hist(Y_values,bins=bins ,align='mid',density=True,label='Hong Experiments') test = [-0.06402874, -0.05325865, -0.04248857, -0.03171848, -0.02094839, -0.0101783, 0.00059179, 0.01136188, 0.02213197, 0.03290205, 0.04367214, 0.05444223, 0.06521232, 0.07598241, 0.0867525, 0.09752259, 0.10829268] if i ==0: #n, bins2, patches = plt.hist(new_y_test,bins=bins ,align='mid',density=True,label='Hong Experiments') #ax1.hist(new_y_test,bins=bins ,align='mid',density=True,label='Hong Experiments') n,bins_test_1,patches = ax1.hist(new_y_test,bins=bins ,align='mid',density=True,label='#1') ax1.set_xlim(left=-.3, right=.3, emit=True, auto=False) ax1.set_ylim(top=15,bottom=0) ax1.set_xlabel('Y') ax1.set_xlabel('Relative Difference') #plt.title(str(observables_unique[i])+'_Including Experiments_'+ str(experiments_want_to_plot_data_from)) ax1.set_title(str(observables_unique[i])) ax1.set_ylabel('pdf') #plt.ylabel('normalized') if bool(experiments_want_to_plot_data_from_2): # plt.hist(Y_values_2,bins=bins ,align='mid',density=True,alpha=0.5,label='Extra Experiments') #ax1.hist(new_Y_test_2,bins=bins ,align='mid',density=True,alpha=0.5,label='Extra Experiments') ax1.hist(new_Y_test_2,bins=bins ,align='mid',density=True,alpha=0.5,label='#2') if bool(csv): df = pd.read_csv(csv) #ax1.hist(df[str(observables_unique[i])+'_Y'].dropna()*-1,bins=bins ,align='mid',density=True,alpha=0.5,label='Hong vs. 
Hong') #ax1.hist(df[str(observables_unique[i])+'_Y'].dropna()*-1,bins=bins ,align='mid',density=True,alpha=0.5,label='#3') ax1.legend() if i ==1: #n, bins2, patches = plt.hist(new_y_test,bins=bins ,align='mid',density=True,label='Hong Experiments') n,bins_test_2,patches = ax2.hist(new_y_test,bins=bins ,align='mid',density=True,label='Hong Experiments') ax2.set_xlim(left=-.08, right=.08, emit=True, auto=False) ax2.set_ylim(top=28,bottom=0) ax2.set_xlabel('Y') ax2.set_xlabel('Relative Difference') #plt.title(str(observables_unique[i])+'_Including Experiments_'+ str(experiments_want_to_plot_data_from)) #ax2.set_title(str(observables_unique[i])) ax2.set_title(r'H$_2$O') ax2.set_ylabel('pdf') #plt.ylabel('normalized') if bool(experiments_want_to_plot_data_from_2): # plt.hist(Y_values_2,bins=bins ,align='mid',density=True,alpha=0.5,label='Extra Experiments') ax2.hist(new_Y_test_2,bins=bins ,align='mid',density=True,alpha=0.5,label='Extra Experiments') if bool(csv): df = pd.read_csv(csv) #ax2.hist(df[str(observables_unique[i])+'_Y'].dropna()*-1,bins=bins ,align='mid',density=True,alpha=0.5,label='Hong vs. Hong') if i ==3: #n, bins2, patches = plt.hist(new_y_test,bins=bins ,align='mid',density=True,label='Hong Experiments') n,bins_test_3,patches = ax3.hist(new_y_test,bins=bins ,align='mid',density=True,label='Hong Experiments') ax3.set_xlim(left=-.15, right=.15, emit=True, auto=False) ax3.set_ylim(top=12,bottom=0) ax3.set_xlabel('Y') ax3.set_xlabel('Relative Difference') ax3.set_ylabel('pdf') #plt.title(str(observables_unique[i])+'_Including Experiments_'+ str(experiments_want_to_plot_data_from)) ax3.set_title(str(observables_unique[i])) ax3.set_title('Absorbance '+ str(observables_unique[i])+ ' nm') #plt.ylabel('normalized') if bool(experiments_want_to_plot_data_from_2): print('inside here') print(experiments_want_to_plot_data_from_2) # plt.hist(Y_values_2,bins=bins ,align='mid',density=True,alpha=0.5,label='Extra Experiments') ax3.hist(new_Y_test_2,bins=bins ,align='mid',density=True,alpha=0.5,label='Extra Experiments') if bool(csv): df = pd.read_csv(csv) #ax3.hist(df[str(observables_unique[i])+'_Y'].dropna()*-1,bins=bins ,align='mid',density=True,alpha=0.5,label='Hong vs. 
Hong') plt.savefig(directory_to_save_images+'/'+str(observables_unique[i])+'_Including Experiments_'+ str(experiments_want_to_plot_data_from)+'_Yy_hist_2_normalized.pdf',dpi=1000,bbox_inches='tight') def plotting_histograms_of_individual_observables_for_paper(self,experiments_want_to_plot_data_from,experiments_want_to_plot_data_from_2=[],bins='auto',directory_to_save_images='',csv=''): s_shape = self.S_matrix.shape[1] if self.k_target_value_S_matrix.any(): target_values_for_s = self.k_target_value_S_matrix s_shape = s_shape+target_values_for_s.shape[0] y_shape = self.y_matrix.shape[0] difference = y_shape-s_shape y_values = self.y_matrix[0:difference,0] Y_values = self.Y_matrix[0:difference,0] self.lengths_of_experimental_data() #plotting_Y Histagrams #obserervable_list = [] observables_tottal = [] for i,exp in enumerate(self.exp_dict_list_optimized): observable_counter=0 single_experiment = [] for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables']): if observable == None: continue if observable in exp['mole_fraction_observables']: single_experiment.append(observable) observable_counter+=1 if observable in exp['concentration_observables']: single_experiment.append(observable) observable_counter+=1 if 'perturbed_coef' in exp.keys(): wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths'] for k,wl in enumerate(wavelengths): single_experiment.append(wl) observables_tottal.append(single_experiment) observables_flatten = [item for sublist in observables_tottal for item in sublist] from collections import OrderedDict observables_unique = list(OrderedDict.fromkeys(observables_flatten)) empty_nested_observable_list_Y = [[] for x in range(len(observables_unique))] empty_nested_observable_list_y = [[] for x in range(len(observables_unique))] empty_nested_observable_list_Z = [[] for x in range(len(observables_unique))] empty_nested_observable_list_Y_2 = [[] for x in range(len(observables_unique))] empty_nested_observable_list_y_2 = [[] for x in range(len(observables_unique))] empty_nested_observable_list_Z_2 = [[] for x in range(len(observables_unique))] if bool(experiments_want_to_plot_data_from): print('inside here') y_values = [] Y_values = [] start = 0 stop = 0 for x in range(len(self.simulation_lengths_of_experimental_data)): for y in range(len(self.simulation_lengths_of_experimental_data[x])): current_observable = observables_tottal[x][y] stop = self.simulation_lengths_of_experimental_data[x][y] + start if x in experiments_want_to_plot_data_from: temp = self.Y_matrix[start:stop,:] empty_nested_observable_list_Y[observables_unique.index(current_observable)].append(temp) temp2 = self.y_matrix[start:stop,:] empty_nested_observable_list_y[observables_unique.index(current_observable)].append(temp2) temp3 = self.z_matrix[start:stop,:] empty_nested_observable_list_Z[observables_unique.index(current_observable)].append(temp3) start = start + self.simulation_lengths_of_experimental_data[x][y] else: start = start + self.simulation_lengths_of_experimental_data[x][y] if bool(experiments_want_to_plot_data_from_2): start = 0 stop = 0 for x in range(len(self.simulation_lengths_of_experimental_data)): for y in range(len(self.simulation_lengths_of_experimental_data[x])): current_observable = observables_tottal[x][y] stop = self.simulation_lengths_of_experimental_data[x][y] + start if x in experiments_want_to_plot_data_from_2: temp = self.Y_matrix[start:stop,:] empty_nested_observable_list_Y_2[observables_unique.index(current_observable)].append(temp) temp2 = 
self.y_matrix[start:stop,:] empty_nested_observable_list_y_2[observables_unique.index(current_observable)].append(temp2) temp3 = self.z_matrix[start:stop,:] empty_nested_observable_list_Z_2[observables_unique.index(current_observable)].append(temp3) start = start + self.simulation_lengths_of_experimental_data[x][y] else: start = start + self.simulation_lengths_of_experimental_data[x][y] import matplotlib.gridspec as gridspec for i,observable in enumerate(empty_nested_observable_list_Y): if bool(observable): Y_values = np.vstack((observable)) y_values = np.vstack((empty_nested_observable_list_y[i])) z_values = np.vstack((empty_nested_observable_list_Z[i])) indecies = np.argwhere(z_values > 100) new_y_test = copy.deepcopy(Y_values) new_y_test = np.delete(new_y_test,indecies) if bool(experiments_want_to_plot_data_from_2) and bool(empty_nested_observable_list_y_2[i]): Y_values_2 = np.vstack((empty_nested_observable_list_Y_2[i])) y_values_2 = np.vstack((empty_nested_observable_list_y_2[i])) z_values_2 = np.vstack((empty_nested_observable_list_Z_2[i])) indecies_2 = np.argwhere(z_values_2 > 100) new_Y_test_2 = copy.deepcopy(Y_values_2) new_Y_test_2 = np.delete(new_Y_test_2,indecies_2) plt.figure() plt.subplot(1,1,1) #plt.subplots(3,1,1) #n, bins2, patches = plt.hist(Y_values,bins=bins ,align='mid',density=True,label='Hong Experiments') n, bins2, patches = plt.hist(new_y_test,bins=bins ,align='mid',density=True,label='Hong Experiments') plt.xlabel('Y') #plt.title(str(observables_unique[i])+'_Including Experiments_'+ str(experiments_want_to_plot_data_from)) plt.title(str(observables_unique[i])) #plt.ylabel('normalized') if bool(experiments_want_to_plot_data_from_2): # plt.hist(Y_values_2,bins=bins ,align='mid',density=True,alpha=0.5,label='Extra Experiments') plt.hist(new_Y_test_2,bins=bins ,align='mid',density=True,alpha=0.5,label='Extra Experiments') if bool(csv): df = pd.read_csv(csv) plt.hist(df[str(observables_unique[i])+'_Y'].dropna()*-1,bins=bins ,align='mid',density=True,alpha=0.5,label='Hong vs. 
Hong') plt.legend() return def plotting_T_and_time_full_simulation_individual_observables(self,experiments_want_to_plot_data_from,bins='auto',directory_to_save_images=''): #working_here observables_tottal = [] for i,exp in enumerate(self.exp_dict_list_optimized): observable_counter=0 single_experiment = [] for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables']): if observable == None: continue if observable in exp['mole_fraction_observables']: single_experiment.append(observable) observable_counter+=1 if observable in exp['concentration_observables']: single_experiment.append(observable) observable_counter+=1 if 'perturbed_coef' in exp.keys(): wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths'] for k,wl in enumerate(wavelengths): single_experiment.append(wl) observables_tottal.append(single_experiment) observables_flatten = [item for sublist in observables_tottal for item in sublist] from collections import OrderedDict observables_unique = list(OrderedDict.fromkeys(observables_flatten)) empty_nested_observable_list_Y = [[] for x in range(len(observables_unique))] empty_nested_observable_list_y = [[] for x in range(len(observables_unique))] empty_nested_observable_list_time = [[] for x in range(len(observables_unique))] empty_nested_observable_list_temperature = [[] for x in range(len(observables_unique))] empty_nested_observable_list_initial_temperature = [[] for x in range(len(observables_unique))] if bool(experiments_want_to_plot_data_from): start = 0 stop = 0 for x in range(len(self.simulation_lengths_of_experimental_data)): for y in range(len(self.simulation_lengths_of_experimental_data[x])): current_observable = observables_tottal[x][y] stop = self.simulation_lengths_of_experimental_data[x][y] + start if x in experiments_want_to_plot_data_from: temp = self.Y_matrix[start:stop,:] empty_nested_observable_list_Y[observables_unique.index(current_observable)].append(temp) temp2 = self.y_matrix[start:stop,:] empty_nested_observable_list_y[observables_unique.index(current_observable)].append(temp2) start = start + self.simulation_lengths_of_experimental_data[x][y] else: start = start + self.simulation_lengths_of_experimental_data[x][y] for i,exp in enumerate(self.exp_dict_list_optimized): observable_counter=0 for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables']): if observable == None: continue if i in experiments_want_to_plot_data_from: if observable in exp['mole_fraction_observables']: empty_nested_observable_list_time[observables_unique.index(observable)].append(exp['experimental_data'][observable_counter]['Time']*1e3) interploated_temp = np.interp(exp['experimental_data'][observable_counter]['Time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature']) empty_nested_observable_list_temperature[observables_unique.index(observable)].append(interploated_temp) empty_nested_observable_list_initial_temperature[observables_unique.index(observable)].append([self.exp_dict_list_original[i]['simulation'].temperature]*np.shape(interploated_temp)[0]) observable_counter+=1 if observable in exp['concentration_observables']: empty_nested_observable_list_time[observables_unique.index(observable)].append(exp['experimental_data'][observable_counter]['Time']*1e3) interploated_temp = np.interp(exp['experimental_data'][observable_counter]['Time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature']) 
empty_nested_observable_list_temperature[observables_unique.index(observable)].append(interploated_temp) empty_nested_observable_list_initial_temperature[observables_unique.index(observable)].append([self.exp_dict_list_original[i]['simulation'].temperature]*np.shape(interploated_temp)[0]) observable_counter+=1 if i in experiments_want_to_plot_data_from: if 'perturbed_coef' in exp.keys(): wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths'] for k,wl in enumerate(wavelengths): empty_nested_observable_list_time[observables_unique.index(wl)].append(exp['absorbance_experimental_data'][k]['time']*1e3) interploated_temp = np.interp(exp['absorbance_experimental_data'][k]['time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature']) empty_nested_observable_list_temperature[observables_unique.index(wl)].append(interploated_temp) empty_nested_observable_list_initial_temperature[observables_unique.index(wl)].append([self.exp_dict_list_original[i]['simulation'].temperature]*np.shape(interploated_temp)[0]) #print(interploated_temp.shape, exp['absorbance_experimental_data'][k]['time'].shape ) x = np.arange(10) ys = [i+x+(i*x)**2 for i in range(10)] colors=cm.rainbow(np.linspace(0,1,30)) #colors = cm.rainbow(np.linspace(0, 1, len(ys))) for x,observable in enumerate(empty_nested_observable_list_Y): if bool(observable): plt.figure() for y,array in enumerate(empty_nested_observable_list_Y[x]): plt.subplot(2,1,1) plt.xlabel('Y') plt.ylabel('Time') plt.scatter(empty_nested_observable_list_Y[x][y],empty_nested_observable_list_time[x][y],label='Experiment_'+str(x)+'_observable_'+str(y),color=colors[x]) #plt.legend(ncol=2,bbox_to_anchor=(1, 0.5)) plt.title(observables_unique[x]) plt.subplot(2,1,2) plt.scatter(empty_nested_observable_list_y[x][y],empty_nested_observable_list_time[x][y],color=colors[x]) plt.xlabel('y') plt.ylabel('Time') plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5) plt.savefig(directory_to_save_images+'/'+str(observables_unique[x])+'_Including Experiments_'+str(experiments_want_to_plot_data_from)+'_Yy_vs_time.pdf',dpi=1000,bbox_inches='tight') for x,observable in enumerate(empty_nested_observable_list_Y): if bool(observable): plt.figure() for y,array in enumerate(empty_nested_observable_list_Y[x]): plt.subplot(2,1,1) plt.scatter(empty_nested_observable_list_Y[x][y],empty_nested_observable_list_temperature[x][y],label='Experiment_'+str(x)+'_observable_'+str(y),color=colors[x]) #plt.legend(ncol=2,bbox_to_anchor=(1, 0.5)) plt.xlabel('Y') plt.ylabel('Temperature') plt.title(observables_unique[x]) plt.subplot(2,1,2) plt.scatter(empty_nested_observable_list_y[x][y],empty_nested_observable_list_temperature[x][y],color=colors[x]) plt.xlabel('y') plt.ylabel('Temperature') plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5) plt.savefig(directory_to_save_images+'/'+str(observables_unique[x])+'_Including Experiments_'+str(experiments_want_to_plot_data_from)+'_Yy_vs_temperature.pdf',dpi=1000,bbox_inches='tight') for x,observable in enumerate(empty_nested_observable_list_Y): if bool(observable): plt.figure() for y,array in enumerate(empty_nested_observable_list_Y[x]): plt.subplot(2,1,1) plt.scatter(empty_nested_observable_list_Y[x][y],empty_nested_observable_list_initial_temperature[x][y],label='Experiment_'+str(x)+'_observable_'+str(y),color=colors[x]) #plt.legend(ncol=2,bbox_to_anchor=(1, 0.5)) plt.xlabel('Y') plt.ylabel('Initial Temperature') plt.title(observables_unique[x]) 
plt.subplot(2,1,2) plt.scatter(empty_nested_observable_list_y[x][y],empty_nested_observable_list_initial_temperature[x][y],color=colors[x]) plt.xlabel('y') plt.ylabel('Initial Temperature') plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5) plt.savefig(directory_to_save_images+'/'+str(observables_unique[x])+'_Including Experiments_'+str(experiments_want_to_plot_data_from)+'_Yy_vs_initial_temperature.pdf',dpi=1000,bbox_inches='tight') def plotting_T_and_time_full_simulation_individual_observables_for_paper(self,experiments_want_to_plot_data_from, bins='auto', directory_to_save_images='',csv='',experiments_want_to_plot_data_from_2=[]): #working_here observables_tottal = [] for i,exp in enumerate(self.exp_dict_list_optimized): observable_counter=0 single_experiment = [] for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables']): if observable == None: continue if observable in exp['mole_fraction_observables']: single_experiment.append(observable) observable_counter+=1 if observable in exp['concentration_observables']: single_experiment.append(observable) observable_counter+=1 if 'perturbed_coef' in exp.keys(): wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths'] for k,wl in enumerate(wavelengths): single_experiment.append(wl) observables_tottal.append(single_experiment) observables_flatten = [item for sublist in observables_tottal for item in sublist] from collections import OrderedDict observables_unique = list(OrderedDict.fromkeys(observables_flatten)) empty_nested_observable_list_Y = [[] for x in range(len(observables_unique))] empty_nested_observable_list_y = [[] for x in range(len(observables_unique))] empty_nested_observable_list_time = [[] for x in range(len(observables_unique))] empty_nested_observable_list_temperature = [[] for x in range(len(observables_unique))] empty_nested_observable_list_initial_temperature = [[] for x in range(len(observables_unique))] if bool(experiments_want_to_plot_data_from): start = 0 stop = 0 for x in range(len(self.simulation_lengths_of_experimental_data)): for y in range(len(self.simulation_lengths_of_experimental_data[x])): current_observable = observables_tottal[x][y] stop = self.simulation_lengths_of_experimental_data[x][y] + start if x in experiments_want_to_plot_data_from: temp = self.Y_matrix[start:stop,:] empty_nested_observable_list_Y[observables_unique.index(current_observable)].append(temp) temp2 = self.y_matrix[start:stop,:] empty_nested_observable_list_y[observables_unique.index(current_observable)].append(temp2) start = start + self.simulation_lengths_of_experimental_data[x][y] else: start = start + self.simulation_lengths_of_experimental_data[x][y] for i,exp in enumerate(self.exp_dict_list_optimized): observable_counter=0 for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables']): if observable == None: continue if i in experiments_want_to_plot_data_from: if observable in exp['mole_fraction_observables']: empty_nested_observable_list_time[observables_unique.index(observable)].append(exp['experimental_data'][observable_counter]['Time']*1e3) interploated_temp = np.interp(exp['experimental_data'][observable_counter]['Time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature']) empty_nested_observable_list_temperature[observables_unique.index(observable)].append(interploated_temp) 
empty_nested_observable_list_initial_temperature[observables_unique.index(observable)].append([self.exp_dict_list_original[i]['simulation'].temperature]*np.shape(interploated_temp)[0]) observable_counter+=1 if observable in exp['concentration_observables']: empty_nested_observable_list_time[observables_unique.index(observable)].append(exp['experimental_data'][observable_counter]['Time']*1e3) interploated_temp = np.interp(exp['experimental_data'][observable_counter]['Time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature']) empty_nested_observable_list_temperature[observables_unique.index(observable)].append(interploated_temp) empty_nested_observable_list_initial_temperature[observables_unique.index(observable)].append([self.exp_dict_list_original[i]['simulation'].temperature]*np.shape(interploated_temp)[0]) observable_counter+=1 if i in experiments_want_to_plot_data_from: if 'perturbed_coef' in exp.keys(): wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths'] for k,wl in enumerate(wavelengths): empty_nested_observable_list_time[observables_unique.index(wl)].append(exp['absorbance_experimental_data'][k]['time']*1e3) interploated_temp = np.interp(exp['absorbance_experimental_data'][k]['time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature']) empty_nested_observable_list_temperature[observables_unique.index(wl)].append(interploated_temp) empty_nested_observable_list_initial_temperature[observables_unique.index(wl)].append([self.exp_dict_list_original[i]['simulation'].temperature]*np.shape(interploated_temp)[0]) #################################################################################################################################################################################################################### empty_nested_observable_list_Y_2 = [[] for x in range(len(observables_unique))] empty_nested_observable_list_y_2 = [[] for x in range(len(observables_unique))] empty_nested_observable_list_time_2 = [[] for x in range(len(observables_unique))] empty_nested_observable_list_temperature_2 = [[] for x in range(len(observables_unique))] empty_nested_observable_list_initial_temperature_2 = [[] for x in range(len(observables_unique))] if bool(experiments_want_to_plot_data_from_2): start = 0 stop = 0 for x in range(len(self.simulation_lengths_of_experimental_data)): for y in range(len(self.simulation_lengths_of_experimental_data[x])): current_observable = observables_tottal[x][y] stop = self.simulation_lengths_of_experimental_data[x][y] + start if x in experiments_want_to_plot_data_from_2: temp = self.Y_matrix[start:stop,:] empty_nested_observable_list_Y_2[observables_unique.index(current_observable)].append(temp) temp2 = self.y_matrix[start:stop,:] empty_nested_observable_list_y_2[observables_unique.index(current_observable)].append(temp2) start = start + self.simulation_lengths_of_experimental_data[x][y] else: start = start + self.simulation_lengths_of_experimental_data[x][y] for i,exp in enumerate(self.exp_dict_list_optimized): observable_counter=0 for j,observable in enumerate(exp['mole_fraction_observables'] + exp['concentration_observables']): if observable == None: continue if i in experiments_want_to_plot_data_from_2: if observable in exp['mole_fraction_observables']: empty_nested_observable_list_time_2[observables_unique.index(observable)].append(exp['experimental_data'][observable_counter]['Time']*1e3) interploated_temp = 
np.interp(exp['experimental_data'][observable_counter]['Time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature']) empty_nested_observable_list_temperature_2[observables_unique.index(observable)].append(interploated_temp) empty_nested_observable_list_initial_temperature_2[observables_unique.index(observable)].append([self.exp_dict_list_original[i]['simulation'].temperature]*np.shape(interploated_temp)[0]) observable_counter+=1 if observable in exp['concentration_observables']: empty_nested_observable_list_time_2[observables_unique.index(observable)].append(exp['experimental_data'][observable_counter]['Time']*1e3) interploated_temp = np.interp(exp['experimental_data'][observable_counter]['Time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature']) empty_nested_observable_list_temperature_2[observables_unique.index(observable)].append(interploated_temp) empty_nested_observable_list_initial_temperature_2[observables_unique.index(observable)].append([self.exp_dict_list_original[i]['simulation'].temperature]*np.shape(interploated_temp)[0]) observable_counter+=1 if i in experiments_want_to_plot_data_from_2: if 'perturbed_coef' in exp.keys(): wavelengths = self.parsed_yaml_list[i]['absorbanceCsvWavelengths'] for k,wl in enumerate(wavelengths): empty_nested_observable_list_time_2[observables_unique.index(wl)].append(exp['absorbance_experimental_data'][k]['time']*1e3) interploated_temp = np.interp(exp['absorbance_experimental_data'][k]['time'],exp['simulation'].timeHistories[0]['time'],exp['simulation'].timeHistories[0]['temperature']) empty_nested_observable_list_temperature_2[observables_unique.index(wl)].append(interploated_temp) empty_nested_observable_list_initial_temperature_2[observables_unique.index(wl)].append([self.exp_dict_list_original[i]['simulation'].temperature]*np.shape(interploated_temp)[0]) ################################################################################################################################################################################################################### x = np.arange(10) ys = [i+x+(i*x)**2 for i in range(10)] colors=cm.rainbow(np.linspace(0,1,30)) #colors = cm.rainbow(np.linspace(0, 1, len(ys))) for x,observable in enumerate(empty_nested_observable_list_Y): length_of_2nd_list = len(empty_nested_observable_list_Y_2[x]) if bool(observable): plt.figure() if bool(csv): df = pd.read_csv(csv) plt.scatter(df[str(observables_unique[x])+'_Y'].dropna()*-1,df[str(observables_unique[x])+'_time'].dropna()*1e3,alpha=0.5,color='k',zorder=4) for y,array in enumerate(empty_nested_observable_list_Y[x]): plt.subplot(1,1,1) plt.xlabel('Y') plt.ylabel('Time') plt.scatter(empty_nested_observable_list_Y[x][y],empty_nested_observable_list_time[x][y],label='Experiment_'+str(x)+'_observable_'+str(y),color='blue') if y<length_of_2nd_list: plt.scatter(empty_nested_observable_list_Y_2[x][y],empty_nested_observable_list_time_2[x][y],color='red',zorder=4) #plt.legend(ncol=2,bbox_to_anchor=(1, 0.5)) plt.title(observables_unique[x]) # plt.subplot(2,1,2) # plt.scatter(empty_nested_observable_list_y[x][y],empty_nested_observable_list_time[x][y],color=colors[x]) # plt.xlabel('y') # plt.ylabel('Time') # plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.5, hspace=.5) #plt.savefig(directory_to_save_images+'/'+str(observables_unique[x])+'_Including Experiments_'+str(experiments_want_to_plot_data_from)+'_Yy_vs_time.pdf',dpi=1000,bbox_inches='tight') for x,observable in 
enumerate(empty_nested_observable_list_Y): length_of_2nd_list = len(empty_nested_observable_list_Y_2[x]) if bool(observable): plt.figure() if bool(csv): df =
pd.read_csv(csv)
pandas.read_csv
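To make the completed call above concrete, here is a minimal, self-contained sketch of pandas.read_csv followed by the dropna()*-1 transform used in the plotting code; the file name and column label are hypothetical placeholders, not taken from the source.

import pandas as pd

# Hypothetical comparison file with one column of Y residuals per observable.
df = pd.read_csv("hong_comparison.csv")
y_column = df["OH_Y"].dropna() * -1   # drop missing rows and flip the sign, as in the histogram call
print(y_column.head())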
""" Tests dtype specification during parsing for all of the parsers defined in parsers.py """ from io import StringIO import os import numpy as np import pytest from pandas.errors import ParserWarning from pandas.core.dtypes.dtypes import CategoricalDtype import pandas as pd from pandas import Categorical, DataFrame, Index, MultiIndex, Series, Timestamp, concat import pandas._testing as tm @pytest.mark.parametrize("dtype", [str, object]) @pytest.mark.parametrize("check_orig", [True, False]) def test_dtype_all_columns(all_parsers, dtype, check_orig): # see gh-3795, gh-6607 parser = all_parsers df = DataFrame( np.random.rand(5, 2).round(4), columns=list("AB"), index=["1A", "1B", "1C", "1D", "1E"], ) with tm.ensure_clean("__passing_str_as_dtype__.csv") as path: df.to_csv(path) result = parser.read_csv(path, dtype=dtype, index_col=0) if check_orig: expected = df.copy() result = result.astype(float) else: expected = df.astype(str) tm.assert_frame_equal(result, expected) def test_dtype_all_columns_empty(all_parsers): # see gh-12048 parser = all_parsers result = parser.read_csv(StringIO("A,B"), dtype=str) expected = DataFrame({"A": [], "B": []}, index=[], dtype=str) tm.assert_frame_equal(result, expected) def test_dtype_per_column(all_parsers): parser = all_parsers data = """\ one,two 1,2.5 2,3.5 3,4.5 4,5.5""" expected = DataFrame( [[1, "2.5"], [2, "3.5"], [3, "4.5"], [4, "5.5"]], columns=["one", "two"] ) expected["one"] = expected["one"].astype(np.float64) expected["two"] = expected["two"].astype(object) result = parser.read_csv(StringIO(data), dtype={"one": np.float64, 1: str}) tm.assert_frame_equal(result, expected) def test_invalid_dtype_per_column(all_parsers): parser = all_parsers data = """\ one,two 1,2.5 2,3.5 3,4.5 4,5.5""" with pytest.raises(TypeError, match="data type [\"']foo[\"'] not understood"): parser.read_csv(StringIO(data), dtype={"one": "foo", 1: "int"}) @pytest.mark.parametrize( "dtype", [ "category", CategoricalDtype(), {"a": "category", "b": "category", "c": CategoricalDtype()}, ], ) def test_categorical_dtype(all_parsers, dtype): # see gh-10153 parser = all_parsers data = """a,b,c 1,a,3.4 1,a,3.4 2,b,4.5""" expected = DataFrame( { "a": Categorical(["1", "1", "2"]), "b": Categorical(["a", "a", "b"]), "c": Categorical(["3.4", "3.4", "4.5"]), } ) actual = parser.read_csv(StringIO(data), dtype=dtype) tm.assert_frame_equal(actual, expected) @pytest.mark.parametrize("dtype", [{"b": "category"}, {1: "category"}]) def test_categorical_dtype_single(all_parsers, dtype): # see gh-10153 parser = all_parsers data = """a,b,c 1,a,3.4 1,a,3.4 2,b,4.5""" expected = DataFrame( {"a": [1, 1, 2], "b": Categorical(["a", "a", "b"]), "c": [3.4, 3.4, 4.5]} ) actual = parser.read_csv(StringIO(data), dtype=dtype) tm.assert_frame_equal(actual, expected) def test_categorical_dtype_unsorted(all_parsers): # see gh-10153 parser = all_parsers data = """a,b,c 1,b,3.4 1,b,3.4 2,a,4.5""" expected = DataFrame( { "a":
Categorical(["1", "1", "2"])
pandas.Categorical
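For reference, a minimal sketch of pandas.Categorical, the constructor used to build the expected frame in the dtype tests above; the values mirror the test data.

import pandas as pd

cats = pd.Categorical(["1", "1", "2"])   # unordered categorical over the string values
print(cats.categories)                    # Index(['1', '2'], dtype='object')
print(cats.codes)                         # [0 0 1]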
# General import os import math from datetime import datetime, timedelta import dateutil.parser import warnings from numpy.core.numeric import Inf # Data Science import pandas as pd import numpy as np from scipy.sparse import base from sklearn import linear_model from sklearn.metrics import r2_score, mean_absolute_error from sklearn.model_selection import KFold import scipy # Plotting import seaborn as sns import matplotlib.pyplot as plt import matplotlib.dates as mdates from src.visualization import visualize class Calibration(): def __init__(self, start_time, end_time, data_dir="../../data/", study="utx000", study_suffix="ux_s20", **kwargs): """ Initiates the calibration object Inputs: - start_time: datetime object with precision to the minute specifying the event START time - end_time: datetime object with precision to the minute specifying the event END time - data_dir: path to data directory - study: string of the study name - study_suffix: string of the suffix associated with the study Keyword Arguments: - resample_rate: integer corresponding to the resample rate in minutes - default is 1 minute - timestamp: datetime specifying the start time as reported by the laptop """ self.set_start_time(start_time) self.set_end_time(end_time) if "ref_date" in kwargs.keys(): self.date = kwargs["ref_date"].date().strftime("%m%d%Y") else: self.date = end_time.date().strftime("%m%d%Y") self.data_dir = data_dir self.study = study self.suffix = study_suffix self.set_time_offset(**kwargs) # kwargs if "resample_rate" in kwargs.keys(): self.set_resample_rate(kwargs["resample_rate"]) else: self.set_resample_rate(1) # set to default if "beacons" in kwargs.keys(): self.set_beacons(kwargs["beacons"]) else: self.set_beacons(kwargs["beacons"]) # data ## beacon print("IMPORTING BEACON DATA") if self.study == "utx000": self.set_utx000_beacon(**kwargs) else: self.set_wcwh_beacon(**kwargs) ## refererence print("IMPORTING REFERENCE DATA") self.ref = {} self.set_ref(**kwargs) ## calibration self.offsets = {} self.lms = {} # experiment detail setters def set_start_time(self, t): """sets the calibration start time""" self.start_time = t def set_end_time(self, t): """sets the calibration end_time""" self.end_time = t def set_resample_rate(self, rate): """sets the class resample rate""" self.resample_rate = rate def set_time_offset(self, **kwargs): """ Sets the offset time for measurements because the laptop time is incorrect Keyword Arguments: - timestamp: datetime specifying the start time as reported by the laptop """ if "version" in kwargs.keys(): v = kwargs["version"] else: v = "" if "timestamp" in kwargs.keys(): self.t_offset = self.start_time - kwargs["timestamp"] else: try: # attempting to read pm_mass file to get the starting timestamp recorded by the computer temp = pd.read_csv(f"{self.data_dir}calibration/pm_mass_{self.date}{v}.csv",skiprows=6,parse_dates={"timestamp": ["Date","Start Time"]},infer_datetime_format=True) self.t_offset = self.start_time - temp["timestamp"].iloc[0] except FileNotFoundError: print("No file found - try providing a `timestamp` argument instead") self.t_offset = 0 def set_beacons(self, beacon_list): """sets the list of beacons to be considered""" self.beacons = beacon_list # reference setters def set_ref(self,ref_species=["pm_number","pm_mass","no2","no","co2","tvoc","co","t","rh"],**kwargs): """ Sets the reference data Inputs: ref_species: list of strings specifying the reference species data to import """ for species in ref_species: if species in ["pm_number", "pm_mass"]: 
self.set_pm_ref(species[3:],**kwargs) elif species == "no2": self.set_no2_ref(**kwargs) elif species == "co2": self.set_co2_ref(**kwargs) elif species == "no": self.set_no_ref(**kwargs) elif species == "tvoc" and len(self.beacon_data) > 1: self.set_tvoc_ref() elif species == "t" or species == "rh": self.set_trh_ref(**kwargs) else: self.set_zero_baseline(species=species) def set_zero_baseline(self,species="co"): """ Sets reference of species species to zero (clean) background Inputs: - species: string representing the pollutant species to save to the reference dictionary """ dts = pd.date_range(self.start_time,self.end_time,freq=f'{self.resample_rate}T') # timestamps between start and end df = pd.DataFrame(data=np.zeros(len(dts)),index=dts,columns=["concentration"]) # creating dummy dataframe df.index.rename("timestamp",inplace=True) self.ref[species] = df def set_pm_ref(self, concentration_type="mass",**kwargs): """ Sets the reference PM data Inputs: - concentration_type: string of either "mass" or "number" Returns a dataframe with columns PM1, PM2.5, and PM10 indexed by timestamp """ # import data and correct timestamp if "version" in kwargs.keys(): v = kwargs["version"] else: v = "" try: raw_data = pd.read_csv(f"{self.data_dir}calibration/pm_{concentration_type}_{self.date}{v}.csv",skiprows=6) except FileNotFoundError: print(f"File not found - {self.data_dir}calibration/pm_{concentration_type}_{self.date}{v}.csv") return df = raw_data.drop(['Sample #','Aerodynamic Diameter'],axis=1) date = df['Date'] sample_time = df['Start Time'] datetimes = [] for i in range(len(date)): datetimes.append(datetime.strptime(date[i] + ' ' + sample_time[i],'%m/%d/%y %H:%M:%S') + self.t_offset) df['timestamp'] = datetimes df.set_index(['timestamp'],inplace=True) df = df.iloc[:,:54] df.drop(['Date','Start Time'],axis=1,inplace=True) # convert all columns to numeric types for column in df.columns: df[column] = pd.to_numeric(df[column]) # correct for units if concentration_type == "mass": factor = 1000 else: factor = 1 # sum columns for particular size concentrations df['pm1'] = df.iloc[:,:10].sum(axis=1)*factor df['pm2p5'] = df.iloc[:,:23].sum(axis=1)*factor df['pm10'] = df.iloc[:,:42].sum(axis=1)*factor # resample if "window" in kwargs.keys(): window = kwargs["window"] else: window = 5 # defaults to window size of 5 df_resampled = df.resample(f"{self.resample_rate}T").mean().rolling(window=window,min_periods=1).mean().bfill() df_resampled = df_resampled[self.start_time:self.end_time] # setting for size in ["pm1","pm2p5","pm10"]: self.ref[f"{size}_{concentration_type}"] = pd.DataFrame(df_resampled[size]).rename(columns={size:"concentration"}) def set_co2_ref(self,**kwargs): """sets the reference CO2 data""" if "version" in kwargs.keys(): v = kwargs["version"] else: v = "" try: raw_data = pd.read_csv(f"{self.data_dir}calibration/co2_{self.date}{v}.csv",usecols=[0,1],names=["timestamp","concentration"]) except FileNotFoundError: print(f"File not found - {self.data_dir}calibration/co2_{self.date}{v}.csv") return raw_data["timestamp"] = pd.to_datetime(raw_data["timestamp"],yearfirst=True) raw_data.set_index("timestamp",inplace=True) raw_data.index += self.t_offset# = df.shift(periods=3) if "window" in kwargs.keys(): window = kwargs["window"] else: window = 5 # defaults to window size of 5 df = raw_data.resample(f"{self.resample_rate}T",closed="left").mean().rolling(window=window,min_periods=1).mean().bfill() self.ref["co2"] = df[self.start_time:self.end_time] def set_trh_ref(self,**kwargs): "sets the reference 
temperature and relative humidity" if "version" in kwargs.keys(): v = kwargs["version"] else: v = "" try: raw_data = pd.read_csv(f"../data/calibration/trh_{self.date}{v}.csv",skiprows=11, usecols=["Date","Time","Temp","%RH"],parse_dates=[["Date","Time"]],infer_datetime_format=True) except FileNotFoundError: print(f"File not found - {self.data_dir}calibration/trh_{self.date}{v}.csv") return raw_data.columns = ["timestamp","t_c","rh"] raw_data.dropna(inplace=True) raw_data["timestamp"] = pd.to_datetime(raw_data["timestamp"],yearfirst=False,dayfirst=True) raw_data.set_index("timestamp",inplace=True) if "window" in kwargs.keys(): window = kwargs["window"] else: window = 3 # defaults to window size of 3 df = raw_data.resample(f"{self.resample_rate}T",closed="left").mean().rolling(window=window,min_periods=1).mean().bfill() df = df[self.start_time:self.end_time] df_t =
pd.DataFrame(df["t_c"])
pandas.DataFrame
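A minimal sketch of the completed pattern above: wrapping a single column of the resampled reference data into its own one-column DataFrame. The index and values below are made-up placeholders.

import pandas as pd

df = pd.DataFrame({"t_c": [21.3, 21.5, 21.6], "rh": [40.1, 40.3, 40.2]},
                  index=pd.date_range("2020-01-01 09:00", periods=3, freq="T"))
df_t = pd.DataFrame(df["t_c"])            # one-column frame holding temperature only
print(df_t.columns.tolist())              # ['t_c']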
# Copyright (c) 2020, NVIDIA CORPORATION.
import operator
import re
from string import ascii_letters, digits

import numpy as np
import pandas as pd
import pytest

import cudf
from cudf.tests.utils import (
    DATETIME_TYPES,
    NUMERIC_TYPES,
    TIMEDELTA_TYPES,
    assert_eq,
    assert_exceptions_equal,
)


def _series_na_data():
    return [
        pd.Series([0, 1, 2, np.nan, 4, None, 6]),
        pd.Series(
            [0, 1, 2, np.nan, 4, None, 6],
            index=["q", "w", "e", "r", "t", "y", "u"],
            name="a",
        ),
        pd.Series([0, 1, 2, 3, 4]),
        pd.Series(["a", "b", "u", "h", "d"]),
        pd.Series([None, None, np.nan, None, np.inf, -np.inf]),
        pd.Series([]),
        pd.Series(
            [pd.NaT, pd.Timestamp("1939-05-27"),
pd.Timestamp("1940-04-25")
pandas.Timestamp
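A minimal sketch of pandas.Timestamp inside a Series, matching the NA-data fixture above: mixing pd.NaT with parsed timestamps yields a datetime64[ns] Series.

import pandas as pd

s = pd.Series([pd.NaT, pd.Timestamp("1939-05-27"), pd.Timestamp("1940-04-25")])
print(s.dtype)             # datetime64[ns]
print(s.isna().tolist())   # [True, False, False]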
import io import os import json import gc import pandas as pd import numpy as np from datetime import date, timedelta from fastapi import FastAPI, File, HTTPException import lightgbm as lgb from lightgbm import LGBMClassifier import matplotlib.pyplot as plt import joblib app = FastAPI( title="Home Credit Default Risk", description="""Obtain information related to probability of a client defaulting on loan.""", version="0.1.0", ) def calculate_years(days): """ Method used to calculate years based on date (today - quantity of days). Parameters: ----------------- days (int): Numbers of day to rest of today Returns: ----------------- years (int): Numbers of years """ today = date.today() initial_date = today - timedelta(abs(days)) years = today.year - initial_date.year - ((today.month, today.day) < (initial_date.month, initial_date.day)) return years ######################################################## # Columns to read on CSVs ######################################################## COLUMNS = [ "SK_ID_CURR", "AMT_INCOME_TOTAL", "CODE_GENDER", "DAYS_BIRTH", "DAYS_REGISTRATION", "DAYS_EMPLOYED", "AMT_CREDIT", "AMT_GOODS_PRICE", "EXT_SOURCE_2", "EXT_SOURCE_3", ] ######################################################## # Reading the csv ######################################################## df_clients_to_predict = pd.read_csv("datasets/df_clients_to_predict_20220221.csv") df_current_clients = pd.read_csv("datasets/df_current_clients_20220221.csv") df_current_clients["AGE"] = df_current_clients["DAYS_BIRTH"].apply(lambda x: calculate_years(x)) df_current_clients["YEARS_EMPLOYED"] = df_current_clients["DAYS_EMPLOYED"].apply(lambda x: calculate_years(x)) df_current_clients["EXT_SOURCE_2"] = df_current_clients["EXT_SOURCE_2"].round(3) df_current_clients["EXT_SOURCE_3"] = df_current_clients["EXT_SOURCE_3"].round(3) df_current_clients_by_target_repaid = df_current_clients[df_current_clients["TARGET"] == 0] df_current_clients_by_target_not_repaid = df_current_clients[df_current_clients["TARGET"] == 1] @app.get("/api/clients") async def clients_id(): """ EndPoint to get all clients id """ clients_id = df_clients_to_predict["SK_ID_CURR"].tolist() return {"clientsId": clients_id} @app.get("/api/clients/{id}") async def client_details(id: int): """ EndPoint to get client's detail """ clients_id = df_clients_to_predict["SK_ID_CURR"].tolist() if id not in clients_id: raise HTTPException(status_code=404, detail="client's id not found") else: # Filtering by client's id df_by_id = df_clients_to_predict[COLUMNS][df_clients_to_predict["SK_ID_CURR"] == id] idx = df_clients_to_predict[df_clients_to_predict["SK_ID_CURR"]==id].index[0] for col in df_by_id.columns: globals()[col] = df_by_id.iloc[0, df_by_id.columns.get_loc(col)] client = { "clientId" : int(SK_ID_CURR), "gender" : "Man" if int(CODE_GENDER) == 0 else "Woman", "age" : calculate_years(int(DAYS_BIRTH)), "antiquity" : calculate_years(int(DAYS_REGISTRATION)), "yearsEmployed" : calculate_years(int(DAYS_EMPLOYED)), "goodsPrice" : float(AMT_GOODS_PRICE), "credit" : float(AMT_CREDIT), "anualIncome" : float(AMT_INCOME_TOTAL), "source2" : float(EXT_SOURCE_2), "source3" : float(EXT_SOURCE_3), "shapPosition" : int(idx) } return client @app.get("/api/predictions/clients/{id}") async def predict(id: int): """ EndPoint to get the probability honor/compliance of a client """ clients_id = df_clients_to_predict["SK_ID_CURR"].tolist() if id not in clients_id: raise HTTPException(status_code=404, detail="client's id not found") else: # Loading the model model = 
joblib.load("models/model_20220220.pkl") threshold = 0.135 # Filtering by client's id df_prediction_by_id = df_clients_to_predict[df_clients_to_predict["SK_ID_CURR"] == id] df_prediction_by_id = df_prediction_by_id.drop(df_prediction_by_id.columns[[0, 1]], axis=1) # Predicting result_proba = model.predict_proba(df_prediction_by_id) y_prob = result_proba[:, 1] result = (y_prob >= threshold).astype(int) if (int(result[0]) == 0): result = "Yes" else: result = "No" return { "repay" : result, "probability0" : result_proba[0][0], "probability1" : result_proba[0][1], "threshold" : threshold } @app.get("/api/predictions/clients/shap/{id}") async def client_shap_df(id: int): """ EndPoint to return a df with all client's data """ clients_id = df_clients_to_predict["SK_ID_CURR"].tolist() if id not in clients_id: raise HTTPException(status_code=404, detail="client's id not found") else: # Filtering by client's id idx = df_clients_to_predict[df_clients_to_predict["SK_ID_CURR"]==id].index[0] client = df_clients_to_predict[df_clients_to_predict["SK_ID_CURR"] == id].drop(columns=["SK_ID_CURR", "AMT_INCOME_TOTAL"]) client = client.to_json(orient="records") return client @app.get("/api/statistics/ages") async def statistical_age(): """ EndPoint to get some statistics - ages """ ages_data_repaid = df_current_clients_by_target_repaid.groupby("AGE").size() ages_data_repaid = pd.DataFrame(ages_data_repaid).reset_index() ages_data_repaid.columns = ["AGE", "AMOUNT"] ages_data_repaid = ages_data_repaid.set_index("AGE").to_dict()["AMOUNT"] ages_data_not_repaid = df_current_clients_by_target_not_repaid.groupby("AGE").size() ages_data_not_repaid = pd.DataFrame(ages_data_not_repaid).reset_index() ages_data_not_repaid.columns = ["AGE", "AMOUNT"] ages_data_not_repaid = ages_data_not_repaid.set_index("AGE").to_dict()["AMOUNT"] return {"ages_repaid" : ages_data_repaid, "ages_not_repaid" : ages_data_not_repaid} @app.get("/api/statistics/yearsEmployed") async def statistical_years_employed(): """ EndPoint to get some statistics - years employed """ years_employed_data_repaid = df_current_clients_by_target_repaid.groupby("YEARS_EMPLOYED").size() years_employed_data_repaid = pd.DataFrame(years_employed_data_repaid).reset_index() years_employed_data_repaid.columns = ["YEARS_EMPLOYED", "AMOUNT"] years_employed_data_repaid = years_employed_data_repaid.set_index("YEARS_EMPLOYED").to_dict()["AMOUNT"] years_employed_data_not_repaid = df_current_clients_by_target_not_repaid.groupby("YEARS_EMPLOYED").size() years_employed_data_not_repaid =
pd.DataFrame(years_employed_data_not_repaid)
pandas.DataFrame
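A minimal sketch of the groupby-size pattern completed above, where per-group counts are wrapped back into a DataFrame and exported as a dict; the sample values are invented for illustration.

import pandas as pd

df = pd.DataFrame({"YEARS_EMPLOYED": [1, 1, 2, 3, 3, 3]})
counts = df.groupby("YEARS_EMPLOYED").size()        # Series of group sizes
counts = pd.DataFrame(counts).reset_index()         # back to a two-column frame
counts.columns = ["YEARS_EMPLOYED", "AMOUNT"]
print(counts.set_index("YEARS_EMPLOYED").to_dict()["AMOUNT"])   # {1: 2, 2: 1, 3: 3}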
import pandas as pd
import yfinance as yf


class SourceYahoo:
    def __init__(self, index, start_date, end_date, interval="1d"):
        self.index = index
        self.start_date = start_date
        self.end_date = end_date
        self.interval = interval

    def _download(self, start_date, end_date, interval):
        return yf.download(
            # index to fetch
            tickers=self.index,
            # fetch data from the start date
            start=start_date,
            # fetch data from the start date to the end date
            end=end_date,
            # fetch data by interval (including intraday if period < 60 days)
            # valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
            interval=interval,
            # adjust all OHLC automatically
            auto_adjust=True,
            # download pre/post regular market hours data
            prepost=True,
            # use threads for mass downloading? (True/False/Integer)
            threads=False,
            # proxy URL scheme use when downloading
            proxy=None
        )

    def _format_data(self, dataframe):
        dataframe = dataframe.reset_index()
        dataframe.columns = dataframe.columns.str.lower()
        dataframe = dataframe.rename(columns={'datetime': 'date'})
        dataframe['date'] = pd.to_datetime(dataframe['date'])
        dataframe['date'] = dataframe['date'].dt.tz_localize(None)
        return dataframe

    def _delta_date(self, actual_date, end_date):
        return
pd.Timedelta(7, "d")
pandas.Timedelta
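A minimal sketch of pandas.Timedelta, the object returned by _delta_date above: a fixed 7-day offset that can be added to timestamps.

import pandas as pd

week = pd.Timedelta(7, "d")
print(pd.Timestamp("2021-01-01") + week)   # 2021-01-08 00:00:00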
import requests import json import pandas as pd from functools import reduce def ig_get(base_url, endpoint_parameters, to_df = True): """ Send Request to Faceboook endpoint base_url: str url to point to. Consist of endpoint_base and client_id/page_id depending on endpoint to request endpoint_parameters: dict Parameters to include in the request to_df: bool flag to transform response to pd.DataFrame return: list or df depending on 'to_df' """ req = requests.get(base_url, endpoint_parameters) respond = json.loads(req.content) if to_df: respond = pd.DataFrame(respond['data']) return respond def ig_media_insight(insight_list, endpoint_parameters, metrics = 'engagement,impressions,reach,saved'): """ Loop over ig media posts to get deeper insight and convert to tidy df Source: https://towardsdatascience.com/discover-insights-from-your-instagram-business-account-with-facebook-graph-api-and-python-81d20ee2e751 insight_list: list of media, response from '/media' endpoint endpoint_parameters: dict Parameters to include in the request metrics: str metric names comma-separated return: pd.df """ media_insight = [] # Loop Over 'Media ID' for imedia in insight_list['data']: # Define URL url = endpoint_parameters['endpoint_base'] + imedia['id'] + '/insights' # Define Endpoint Parameters parameters_media = dict() parameters_media['metric'] = metrics parameters_media['access_token'] = endpoint_parameters['access_token'] # Requests Data media_data = requests.get(url, parameters_media ) json_media_data = json.loads(media_data.content) media_insight.append(list(json_media_data['data'])) # Initialize Empty Container engagement_list = [] impressions_list = [] reach_list = [] saved_list = [] # Loop Over Insights to Fill Container for insight in media_insight: engagement_list.append(insight[0]['values'][0]['value']) impressions_list.append(insight[1]['values'][0]['value']) reach_list.append(insight[2]['values'][0]['value']) saved_list.append(insight[3]['values'][0]['value']) # Create DataFrame media_insight = list(zip(engagement_list, impressions_list, reach_list, saved_list)) media_insight_df = pd.DataFrame(media_insight, columns =['engagement', 'impressions', 'reach', 'saved']) basic_insight_df = pd.DataFrame(insight_list['data']) insight_df = pd.concat([basic_insight_df, media_insight_df], axis=1) insight_df['timestamp'] = insight_df['timestamp'].apply(pd.to_datetime) return insight_df def ig_audience_insight(endpoint_parameters): """ Get Audience insight Source: https://towardsdatascience.com/discover-insights-from-your-instagram-business-account-with-facebook-graph-api-and-python-81d20ee2e751 endpoint_parameters: dict Parameters to include in the request return: 3 pd.df """ # Define URL url_account_insights = endpoint_parameters['endpoint_base'] + endpoint_parameters['instagram_account_id'] + '/insights' # Define Endpoint Parameters parameters_account_insights = dict() parameters_account_insights['metric'] = 'audience_city,audience_country,audience_gender_age' parameters_account_insights['period'] = 'lifetime' parameters_account_insights['access_token'] = endpoint_parameters['access_token'] # Requests Data audience = ig_get(url_account_insights, parameters_account_insights, to_df=False) city = pd.Series(audience['data'][0]['values'][0]['value']).rename_axis('city').to_frame('follower_count').reset_index(level=0) country = pd.Series(audience['data'][1]['values'][0]['value']).rename_axis('country').to_frame('follower_count').reset_index(level=0) gender_age = 
pd.Series(audience['data'][2]['values'][0]['value']).rename_axis('gender_age').to_frame('follower_count').reset_index(level=0) return city, country, gender_age def ig_metric_to_df(metric_list): """ Helper to transform metric list into tidy dataframe metric_list: list of metric values return: pd.df """ column_name = metric_list['name'] df =
pd.DataFrame(metric_list['values'])
pandas.DataFrame
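A minimal sketch of the completed call in ig_metric_to_df: the 'values' list of dicts from a Graph API metric becomes a tidy DataFrame. The payload below is a made-up example of that shape, not real API output.

import pandas as pd

metric_list = {"name": "impressions",
               "values": [{"value": 120, "end_time": "2021-01-01T08:00:00+0000"},
                          {"value": 135, "end_time": "2021-01-02T08:00:00+0000"}]}
df = pd.DataFrame(metric_list["values"])   # columns: value, end_time
print(df["value"].tolist())                # [120, 135]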
# internal modules import os from typing import Tuple from app_logic import dataframe_creation from data_structures.annotation_data import AnnotationData from data_structures.raw_data import RawData # python modules import logging # dependencies import numpy as np import pandas as pd # DEFINITIONS from util.definitions import BASELINE_NAME, EVENT_TYPE, HUMAN_RATING_LABEL, SLEEP_CLASSIFIERS, definitions_as_string from util import settings class PSG: """ Perform automated detection of motoric arousals during REM sleep consistent with RBD. :param input_path: absolute path to directory that contains an EDF file to be evaluated and all relevant annotation files :param output_path: absolute path to directory in which to create the result files """ def __init__(self, input_path: str = '', output_path: str = ''): self._input_path = input_path self._output_path = output_path self._raw_data: RawData = None # content of edf file self._annotation_data: AnnotationData = None # content of txt files self._calculated_data: pd.DataFrame = None # dataframe with all currently calculated data logging.info('Definitions:\n' f'{str(definitions_as_string())}\n' f'{str(settings.settings_as_string())}' ) logging.debug('New PSG Object created') # PUBLIC FUNCTIONS @property def input_path(self): return self._input_path @input_path.setter def input_path(self, input_path): self._input_path = input_path @property def output_path(self): return self._output_path @output_path.setter def output_path(self, output_path): self._output_path = output_path @staticmethod def prepare_evaluation(raw_data, annotation_data, signal_names, assess_flow_events): signal_names = signal_names.copy() # extract start of PSG, sample rate of chin EMG channel and number of chin EMG samples to create datetime index start_datetime = raw_data.get_header()['startdate'] sample_rate = raw_data.get_data_channels()[settings.SIGNALS_TO_EVALUATE[0]].get_sample_rate() sample_length = len(raw_data.get_data_channels()[settings.SIGNALS_TO_EVALUATE[0]].get_signal()) # prepare DataFrame with DatetimeIndex preliminary_idx = dataframe_creation.create_datetime_index(start_datetime, sample_rate, sample_length) # df = pd.DataFrame(index=preliminary_idx) # add sleep profile to df df = pd.concat(PSG.create_sleep_profile_column(preliminary_idx, annotation_data), axis=1) # add signals to DataFrame for signal_type in signal_names.copy(): logging.debug(signal_type + ' start') # Check if signal type exists in edf file try: signal_array = raw_data.get_data_channels()[signal_type].get_signal() except KeyError: signal_names.remove(signal_type) continue # Resample to 256 Hz and add to df sample_rate = raw_data.get_data_channels()[signal_type].get_sample_rate() df[signal_type] = dataframe_creation.signal_to_hz_rate_datetimeindexed_series( settings.RATE, sample_rate, signal_array, signal_type, start_datetime) # add global artifacts to df df['is_global_artifact'] = PSG.add_artifacts_to_df(df.index, annotation_data, assess_flow_events) return df[signal_names], df['is_REM'], df['is_global_artifact'], signal_names, df['sleep_phase'] @staticmethod def find_artifact_free_REM_sleep_epochs_and_miniepochs(idx: pd.DatetimeIndex, artifact_signal_series: pd.Series, is_REM_series: pd.Series): """ :param idx: :param artifact_signal_series: :param is_REM_series: :return: Tuple of pandas Series (Artifact-free REM epochs, Artifact-free REM miniepochs) """ df = pd.DataFrame(index=idx) artifact_in_3s_miniepoch = artifact_signal_series \ .resample('3s') \ .sum() \ .gt(0) 
df['miniepoch_contains_artifact'] = artifact_in_3s_miniepoch df['miniepoch_contains_artifact'] = df['miniepoch_contains_artifact'].ffill() df['artifact_free_rem_sleep_miniepoch'] = is_REM_series & ~df['miniepoch_contains_artifact'] # find all 30s epochs of global artifact-free REM sleep for tonic event detection artifact_in_30s_epoch = artifact_signal_series \ .resample('30s') \ .sum() \ .gt(0) df['epoch_contains_artifact'] = artifact_in_30s_epoch df['epoch_contains_artifact'] = df['epoch_contains_artifact'].ffill() df['artifact_free_rem_sleep_epoch'] = is_REM_series & ~df['epoch_contains_artifact'] return df['artifact_free_rem_sleep_epoch'], df['artifact_free_rem_sleep_miniepoch'] @staticmethod def prepare_human_signal_artifacts(annotation_data, index, signal_names): """ :param annotation_data: :param index: :param signal_names: :return: pd.DataFrame with index 'index' containing 3 columns per signal in 'signal_names'. Their column names follow the following schematic: - '<signal_name>_human_artifact' (artifacts of either human rater) - '<signal_name>_human1_artifact' (artifacts of human rater 1) - '<signal_name>_human2_artifact' (artifacts of human rater 2) """ try: # process human rating for artifact evaluation per signal and event human_rating1 = annotation_data.human_rating[0][1] human_rating_label_dict1 = human_rating1.groupby('event').groups logging.debug(human_rating_label_dict1) except IndexError as e: raise FileNotFoundError("Human rating does not exist.") try: # process second human rating for artifact extraction per signal and event human_rating2 = annotation_data.human_rating[1][1] human_rating_label_dict2 = human_rating2.groupby('event').groups logging.debug(human_rating_label_dict2) except IndexError as e: logging.info("Only one human rater file found.") human_rating2 = None df_artifacts =
pd.DataFrame(index=index)
pandas.DataFrame
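A minimal sketch of the pre-allocation completed above in prepare_human_signal_artifacts: an empty DataFrame built on a shared DatetimeIndex, to which per-signal artifact columns are added later. The index parameters are invented for illustration.

import pandas as pd

idx = pd.date_range("2020-01-01 22:00:00", periods=5, freq="s")
df_artifacts = pd.DataFrame(index=idx)       # no columns yet, only the shared time axis
df_artifacts["EMG_human_artifact"] = False   # one boolean column per signal is filled in later
print(df_artifacts.shape)                    # (5, 1)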
import builtins from io import StringIO from itertools import product from string import ascii_lowercase import numpy as np import pytest from pandas.errors import UnsupportedFunctionCall import pandas as pd from pandas import ( DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna) import pandas.core.nanops as nanops from pandas.util import testing as tm @pytest.mark.parametrize("agg_func", ['any', 'all']) @pytest.mark.parametrize("skipna", [True, False]) @pytest.mark.parametrize("vals", [ ['foo', 'bar', 'baz'], ['foo', '', ''], ['', '', ''], [1, 2, 3], [1, 0, 0], [0, 0, 0], [1., 2., 3.], [1., 0., 0.], [0., 0., 0.], [True, True, True], [True, False, False], [False, False, False], [np.nan, np.nan, np.nan] ]) def test_groupby_bool_aggs(agg_func, skipna, vals): df = DataFrame({'key': ['a'] * 3 + ['b'] * 3, 'val': vals * 2}) # Figure out expectation using Python builtin exp = getattr(builtins, agg_func)(vals) # edge case for missing data with skipna and 'any' if skipna and all(isna(vals)) and agg_func == 'any': exp = False exp_df = DataFrame([exp] * 2, columns=['val'], index=Index( ['a', 'b'], name='key')) result = getattr(df.groupby('key'), agg_func)(skipna=skipna) tm.assert_frame_equal(result, exp_df) def test_max_min_non_numeric(): # #2700 aa = DataFrame({'nn': [11, 11, 22, 22], 'ii': [1, 2, 3, 4], 'ss': 4 * ['mama']}) result = aa.groupby('nn').max() assert 'ss' in result result = aa.groupby('nn').max(numeric_only=False) assert 'ss' in result result = aa.groupby('nn').min() assert 'ss' in result result = aa.groupby('nn').min(numeric_only=False) assert 'ss' in result def test_intercept_builtin_sum(): s = Series([1., 2., np.nan, 3.]) grouped = s.groupby([0, 1, 2, 2]) result = grouped.agg(builtins.sum) result2 = grouped.apply(builtins.sum) expected = grouped.sum() tm.assert_series_equal(result, expected) tm.assert_series_equal(result2, expected) # @pytest.mark.parametrize("f", [max, min, sum]) # def test_builtins_apply(f): @pytest.mark.parametrize("f", [max, min, sum]) @pytest.mark.parametrize('keys', [ "jim", # Single key ["jim", "joe"] # Multi-key ]) def test_builtins_apply(keys, f): # see gh-8155 df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)), columns=["jim", "joe"]) df["jolie"] = np.random.randn(1000) fname = f.__name__ result = df.groupby(keys).apply(f) ngroups = len(df.drop_duplicates(subset=keys)) assert_msg = ("invalid frame shape: {} " "(expected ({}, 3))".format(result.shape, ngroups)) assert result.shape == (ngroups, 3), assert_msg tm.assert_frame_equal(result, # numpy's equivalent function df.groupby(keys).apply(getattr(np, fname))) if f != sum: expected = df.groupby(keys).agg(fname).reset_index() expected.set_index(keys, inplace=True, drop=False) tm.assert_frame_equal(result, expected, check_dtype=False) tm.assert_series_equal(getattr(result, fname)(), getattr(df, fname)()) def test_arg_passthru(): # make sure that we are passing thru kwargs # to our agg functions # GH3668 # GH5724 df = pd.DataFrame( {'group': [1, 1, 2], 'int': [1, 2, 3], 'float': [4., 5., 6.], 'string': list('abc'), 'category_string': pd.Series(list('abc')).astype('category'), 'category_int': [7, 8, 9], 'datetime': pd.date_range('20130101', periods=3), 'datetimetz': pd.date_range('20130101', periods=3, tz='US/Eastern'), 'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')}, columns=['group', 'int', 'float', 'string', 'category_string', 'category_int', 'datetime', 'datetimetz', 'timedelta']) expected_columns_numeric = Index(['int', 'float', 'category_int']) # mean / median expected = 
pd.DataFrame( {'category_int': [7.5, 9], 'float': [4.5, 6.], 'timedelta': [pd.Timedelta('1.5s'), pd.Timedelta('3s')], 'int': [1.5, 3], 'datetime': [pd.Timestamp('2013-01-01 12:00:00'), pd.Timestamp('2013-01-03 00:00:00')], 'datetimetz': [ pd.Timestamp('2013-01-01 12:00:00', tz='US/Eastern'), pd.Timestamp('2013-01-03 00:00:00', tz='US/Eastern')]}, index=Index([1, 2], name='group'), columns=['int', 'float', 'category_int', 'datetime', 'datetimetz', 'timedelta']) for attr in ['mean', 'median']: f = getattr(df.groupby('group'), attr) result = f() tm.assert_index_equal(result.columns, expected_columns_numeric) result = f(numeric_only=False) tm.assert_frame_equal(result.reindex_like(expected), expected) # TODO: min, max *should* handle # categorical (ordered) dtype expected_columns = Index(['int', 'float', 'string', 'category_int', 'datetime', 'datetimetz', 'timedelta']) for attr in ['min', 'max']: f = getattr(df.groupby('group'), attr) result = f() tm.assert_index_equal(result.columns, expected_columns) result = f(numeric_only=False) tm.assert_index_equal(result.columns, expected_columns) expected_columns = Index(['int', 'float', 'string', 'category_string', 'category_int', 'datetime', 'datetimetz', 'timedelta']) for attr in ['first', 'last']: f = getattr(df.groupby('group'), attr) result = f() tm.assert_index_equal(result.columns, expected_columns) result = f(numeric_only=False) tm.assert_index_equal(result.columns, expected_columns) expected_columns = Index(['int', 'float', 'string', 'category_int', 'timedelta']) for attr in ['sum']: f = getattr(df.groupby('group'), attr) result = f() tm.assert_index_equal(result.columns, expected_columns_numeric) result = f(numeric_only=False) tm.assert_index_equal(result.columns, expected_columns) expected_columns = Index(['int', 'float', 'category_int']) for attr in ['prod', 'cumprod']: f = getattr(df.groupby('group'), attr) result = f() tm.assert_index_equal(result.columns, expected_columns_numeric) result = f(numeric_only=False) tm.assert_index_equal(result.columns, expected_columns) # like min, max, but don't include strings expected_columns = Index(['int', 'float', 'category_int', 'datetime', 'datetimetz', 'timedelta']) for attr in ['cummin', 'cummax']: f = getattr(df.groupby('group'), attr) result = f() # GH 15561: numeric_only=False set by default like min/max tm.assert_index_equal(result.columns, expected_columns) result = f(numeric_only=False) tm.assert_index_equal(result.columns, expected_columns) expected_columns = Index(['int', 'float', 'category_int', 'timedelta']) for attr in ['cumsum']: f = getattr(df.groupby('group'), attr) result = f() tm.assert_index_equal(result.columns, expected_columns_numeric) result = f(numeric_only=False) tm.assert_index_equal(result.columns, expected_columns) def test_non_cython_api(): # GH5610 # non-cython calls should not include the grouper df = DataFrame( [[1, 2, 'foo'], [1, np.nan, 'bar'], [3, np.nan, 'baz']], columns=['A', 'B', 'C']) g = df.groupby('A') gni = df.groupby('A', as_index=False) # mad expected = DataFrame([[0], [np.nan]], columns=['B'], index=[1, 3]) expected.index.name = 'A' result = g.mad() tm.assert_frame_equal(result, expected) expected = DataFrame([[0., 0.], [0, np.nan]], columns=['A', 'B'], index=[0, 1]) result = gni.mad() tm.assert_frame_equal(result, expected) # describe expected_index = pd.Index([1, 3], name='A') expected_col = pd.MultiIndex(levels=[['B'], ['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max']], codes=[[0] * 8, list(range(8))]) expected = pd.DataFrame([[1.0, 2.0, 
np.nan, 2.0, 2.0, 2.0, 2.0, 2.0], [0.0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]], index=expected_index, columns=expected_col) result = g.describe() tm.assert_frame_equal(result, expected) expected = pd.concat([df[df.A == 1].describe().unstack().to_frame().T, df[df.A == 3].describe().unstack().to_frame().T]) expected.index = pd.Index([0, 1]) result = gni.describe() tm.assert_frame_equal(result, expected) # any expected = DataFrame([[True, True], [False, True]], columns=['B', 'C'], index=[1, 3]) expected.index.name = 'A' result = g.any() tm.assert_frame_equal(result, expected) # idxmax expected = DataFrame([[0.0], [np.nan]], columns=['B'], index=[1, 3]) expected.index.name = 'A' result = g.idxmax() tm.assert_frame_equal(result, expected) def test_cython_api2(): # this takes the fast apply path # cumsum (GH5614) df = DataFrame( [[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9] ], columns=['A', 'B', 'C']) expected = DataFrame( [[2, np.nan], [np.nan, 9], [4, 9]], columns=['B', 'C']) result = df.groupby('A').cumsum() tm.assert_frame_equal(result, expected) # GH 5755 - cumsum is a transformer and should ignore as_index result = df.groupby('A', as_index=False).cumsum() tm.assert_frame_equal(result, expected) # GH 13994 result = df.groupby('A').cumsum(axis=1) expected = df.cumsum(axis=1) tm.assert_frame_equal(result, expected) result = df.groupby('A').cumprod(axis=1) expected = df.cumprod(axis=1) tm.assert_frame_equal(result, expected) def test_cython_median(): df = DataFrame(np.random.randn(1000)) df.values[::2] = np.nan labels = np.random.randint(0, 50, size=1000).astype(float) labels[::17] = np.nan result = df.groupby(labels).median() exp = df.groupby(labels).agg(nanops.nanmedian) tm.assert_frame_equal(result, exp) df = DataFrame(np.random.randn(1000, 5)) rs = df.groupby(labels).agg(np.median) xp = df.groupby(labels).median() tm.assert_frame_equal(rs, xp) def test_median_empty_bins(observed): df = pd.DataFrame(np.random.randint(0, 44, 500)) grps = range(0, 55, 5) bins = pd.cut(df[0], grps) result = df.groupby(bins, observed=observed).median() expected = df.groupby(bins, observed=observed).agg(lambda x: x.median()) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("dtype", [ 'int8', 'int16', 'int32', 'int64', 'float32', 'float64']) @pytest.mark.parametrize("method,data", [ ('first', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}), ('last', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}), ('min', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}), ('max', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}), ('nth', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}], 'args': [1]}), ('count', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 2}], 'out_type': 'int64'}) ]) def test_groupby_non_arithmetic_agg_types(dtype, method, data): # GH9311, GH6620 df = pd.DataFrame( [{'a': 1, 'b': 1}, {'a': 1, 'b': 2}, {'a': 2, 'b': 3}, {'a': 2, 'b': 4}]) df['b'] = df.b.astype(dtype) if 'args' not in data: data['args'] = [] if 'out_type' in data: out_type = data['out_type'] else: out_type = dtype exp = data['df'] df_out = pd.DataFrame(exp) df_out['b'] = df_out.b.astype(out_type) df_out.set_index('a', inplace=True) grpd = df.groupby('a') t = getattr(grpd, method)(*data['args']) tm.assert_frame_equal(t, df_out) @pytest.mark.parametrize("i", [ (Timestamp("2011-01-15 12:50:28.502376"), Timestamp("2011-01-20 12:50:28.593448")), (24650000000000001, 24650000000000002) ]) def test_groupby_non_arithmetic_agg_int_like_precision(i): # see gh-6620, gh-9311 df = pd.DataFrame([{"a": 1, "b": i[0]}, {"a": 1, "b": i[1]}]) grp_exp = 
{"first": {"expected": i[0]}, "last": {"expected": i[1]}, "min": {"expected": i[0]}, "max": {"expected": i[1]}, "nth": {"expected": i[1], "args": [1]}, "count": {"expected": 2}} for method, data in grp_exp.items(): if "args" not in data: data["args"] = [] grouped = df.groupby("a") res = getattr(grouped, method)(*data["args"]) assert res.iloc[0].b == data["expected"] @pytest.mark.parametrize("func, values", [ ("idxmin", {'c_int': [0, 2], 'c_float': [1, 3], 'c_date': [1, 2]}), ("idxmax", {'c_int': [1, 3], 'c_float': [0, 2], 'c_date': [0, 3]}) ]) def test_idxmin_idxmax_returns_int_types(func, values): # GH 25444 df = pd.DataFrame({'name': ['A', 'A', 'B', 'B'], 'c_int': [1, 2, 3, 4], 'c_float': [4.02, 3.03, 2.04, 1.05], 'c_date': ['2019', '2018', '2016', '2017']}) df['c_date'] = pd.to_datetime(df['c_date']) result = getattr(df.groupby('name'), func)() expected = pd.DataFrame(values, index=Index(['A', 'B'], name="name")) tm.assert_frame_equal(result, expected) def test_fill_consistency(): # GH9221 # pass thru keyword arguments to the generated wrapper # are set if the passed kw is None (only) df = DataFrame(index=pd.MultiIndex.from_product( [['value1', 'value2'], date_range('2014-01-01', '2014-01-06')]), columns=Index( ['1', '2'], name='id')) df['1'] = [np.nan, 1, np.nan, np.nan, 11, np.nan, np.nan, 2, np.nan, np.nan, 22, np.nan] df['2'] = [np.nan, 3, np.nan, np.nan, 33, np.nan, np.nan, 4, np.nan, np.nan, 44, np.nan] expected = df.groupby(level=0, axis=0).fillna(method='ffill') result = df.T.groupby(level=0, axis=1).fillna(method='ffill').T tm.assert_frame_equal(result, expected) def test_groupby_cumprod(): # GH 4095 df = pd.DataFrame({'key': ['b'] * 10, 'value': 2}) actual = df.groupby('key')['value'].cumprod() expected = df.groupby('key')['value'].apply(lambda x: x.cumprod()) expected.name = 'value' tm.assert_series_equal(actual, expected) df = pd.DataFrame({'key': ['b'] * 100, 'value': 2}) actual = df.groupby('key')['value'].cumprod() # if overflows, groupby product casts to float # while numpy passes back invalid values df['value'] = df['value'].astype(float) expected = df.groupby('key')['value'].apply(lambda x: x.cumprod()) expected.name = 'value' tm.assert_series_equal(actual, expected) def test_ops_general(): ops = [('mean', np.mean), ('median', np.median), ('std', np.std), ('var', np.var), ('sum', np.sum), ('prod', np.prod), ('min', np.min), ('max', np.max), ('first', lambda x: x.iloc[0]), ('last', lambda x: x.iloc[-1]), ('count', np.size), ] try: from scipy.stats import sem except ImportError: pass else: ops.append(('sem', sem)) df = DataFrame(np.random.randn(1000)) labels = np.random.randint(0, 50, size=1000).astype(float) for op, targop in ops: result = getattr(df.groupby(labels), op)().astype(float) expected = df.groupby(labels).agg(targop) try: tm.assert_frame_equal(result, expected) except BaseException as exc: exc.args += ('operation: %s' % op, ) raise def test_max_nan_bug(): raw = """,Date,app,File -04-23,2013-04-23 00:00:00,,log080001.log -05-06,2013-05-06 00:00:00,,log.log -05-07,2013-05-07 00:00:00,OE,xlsx""" df = pd.read_csv(StringIO(raw), parse_dates=[0]) gb = df.groupby('Date') r = gb[['File']].max() e = gb['File'].max().to_frame() tm.assert_frame_equal(r, e) assert not r['File'].isna().any() def test_nlargest(): a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10]) b = Series(list('a' * 5 + 'b' * 5)) gb = a.groupby(b) r = gb.nlargest(3) e = Series([ 7, 5, 3, 10, 9, 6 ], index=MultiIndex.from_arrays([list('aaabbb'), [3, 2, 1, 9, 5, 8]])) tm.assert_series_equal(r, e) a = Series([1, 
1, 3, 2, 0, 3, 3, 2, 1, 0]) gb = a.groupby(b) e = Series([ 3, 2, 1, 3, 3, 2 ], index=MultiIndex.from_arrays([list('aaabbb'), [2, 3, 1, 6, 5, 7]])) tm.assert_series_equal(gb.nlargest(3, keep='last'), e) def test_nsmallest(): a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10]) b = Series(list('a' * 5 + 'b' * 5)) gb = a.groupby(b) r = gb.nsmallest(3) e = Series([ 1, 2, 3, 0, 4, 6 ], index=MultiIndex.from_arrays([list('aaabbb'), [0, 4, 1, 6, 7, 8]])) tm.assert_series_equal(r, e) a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0]) gb = a.groupby(b) e = Series([ 0, 1, 1, 0, 1, 2 ], index=MultiIndex.from_arrays([list('aaabbb'), [4, 1, 0, 9, 8, 7]])) tm.assert_series_equal(gb.nsmallest(3, keep='last'), e) @pytest.mark.parametrize("func", [ 'mean', 'var', 'std', 'cumprod', 'cumsum' ]) def test_numpy_compat(func): # see gh-12811 df = pd.DataFrame({'A': [1, 2, 1], 'B': [1, 2, 3]}) g = df.groupby('A') msg = "numpy operations are not valid with groupby" with pytest.raises(UnsupportedFunctionCall, match=msg): getattr(g, func)(1, 2, 3) with pytest.raises(UnsupportedFunctionCall, match=msg): getattr(g, func)(foo=1) def test_cummin_cummax(): # GH 15048 num_types = [np.int32, np.int64, np.float32, np.float64] num_mins = [np.iinfo(np.int32).min, np.iinfo(np.int64).min, np.finfo(np.float32).min, np.finfo(np.float64).min] num_max = [np.iinfo(np.int32).max, np.iinfo(np.int64).max, np.finfo(np.float32).max, np.finfo(np.float64).max] base_df = pd.DataFrame({'A': [1, 1, 1, 1, 2, 2, 2, 2], 'B': [3, 4, 3, 2, 2, 3, 2, 1]}) expected_mins = [3, 3, 3, 2, 2, 2, 2, 1] expected_maxs = [3, 4, 4, 4, 2, 3, 3, 3] for dtype, min_val, max_val in zip(num_types, num_mins, num_max): df = base_df.astype(dtype) # cummin expected = pd.DataFrame({'B': expected_mins}).astype(dtype) result = df.groupby('A').cummin() tm.assert_frame_equal(result, expected) result = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame() tm.assert_frame_equal(result, expected) # Test cummin w/ min value for dtype df.loc[[2, 6], 'B'] = min_val expected.loc[[2, 3, 6, 7], 'B'] = min_val result = df.groupby('A').cummin() tm.assert_frame_equal(result, expected) expected = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame() tm.assert_frame_equal(result, expected) # cummax expected = pd.DataFrame({'B': expected_maxs}).astype(dtype) result = df.groupby('A').cummax() tm.assert_frame_equal(result, expected) result = df.groupby('A').B.apply(lambda x: x.cummax()).to_frame() tm.assert_frame_equal(result, expected) # Test cummax w/ max value for dtype df.loc[[2, 6], 'B'] = max_val expected.loc[[2, 3, 6, 7], 'B'] = max_val result = df.groupby('A').cummax()
tm.assert_frame_equal(result, expected)
pandas.util.testing.assert_frame_equal
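For reference, a minimal standalone sketch of the assertion named above, using illustrative frames rather than the test's own data; note that pandas.util.testing is the legacy public location of these helpers, which newer pandas versions expose as pandas.testing instead.

import pandas as pd
import pandas.util.testing as tm  # legacy path; newer pandas: import pandas.testing as tm

# Illustrative frames, not taken from the test file above.
left = pd.DataFrame({"a": [1, 2], "b": [0.5, 1.5]})
right = pd.DataFrame({"a": [1, 2], "b": [0.5, 1.5]})

tm.assert_frame_equal(left, right)  # passes silently; a mismatch raises AssertionError with a diff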
#!/usr/bin/env python # coding: utf-8 # In[850]: import pandas as pd import numpy as np import matplotlib.pyplot as plt import pandas as pd import warnings warnings.filterwarnings("ignore") from sklearn.model_selection import cross_val_score,KFold from sklearn.neighbors import KNeighborsRegressor # In[851]: ###DATA FRAMES### # Supplied Dataset BAH # power_df = pd.DataFrame.from_csv("energy_db.csv") # GDP - World Bank Group # gdp_df = pd.DataFrame.from_csv("gdp_power.csv") # Population - World Bank Group # population_df = pd.DataFrame.from_csv("population.csv") # Hydropower Status Report - International Hydropower Association # hydro_df = pd.DataFrame.from_csv("hydro_power.csv") # Freedom in the World 2018 - Freedomhouse.org # freedom_df = pd.DataFrame.from_csv("freedom.csv") # Natural Disaster Probability - World Risk Report 2016 # disasters_df = pd.DataFrame.from_csv("NaturalDisasterChance.csv") # Nuclear Power Statistics - World Nuclear Association # nuclear_df = pd.DataFrame.from_csv("Third World Nuclear Totals.csv") # Wind Energy Statistics - World Energy Council # wind_df = pd.DataFrame.from_csv("Wind energy - Wind energy.csv") # Solar Energy Statistics - World Energy Council / World Bank Group # solar_df = pd.DataFrame.from_csv("solar_potential - hydro_power.csv") # Class Attributes for Training Data # classattr_df = pd.DataFrame.from_csv("TrainClassAtributes.csv") # In[852]: # Remove irrelevant values # power_df = power_df.drop(columns=["gppd_idnr", "owner", "source", "url", "geolocation_source", "year_of_capacity_data", "generation_gwh_2013", "generation_gwh_2014", "generation_gwh_2015", "generation_gwh_2016"]) # In[853]: # Dataframe Merge # # In[854]: power_df = pd.merge(power_df, gdp_df, on=["country"]) # In[855]: power_df = pd.merge(power_df, population_df, on=["country"]) # In[856]: power_df = pd.merge(power_df, hydro_df, on=["country_long"]) # In[857]: power_df = pd.merge(power_df, freedom_df, on=["country_long"]) # In[858]: power_df = pd.merge(power_df, disasters_df, on=["country_long"]) # In[859]: power_df = pd.merge(power_df, nuclear_df, on=["country_long"]) # In[860]: power_df = pd.merge(power_df, wind_df, on=["country_long"]) # In[861]: power_df =
pd.merge(power_df, solar_df, on=["country_long"])
pandas.merge
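A minimal sketch of the kind of key-based join the script above chains together; the frame and column names here are made up for illustration and are not the script's own tables.

import pandas as pd

# Hypothetical frames standing in for the country-level tables merged above.
power = pd.DataFrame({"country_long": ["A", "B"], "capacity_mw": [100, 250]})
solar = pd.DataFrame({"country_long": ["A", "B"], "solar_potential": [5.1, 3.8]})

merged = pd.merge(power, solar, on=["country_long"])  # inner join on the shared key column
print(merged)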
# -*- coding: utf-8 -*- # Copyright (c) 2018-2021, earthobservations developers. # Distributed under the MIT License. See LICENSE for more info. import logging import operator from abc import abstractmethod from enum import Enum from typing import Dict, Generator, List, Tuple, Union import numpy as np import pandas as pd from pint import Quantity from pytz import timezone from tqdm import tqdm from wetterdienst.core.scalar.result import StationsResult, ValuesResult from wetterdienst.metadata.columns import Columns from wetterdienst.metadata.resolution import Resolution from wetterdienst.metadata.timezone import Timezone from wetterdienst.metadata.unit import REGISTRY, OriginUnit, SIUnit from wetterdienst.util.enumeration import parse_enumeration_from_template from wetterdienst.util.logging import TqdmToLogger log = logging.getLogger(__name__) class ScalarValuesCore: """ Core for sources of point data where data is related to a station """ # Fields for type coercion, needed for separation from fields with actual data # that have to be parsed differently when having data in tabular form @property def _meta_fields(self) -> List[str]: """ Metadata fields that are independent of actual values and should be parsed differently :return: list of strings representing the metadata fields/columns """ if not self.stations.stations.tidy: fields = [ Columns.STATION_ID.value, Columns.DATE.value, ] else: fields = [ Columns.STATION_ID.value, Columns.DATASET.value, Columns.PARAMETER.value, Columns.DATE.value, Columns.VALUE.value, Columns.QUALITY.value, ] return fields # Fields for date coercion _date_fields = [Columns.DATE.value, Columns.FROM_DATE.value, Columns.TO_DATE.value] # TODO: add data type (forecast, observation, ...) # @property # @abstractmethod # def _has_quality(self) -> bool: # """Attribute that tells if a weather service has quality, which otherwise will # have to be set to NaN""" # pass @property def data_tz(self) -> timezone: """ Timezone of the published data """ return timezone(self._data_tz.value) @property @abstractmethod def _data_tz(self) -> Timezone: """ Timezone enumeration of published data. """ pass @property @abstractmethod def _irregular_parameters(self) -> Tuple[str]: """Declaration of irregular parameters which will have to be parsed differently then others e.g. when a parameter is a date.""" pass @property @abstractmethod def _integer_parameters(self) -> Tuple[str]: """ Integer parameters that will be parsed to integers. """ pass @property @abstractmethod def _string_parameters(self) -> Tuple[str]: """ String parameters that will be parsed to integers. 
""" pass @property def _complete_dates(self) -> pd.DatetimeIndex: """ Complete datetime index for the requested start and end date, used for building a complementary pandas DataFrame with the date column on which other DataFrames can be joined on :return: pandas.DatetimeIndex """ start_date, end_date = self.stations.start_date, self.stations.end_date if self.stations.stations.resolution == Resolution.MONTHLY: end_date += pd.Timedelta(days=31) elif self.stations.stations.resolution == Resolution.ANNUAL: end_date += pd.Timedelta(year=366) date_range = pd.date_range( start_date, end_date, freq=self.stations.frequency.value, tz=self.data_tz, ) return date_range @property def _base_df(self) -> pd.DataFrame: """ Base dataframe which is used for creating empty dataframes if no data is found or for merging other dataframes on the full dates :return: pandas DataFrame with a date column with complete dates """ return pd.DataFrame({Columns.DATE.value: self._complete_dates}) def convert_values_to_si(self, df: pd.DataFrame, dataset) -> pd.DataFrame: """ Function to convert values to metric units with help of conversion factors :param df: pandas DataFrame that should be converted to SI units :param dataset: dataset for which the conversion factors are created :return: pandas DataFrame with converted (SI) values """ def _convert_values_to_si(series): """ Helper function to apply conversion factors column wise to a pandas DataFrame :param series: pandas Series that should be converted :return: converted pandas Series """ op, factor = conversion_factors.get(series.name, (None, None)) if not op or not factor: return series return op(series, factor) conversion_factors = self._create_conversion_factors(dataset) df = df.apply(_convert_values_to_si, axis=0) return df def _create_conversion_factors( self, dataset ) -> Dict[str, Tuple[Union[operator.add, operator.mul], float]]: """ Function to create conversion factors based on a given dataset :param dataset: dataset for which conversion factors are created :return: dictionary with conversion factors for given parameter name """ dataset = dataset.name dataset_accessor = self.stations.stations._dataset_accessor if self.stations.stations._unique_dataset: units = self.stations.stations._unit_tree[dataset_accessor] else: units = self.stations.stations._unit_tree[dataset_accessor][dataset] conversion_factors = {} # TODO eventually we may split this into smaller functions for parameter in units: origin_unit, si_unit = parameter.value # Get parameter name parameter = parameter.name if self.stations.stations._unique_dataset: parameter_value = self.stations.stations._dataset_tree[ dataset_accessor ][parameter].value else: parameter_value = self.stations.stations._dataset_tree[ dataset_accessor ][dataset][parameter].value if si_unit == SIUnit.KILOGRAM_PER_SQUARE_METER.value: # Fixed conversion factors to kg / m², as it only applies # for water with density 1 g / cm³ if origin_unit == OriginUnit.MILLIMETER.value: conversion_factors[parameter_value] = (operator.mul, 1) else: raise ValueError( "manually set conversion factor for precipitation unit" ) elif si_unit == SIUnit.DEGREE_KELVIN.value: # Apply offset addition to temperature measurements # Take 0 as this is appropriate for adding on other numbers # (just the difference) degree_offset = Quantity(0, origin_unit).to(si_unit).magnitude conversion_factors[parameter_value] = (operator.add, degree_offset) elif si_unit == SIUnit.PERCENT.value: factor = REGISTRY(str(origin_unit)).to(str(si_unit)).magnitude 
conversion_factors[parameter_value] = (operator.mul, factor) else: # For multiplicative units we need to use 1 as quantity to apply the # appropriate factor conversion_factors[parameter_value] = ( operator.mul, Quantity(1, origin_unit).to(si_unit).magnitude, ) return conversion_factors def __init__(self, stations: StationsResult) -> None: self.stations = stations @classmethod def from_stations(cls, stations: StationsResult): return cls(stations) def __eq__(self, other): """ Equal method of request object """ return ( self.stations.station_id == other.stations.station_id and self.stations.parameter == other.stations.parameter and self.stations.start_date == other.stations.start_date and self.stations.end_date == other.stations.end_date ) pass def __str__(self): """ Str representation of request object """ # TODO: include source # TODO: include data type station_ids_joined = "& ".join( [str(station_id) for station_id in self.stations.station_id] ) parameters_joined = "& ".join( [ parameter.value for parameter, parameter_set in self.stations.stations.parameter ] ) return ", ".join( [ f"station_ids {station_ids_joined}", f"parameters {parameters_joined}", str(self.stations.start_date), str(self.stations.end_date), ] ) pass def _create_empty_station_parameter_df( self, station_id: str, parameter: Enum, dataset: Enum ) -> pd.DataFrame: """ Function to create an empty DataFrame :param station_id: :param parameter: :return: """ dataset_tree = self.stations.stations._dataset_tree resolution = self.stations.stations.resolution # if parameter is a whole dataset, take every parameter from the dataset instead if parameter == dataset: if self.stations.stations._unique_dataset: parameter = [*dataset_tree[resolution.name]] else: parameter = [*dataset_tree[resolution.name][dataset.name]] if self.stations.stations.tidy: if not self.stations.stations.start_date: return pd.DataFrame(None, columns=self._meta_fields) data = [] for par in pd.Series(parameter): if par.name.startswith("QUALITY"): continue par_df = self._base_df par_df[Columns.PARAMETER.value] = par.value data.append(par_df) df = pd.concat(data) df[Columns.STATION_ID.value] = station_id df[Columns.DATASET.value] = dataset.name df[Columns.VALUE.value] = pd.NA df[Columns.QUALITY.value] = pd.NA return df else: parameter = pd.Series(parameter).map(lambda x: x.value).tolist() # Base columns columns = [*self._meta_fields, *parameter] if self.stations.stations.start_date: return pd.DataFrame(None, columns=columns) df = self._base_df df = df.reindex(columns=columns) df[Columns.STATION_ID.value] = station_id return df def _build_complete_df( self, df: pd.DataFrame, station_id: str, parameter: Enum, dataset: Enum ) -> pd.DataFrame: # For cases where requests are not defined by start and end date but rather by # periods, use the returned df without modifications # We may put a standard date range here if no data is found if not self.stations.start_date: return df if parameter != dataset or not self.stations.stations.tidy: df = pd.merge( left=self._base_df, right=df, left_on=Columns.DATE.value, right_on=Columns.DATE.value, how="left", ) df[Columns.STATION_ID.value] = station_id if self.stations.tidy: df[Columns.PARAMETER.value] = parameter.value df[Columns.PARAMETER.value] = pd.Categorical( df[Columns.PARAMETER.value] ) if dataset: df[Columns.DATASET.value] = dataset.name.lower() df[Columns.DATASET.value] = pd.Categorical( df[Columns.DATASET.value] ) return df else: data = [] for parameter, group in df.groupby(Columns.PARAMETER.value, sort=False): if 
self.stations.stations._unique_dataset: parameter_ = parse_enumeration_from_template( parameter, self.stations.stations._parameter_base[ self.stations.resolution.name ], ) else: parameter_ = parse_enumeration_from_template( parameter, self.stations.stations._dataset_tree[ self.stations.resolution.name ][dataset.name], ) df = pd.merge( left=self._base_df, right=group, left_on=Columns.DATE.value, right_on=Columns.DATE.value, how="left", ) df[Columns.STATION_ID.value] = station_id df[Columns.PARAMETER.value] = parameter_.value df[Columns.DATASET.value] = dataset.name.lower() df[Columns.DATASET.value] = pd.Categorical(df[Columns.DATASET.value]) data.append(df) return pd.concat(data) def _organize_df_columns(self, df: pd.DataFrame) -> pd.DataFrame: """ Method to reorder index to always have the same order of columns :param df: :return: """ columns = self._meta_fields columns.extend(df.columns.difference(columns, sort=False)) df = df.reindex(columns=columns) return df def query(self) -> Generator[ValuesResult, None, None]: """ Core method for data collection, iterating of station ids and yielding a DataFrame for each station with all found parameters. Takes care of type coercion of data, date filtering and humanizing of parameters. :return: """ for station_id in self.stations.station_id: # TODO: add method to return empty result with correct response string e.g. # station id not available station_data = [] for parameter, dataset in self.stations.parameter: parameter_df = self._collect_station_parameter( station_id, parameter, dataset ) if parameter_df.empty: continue # Merge on full date range if values are found to ensure result # even if no actual values exist self._coerce_date_fields(parameter_df) parameter_df = self._coerce_parameter_types(parameter_df) if self.stations.stations.si_units: parameter_df = self.convert_values_to_si(parameter_df, dataset) if self.stations.stations.tidy: parameter_df = self.tidy_up_df(parameter_df, dataset) if parameter != dataset: parameter_df = parameter_df[ parameter_df[Columns.PARAMETER.value] == parameter.value.lower() ] parameter_df = self._build_complete_df( parameter_df, station_id, parameter, dataset ) parameter_df = self._organize_df_columns(parameter_df) station_data.append(parameter_df) try: station_df = pd.concat(station_data, ignore_index=True) except ValueError: station_df = self._create_empty_station_parameter_df( station_id, parameter ) station_df = self._coerce_meta_fields(station_df) # Filter for dates range if start_date and end_date are defined if not station_df.empty and self.stations.start_date: station_df = station_df[ (station_df[Columns.DATE.value] >= self.stations.start_date) & (station_df[Columns.DATE.value] <= self.stations.end_date) ] station_df = self._coerce_parameter_types(station_df) # Assign meaningful parameter names (humanized). if self.stations.humanize: station_df = self._humanize(station_df) # Empty dataframe should be skipped if station_df.empty: continue # TODO: add more meaningful metadata here yield ValuesResult(stations=self.stations, df=station_df) @abstractmethod def _collect_station_parameter( self, station_id: str, parameter: Enum, dataset: Enum ) -> pd.DataFrame: """ Implementation of data collection for a station id plus parameter from the specified weather service. Takes care of the gathering of the data and putting it in shape, either tabular with one parameter per column or tidied with a set of station id, date, parameter, value and quality in one row. 
:param station_id: station id for which the data is being collected :param parameter: parameter for which the data is collected :param dataset: dataset for which the data is collected :return: pandas.DataFrame with the data for given station id and parameter """ pass def tidy_up_df(self, df: pd.DataFrame, dataset: Enum) -> pd.DataFrame: """ Function to tidy a DataFrame :param df: :param dataset: :return: """ df = self._tidy_up_df(df, dataset) df[Columns.DATASET.value] = pd.Series( dataset.name.lower(), index=df.index, dtype=str ) df[Columns.VALUE.value] = pd.to_numeric(df[Columns.VALUE.value]).astype(float) if Columns.QUALITY.value not in df: df[Columns.QUALITY.value] = np.nan df[Columns.QUALITY.value] = pd.to_numeric(df[Columns.QUALITY.value]).astype( float ) df.loc[df[Columns.VALUE.value].isna(), Columns.QUALITY.value] = np.NaN return df @abstractmethod def _tidy_up_df(self, df: pd.DataFrame, dataset) -> pd.DataFrame: """ Abstract method to be implemented by services to tidy a DataFrame :param df: :return: """ pass def _coerce_date_fields(self, df: pd.DataFrame) -> pd.DataFrame: """ Function for coercion of possible date fields :param df: :return: """ for column in ( Columns.DATE.value, Columns.FROM_DATE.value, Columns.TO_DATE.value, ): try: df[column] = self._coerce_dates(df[column]) except KeyError: pass return df def _coerce_meta_fields(self, df: pd.DataFrame) -> pd.DataFrame: """ Method that coerces meta fields. Those fields are expected to be found in the DataFrame in a columnar shape. Thore are basically the station id and the date fields. Furthermore if the data is tidied parameter can be found as well as quality. For station id, parameter and quality those columns are additionally coerced to categories to reduce consumption of the DataFrame. :param df: pandas.DataFrame with the "fresh" data :return: pandas.DataFrame with meta fields being coerced """ df[Columns.STATION_ID.value] = self._parse_station_id( df[Columns.STATION_ID.value] ).astype("category") if self.stations.stations.tidy: for column in (Columns.DATASET.value, Columns.PARAMETER.value): df[column] = self._coerce_strings(df[column]).astype("category") df[Columns.VALUE.value] = pd.to_numeric(df[Columns.VALUE.value]).astype( float ) df[Columns.QUALITY.value] = pd.to_numeric(df[Columns.QUALITY.value]).astype( float ) return df def _parse_station_id(self, series: pd.Series) -> pd.Series: """ Dedicated method for parsing station ids, by default uses the same method as parse_strings but could be modified by the implementation class :param series: :return: """ return self.stations.stations._parse_station_id(series) def _coerce_dates(self, series: pd.Series) -> pd.Series: """ Method to parse dates in the pandas.DataFrame. Leverages the data timezone attribute to ensure correct comparison of dates. :param series: :return: """ return pd.to_datetime(series, infer_datetime_format=True).dt.tz_localize( self.data_tz ) @staticmethod def _coerce_integers(series: pd.Series) -> pd.Series: """ Method to parse integers for type coercion. Uses pandas.Int64Dtype() to allow missing values. :param series: :return: """ return ( pd.to_numeric(series, errors="coerce") .astype(pd.Float64Dtype()) .astype(pd.Int64Dtype()) ) @staticmethod def _coerce_strings(series: pd.Series) -> pd.Series: """ Method to parse strings for type coercion. :param series: :return: """ return series.astype(pd.StringDtype()) @staticmethod def _coerce_floats(series: pd.Series) -> pd.Series: """ Method to parse floats for type coercion. 
:param series: :return: """ return pd.to_numeric(series, errors="coerce") def _coerce_irregular_parameter(self, series: pd.Series) -> pd.Series: """ Method to parse irregular parameters. This will raise an error if an implementation has defined irregular parameters but has not implemented its own method of parsing irregular parameters. :param series: :return: """ if self._irregular_parameters: raise NotImplementedError( "implement _parse_irregular_parameter " "method to parse irregular parameters" ) return pd.Series(series) def _coerce_parameter_types(self, df: pd.DataFrame) -> pd.DataFrame: """ Method for parameter type coercion. Depending on the shape of the data. :param df: :return: """ for column in df.columns: if column in self._meta_fields or column in self._date_fields: continue if column in self._irregular_parameters: df[column] = self._coerce_irregular_parameter(df[column]) elif column in self._integer_parameters or column.startswith( Columns.QUALITY_PREFIX.value ): df[column] = self._coerce_integers(df[column]) elif column in self._string_parameters: df[column] = self._coerce_strings(df[column]) else: df[column] = self._coerce_floats(df[column]) return df def all(self) -> ValuesResult: """ Collect all data from self.query :return: """ data = [] tqdm_out = TqdmToLogger(log, level=logging.INFO) for result in tqdm( self.query(), total=len(self.stations.station_id), file=tqdm_out ): data.append(result.df) if not data: raise ValueError("No data available for given constraints") df =
pd.concat(data, ignore_index=True)
pandas.concat
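A small sketch of the concatenation step that all() performs above: per-station frames collected in a list and stacked with a fresh integer index. The station frames here are dummies, not real query results.

import pandas as pd

# Dummy per-station frames; in the class above these come from self.query().
frames = [
    pd.DataFrame({"station_id": ["00001"] * 2, "value": [1.0, 2.0]}),
    pd.DataFrame({"station_id": ["00002"] * 2, "value": [3.0, 4.0]}),
]

df = pd.concat(frames, ignore_index=True)  # rows stacked, index renumbered 0..n-1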
import numpy as np import pytest import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series import pandas._testing as tm class TestDataFrameSubclassing: def test_frame_subclassing_and_slicing(self): # Subclass frame and ensure it returns the right class on slicing it # In reference to PR 9632 class CustomSeries(Series): @property def _constructor(self): return CustomSeries def custom_series_function(self): return "OK" class CustomDataFrame(DataFrame): """ Subclasses pandas DF, fills DF with simulation results, adds some custom plotting functions. """ def __init__(self, *args, **kw): super().__init__(*args, **kw) @property def _constructor(self): return CustomDataFrame _constructor_sliced = CustomSeries def custom_frame_function(self): return "OK" data = {"col1": range(10), "col2": range(10)} cdf = CustomDataFrame(data) # Did we get back our own DF class? assert isinstance(cdf, CustomDataFrame) # Do we get back our own Series class after selecting a column? cdf_series = cdf.col1 assert isinstance(cdf_series, CustomSeries) assert cdf_series.custom_series_function() == "OK" # Do we get back our own DF class after slicing row-wise? cdf_rows = cdf[1:5] assert isinstance(cdf_rows, CustomDataFrame) assert cdf_rows.custom_frame_function() == "OK" # Make sure sliced part of multi-index frame is custom class mcol = pd.MultiIndex.from_tuples([("A", "A"), ("A", "B")]) cdf_multi = CustomDataFrame([[0, 1], [2, 3]], columns=mcol) assert isinstance(cdf_multi["A"], CustomDataFrame) mcol = pd.MultiIndex.from_tuples([("A", ""), ("B", "")]) cdf_multi2 = CustomDataFrame([[0, 1], [2, 3]], columns=mcol) assert isinstance(cdf_multi2["A"], CustomSeries) def test_dataframe_metadata(self): df = tm.SubclassedDataFrame( {"X": [1, 2, 3], "Y": [1, 2, 3]}, index=["a", "b", "c"] ) df.testattr = "XXX" assert df.testattr == "XXX" assert df[["X"]].testattr == "XXX" assert df.loc[["a", "b"], :].testattr == "XXX" assert df.iloc[[0, 1], :].testattr == "XXX" # see gh-9776 assert df.iloc[0:1, :].testattr == "XXX" # see gh-10553 unpickled = tm.round_trip_pickle(df) tm.assert_frame_equal(df, unpickled) assert df._metadata == unpickled._metadata assert df.testattr == unpickled.testattr def test_indexing_sliced(self): # GH 11559 df = tm.SubclassedDataFrame( {"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["a", "b", "c"] ) res = df.loc[:, "X"] exp = tm.SubclassedSeries([1, 2, 3], index=list("abc"), name="X") tm.assert_series_equal(res, exp) assert isinstance(res, tm.SubclassedSeries) res = df.iloc[:, 1] exp = tm.SubclassedSeries([4, 5, 6], index=list("abc"), name="Y") tm.assert_series_equal(res, exp) assert isinstance(res, tm.SubclassedSeries) res = df.loc[:, "Z"] exp = tm.SubclassedSeries([7, 8, 9], index=list("abc"), name="Z") tm.assert_series_equal(res, exp) assert isinstance(res, tm.SubclassedSeries) res = df.loc["a", :] exp = tm.SubclassedSeries([1, 4, 7], index=list("XYZ"), name="a") tm.assert_series_equal(res, exp) assert isinstance(res, tm.SubclassedSeries) res = df.iloc[1, :] exp = tm.SubclassedSeries([2, 5, 8], index=list("XYZ"), name="b") tm.assert_series_equal(res, exp) assert isinstance(res, tm.SubclassedSeries) res = df.loc["c", :] exp = tm.SubclassedSeries([3, 6, 9], index=list("XYZ"), name="c") tm.assert_series_equal(res, exp) assert isinstance(res, tm.SubclassedSeries) def test_subclass_attr_err_propagation(self): # GH 11808 class A(DataFrame): @property def bar(self): return self.i_dont_exist with pytest.raises(AttributeError, match=".*i_dont_exist.*"): A().bar def test_subclass_align(self): 
# GH 12983 df1 = tm.SubclassedDataFrame( {"a": [1, 3, 5], "b": [1, 3, 5]}, index=list("ACE") ) df2 = tm.SubclassedDataFrame( {"c": [1, 2, 4], "d": [1, 2, 4]}, index=list("ABD") ) res1, res2 = df1.align(df2, axis=0) exp1 = tm.SubclassedDataFrame( {"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]}, index=list("ABCDE"), ) exp2 = tm.SubclassedDataFrame( {"c": [1, 2, np.nan, 4, np.nan], "d": [1, 2, np.nan, 4, np.nan]}, index=list("ABCDE"), ) assert isinstance(res1, tm.SubclassedDataFrame) tm.assert_frame_equal(res1, exp1) assert isinstance(res2, tm.SubclassedDataFrame) tm.assert_frame_equal(res2, exp2) res1, res2 = df1.a.align(df2.c) assert isinstance(res1, tm.SubclassedSeries) tm.assert_series_equal(res1, exp1.a) assert isinstance(res2, tm.SubclassedSeries) tm.assert_series_equal(res2, exp2.c) def test_subclass_align_combinations(self): # GH 12983 df = tm.SubclassedDataFrame({"a": [1, 3, 5], "b": [1, 3, 5]}, index=list("ACE")) s = tm.SubclassedSeries([1, 2, 4], index=list("ABD"), name="x") # frame + series res1, res2 = df.align(s, axis=0) exp1 = tm.SubclassedDataFrame( {"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]}, index=list("ABCDE"), ) # name is lost when exp2 = tm.SubclassedSeries( [1, 2, np.nan, 4, np.nan], index=list("ABCDE"), name="x" ) assert isinstance(res1, tm.SubclassedDataFrame) tm.assert_frame_equal(res1, exp1) assert isinstance(res2, tm.SubclassedSeries) tm.assert_series_equal(res2, exp2) # series + frame res1, res2 = s.align(df) assert isinstance(res1, tm.SubclassedSeries) tm.assert_series_equal(res1, exp2) assert isinstance(res2, tm.SubclassedDataFrame) tm.assert_frame_equal(res2, exp1) def test_subclass_iterrows(self): # GH 13977 df = tm.SubclassedDataFrame({"a": [1]}) for i, row in df.iterrows(): assert isinstance(row, tm.SubclassedSeries) tm.assert_series_equal(row, df.loc[i]) def test_subclass_stack(self): # GH 15564 df = tm.SubclassedDataFrame( [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["a", "b", "c"], columns=["X", "Y", "Z"], ) res = df.stack() exp = tm.SubclassedSeries( [1, 2, 3, 4, 5, 6, 7, 8, 9], index=[list("aaabbbccc"), list("XYZXYZXYZ")] ) tm.assert_series_equal(res, exp) def test_subclass_stack_multi(self): # GH 15564 df = tm.SubclassedDataFrame( [[10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33], [40, 41, 42, 43]], index=MultiIndex.from_tuples( list(zip(list("AABB"), list("cdcd"))), names=["aaa", "ccc"] ), columns=MultiIndex.from_tuples( list(zip(list("WWXX"), list("yzyz"))), names=["www", "yyy"] ), ) exp = tm.SubclassedDataFrame( [ [10, 12], [11, 13], [20, 22], [21, 23], [30, 32], [31, 33], [40, 42], [41, 43], ], index=MultiIndex.from_tuples( list(zip(list("AAAABBBB"), list("ccddccdd"), list("yzyzyzyz"))), names=["aaa", "ccc", "yyy"], ), columns=Index(["W", "X"], name="www"), ) res = df.stack() tm.assert_frame_equal(res, exp) res = df.stack("yyy") tm.assert_frame_equal(res, exp) exp = tm.SubclassedDataFrame( [ [10, 11], [12, 13], [20, 21], [22, 23], [30, 31], [32, 33], [40, 41], [42, 43], ], index=MultiIndex.from_tuples( list(zip(list("AAAABBBB"), list("ccddccdd"), list("WXWXWXWX"))), names=["aaa", "ccc", "www"], ), columns=Index(["y", "z"], name="yyy"), ) res = df.stack("www") tm.assert_frame_equal(res, exp) def test_subclass_stack_multi_mixed(self): # GH 15564 df = tm.SubclassedDataFrame( [ [10, 11, 12.0, 13.0], [20, 21, 22.0, 23.0], [30, 31, 32.0, 33.0], [40, 41, 42.0, 43.0], ], index=MultiIndex.from_tuples( list(zip(list("AABB"), list("cdcd"))), names=["aaa", "ccc"] ), columns=MultiIndex.from_tuples( list(zip(list("WWXX"), 
list("yzyz"))), names=["www", "yyy"] ), ) exp = tm.SubclassedDataFrame( [ [10, 12.0], [11, 13.0], [20, 22.0], [21, 23.0], [30, 32.0], [31, 33.0], [40, 42.0], [41, 43.0], ], index=MultiIndex.from_tuples( list(zip(list("AAAABBBB"), list("ccddccdd"), list("yzyzyzyz"))), names=["aaa", "ccc", "yyy"], ), columns=Index(["W", "X"], name="www"), ) res = df.stack() tm.assert_frame_equal(res, exp) res = df.stack("yyy") tm.assert_frame_equal(res, exp) exp = tm.SubclassedDataFrame( [ [10.0, 11.0], [12.0, 13.0], [20.0, 21.0], [22.0, 23.0], [30.0, 31.0], [32.0, 33.0], [40.0, 41.0], [42.0, 43.0], ], index=MultiIndex.from_tuples( list(zip(list("AAAABBBB"), list("ccddccdd"), list("WXWXWXWX"))), names=["aaa", "ccc", "www"], ), columns=Index(["y", "z"], name="yyy"), ) res = df.stack("www") tm.assert_frame_equal(res, exp) def test_subclass_unstack(self): # GH 15564 df = tm.SubclassedDataFrame( [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["a", "b", "c"], columns=["X", "Y", "Z"], ) res = df.unstack() exp = tm.SubclassedSeries( [1, 4, 7, 2, 5, 8, 3, 6, 9], index=[list("XXXYYYZZZ"), list("abcabcabc")] )
tm.assert_series_equal(res, exp)
pandas._testing.assert_series_equal
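A minimal sketch of the series assertion named above; pandas._testing is the private module the pandas test suite imports as tm, while user code would normally reach the same helpers through pandas.testing.

import pandas as pd
import pandas._testing as tm  # private test helpers; pandas.testing is the public entry point

s1 = pd.Series([1, 2, 3], index=list("abc"), name="X")
s2 = pd.Series([1, 2, 3], index=list("abc"), name="X")

tm.assert_series_equal(s1, s2)                                      # values, index, dtype and name all match
tm.assert_series_equal(s1, s2.astype("int32"), check_dtype=False)   # tolerate a dtype difference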
# General purpose packages import pandas as pd import numpy as np import matplotlib.pyplot as plt from scipy.stats import randint # Image processing packages from skimage import io, color from skimage.transform import resize from skimage.segmentation import slic from skimage.color import label2rgb from skimage.filters import try_all_threshold, sobel from skimage import exposure # Preprocessing modeling packages from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA from sklearn.model_selection import train_test_split # Modeling packages from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import VotingClassifier # Test metrics packages from sklearn.model_selection import cross_val_score, GridSearchCV, RandomizedSearchCV from sklearn.metrics import roc_curve, auc, accuracy_score, mean_squared_error as MSE, classification_report ########################################## df = pd.read_csv('signatures_data.csv', index_col=0) # Open the data frame with first column as index print(df.head()) print(df.shape) # Get the image of the row id name (the image has to be stored in the directory), return it as an array # FUNCTION 1 def get_image(row_id): filename = "{}.jpeg".format(row_id) img = io.imread(filename) img = resize(img, (200,200), anti_aliasing=True) # resize image return np.array(img) # Check the function in the first cat image and the first dog image other_1_row = df[df["label"] == 0].index[0] other_1 = get_image(other_1_row) other_1.shape plt.imshow(other_1) plt.show() personal_10_row = df[df["label"] == 1].index[9] personal_10 = get_image(personal_10_row) personal_10.shape plt.imshow(personal_10) plt.show() #################################################### # Inspect converting to grayscale other_1_grey = color.rgb2gray(other_1) plt.imshow(other_1_grey, cmap=plt.cm.gray) plt.show() personal_10_grey = color.rgb2gray(personal_10) plt.imshow(personal_10_grey, cmap=plt.cm.gray) plt.show() #################################################### # Apply edge detection other_1_sobel = sobel(other_1_grey) plt.imshow(other_1_sobel, cmap=plt.cm.gray) plt.show() personal_10_sobel = sobel(personal_10_grey) plt.imshow(personal_10_sobel, cmap=plt.cm.gray) plt.show() #######################################################################3 # Create a function that grab all the features of the RGB resized image and the superpixeled image, # then it flatten both together into the original row of the data frame. In that way every feature is converted # to a column of the data frame and it can be used in a machine learning model. # FUNCTION 2 def create_features(img): # 0. flatten all features of the RGB image # color_features = img.flatten() # 1. convert image to grayscale grey_image = color.rgb2gray(img) # 2. get the grey features grey_features = grey_image.flatten() # 3. get the sobel features from the grayscale image sobel_features = sobel(grey_image).flatten() # 4. 
combine the RGB and the HOG features into a single array flat_features = np.hstack((grey_features, sobel_features)) return flat_features # Check the function in the first image other_1_features = create_features(other_1) other_1_features.shape ############################################################### # Now we use functions 1 and 2 in one single new function to generate a matrix with one row for every image # and one column for every feature of the images. This can be used for amchine learning # FUNCTION 3 def create_feature_matrix(label_dataframe): feature_list = [] for img_id in label_dataframe.index: # 1. Apply function 1 (convert image to array) img = get_image(img_id) # 2. Apply function 2 (generate features and stack them) img_features = create_features(img) # 3. Append img features to the empty list feature_list.append(img_features) # Convert the list of arrays into an array matrix feature_matrix = np.array(feature_list) return feature_matrix # Apply the function to all the images id in the data frame # (remember images must be also in the directory) features_matrix = create_feature_matrix(df) # Inspect the matrix and their rows shape type(features_matrix) features_matrix.shape # 80 thousand columns each one row!! that´s big data for sure! features_matrix[0].shape features_matrix[19].shape features_matrix[28].shape features_matrix[31].shape ######################### # Resizing the matrix with Standard Scaler and Principal Component Analisis (PCA): reducing feature numbers ss = StandardScaler() # Standardize features by removing the mean and scaling to unit variance stand_matrix = ss.fit_transform(features_matrix) pca = PCA(n_components = 160) # reduce to 40 features pca_matrix = pca.fit_transform(stand_matrix) pca_matrix.shape ################################################################################################################ # Split data into train and test sets X_train, X_test, y_train, y_test = train_test_split(pca_matrix, df.label.values, test_size = .3, random_state = 123) # Check the split
pd.Series(y_train)
pandas.Series
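The completion above wraps the raw y_train label array in a Series, presumably so the class balance of the split can be inspected; a sketch of that idea with a dummy label array:

import numpy as np
import pandas as pd

y_train = np.array([0, 1, 1, 0, 1])   # stand-in for the labels produced by train_test_split
labels = pd.Series(y_train)           # NumPy array -> Series with a default RangeIndex
print(labels.value_counts())          # quick look at how many samples fall in each class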
from datetime import timedelta

import pytest

from pandas import PeriodIndex, Series, Timedelta, date_range, period_range, to_datetime
import pandas._testing as tm


class TestToTimestamp:
    def test_to_timestamp(self):
        index = period_range(freq="A", start="1/1/2001", end="12/1/2009")
        series = Series(1, index=index, name="foo")

        exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC")
        result = series.to_timestamp(how="end")
        exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns")
        tm.assert_index_equal(result.index, exp_index)
        assert result.name == "foo"

        exp_index = date_range("1/1/2001", end="1/1/2009", freq="AS-JAN")
        result = series.to_timestamp(how="start")
        tm.assert_index_equal(result.index, exp_index)

        def _get_with_delta(delta, freq="A-DEC"):
            return date_range(
                to_datetime("1/1/2001") + delta,
                to_datetime("12/31/2009") + delta,
                freq=freq,
            )

        delta = timedelta(hours=23)
        result = series.to_timestamp("H", "end")
        exp_index = _get_with_delta(delta)
        exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns")
        tm.assert_index_equal(result.index, exp_index)

        delta = timedelta(hours=23, minutes=59)
        result = series.to_timestamp("T", "end")
        exp_index = _get_with_delta(delta)
        exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns")
tm.assert_index_equal(result.index, exp_index)
pandas._testing.assert_index_equal
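A standalone sketch of the index assertion used throughout the test above, built on two equivalent date_range indexes rather than the to_timestamp results themselves:

import pandas as pd
import pandas._testing as tm

idx1 = pd.date_range("2001-01-01", periods=3, freq="D")
idx2 = pd.date_range("2001-01-01", "2001-01-03", freq="D")

tm.assert_index_equal(idx1, idx2)  # same values, dtype and freq -> passes silently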
import os import tempfile from StringIO import StringIO import numpy as np import numpy.testing as npt import pandas as pd import pytest import statsmodels.formula.api as smf import yaml from pandas.util import testing as pdt from statsmodels.regression.linear_model import RegressionResultsWrapper from .. import regression from ...exceptions import ModelEvaluationError from ...utils import testing @pytest.fixture def test_df(): return pd.DataFrame( {'col1': range(5), 'col2': range(5, 10)}, index=['a', 'b', 'c', 'd', 'e']) @pytest.fixture def groupby_df(test_df): test_df['group'] = ['x', 'y', 'x', 'x', 'y'] return test_df def test_fit_model(test_df): filters = [] model_exp = 'col1 ~ col2' fit = regression.fit_model(test_df, filters, model_exp) assert isinstance(fit, RegressionResultsWrapper) def test_predict(test_df): filters = ['col1 in [0, 2, 4]'] model_exp = 'col1 ~ col2' fit = regression.fit_model(test_df, filters, model_exp) predicted = regression.predict( test_df.query('col1 in [1, 3]'), None, fit) expected = pd.Series([1., 3.], index=['b', 'd']) pdt.assert_series_equal(predicted, expected) def test_predict_ytransform(test_df): def yt(x): return x / 2. filters = ['col1 in [0, 2, 4]'] model_exp = 'col1 ~ col2' fit = regression.fit_model(test_df, filters, model_exp) predicted = regression.predict( test_df.query('col1 in [1, 3]'), None, fit, ytransform=yt) expected = pd.Series([0.5, 1.5], index=['b', 'd']) pdt.assert_series_equal(predicted, expected) def test_predict_with_nans(): df = pd.DataFrame( {'col1': range(5), 'col2': [5, 6, pd.np.nan, 8, 9]}, index=['a', 'b', 'c', 'd', 'e']) with pytest.raises(ModelEvaluationError): regression.fit_model(df, None, 'col1 ~ col2') fit = regression.fit_model(df.loc[['a', 'b', 'e']], None, 'col1 ~ col2') with pytest.raises(ModelEvaluationError): regression.predict( df.loc[['c', 'd']], None, fit) def test_rhs(): assert regression._rhs('col1 + col2') == 'col1 + col2' assert regression._rhs('col3 ~ col1 + col2') == 'col1 + col2' def test_FakeRegressionResults(test_df): model_exp = 'col1 ~ col2' model = smf.ols(formula=model_exp, data=test_df) fit = model.fit() fit_parameters = regression._model_fit_to_table(fit) wrapper = regression._FakeRegressionResults( model_exp, fit_parameters, fit.rsquared, fit.rsquared_adj) test_predict = pd.DataFrame({'col2': [0.5, 10, 25.6]}) npt.assert_array_equal( wrapper.predict(test_predict), fit.predict(test_predict))
pdt.assert_series_equal(wrapper.params, fit.params, check_names=False)
pandas.util.testing.assert_series_equal
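The completion above passes check_names=False, which makes the comparison ignore the name attribute of the two Series; a small illustration with made-up data, using the same pdt alias style as the test module:

import pandas as pd
import pandas.util.testing as pdt  # legacy alias matching the import in the test above

params = pd.Series([0.5, 1.2], index=["Intercept", "col2"], name="fitted")
expected = pd.Series([0.5, 1.2], index=["Intercept", "col2"])  # no name set

pdt.assert_series_equal(params, expected, check_names=False)  # names differ, values still compared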
from collections import Counter import pandas as pd import pytest from simplekv import KeyValueStore from kartothek.api.discover import ( discover_cube, discover_datasets, discover_datasets_unchecked, discover_ktk_cube_dataset_ids, ) from kartothek.core.cube.constants import ( KTK_CUBE_DF_SERIALIZER, KTK_CUBE_METADATA_DIMENSION_COLUMNS, KTK_CUBE_METADATA_KEY_IS_SEED, KTK_CUBE_METADATA_PARTITION_COLUMNS, KTK_CUBE_METADATA_STORAGE_FORMAT, KTK_CUBE_METADATA_SUPPRESS_INDEX_ON, KTK_CUBE_METADATA_VERSION, ) from kartothek.core.cube.cube import Cube from kartothek.core.uuid import gen_uuid from kartothek.io.eager import ( store_dataframes_as_dataset, update_dataset_from_dataframes, ) from kartothek.io_components.metapartition import MetaPartition @pytest.fixture def cube(): return Cube( dimension_columns=["x", "y"], partition_columns=["p", "q"], uuid_prefix="cube", index_columns=["i1"], seed_dataset="myseed", ) def store_data( cube, function_store, df, name, partition_on="default", metadata_version=KTK_CUBE_METADATA_VERSION, metadata_storage_format=KTK_CUBE_METADATA_STORAGE_FORMAT, metadata=None, overwrite=False, new_ktk_cube_metadata=True, write_suppress_index_on=True, ): if partition_on == "default": partition_on = cube.partition_columns if isinstance(df, pd.DataFrame): mp = MetaPartition(label=gen_uuid(), data=df, metadata_version=metadata_version) indices_to_build = set(cube.index_columns) & set(df.columns) if name == cube.seed_dataset: indices_to_build |= set(cube.dimension_columns) - set( cube.suppress_index_on ) mp = mp.build_indices(indices_to_build) dfs = mp else: assert isinstance(df, MetaPartition) assert df.metadata_version == metadata_version dfs = df if metadata is None: metadata = { KTK_CUBE_METADATA_DIMENSION_COLUMNS: cube.dimension_columns, KTK_CUBE_METADATA_KEY_IS_SEED: (name == cube.seed_dataset), } if new_ktk_cube_metadata: metadata.update( {KTK_CUBE_METADATA_PARTITION_COLUMNS: cube.partition_columns} ) if write_suppress_index_on: metadata.update( {KTK_CUBE_METADATA_SUPPRESS_INDEX_ON: list(cube.suppress_index_on)} ) return store_dataframes_as_dataset( store=function_store, dataset_uuid=cube.ktk_dataset_uuid(name), dfs=dfs, partition_on=list(partition_on) if partition_on else None, metadata_storage_format=metadata_storage_format, metadata_version=metadata_version, df_serializer=KTK_CUBE_DF_SERIALIZER, metadata=metadata, overwrite=overwrite, ) def assert_datasets_equal(left, right): assert set(left.keys()) == set(right.keys()) for k in left.keys(): ds_l = left[k] ds_r = right[k] assert ds_l.uuid == ds_r.uuid def assert_dataset_issubset(superset, subset): assert set(subset.keys()).issubset(set(superset.keys())) for k in subset.keys(): assert subset[k].uuid == superset[k].uuid def test_discover_ktk_cube_dataset_ids(function_store): cube = Cube( dimension_columns=["dim"], partition_columns=["part"], uuid_prefix="cube", seed_dataset="seed", ) ktk_cube_dataset_ids = ["A", "B", "C"] for ktk_cube_id in ktk_cube_dataset_ids: store_data( cube=cube, function_store=function_store, df=pd.DataFrame({"dim": [0], "part": [0]}), name=ktk_cube_id, ) collected_ktk_cube_dataset_ids = discover_ktk_cube_dataset_ids( cube.uuid_prefix, function_store() ) assert collected_ktk_cube_dataset_ids == set(ktk_cube_dataset_ids) class TestDiscoverDatasetsUnchecked: def test_simple(self, cube, function_store): expected = { cube.seed_dataset: store_data( cube=cube, function_store=function_store, df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}), name=cube.seed_dataset, ), "enrich": store_data( cube=cube, 
function_store=function_store, df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}), name="enrich", ), } actual = discover_datasets_unchecked(cube.uuid_prefix, function_store) assert_datasets_equal(actual, expected) def test_no_seed(self, cube, function_store): expected = { "enrich": store_data( cube=cube, function_store=function_store, df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}), name="enrich", ) } actual = discover_datasets_unchecked(cube.uuid_prefix, function_store) assert_datasets_equal(actual, expected) def test_other_files(self, cube, function_store): expected = { cube.seed_dataset: store_data( cube=cube, function_store=function_store, df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}), name=cube.seed_dataset, ) } function_store().put(cube.ktk_dataset_uuid("enrich") + "/foo", b"") actual = discover_datasets_unchecked(cube.uuid_prefix, function_store) assert_datasets_equal(actual, expected) def test_no_common_metadata(self, cube, function_store): expected = { cube.seed_dataset: store_data( cube=cube, function_store=function_store, df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}), name=cube.seed_dataset, ) } store_data( cube=cube, function_store=function_store, df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}), name="enrich", ) keys = set(function_store().keys()) metadata_key = cube.ktk_dataset_uuid("enrich") + ".by-dataset-metadata.json" assert metadata_key in keys for k in keys: if (k != metadata_key) and k.startswith(cube.ktk_dataset_uuid("enrich")): function_store().delete(k) actual = discover_datasets_unchecked(cube.uuid_prefix, function_store) assert_datasets_equal(actual, expected) def test_filter_partial_datasets_found(self, cube, function_store): enrich_dataset = store_data( cube=cube, function_store=function_store, df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}), name="enrich", ) store_data( cube=cube, function_store=function_store, df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}), name="mytable", ) expected = {"enrich": enrich_dataset} actual = discover_datasets_unchecked( cube.uuid_prefix, function_store, filter_ktk_cube_dataset_ids=["enrich"] ) assert_dataset_issubset(actual, expected) def test_filter_no_datasets_found(self, cube, function_store): actual = discover_datasets_unchecked( cube.uuid_prefix, function_store, filter_ktk_cube_dataset_ids=["enrich"] ) assert actual == {} def test_msgpack_clean(self, cube, function_store): expected = { cube.seed_dataset: store_data( cube=cube, function_store=function_store, df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}), name=cube.seed_dataset, ), "enrich": store_data( cube=cube, function_store=function_store, df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}), name="enrich", metadata_storage_format="msgpack", ), } actual = discover_datasets_unchecked(cube.uuid_prefix, function_store) assert_datasets_equal(actual, expected) def test_msgpack_priority(self, cube, function_store): """ json metadata files have priority in kartothek, so the disovery should respect this """ store_data( cube=cube, function_store=function_store, df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": [0]}), name=cube.seed_dataset, metadata_storage_format="msgpack", ) expected = { cube.seed_dataset: store_data( cube=cube, function_store=function_store, df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v2": [0]}), name=cube.seed_dataset, overwrite=True, ) } store_data( cube=cube, function_store=function_store, df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], 
"v3": [0]}), name=cube.seed_dataset, metadata_storage_format="msgpack", overwrite=True, ) actual = discover_datasets_unchecked(cube.uuid_prefix, function_store) assert_datasets_equal(actual, expected) def test_msgpack_efficiency(self, cube, function_store): """ We should only iterate over the store once, even though we are looking for 2 suffixes. Furthermore, we must only load every dataset once. """ store_data( cube=cube, function_store=function_store, df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}), name=cube.seed_dataset, metadata_storage_format="msgpack", ) store_data( cube=cube, function_store=function_store, df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}), name=cube.seed_dataset, overwrite=True, ) class StoreMock(KeyValueStore): def __init__(self, store): self._store = store self._iter_keys_called = 0 self._iter_prefixes_called = 0 self._get_called = Counter() def iter_keys(self, prefix=""): self._iter_keys_called += 1 return self._store.iter_keys(prefix) def iter_prefixes(self, delimiter, prefix=""): self._iter_prefixes_called += 1 return self._store.iter_prefixes(delimiter, prefix) def get(self, key): self._get_called[key] += 1 return self._store.get(key) store = StoreMock(function_store()) discover_datasets_unchecked(cube.uuid_prefix, store) assert store._iter_keys_called == 0 assert store._iter_prefixes_called == 1 assert max(store._get_called.values()) == 1 class TestDiscoverDatasets: def test_seed_only(self, cube, function_store): expected = { cube.seed_dataset: store_data( cube=cube, function_store=function_store, df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}), name=cube.seed_dataset, ) } actual = discover_datasets(cube, function_store) assert_datasets_equal(actual, expected) def test_2_datasets(self, cube, function_store): expected = { cube.seed_dataset: store_data( cube=cube, function_store=function_store, df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}), name=cube.seed_dataset, ), "enrich": store_data( cube=cube, function_store=function_store, df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": 100}), name="enrich", ), } actual = discover_datasets(cube, function_store) assert_datasets_equal(actual, expected) def test_partitions_superset(self, cube, function_store): expected = { cube.seed_dataset: store_data( cube=cube, function_store=function_store, df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}), name=cube.seed_dataset, ), "enrich": store_data( cube=cube, function_store=function_store, df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0], "v1": 100}), name="enrich", partition_on=["p", "q", "v1"], ), } actual = discover_datasets(cube, function_store) assert_datasets_equal(actual, expected) def test_raises_no_seed(self, cube, function_store): store_data( cube=cube, function_store=function_store, df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}), name="enrich", ) with pytest.raises(ValueError) as exc: discover_datasets(cube, function_store) assert str(exc.value) == 'Seed data ("myseed") is missing.' 
def test_raises_wrong_partition_on_seed_other(self, cube, function_store): store_data( cube=cube, function_store=function_store, df=pd.DataFrame({"x": [0], "y": [0], "p": [0]}), name=cube.seed_dataset, partition_on=["p"], ) with pytest.raises(ValueError) as exc: discover_datasets(cube, function_store) assert ( str(exc.value) == 'Seed dataset "myseed" has missing partition columns: q' ) def test_partition_on_nonseed_no_part(self, cube, function_store): expected = { cube.seed_dataset: store_data( cube=cube, function_store=function_store, df=pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]}), name=cube.seed_dataset, ), "enrich": store_data( cube=cube, function_store=function_store, df=pd.DataFrame({"x": [0], "y": [0], "v1": [0]}), name="enrich", partition_on=[], ), } actual = discover_datasets(cube, function_store) assert_datasets_equal(actual, expected) def test_raises_wrong_metadata_version(self, cube, function_store): with pytest.raises( NotImplementedError, match="Minimal supported metadata version is" ): store_data( cube=cube, function_store=function_store, df=
pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]})
pandas.DataFrame
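The fixtures above build their seed data as one-row frames from a dict of lists; a tiny sketch of that constructor pattern outside the test harness:

import pandas as pd

# Dict of equal-length lists -> columns; a single row here, as in the cube fixtures above.
df = pd.DataFrame({"x": [0], "y": [0], "p": [0], "q": [0]})
print(df.dtypes)  # every column is inferred as int64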
import numpy as np
import pandas as pd

from data_loaders.data_loader import DataLoader


class FacebookLoader(DataLoader):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def get_dataset_name(self):
        return "facebook"

    def get_sens_attr_name(self):
        return "gender"

    def _load(self):
        positive_edges = super()._load_file("facebook_combined.txt", delimiter=" ", dtype=np.int)
        attributes = {}
        ego_ids = [0, 107, 348, 414, 686, 698, 1684, 1912, 3437, 3980]
        for ego_id in ego_ids:
            feat_names = super()._load_file(f"{ego_id}.featnames", delimiter=" ")
            gender_rows = feat_names[:, 1] == "gender;anonymized"
            gender_feat_idx = feat_names[gender_rows][:, 0].astype(np.int)
            ego_feats = super()._load_file(f"{ego_id}.egofeat", delimiter=" ", dtype=np.int)
            ego_gender_feats = ego_feats[gender_feat_idx]
            if np.sum(ego_gender_feats) == 1:
                attributes[ego_id] = ego_gender_feats[0]
            other_feats = super()._load_file(f"{ego_id}.feat", delimiter=" ", dtype=np.int)
            exactly_one_gender_given = other_feats[:, 1 + gender_feat_idx].sum(axis=1) == 1
            other_feats = other_feats[exactly_one_gender_given]
            other_attrs = dict(zip(other_feats[:, 0], other_feats[:, 1 + gender_feat_idx[0]]))
            attributes.update(other_attrs)
        attributes =
pd.Series(attributes)
pandas.Series
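The loader above accumulates node attributes in a plain dict and then converts it; passing a dict to pd.Series makes the keys the index and the values the data, e.g.:

import pandas as pd

# Illustrative mapping of node id -> gender flag, standing in for `attributes` above.
attributes = {0: 1, 107: 0, 348: 1}
s = pd.Series(attributes)  # dict keys become the index, dict values the series data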
from sklearn.cluster import DBSCAN
from sklearn import datasets
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import math

pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pandas.set_option
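# A minimal sketch of scaling followed by DBSCAN, assuming illustrative eps/min_samples values
# rather than ones tuned for the original data.
import pandas as pd
from sklearn.cluster import DBSCAN
from sklearn.datasets import make_blobs
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

X, _ = make_blobs(n_samples=300, centers=3, random_state=0)

# scale first so that eps is expressed in standardized units
pipe = make_pipeline(StandardScaler(), DBSCAN(eps=0.5, min_samples=5))
labels = pipe.fit_predict(X)

print(pd.Series(labels).value_counts())   # label -1 marks noise points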
from pycox.datasets import metabric, nwtco, support, gbsg, flchain from sklearn.preprocessing import KBinsDiscretizer, LabelEncoder, StandardScaler import numpy as np import pandas as pd import pdb from .utils import LabelTransform def load_data(config): '''load data, return updated configuration. ''' data = config['data'] horizons = config['horizons'] assert data in ["metabric", "nwtco", "support", "gbsg", "flchain", "seer",], "Data Not Found!" get_target = lambda df: (df['duration'].values, df['event'].values) if data == "metabric": # data processing, transform all continuous data to discrete df = metabric.read_df() # evaluate the performance at the 25th, 50th and 75th event time quantile times = np.quantile(df["duration"][df["event"]==1.0], horizons).tolist() cols_categorical = ["x4", "x5", "x6", "x7"] cols_standardize = ['x0', 'x1', 'x2', 'x3', 'x8'] df_feat = df.drop(["duration","event"],axis=1) df_feat_standardize = df_feat[cols_standardize] df_feat_standardize_disc = StandardScaler().fit_transform(df_feat_standardize) df_feat_standardize_disc = pd.DataFrame(df_feat_standardize_disc, columns=cols_standardize) # must be categorical feature ahead of numerical features! df_feat = pd.concat([df_feat[cols_categorical], df_feat_standardize_disc], axis=1) vocab_size = 0 for _,feat in enumerate(cols_categorical): df_feat[feat] = LabelEncoder().fit_transform(df_feat[feat]).astype(float) + vocab_size vocab_size = df_feat[feat].max() + 1 # get the largest duraiton time max_duration_idx = df["duration"].argmax() df_test = df_feat.drop(max_duration_idx).sample(frac=0.3) df_train = df_feat.drop(df_test.index) df_val = df_train.drop(max_duration_idx).sample(frac=0.1) df_train = df_train.drop(df_val.index) # assign cuts labtrans = LabelTransform(cuts=np.array([df["duration"].min()]+times+[df["duration"].max()])) labtrans.fit(*get_target(df.loc[df_train.index])) y = labtrans.transform(*get_target(df)) # y = (discrete duration, event indicator) df_y_train = pd.DataFrame({"duration": y[0][df_train.index], "event": y[1][df_train.index], "proportion": y[2][df_train.index]}, index=df_train.index) df_y_val =
pd.DataFrame({"duration": y[0][df_val.index], "event": y[1][df_val.index], "proportion": y[2][df_val.index]}, index=df_val.index)
pandas.DataFrame
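# A minimal sketch of the running vocabulary-offset encoding used for the categorical columns
# above, on a toy frame; the column names here are placeholders.
import pandas as pd
from sklearn.preprocessing import LabelEncoder

df_feat = pd.DataFrame({"x4": ["a", "b", "a"], "x5": ["u", "v", "w"]})

vocab_size = 0
for feat in ["x4", "x5"]:
    df_feat[feat] = LabelEncoder().fit_transform(df_feat[feat]).astype(float) + vocab_size
    vocab_size = df_feat[feat].max() + 1   # shift the next column past this one's codes

print(df_feat)       # x4 uses codes 0-1, x5 uses codes 2-4
print(vocab_size)    # total embedding vocabulary needed downstream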
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2020/3/21 0021 # @Author : justin.郑 <EMAIL> # @File : covid.py # @Desc : 获取疫情数据 import json import time import demjson import jsonpath import requests import pandas as pd from io import BytesIO from PIL import Image from bs4 import BeautifulSoup def covid_163(indicator="实时"): """ 网易-新冠状病毒 https://news.163.com/special/epidemic/?spssid=93326430940df93a37229666dfbc4b96&spsw=4&spss=other&#map_block https://news.163.com/special/epidemic/?spssid=93326430940df93a37229666dfbc4b96&spsw=4&spss=other& :return: 返回指定 indicator 的数据 :rtype: pandas.DataFrame """ url = "https://c.m.163.com/ug/api/wuhan/app/data/list-total" headers = { "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36", } payload = { "t": int(time.time() * 1000), } r = requests.get(url, params=payload, headers=headers) data_json = r.json() # data info url = "https://news.163.com/special/epidemic/" r = requests.get(url, headers=headers) soup = BeautifulSoup(r.text, "lxml") data_info_df = pd.DataFrame( [ item.text.strip().split(".")[1] for item in soup.find("div", attrs={"class": "data_tip_pop_text"}).find_all( "p" ) ] ) data_info_df.columns = ["info"] # 中国历史时点数据 hist_today_df = pd.DataFrame( [item["today"] for item in data_json["data"]["chinaDayList"]], index=[item["date"] for item in data_json["data"]["chinaDayList"]], ) # 中国历史累计数据 hist_total_df = pd.DataFrame( [item["total"] for item in data_json["data"]["chinaDayList"]], index=[item["date"] for item in data_json["data"]["chinaDayList"]], ) # 中国实时数据 current_df = pd.DataFrame.from_dict(data_json["data"]["chinaTotal"]) # 世界历史时点数据 outside_today_df = pd.DataFrame( [item["today"] for item in data_json["data"]["areaTree"]], index=[item["name"] for item in data_json["data"]["areaTree"]], ) # 世界历史累计数据 outside_total_df = pd.DataFrame( [item["total"] for item in data_json["data"]["areaTree"]], index=[item["name"] for item in data_json["data"]["areaTree"]], ) # 全球所有国家及地区时点数据 all_world_today_df = pd.DataFrame( jsonpath.jsonpath(data_json["data"]["areaTree"], "$..today"), index=jsonpath.jsonpath(data_json["data"]["areaTree"], "$..name"), ) # 全球所有国家及地区累计数据 all_world_total_df = pd.DataFrame( jsonpath.jsonpath(data_json["data"]["areaTree"], "$..total"), index=jsonpath.jsonpath(data_json["data"]["areaTree"], "$..name"), ) # 中国各地区时点数据 area_total_df = pd.DataFrame( [item["total"] for item in data_json["data"]["areaTree"][0]["children"]], index=[item["name"] for item in data_json["data"]["areaTree"][0]["children"]], ) # 中国各地区累计数据 area_today_df = pd.DataFrame( [item["today"] for item in data_json["data"]["areaTree"][0]["children"]], index=[item["name"] for item in data_json["data"]["areaTree"][0]["children"]], ) # 疫情学术进展 url_article = "https://vip.open.163.com/api/cms/topic/list" payload_article = { "topicid": "00019NGQ", "listnum": "1000", "liststart": "0", "pointstart": "0", "pointend": "255", "useproperty": "true", } r_article = requests.get(url_article, params=payload_article) article_df = pd.DataFrame(r_article.json()["data"]).iloc[:, 1:] # 咨询 url_info = "https://ent.163.com/special/00035080/virus_report_data.js" payload_info = { "_": int(time.time() * 1000), "callback": "callback", } r_info = requests.get(url_info, params=payload_info, headers=headers) data_info_text = r_info.text data_info_json = demjson.decode(data_info_text.strip(" callback(")[:-1]) if indicator == "数据说明": print(f"数据更新时间: {data_json['data']['lastUpdateTime']}") return data_info_df if indicator == 
"中国实时数据": print(f"数据更新时间: {data_json['data']['lastUpdateTime']}") return current_df if indicator == "中国历史时点数据": print(f"数据更新时间: {data_json['data']['lastUpdateTime']}") return hist_today_df if indicator == "中国历史累计数据": print(f"数据更新时间: {data_json['data']['lastUpdateTime']}") return hist_total_df if indicator == "世界历史时点数据": print(f"数据更新时间: {data_json['data']['lastUpdateTime']}") return outside_today_df if indicator == "世界历史累计数据": print(f"数据更新时间: {data_json['data']['lastUpdateTime']}") return outside_total_df if indicator == "全球所有国家及地区时点数据": print(f"数据更新时间: {data_json['data']['lastUpdateTime']}") return all_world_today_df elif indicator == "全球所有国家及地区累计数据": print(f"数据更新时间: {data_json['data']['lastUpdateTime']}") return all_world_total_df elif indicator == "中国各地区时点数据": print(f"数据更新时间: {data_json['data']['lastUpdateTime']}") return area_today_df elif indicator == "中国各地区累计数据": print(f"数据更新时间: {data_json['data']['lastUpdateTime']}") return area_total_df elif indicator == "疫情学术进展": return article_df elif indicator == "实时资讯新闻播报": return pd.DataFrame(data_info_json["list"]) elif indicator == "实时医院新闻播报": return pd.DataFrame(data_info_json["hospital"]) elif indicator == "前沿知识": return pd.DataFrame(data_info_json["papers"]) elif indicator == "权威发布": return pd.DataFrame(data_info_json["power"]) elif indicator == "滚动新闻": return pd.DataFrame(data_info_json["scrollNews"]) def covid_dxy(indicator="湖北"): """ 20200315-丁香园接口更新分为国内和国外 丁香园-全国统计-info 丁香园-分地区统计-data 丁香园-全国发热门诊一览表-hospital 丁香园-全国新闻-news :param indicator: ["info", "data", "hospital", "news"] :type indicator: str :return: 返回指定 indicator 的数据 :rtype: pandas.DataFrame """ url = "https://3g.dxy.cn/newh5/view/pneumonia" r = requests.get(url) r.encoding = "utf-8" soup = BeautifulSoup(r.text, "lxml") # news-china text_data_news = str( soup.find_all("script", attrs={"id": "getTimelineServiceundefined"}) ) temp_json = text_data_news[ text_data_news.find("= [{") + 2 : text_data_news.rfind("}catch") ] if temp_json: json_data = pd.DataFrame(json.loads(temp_json)) chinese_news = json_data[ ["title", "summary", "infoSource", "provinceName", "sourceUrl"] ] # news-foreign text_data_news = str(soup.find_all("script", attrs={"id": "getTimelineService2"})) temp_json = text_data_news[ text_data_news.find("= [{") + 2 : text_data_news.rfind("}catch") ] json_data = pd.DataFrame(json.loads(temp_json)) foreign_news = json_data # data-domestic data_text = str(soup.find("script", attrs={"id": "getAreaStat"})) data_text_json = json.loads( data_text[data_text.find("= [{") + 2 : data_text.rfind("catch") - 1] ) big_df = pd.DataFrame() for i, p in enumerate(jsonpath.jsonpath(data_text_json, "$..provinceName")): temp_df = pd.DataFrame(jsonpath.jsonpath(data_text_json, "$..cities")[i]) temp_df["province"] = p big_df = big_df.append(temp_df, ignore_index=True) domestic_city_df = big_df data_df = pd.DataFrame(data_text_json).iloc[:, :7] data_df.columns = ["地区", "地区简称", "现存确诊", "累计确诊", "-", "治愈", "死亡"] domestic_province_df = data_df[["地区", "地区简称", "现存确诊", "累计确诊", "治愈", "死亡"]] # data-global data_text = str( soup.find("script", attrs={"id": "getListByCountryTypeService2true"}) ) data_text_json = json.loads( data_text[data_text.find("= [{") + 2: data_text.rfind("catch") - 1] ) global_df = pd.DataFrame(data_text_json) # info dxy_static = soup.find(attrs={"id": "getStatisticsService"}).get_text() data_json = json.loads( dxy_static[dxy_static.find("= {") + 2 : dxy_static.rfind("}c")] ) china_statistics = pd.DataFrame( [ time.strftime( "%Y-%m-%d %H:%M:%S", time.localtime(data_json["modifyTime"] / 
1000) ), data_json["currentConfirmedCount"], data_json["confirmedCount"], data_json["suspectedCount"], data_json["curedCount"], data_json["deadCount"], data_json["seriousCount"], data_json["suspectedIncr"], data_json["currentConfirmedIncr"], data_json["confirmedIncr"], data_json["curedIncr"], data_json["deadIncr"], data_json["seriousIncr"], ], index=[ "数据发布时间", "现存确诊", "累计确诊", "境外输入", "累计治愈", "累计死亡", "现存重症", "境外输入较昨日", "现存确诊较昨日", "累计确诊较昨日", "累计治愈较昨日", "累计死亡较昨日", "现存重症较昨日", ], columns=["info"], ) foreign_statistics = pd.DataFrame.from_dict( data_json["foreignStatistics"], orient="index" ) global_statistics = pd.DataFrame.from_dict( data_json["globalStatistics"], orient="index" ) # hospital url = ( "https://assets.dxycdn.com/gitrepo/tod-assets/output/default/pneumonia/index.js" ) payload = {"t": str(int(time.time()))} r = requests.get(url, params=payload) hospital_df = pd.read_html(r.text)[0].iloc[:, :-1] if indicator == "中国疫情分省统计详情": return domestic_province_df if indicator == "中国疫情分市统计详情": return domestic_city_df elif indicator == "全球疫情分国家统计详情": return global_df elif indicator == "中国疫情实时统计": return china_statistics elif indicator == "国外疫情实时统计": return foreign_statistics elif indicator == "全球疫情实时统计": return global_statistics elif indicator == "中国疫情防控医院": return hospital_df elif indicator == "实时播报": return chinese_news elif indicator == "中国-新增疑似-新增确诊-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["quanguoTrendChart"][0]["imgUrl"]).content) ) img_file.show() elif indicator == "中国-现存确诊-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["quanguoTrendChart"][1]["imgUrl"]).content) ) img_file.show() elif indicator == "中国-现存疑似-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["quanguoTrendChart"][2]["imgUrl"]).content) ) img_file.show() elif indicator == "中国-治愈-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["quanguoTrendChart"][3]["imgUrl"]).content) ) img_file.show() elif indicator == "中国-死亡-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["quanguoTrendChart"][4]["imgUrl"]).content) ) img_file.show() elif indicator == "中国-非湖北新增确诊-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["hbFeiHbTrendChart"][0]["imgUrl"]).content) ) img_file.show() elif indicator == "中国-湖北新增确诊-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["hbFeiHbTrendChart"][1]["imgUrl"]).content) ) img_file.show() elif indicator == "中国-湖北现存确诊-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["hbFeiHbTrendChart"][2]["imgUrl"]).content) ) img_file.show() elif indicator == "中国-非湖北现存确诊-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["hbFeiHbTrendChart"][3]["imgUrl"]).content) ) img_file.show() elif indicator == "中国-治愈-死亡-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["hbFeiHbTrendChart"][4]["imgUrl"]).content) ) img_file.show() elif indicator == "国外-国外新增确诊-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["foreignTrendChart"][0]["imgUrl"]).content) ) img_file.show() elif indicator == "国外-国外累计确诊-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["foreignTrendChart"][1]["imgUrl"]).content) ) img_file.show() elif indicator == "国外-国外死亡-趋势图": img_file = Image.open( BytesIO(requests.get(data_json["foreignTrendChart"][2]["imgUrl"]).content) ) img_file.show() elif indicator == "国外-重点国家新增确诊-趋势图": img_file = Image.open( BytesIO( requests.get( data_json["importantForeignTrendChart"][0]["imgUrl"] ).content ) ) img_file.show() elif indicator == "国外-日本新增确诊-趋势图": img_file = Image.open( BytesIO( requests.get( 
data_json["importantForeignTrendChart"][1]["imgUrl"] ).content ) ) img_file.show() elif indicator == "国外-意大利新增确诊-趋势图": img_file = Image.open( BytesIO( requests.get( data_json["importantForeignTrendChart"][2]["imgUrl"] ).content ) ) img_file.show() elif indicator == "国外-伊朗新增确诊-趋势图": img_file = Image.open( BytesIO( requests.get( data_json["importantForeignTrendChart"][3]["imgUrl"] ).content ) ) img_file.show() elif indicator == "国外-美国新增确诊-趋势图": img_file = Image.open( BytesIO( requests.get( data_json["importantForeignTrendChart"][4]["imgUrl"] ).content ) ) img_file.show() elif indicator == "国外-法国新增确诊-趋势图": img_file = Image.open( BytesIO( requests.get( data_json["importantForeignTrendChart"][5]["imgUrl"] ).content ) ) img_file.show() elif indicator == "国外-德国新增确诊-趋势图": img_file = Image.open( BytesIO( requests.get( data_json["importantForeignTrendChart"][6]["imgUrl"] ).content ) ) img_file.show() elif indicator == "国外-西班牙新增确诊-趋势图": img_file = Image.open( BytesIO( requests.get( data_json["importantForeignTrendChart"][7]["imgUrl"] ).content ) ) img_file.show() elif indicator == "国外-韩国新增确诊-趋势图": img_file = Image.open( BytesIO( requests.get( data_json["importantForeignTrendChart"][8]["imgUrl"] ).content ) ) img_file.show() else: try: data_text = str(soup.find("script", attrs={"id": "getAreaStat"})) data_text_json = json.loads( data_text[data_text.find("= [{") + 2 : data_text.rfind("catch") - 1] ) data_df = pd.DataFrame(data_text_json) sub_area = pd.DataFrame( data_df[data_df["provinceName"] == indicator]["cities"].values[0] ) if sub_area.empty: return print("暂无分区域数据") sub_area.columns = ["区域", "现在确诊人数", "确诊人数", "疑似人数", "治愈人数", "死亡人数", "id"] sub_area = sub_area[["区域", "现在确诊人数", "确诊人数", "疑似人数", "治愈人数", "死亡人数"]] return sub_area except IndexError as e: print("请输入省/市的全称, 如: 浙江省/上海市 等") def covid_baidu(indicator="湖北"): """ 百度-新型冠状病毒肺炎-疫情实时大数据报告 https://voice.baidu.com/act/newpneumonia/newpneumonia/?from=osari_pc_1 :param indicator: 看说明文档 :type indicator: str :return: 指定 indicator 的数据 :rtype: pandas.DataFrame """ url = "https://huiyan.baidu.com/openapi/v1/migration/rank" payload = { "type": "move", "ak": "kgD2HiDnLdUhwzd3CLuG5AWNfX3fhLYe", "adminType": "country", "name": "全国", } r = requests.get(url, params=payload) move_in_df = pd.DataFrame(r.json()["result"]["moveInList"]) move_out_df = pd.DataFrame(r.json()["result"]["moveOutList"]) url = "https://opendata.baidu.com/api.php" payload = { "query": "全国", "resource_id": "39258", "tn": "wisetpl", "format": "json", "cb": "jsonp_1580470773343_11183", } r = requests.get(url, params=payload) text_data = r.text json_data_news = json.loads( text_data.strip("/**/jsonp_1580470773343_11183(").rstrip(");") ) url = "https://opendata.baidu.com/data/inner" payload = { "tn": "reserved_all_res_tn", "dspName": "iphone", "from_sf": "1", "dsp": "iphone", "resource_id": "28565", "alr": "1", "query": "肺炎", "cb": "jsonp_1606895491198_93137", } r = requests.get(url, params=payload) json_data = json.loads(r.text[r.text.find("({") + 1 : r.text.rfind(");")]) spot_report = pd.DataFrame(json_data["Result"][0]["DisplayData"]["result"]["items"]) # domestic-city url = "https://voice.baidu.com/act/newpneumonia/newpneumonia/?from=osari_pc_1" r = requests.get(url) soup = BeautifulSoup(r.text, "lxml") data_json = demjson.decode(soup.find(attrs={"id": "captain-config"}).text) big_df = pd.DataFrame() for i, p in enumerate( jsonpath.jsonpath(data_json["component"][0]["caseList"], "$..area") ): temp_df = pd.DataFrame( jsonpath.jsonpath(data_json["component"][0]["caseList"], "$..subList")[i] ) 
temp_df["province"] = p big_df = big_df.append(temp_df, ignore_index=True) domestic_city_df = big_df domestic_province_df = pd.DataFrame(data_json["component"][0]["caseList"]).iloc[ :, :-2 ] big_df = pd.DataFrame() for i, p in enumerate( jsonpath.jsonpath(data_json["component"][0]["caseOutsideList"], "$..area") ): temp_df = pd.DataFrame( jsonpath.jsonpath( data_json["component"][0]["caseOutsideList"], "$..subList" )[i] ) temp_df["province"] = p big_df = big_df.append(temp_df, ignore_index=True) outside_city_df = big_df outside_country_df = pd.DataFrame( data_json["component"][0]["caseOutsideList"] ).iloc[:, :-1] big_df = pd.DataFrame() for i, p in enumerate( jsonpath.jsonpath(data_json["component"][0]["globalList"], "$..area") ): temp_df = pd.DataFrame( jsonpath.jsonpath(data_json["component"][0]["globalList"], "$..subList")[i] ) temp_df["province"] = p big_df = big_df.append(temp_df, ignore_index=True) global_country_df = big_df global_continent_df = pd.DataFrame(data_json["component"][0]["globalList"])[ ["area", "died", "crued", "confirmed", "confirmedRelative"] ] if indicator == "热门迁入地": return move_in_df elif indicator == "热门迁出地": return move_out_df elif indicator == "今日疫情热搜": return pd.DataFrame(json_data_news["data"][0]["list"][0]["item"]) elif indicator == "防疫知识热搜": return pd.DataFrame(json_data_news["data"][0]["list"][1]["item"]) elif indicator == "热搜谣言粉碎": return pd.DataFrame(json_data_news["data"][0]["list"][2]["item"]) elif indicator == "复工复课热搜": return pd.DataFrame(json_data_news["data"][0]["list"][3]["item"]) elif indicator == "热门人物榜": return pd.DataFrame(json_data_news["data"][0]["list"][4]["item"]) elif indicator == "历史疫情热搜": return pd.DataFrame(json_data_news["data"][0]["list"][5]["item"]) elif indicator == "搜索正能量榜": return pd.DataFrame(json_data_news["data"][0]["list"][6]["item"]) elif indicator == "游戏榜": return pd.DataFrame(json_data_news["data"][0]["list"][7]["item"]) elif indicator == "影视榜": return pd.DataFrame(json_data_news["data"][0]["list"][8]["item"]) elif indicator == "小说榜": return pd.DataFrame(json_data_news["data"][0]["list"][9]["item"]) elif indicator == "疫期飙升榜": return pd.DataFrame(json_data_news["data"][0]["list"][10]["item"]) elif indicator == "实时播报": return spot_report elif indicator == "中国分省份详情": return domestic_province_df elif indicator == "中国分城市详情": return domestic_city_df elif indicator == "国外分国详情": return outside_country_df elif indicator == "国外分城市详情": return outside_city_df elif indicator == "全球分洲详情": return global_continent_df elif indicator == "全球分洲国家详情": return global_country_df def covid_hist_city(city="武汉市"): """ 疫情历史数据 城市 https://github.com/canghailan/Wuhan-2019-nCoV 2019-12-01开始 :return: 具体城市的疫情数据 :rtype: pandas.DataFrame """ url = "https://raw.githubusercontent.com/canghailan/Wuhan-2019-nCoV/master/Wuhan-2019-nCoV.json" r = requests.get(url) data_json = r.json() data_df = pd.DataFrame(data_json) return data_df[data_df["city"] == city] def covid_hist_province(province="湖北省"): """ 疫情历史数据 省份 https://github.com/canghailan/Wuhan-2019-nCoV 2019-12-01开始 :return: 具体省份的疫情数据 :rtype: pandas.DataFrame """ url = "https://raw.githubusercontent.com/canghailan/Wuhan-2019-nCoV/master/Wuhan-2019-nCoV.json" r = requests.get(url) data_json = r.json() data_df =
pd.DataFrame(data_json)
pandas.DataFrame
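# DataFrame.append, used in the loops above, was deprecated in pandas 1.4 and removed in 2.0;
# a minimal sketch of the equivalent collect-then-concat pattern on hypothetical area data.
import pandas as pd

area_tree = [
    {"area": "hypothetical-province-A", "subList": [{"city": "c1", "confirmed": 1}]},
    {"area": "hypothetical-province-B", "subList": [{"city": "c2", "confirmed": 2}]},
]

frames = []
for node in area_tree:
    temp_df = pd.DataFrame(node["subList"])
    temp_df["province"] = node["area"]
    frames.append(temp_df)                      # collect pieces in a plain list

big_df = pd.concat(frames, ignore_index=True)   # single concat instead of repeated append
print(big_df)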
import random from sklearn.datasets import make_moons import numpy as np import pandas as pd import torch from sklearn.metrics import roc_auc_score from d3m.container import DataFrame as d3m_DataFrame from kf_d3m_primitives.semi_supervised.tabular_semi_supervised.tabular_semi_supervised import ( TabularSemiSupervisedPrimitive, Hyperparams as tss_hp, ) from kf_d3m_primitives.semi_supervised.tabular_semi_supervised.tabular_semi_supervised_pipeline import ( TabularSemiSupervisedPipeline, ) np.random.seed(5) torch.manual_seed(5 + 111) torch.cuda.manual_seed(5 + 222) random.seed(5 + 333) def load_moons(labeled_sample=10): data, label = make_moons(1000, False, 0.1, random_state=5) if labeled_sample == 0: return data, label else: l0_data = np.random.permutation(data[(label == 0)]) l1_data = np.random.permutation(data[(label == 1)]) X_l = np.concatenate( [l0_data[: labeled_sample // 2], l1_data[: labeled_sample // 2]] ) y_l = np.concatenate( [np.zeros(labeled_sample // 2), np.ones(labeled_sample // 2)] ) X_u = np.concatenate( [l0_data[labeled_sample // 2 :], l1_data[labeled_sample // 2 :]] ) y_u = np.concatenate([np.zeros(X_u.shape[0] // 2), np.ones(X_u.shape[0] // 2)]) return X_l, y_l, X_u, y_u def test_moons(labeled_sample=10): X_l, y_l, X_u, y_u = load_moons(labeled_sample) X = np.vstack((X_l, X_u)).astype(str) y = np.concatenate((y_l, y_u)).astype(str) y[labeled_sample:] = "" features_df =
pd.DataFrame(X)
pandas.DataFrame
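# A minimal sketch of the label-masking idea in load_moons above, passing make_moons arguments
# by keyword since shuffle/noise are keyword-only in recent scikit-learn.
import numpy as np
from sklearn.datasets import make_moons

labeled_sample = 10
X, y = make_moons(n_samples=1000, shuffle=False, noise=0.1, random_state=5)

# keep a tiny balanced labeled set, blank out the rest as unlabeled ("")
y_str = y.astype(str)
labeled_idx = np.concatenate([np.where(y == c)[0][: labeled_sample // 2] for c in (0, 1)])
mask = np.ones(len(y_str), dtype=bool)
mask[labeled_idx] = False
y_str[mask] = ""

print((y_str != "").sum())   # 10 labeled points, everything else unlabeled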
import numpy as np from tspdb.src.database_module.sql_imp import SqlImplementation from tspdb.src.pindex.predict import get_prediction_range, get_prediction from tspdb.src.pindex.pindex_managment import TSPI, load_pindex from tspdb.src.pindex.pindex_utils import index_ts_mapper import time interface = SqlImplementation(driver="postgresql", host="localhost", database="querytime_test",user="aalomar",password="<PASSWORD>") import timeit import pandas as pd from tspdb.src.hdf_util import read_data from tspdb.src.tsUtils import randomlyHideValues from scipy.stats import norm from sklearn.metrics import r2_score def r2_var(y,y_h,X): average = np.mean(X**2) - np.mean(X)**2 return 1 - sum((y-y_h)**2)/sum((y-average)**2) def create_table_data(): interface = SqlImplementation(driver="postgresql", host="localhost", database="querytime_test",user="aalomar",password="<PASSWORD>") obs = np.arange(10**5).astype('float') means = obs var = np.zeros(obs.shape) obs_9 = randomlyHideValues(np.array(obs), 0.9)[0] obs_7 = randomlyHideValues(np.array(obs), 0.7)[0] print(obs_9) df = pd.DataFrame(data ={'ts':obs, 'means': means, 'ts_9':obs_9, 'ts_7' : obs_7,'var': var }) df.to_csv('testdata/tables/ts_basic_5.csv',index_label = 'time') timestamps = pd.date_range('2012-10-01 00:00:00', periods = 10**5, freq='5s') df.index = timestamps df.to_csv('testdata/tables/ts_basic_ts_5_5.csv', index_label = 'time') # real time series variance constant data = read_data('testdata/MixtureTS2.h5') obs = data['obs'][:] means = data['means'][:] var = np.ones(obs.shape) obs_9 = randomlyHideValues(np.array(obs), 0.9)[0] obs_7 = randomlyHideValues(np.array(obs), 0.7)[0] df = pd.DataFrame(data ={'ts':obs, 'means': means, 'ts_9':obs_9, 'ts_7' : obs_7 ,'var': var }) df.index_label = 'time' df.to_csv('testdata/tables/MixtureTS2.csv', index_label = 'time') # real time series variance constant data = read_data('testdata/MixtureTS.h5') obs = data['obs'][:] means = data['means'][:] var = np.ones(obs.shape) obs_9 = randomlyHideValues(np.array(obs), 0.9)[0] obs_7 = randomlyHideValues(np.array(obs), 0.7)[0] df = pd.DataFrame(data ={'ts':obs, 'means': means, 'ts_9':obs_9, 'ts_7' : obs_7,'var': var }) df.to_csv('testdata/tables/MixtureTS.csv', index_label = 'time') # real time series varaince harmonics data = read_data('testdata/MixtureTS_var_test.h5') obs = data['obs'][:] means = data['means'][:] var = data['var'][:] obs_9 = randomlyHideValues(np.array(obs), 0.9)[0] obs_7 = randomlyHideValues(np.array(obs), 0.7)[0] df =
pd.DataFrame(data ={'ts':obs, 'means': means, 'ts_9':obs_9, 'ts_7' : obs_7, 'var': var })
pandas.DataFrame
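# A minimal sketch of attaching a 5-second DatetimeIndex before export, as in create_table_data
# above; the output file names here are hypothetical.
import numpy as np
import pandas as pd

obs = np.arange(100, dtype=float)
df = pd.DataFrame({"ts": obs, "means": obs, "var": np.zeros_like(obs)})

# integer time index first, then a wall-clock index at a 5-second frequency
df.to_csv("ts_basic_example.csv", index_label="time")
df.index = pd.date_range("2012-10-01 00:00:00", periods=len(df), freq="5s")
df.to_csv("ts_basic_ts_example.csv", index_label="time")

print(df.index[:3])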
from collections import defaultdict import glob import os from matplotlib import pyplot as plt import numpy as np import seaborn as sns import pandas as pd from analysis.utils import ORDER, REGIONS, order_columns_of_df DATA_PATH = "runs/v10/models" def main(): """ This function analyses the results, and looks at different categories of NER (e.g. PER, ORG, LOC, DATE) and creates some tables for these, to see whether any categories do better / worse when changing the pre-trained model. """ # This makes multiple tables, one for each category folders = glob.glob(os.path.join(DATA_PATH, '*_50')) print(f"We have {len(folders)} folders") # Base: {target lang: score} dic = defaultdict(lambda: defaultdict(lambda: [])) # Base: {targer lang: {cat: score}} -> All categories dic_of_all = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: []))) for path in folders: f = path.split("/")[-1] KK = os.path.join(path, 'test_results.txt') if not os.path.exists(KK): print(f, 'does not exist') continue # Split the paths lang_finetune, _, _, lang_start, _, _, _, seed, _ = f.split("_") # Get the results with open(os.path.join(path, 'test_results.txt'), 'r') as f: all_lines = f.readlines() f1 = float(all_lines[0].strip().split(" = ")[1]) # Per category good_lines = all_lines[6:10] for l in good_lines: ans = [a for a in l.strip().split(" ") if a != ''] assert len(ans) == 5 cat = ans[0] _prec, _rec, this_f1, count = map(float, ans[1:]) dic_of_all[lang_start][lang_finetune][cat].append(this_f1) dic_of_all[lang_start][lang_finetune]['overall'].append(f1) dic[lang_start][lang_finetune].append(f1) print("--") # Cleans up some of the results from above new_dic_of_all = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: []))) # We make a 'lang-specific' item to compress the table somewhat. # This means the NER language used is the same as the pre-trained mode for A_, va in dic_of_all.items(): if not A_ in ['base', 'swa']: A = 'lang-specific' else: A = A_ for B, vb in va.items(): for C, vc in vb.items(): if A == 'lang-specific' and B == 'swa': continue if A == 'swa' == B == 'swa': new_dic_of_all['lang-specific'][B][C] = vc new_dic_of_all[A][B][C] = vc dic_of_all = new_dic_of_all # where to save dir = '../analysis/v10/categories'; os.makedirs(dir, exist_ok=True) total_df = None first_df = None second_df = None for my_l in ['base', 'swa', 'lang-specific']: new_dic = defaultdict(lambda: dict()) new_dic_std = defaultdict(lambda: dict()) D = dic_of_all[my_l] # For all languages for l, v in D.items(): # For all categories for b, li in v.items(): assert len(li) == 5, f"Bad, {len(li)}, {my_l}, {l}" li = np.array(li) * 100 # mean and standard deviation new_dic[l][b] = np.mean(li) new_dic_std[l][b] = np.std(li) # Order consistently df = order_columns_of_df(pd.DataFrame(new_dic)) df2 = order_columns_of_df(
pd.DataFrame(new_dic_std)
pandas.DataFrame
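# A minimal sketch of collapsing per-seed scores into mean and standard-deviation tables, as in
# the loop above; the model and category names are placeholders.
import numpy as np
import pandas as pd

# {pretrained model: {category: list of per-seed F1 scores}}
scores = {
    "base": {"PER": [80.1, 81.0, 79.5], "ORG": [70.2, 69.8, 71.0]},
    "swa": {"PER": [82.3, 83.0, 82.1], "ORG": [72.5, 73.1, 72.0]},
}

mean_df = pd.DataFrame({m: {c: np.mean(v) for c, v in cats.items()} for m, cats in scores.items()})
std_df = pd.DataFrame({m: {c: np.std(v) for c, v in cats.items()} for m, cats in scores.items()})

print(mean_df.round(1))
print(std_df.round(2))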
""" Author: <NAME> <EMAIL> http://groups.ichf.edu.pl/ochab Tested on: Python 3.6.10 :: Anaconda, Inc. jupyter-notebook 6.0.3 This script 1. Gets data on COVID-19: tested (cumulative numbers) in Poland from the Twitter account of the Polish Health Ministry: https://twitter.com/MZ_GOV_PL a) Get the images from Twitter b) OCR the images to get numbers 2. Updates an existing local CSV data file. Prerequisites: a) The old data file must exist. b) The old data file name format must be the following: old_csv_file_name = path + "cor." + day_str + ".csv" where day_str = date_i_days_ago.strftime("%Y.%m.%d") For example, the old data file is: ../cor.2020.04.07.csv c) The old data file should have the following column headers: Data,Dzień,Wykryci zakażeni,Testy,Hospitalizowani,Zmarli,Kwarantanna,Nadzór,"Testy, wartości przybliżone",Kwarantanna po powrocie do kraju,Wydarzenia,Wyzdrowiali d) The following is a bit inconsistent but, for historical reasons: - Column names are in Polish; in particular, the date column name is 'Data'. - However, dates in the date column must be in American date format: myfile_date_format = '%m/%d/%Y' Output is written to: new_csv_file_name = path + "cor." + today_str + ".csv" """ ####################################################################################### from twitter_scraper import get_tweets import re import pandas as pd import numpy as np from datetime import date, datetime, timedelta import matplotlib.dates as mdates import glob import requests import sys import os ####################################################################################### # OCR image type 1 # ocr_hqsr(path_filename_in_) # returns: hospitalized, quarantined, supervised, recovered exec(open('../code/TwitterCaptureImages_functions.py').read()) # import TwitterCaptureImages_functions # For some reason, this doesn't work in my Jupyter notebook...(?) ############################################################################################################ exec(open('../code/TwitterCaptureOther_functions.py').read()) ####################################################################################### # CSV data path path = "../data/" # Path to the directory for captured images imgpath = "../twitter_images/" # Path to the directory for captured data (CSV) twitter_data_path = "../twitter_captured_data/" # Error log path err_log_path = "../ocr_errors/" # Twitter user account twitter_user = 'MZ_GOV_PL' # Number of Twitter pages to read pages_number=3 # Note that my csv file uses the American date format! myfile_date_format = '%m/%d/%Y' # Temporarily: Data range to display when running the script data_range=slice(55,65,None) # Temporarily: Max column width to display when running the script max_column_width=20 # Strings to find in tweets start = 'Dzienny raport o' # middle = '/' # # mark parentheses with backslash to avoid misinterpretation! 
# end = '\(wszystkie pozytywne przypadki/w tym osoby zmarłe\)' # Create a dictionary of tweets tweets = [] print_spacer() print("Getting tweets from", twitter_user, "...") for i in get_tweets(twitter_user, pages=pages_number): tweets.append(i) # print(repr(tweets)) # Convert tweets to pandas.DataFrame df=pd.DataFrame.from_dict(tweets) # Select rows in df which contain the string defined in the start variable # and create df_hqsr (our twitter data frame) df_hqsr=df[df['text'].str.contains(start, na=False)] # Add a new column to the twitter data frame: 'tested' df_hqsr = df_hqsr.reindex( df_hqsr.columns.tolist() + ['hospitalized','quarantined','supervised','recovered'], axis=1) # Open error log file errlogfile = open(err_log_path + 'OCR_errors.log', 'a') # Open error correction file errcorrectfile_name=err_log_path + 'OCR_error_correction.csv' if not os.path.exists(errcorrectfile_name): errcorrectfile = open(errcorrectfile_name, 'w') print("\"Date\",\"Column\",\"is\",\"should be\"", file=errcorrectfile) else: if os.path.getsize(errcorrectfile_name) > 0: # Check for newline at EOF. If it is not there, add it. errcorrectfile = open(errcorrectfile_name, 'r') # Get file as string. This will also be needed later. errcorrectfile_str = str(errcorrectfile.read()) last_chr = errcorrectfile_str[-1] errcorrectfile.close() errcorrectfile = open(errcorrectfile_name, 'a') if not '\n' in last_chr: # Add newline at EOF errcorrectfile.write('\n') else: errcorrectfile = open(errcorrectfile_name, 'a') errcorrectfile_str="" print("\"Date\",\"Column\",\"is\",\"should be\"", file=errcorrectfile) ERRFLAG=0 # Download images that contain data # Find the numbers of tested in the images. # Write these numbers in the 'tested' column. # df_hqsr.iterrows() returns the list: index, row # index : a row index (a number) # row : whole row for index, row in df_hqsr.iterrows(): # Get image url photo_url = row['entries'].get('photos')[0] # Get image time stamp timestamp = row['time'].strftime("%Y.%m.%d") # Download image myfile = requests.get(photo_url) # Write image; image name will have the time stamp. img_file_name = imgpath+"TCImageHqsrMZ_GOV_PL."+timestamp+".jpg" open(img_file_name, 'wb').write(myfile.content) # OCR image to get the cumulative number of tested patients # number_list contains: hospitalized, quarantined, supervised, recovered d1=pd.to_datetime(row['time']) d2=datetime(2020,4,16,9,0,34) # change of image format on this date if(d1>=d2): numbers = ocr_hqsr(img_file_name) else: numbers = ocr_hqsr_old(img_file_name) # for old image format labels=['hospitalized', 'quarantined', 'supervised', 'recovered'] # Create a dictionary from two lists labels_numbers = {labels[i]: numbers[i] for i in range(len(labels))} for label in labels_numbers: # print(label, labels_numbers[label]) number_str = labels_numbers[label] is_number = number_str.isnumeric() #all(map(str.isdigit, number_str)) if(is_number): # Insert the cumulative number of tested patients the 'tested' column of df_hqsr. df_hqsr.loc[index,label] = int(number_str) else: ERRFLAG=1 df_hqsr.loc[index,label] = number_str error_message_str = datetime.now().strftime("%Y.%m.%d %H:%M:%S")+ " OCR error! 
"+ label+ " : "+ number_str+ " is not a number in "+ img_file_name print(error_message_str, file = errlogfile) print(error_message_str ,file = sys.stderr) new_error=df_hqsr.loc[index,'time'].strftime(myfile_date_format)+","+label+ ","+ number_str+ "," # Check if this error already exists in OCR error file (also if corrected) if not new_error in errcorrectfile_str: print(new_error,file = errcorrectfile) else: print("\tThis error already exists in the OCR error file "+errcorrectfile_name, file=sys.stderr) if ERRFLAG: print("\nIf not already corrected, correct these errors manually in " + err_log_path + "OCR_error_correction.csv",file = sys.stderr) print("and run the error correction script run_error_correction.sh",file = sys.stderr) # Close error log file errlogfile.close() errcorrectfile.close() # For some reason, the numbers entered to columns are float... # Convert the 'tested' column to int # df_hqsr = df_hqsr.astype({'hospitalized':int,'quarantined':int,'supervised':int,'recovered':int}) # Reset index (because old indexes were inherited from df) df_hqsr = df_hqsr.reset_index(drop=True) # For check, write the downloaded data to a file: df_hqsr_to_export = df_hqsr[['time', 'hospitalized','quarantined','supervised','recovered']] today = date.today() today_str = today.strftime("%Y.%m.%d") captured_data_file_name = twitter_data_path+"TChqsrMZ_GOV_PL."+today_str+".csv" df_hqsr_to_export.to_csv (captured_data_file_name, index = False, header=True) # Update the existing CSV data file # # Automatically find the previous data file filename = find_last_local_data_file() # For some reason, I can't use the result of glob.glob(filename) above (why?) # I use the filename instead old_csv_file_name = filename new_csv_file_name = path + "cor." + today_str + ".csv" # Read the latest existing CSV data file myfile_df = pd.read_csv(old_csv_file_name) # Show part of the old csv file as a table (I need to improve this) # Works in Jupyter notebook / IPython # Display more columns in Ipython pd.set_option('display.max_columns', 20)
pd.set_option('display.max_colwidth', max_column_width)
pandas.set_option
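# A minimal sketch of filtering scraped tweets by the report marker and rendering dates in the
# CSV's American format, assuming a toy tweets frame rather than real twitter_scraper output.
import pandas as pd

myfile_date_format = "%m/%d/%Y"
start = "Dzienny raport o"

tweets_df = pd.DataFrame({
    "time": pd.to_datetime(["2020-04-16 09:00:34", "2020-04-16 12:00:00"]),
    "text": ["Dzienny raport o ...", "unrelated tweet"],
})

# keep only the daily-report tweets, then render dates the way the 'Data' column expects
report_df = tweets_df[tweets_df["text"].str.contains(start, na=False)].copy()
report_df["Data"] = report_df["time"].dt.strftime(myfile_date_format)
print(report_df[["Data", "text"]])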
from distutils.version import LooseVersion

try:
    import pandas as pd
except ImportError:
    pd = None
else:
    if hasattr(pd, '_version'):
        pv =
pd._version.get_versions()
pandas._version.get_versions
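# pd._version is a private Versioneer module and is not guaranteed to exist on newer pandas
# builds; a minimal sketch of the same check through the public __version__ attribute.
from distutils.version import LooseVersion   # packaging.version.Version is the modern replacement

try:
    import pandas as pd
except ImportError:
    pd = None

if pd is not None:
    pandas_ok = LooseVersion(pd.__version__) >= LooseVersion("1.0")
    print(pd.__version__, pandas_ok)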
import numpy as np
from scipy.stats import mode
from pandas import DataFrame as df
import pickle as hkl
import os


def process_features(my_df, type, params):
    try:
        if not os.path.isdir(params.cache_dir):
            os.makedirs(params.cache_dir)
        # return cached features if they exist; pickle files must be opened in binary mode
        if type == "train":
            dict = hkl.load(open(str(os.path.join(params.cache_dir, "train_features.hkl")), "rb"))
            return dict["X"], dict["y_white"], dict["y_black"]
        elif type == "test":
            dict = hkl.load(open(str(os.path.join(params.cache_dir, "test_features.hkl")), "rb"))
            return dict["X"]
    except:
        X =
df()
pandas.DataFrame
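# A minimal cache-or-compute sketch of the idea behind process_features above, opening the
# pickle files in binary mode as pickle requires; the cache path is hypothetical.
import os
import pickle

import pandas as pd


def load_or_build(cache_path, build_fn):
    """Return a cached object if present, otherwise build, cache and return it."""
    if os.path.exists(cache_path):
        with open(cache_path, "rb") as f:          # pickle needs binary mode
            return pickle.load(f)
    obj = build_fn()
    os.makedirs(os.path.dirname(cache_path) or ".", exist_ok=True)
    with open(cache_path, "wb") as f:
        pickle.dump(obj, f)
    return obj


features = load_or_build("cache/train_features.pkl", lambda: {"X": pd.DataFrame(), "y": []})
print(type(features["X"]))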
import numpy as np import pandas as pd import sys import pickle import matplotlib.pyplot as plt from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas import pyqtgraph from PyQt5.QtWidgets import * from PyQt5.QtGui import * from PyQt5.QtCore import * from PyQt5.QtTest import * from Model_module import Model_module from Data_module import Data_module # from Sub_widget import another_result_explain class Worker(QObject): # Signal을 보낼 그릇을 생성# ############# train_value = pyqtSignal(object) # nor_ab_value = pyqtSignal(object) procedure_value = pyqtSignal(object) verif_value = pyqtSignal(object) timer = pyqtSignal(object) symptom_db = pyqtSignal(object) shap = pyqtSignal(object) plot_db = pyqtSignal(object) display_ex = pyqtSignal(object, object, object) another_shap = pyqtSignal(object, object, object) another_shap_table = pyqtSignal(object) ########################################## @pyqtSlot(object) def generate_db(self): test_db = input('구현할 시나리오를 입력해주세요 : ') print(f'입력된 시나리오 : {test_db}를 실행합니다.') Model_module() # model module 내의 빈행렬 초기화 data_module = Data_module() db, check_db = data_module.load_data(file_name=test_db) # test_db 불러오기 data_module.data_processing() # Min-Max o, 2 Dimension liner = [] plot_data = [] normal_data = [] compare_data = {'Normal':[], 'Ab21-01':[], 'Ab21-02':[], 'Ab20-04':[], 'Ab15-07':[], 'Ab15-08':[], 'Ab63-04':[], 'Ab63-02':[], 'Ab21-12':[], 'Ab19-02':[], 'Ab21-11':[], 'Ab23-03':[], 'Ab60-02':[], 'Ab59-02':[], 'Ab23-01':[], 'Ab23-06':[]} for line in range(np.shape(db)[0]): QTest.qWait(0.01) print(np.shape(db)[0], line) data = np.array([data_module.load_real_data(row=line)]) liner.append(line) check_data, check_parameter = data_module.load_real_check_data(row=line) plot_data.append(check_data[0]) try: normal_data.append(normal_db.iloc[line]) except: pass try: compare_data['Normal'].append(normal_db.iloc[line]) except: pass try: compare_data['Ab21-01'].append(ab21_01.iloc[line]) except: pass try: compare_data['Ab21-02'].append(ab21_02.iloc[line]) except: pass try: compare_data['Ab20-04'].append(ab20_04.iloc[line]) except: pass try: compare_data['Ab15-07'].append(ab15_07.iloc[line]) except: pass try: compare_data['Ab15-08'].append(ab15_08.iloc[line]) except: pass try: compare_data['Ab63-04'].append(ab63_04.iloc[line]) except: pass try: compare_data['Ab63-02'].append(ab63_02.iloc[line]) except: pass try: compare_data['Ab21-12'].append(ab21_12.iloc[line]) except: pass try: compare_data['Ab19-02'].append(ab19_02.iloc[line]) except: pass try: compare_data['Ab21-11'].append(ab21_11.iloc[line]) except: pass try: compare_data['Ab23-03'].append(ab23_03.iloc[line]) except: pass try: compare_data['Ab60-02'].append(ab60_02.iloc[line]) except: pass try: compare_data['Ab59-02'].append(ab59_02.iloc[line]) except: pass try: compare_data['Ab23-01'].append(ab23_01.iloc[line]) except: pass try: compare_data['Ab23-06'].append(ab23_06.iloc[line]) except: pass if np.shape(data) == (1, 10, 46): dim2 = np.array(data_module.load_scaled_data(row=line - 9)) # 2차원 scale # check_data, check_parameter = data_module.load_real_check_data(row=line - 8) # plot_data.append(check_data[0]) train_untrain_reconstruction_error, train_untrain_error = model_module.train_untrain_classifier(data=data) # normal_abnormal_reconstruction_error = model_module.normal_abnormal_classifier(data=data) abnormal_procedure_result, abnormal_procedure_prediction, shap_add_des, shap_value = model_module.abnormal_procedure_classifier(data=dim2) abnormal_verif_reconstruction_error, verif_threshold, 
abnormal_verif_error = model_module.abnormal_procedure_verification(data=data) self.train_value.emit(train_untrain_error) # self.nor_ab_value.emit(np.argmax(abnormal_procedure_result[line-9], axis=1)[0]) self.procedure_value.emit(np.argmax(abnormal_procedure_prediction, axis=1)[0]) self.verif_value.emit([abnormal_verif_error, verif_threshold]) self.timer.emit([line, check_parameter]) self.symptom_db.emit([np.argmax(abnormal_procedure_prediction, axis=1)[0], check_parameter]) self.shap.emit(shap_add_des) self.plot_db.emit([liner, plot_data]) self.display_ex.emit(shap_add_des, [liner, plot_data], normal_data) self.another_shap.emit(shap_value, [liner, plot_data], compare_data) self.another_shap_table.emit(shap_value) class AlignDelegate(QStyledItemDelegate): def initStyleOption(self, option, index): super(AlignDelegate, self).initStyleOption(option, index) option.displayAlignment = Qt.AlignCenter class Mainwindow(QWidget): def __init__(self): super().__init__() self.setWindowTitle("Real-Time Abnormal Diagnosis for NPP") self.setGeometry(150, 50, 1700, 800) # 그래프 초기조건 pyqtgraph.setConfigOption("background", "w") pyqtgraph.setConfigOption("foreground", "k") ############################################# self.selected_para = pd.read_csv('./DataBase/Final_parameter.csv') # GUI part 1 Layout (진단 부분 통합) layout_left = QVBoxLayout() # 영 번째 그룹 설정 (Time and Power) gb_0 = QGroupBox("Training Status") # 영 번째 그룹 이름 설정 layout_left.addWidget(gb_0) # 전체 틀에 영 번째 그룹 넣기 gb_0_layout = QBoxLayout(QBoxLayout.LeftToRight) # 영 번째 그룹 내용을 넣을 레이아웃 설정 # 첫 번째 그룹 설정 gb_1 = QGroupBox("Training Status") # 첫 번째 그룹 이름 설정 layout_left.addWidget(gb_1) # 전체 틀에 첫 번째 그룹 넣기 gb_1_layout = QBoxLayout(QBoxLayout.LeftToRight) # 첫 번째 그룹 내용을 넣을 레이아웃 설정 # 두 번째 그룹 설정 gb_2 = QGroupBox('NPP Status') layout_left.addWidget(gb_2) gb_2_layout = QBoxLayout(QBoxLayout.LeftToRight) # 세 번째 그룹 설정 gb_3 = QGroupBox(self) layout_left.addWidget(gb_3) gb_3_layout = QBoxLayout(QBoxLayout.LeftToRight) # 네 번째 그룹 설정 gb_4 = QGroupBox('Predicted Result Verification') layout_left.addWidget(gb_4) gb_4_layout = QBoxLayout(QBoxLayout.LeftToRight) # 다섯 번째 그룹 설정 gb_5 = QGroupBox('Symptom check in scenario') layout_left.addWidget(gb_5) gb_5_layout = QBoxLayout(QBoxLayout.TopToBottom) # Spacer 추가 # layout_part1.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)) # 영 번째 그룹 내용 self.time_label = QLabel(self) self.power_label = QPushButton(self) # 첫 번째 그룹 내용 # Trained / Untrained condition label self.trained_label = QPushButton('Trained') self.Untrained_label = QPushButton('Untrained') # 두 번째 그룹 내용 self.normal_label = QPushButton('Normal') self.abnormal_label = QPushButton('Abnormal') # 세 번째 그룹 내용 self.name_procedure = QLabel('Number of Procedure: ') self.num_procedure = QLineEdit(self) self.num_procedure.setAlignment(Qt.AlignCenter) self.name_scnario = QLabel('Name of Procedure: ') self.num_scnario = QLineEdit(self) self.num_scnario.setAlignment(Qt.AlignCenter) # 네 번째 그룹 내용 self.success_label = QPushButton('Diagnosis Success') self.failure_label = QPushButton('Diagnosis Failure') # 다섯 번째 그룹 내용 self.symptom_name = QLabel(self) self.symptom1 = QCheckBox(self) self.symptom2 = QCheckBox(self) self.symptom3 = QCheckBox(self) self.symptom4 = QCheckBox(self) self.symptom5 = QCheckBox(self) self.symptom6 = QCheckBox(self) # 영 번째 그룹 내용 입력 gb_0_layout.addWidget(self.time_label) gb_0_layout.addWidget(self.power_label) gb_0.setLayout(gb_0_layout) # 첫 번째 그룹 내용 입력 gb_1_layout.addWidget(self.trained_label) gb_1_layout.addWidget(self.Untrained_label) 
gb_1.setLayout(gb_1_layout) # 첫 번째 레이아웃 내용을 첫 번째 그룹 틀로 넣기 # 두 번째 그룹 내용 입력 gb_2_layout.addWidget(self.normal_label) gb_2_layout.addWidget(self.abnormal_label) gb_2.setLayout(gb_2_layout) # 세 번째 그룹 내용 입력 gb_3_layout.addWidget(self.name_procedure) gb_3_layout.addWidget(self.num_procedure) gb_3_layout.addWidget(self.name_scnario) gb_3_layout.addWidget(self.num_scnario) gb_3.setLayout(gb_3_layout) # 네 번째 그룹 내용 입력 gb_4_layout.addWidget(self.success_label) gb_4_layout.addWidget(self.failure_label) gb_4.setLayout(gb_4_layout) # 다섯 번째 그룹 내용 입력 gb_5_layout.addWidget(self.symptom_name) gb_5_layout.addWidget(self.symptom1) gb_5_layout.addWidget(self.symptom2) gb_5_layout.addWidget(self.symptom3) gb_5_layout.addWidget(self.symptom4) gb_5_layout.addWidget(self.symptom5) gb_5_layout.addWidget(self.symptom6) gb_5.setLayout(gb_5_layout) # Start 버튼 맨 아래에 위치 self.start_btn = QPushButton('Start') # layout_part1.addWidget(self.start_btn) self.tableWidget = QTableWidget(0, 0) self.tableWidget.setFixedHeight(500) self.tableWidget.setFixedWidth(800) # Plot 구현 self.plot_1 = pyqtgraph.PlotWidget(title=self) self.plot_2 = pyqtgraph.PlotWidget(title=self) self.plot_3 = pyqtgraph.PlotWidget(title=self) self.plot_4 = pyqtgraph.PlotWidget(title=self) # Explanation Alarm 구현 red_alarm = QGroupBox('Main basis for diagnosis') red_alarm_layout = QGridLayout() orange_alarm = QGroupBox('Sub basis for diagnosis') orange_alarm_layout = QGridLayout() # Display Button 생성 self.red1 = QPushButton(self) self.red2 = QPushButton(self) self.red3 = QPushButton(self) self.red4 = QPushButton(self) self.orange1 = QPushButton(self) self.orange2 = QPushButton(self) self.orange3 = QPushButton(self) self.orange4 = QPushButton(self) self.orange5 = QPushButton(self) self.orange6 = QPushButton(self) self.orange7 = QPushButton(self) self.orange8 = QPushButton(self) self.orange9 = QPushButton(self) self.orange10 = QPushButton(self) self.orange11 = QPushButton(self) self.orange12 = QPushButton(self) # Layout에 widget 삽입 red_alarm_layout.addWidget(self.red1, 0, 0) red_alarm_layout.addWidget(self.red2, 0, 1) red_alarm_layout.addWidget(self.red3, 1, 0) red_alarm_layout.addWidget(self.red4, 1, 1) orange_alarm_layout.addWidget(self.orange1, 0, 0) orange_alarm_layout.addWidget(self.orange2, 0, 1) orange_alarm_layout.addWidget(self.orange3, 1, 0) orange_alarm_layout.addWidget(self.orange4, 1, 1) orange_alarm_layout.addWidget(self.orange5, 2, 0) orange_alarm_layout.addWidget(self.orange6, 2, 1) orange_alarm_layout.addWidget(self.orange7, 3, 0) orange_alarm_layout.addWidget(self.orange8, 3, 1) orange_alarm_layout.addWidget(self.orange9, 4, 0) orange_alarm_layout.addWidget(self.orange10, 4, 1) orange_alarm_layout.addWidget(self.orange11, 5, 0) orange_alarm_layout.addWidget(self.orange12, 5, 1) # Group Box에 Layout 삽입 red_alarm.setLayout(red_alarm_layout) orange_alarm.setLayout(orange_alarm_layout) # 각 Group Box를 상위 Layout에 삽입 layout_part1 = QVBoxLayout() detail_part = QHBoxLayout() detailed_table = QPushButton('Detail Explanation [Table]') self.another_classification = QPushButton('Why other scenarios were not chosen') detail_part.addWidget(detailed_table) detail_part.addWidget(self.another_classification) alarm_main = QVBoxLayout() alarm_main.addWidget(red_alarm) alarm_main.addWidget(orange_alarm) layout_part1.addLayout(layout_left) layout_part1.addLayout(alarm_main) layout_part1.addLayout(detail_part) layout_part1.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)) # GUI part2 Layout (XAI 구현) layout_part2 = QVBoxLayout() 
layout_part2.addWidget(self.plot_1) layout_part2.addWidget(self.plot_2) layout_part2.addWidget(self.plot_3) layout_part2.addWidget(self.plot_4) # layout_part2.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)) # layout_part2.addWidget(self.tableWidget) # GUI part1 and part2 통합 layout_base = QHBoxLayout() layout_base.addLayout(layout_part1) layout_base.addLayout(layout_part2) # GUI 최종 통합 (start button을 하단에 배치시키기 위함) total_layout = QVBoxLayout() total_layout.addLayout(layout_base) total_layout.addWidget(self.start_btn) self.setLayout(total_layout) # setLayout : 최종 출력될 GUI 화면을 결정 # Threading Part############################################################################################################## # 데이터 연산 부분 Thread화 self.worker = Worker() self.worker_thread = QThread() # Signal을 Main Thread 내의 함수와 연결 self.worker.train_value.connect(self.Determine_train) self.worker.procedure_value.connect(self.Determine_abnormal) self.worker.procedure_value.connect(self.Determine_procedure) self.worker.verif_value.connect(self.verifit_result) self.worker.timer.connect(self.time_display) self.worker.symptom_db.connect(self.procedure_satisfaction) # self.worker.shap.connect(self.explain_result) self.worker.plot_db.connect(self.plotting) self.worker.display_ex.connect(self.display_explain) self.worker.moveToThread(self.worker_thread) # Worker class를 Thread로 이동 # self.worker_thread.started.connect(lambda: self.worker.generate_db()) self.start_btn.clicked.connect(lambda: self.worker.generate_db()) # 누르면 For문 실행 self.worker_thread.start() # Threading Part############################################################################################################## # 이벤트 처리 ---------------------------------------------------------------------------------------------------- detailed_table.clicked.connect(self.show_table) self.another_classification.clicked.connect(self.show_another_result) # Button 클릭 연동 이벤트 처리 convert_red_btn = {0: self.red1, 1: self.red2, 2: self.red3, 3: self.red4} # Red Button convert_red_plot = {0: self.red1_plot, 1: self.red2_plot, 2: self.red3_plot, 3: self.red4_plot} # convert_orange_btn = {0: self.orange1, 1: self.orange2, 2: self.orange3, 3: self.orange4, 4: self.orange5, 5: self.orange6, 6: self.orange7, 7: self.orange8, 8: self.orange9, 9: self.orange10, 10: self.orange11, 11: self.orange12} # Orange Button convert_orange_plot = {0: self.orange1_plot, 1: self.orange2_plot, 2: self.orange3_plot, 3: self.orange4_plot, 4: self.orange5_plot, 5: self.orange6_plot, 6: self.orange7_plot, 7: self.orange8_plot, 8: self.orange9_plot, 9: self.orange10_plot, 10: self.orange11_plot, 11: self.orange12_plot} # 초기 Button 위젯 선언 -> 초기에 선언해야 끊기지않고 유지됨. 
# Red Button [convert_red_btn[i].clicked.connect(convert_red_plot[i]) for i in range(4)] self.red_plot_1 = pyqtgraph.PlotWidget(title=self) self.red_plot_2 = pyqtgraph.PlotWidget(title=self) self.red_plot_3 = pyqtgraph.PlotWidget(title=self) self.red_plot_4 = pyqtgraph.PlotWidget(title=self) # Grid setting self.red_plot_1.showGrid(x=True, y=True, alpha=0.3) self.red_plot_2.showGrid(x=True, y=True, alpha=0.3) self.red_plot_3.showGrid(x=True, y=True, alpha=0.3) self.red_plot_4.showGrid(x=True, y=True, alpha=0.3) # Orange Button [convert_orange_btn[i].clicked.connect(convert_orange_plot[i]) for i in range(12)] self.orange_plot_1 = pyqtgraph.PlotWidget(title=self) self.orange_plot_2 = pyqtgraph.PlotWidget(title=self) self.orange_plot_3 = pyqtgraph.PlotWidget(title=self) self.orange_plot_4 = pyqtgraph.PlotWidget(title=self) self.orange_plot_5 = pyqtgraph.PlotWidget(title=self) self.orange_plot_6 = pyqtgraph.PlotWidget(title=self) self.orange_plot_7 = pyqtgraph.PlotWidget(title=self) self.orange_plot_8 = pyqtgraph.PlotWidget(title=self) self.orange_plot_9 = pyqtgraph.PlotWidget(title=self) self.orange_plot_10 = pyqtgraph.PlotWidget(title=self) self.orange_plot_11 = pyqtgraph.PlotWidget(title=self) self.orange_plot_12 = pyqtgraph.PlotWidget(title=self) # Grid setting self.orange_plot_1.showGrid(x=True, y=True, alpha=0.3) self.orange_plot_2.showGrid(x=True, y=True, alpha=0.3) self.orange_plot_3.showGrid(x=True, y=True, alpha=0.3) self.orange_plot_4.showGrid(x=True, y=True, alpha=0.3) self.orange_plot_5.showGrid(x=True, y=True, alpha=0.3) self.orange_plot_6.showGrid(x=True, y=True, alpha=0.3) self.orange_plot_7.showGrid(x=True, y=True, alpha=0.3) self.orange_plot_8.showGrid(x=True, y=True, alpha=0.3) self.orange_plot_9.showGrid(x=True, y=True, alpha=0.3) self.orange_plot_10.showGrid(x=True, y=True, alpha=0.3) self.orange_plot_11.showGrid(x=True, y=True, alpha=0.3) self.orange_plot_12.showGrid(x=True, y=True, alpha=0.3) self.show() # UI show command def time_display(self, display_variable): # display_variable[0] : time, display_variable[1].iloc[1] self.time_label.setText(f'<b>Time :<b/> {display_variable[0]} sec') self.time_label.setFont(QFont('Times new roman', 15)) self.time_label.setAlignment(Qt.AlignCenter) self.power_label.setText(f'Power : {round(display_variable[1].iloc[1]["QPROREL"]*100, 2)}%') if round(display_variable[1].iloc[1]["QPROREL"]*100, 2) < 95: self.power_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;') else: self.power_label.setStyleSheet('color : black;' 'background-color: light gray;') def Determine_train(self, train_untrain_reconstruction_error): if train_untrain_reconstruction_error[0] <= 0.00225299: # Trained Data self.trained_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: green;') self.Untrained_label.setStyleSheet('color : black;' 'background-color: light gray;') else: # Untrianed Data self.Untrained_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;') self.trained_label.setStyleSheet('color : black;' 'background-color: light gray;') def Determine_abnormal(self, abnormal_diagnosis): if abnormal_diagnosis == 0: # 정상상태 self.normal_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: green;') self.abnormal_label.setStyleSheet('color : black;' 'background-color: light gray;') else: # 비정상상태 self.abnormal_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;') self.normal_label.setStyleSheet('color : black;' 'background-color: light 
gray;') def Determine_procedure(self, abnormal_procedure_result): if abnormal_procedure_result == 0: self.num_procedure.setText('Normal') self.num_scnario.setText('Normal') elif abnormal_procedure_result == 1: self.num_procedure.setText('Ab21-01') self.num_scnario.setText('가압기 압력 채널 고장 "고"') elif abnormal_procedure_result == 2: self.num_procedure.setText('Ab21-02') self.num_scnario.setText('가압기 압력 채널 고장 "저"') elif abnormal_procedure_result == 3: self.num_procedure.setText('Ab20-04') self.num_scnario.setText('가압기 수위 채널 고장 "저"') elif abnormal_procedure_result == 4: self.num_procedure.setText('Ab15-07') self.num_scnario.setText('증기발생기 수위 채널 고장 "저"') elif abnormal_procedure_result == 5: self.num_procedure.setText('Ab15-08') self.num_scnario.setText('증기발생기 수위 채널 고장 "고"') elif abnormal_procedure_result == 6: self.num_procedure.setText('Ab63-04') self.num_scnario.setText('제어봉 낙하') elif abnormal_procedure_result == 7: self.num_procedure.setText('Ab63-02') self.num_scnario.setText('제어봉의 계속적인 삽입') elif abnormal_procedure_result == 8: self.num_procedure.setText('Ab21-12') # self.num_scnario.setText('가압기 PORV 열림') self.num_scnario.setText('Pressurizer PORV opening') elif abnormal_procedure_result == 9: self.num_procedure.setText('Ab19-02') self.num_scnario.setText('가압기 안전밸브 고장') elif abnormal_procedure_result == 10: self.num_procedure.setText('Ab21-11') self.num_scnario.setText('가압기 살수밸브 고장 "열림"') elif abnormal_procedure_result == 11: self.num_procedure.setText('Ab23-03') self.num_scnario.setText('1차기기 냉각수 계통으로 누설 "CVCS->CCW"') elif abnormal_procedure_result == 12: self.num_procedure.setText('Ab60-02') self.num_scnario.setText('재생열교환기 전단부위 파열') elif abnormal_procedure_result == 13: self.num_procedure.setText('Ab59-02') self.num_scnario.setText('충전수 유량조절밸브 후단 누설') elif abnormal_procedure_result == 14: self.num_procedure.setText('Ab23-01') self.num_scnario.setText('1차기기 냉각수 계통으로 누설 "RCS->CCW"') elif abnormal_procedure_result == 15: self.num_procedure.setText('Ab23-06') self.num_scnario.setText('증기발생기 전열관 누설') def verifit_result(self, verif_value): if verif_value[0] <= verif_value[1]: # 진단 성공 self.success_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: green;') self.failure_label.setStyleSheet('color : black;' 'background-color: light gray;') else: # 진단 실패 self.failure_label.setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: red;') self.success_label.setStyleSheet('color : black;' 'background-color: light gray;') def procedure_satisfaction(self, symptom_db): # symptom_db[0] : classification result [0~15] # symptom_db[1] : check_db [2,2222] -> 현시점과 이전시점 비교를 위함. 
# symptom_db[1].iloc[0] : 이전 시점 # symptom_db[1].iloc[1] : 현재 시점 if symptom_db[0] == 0: # 정상 상태 self.symptom_name.setText('Diagnosis Result : Normal → Symptoms : 0') self.symptom1.setText('') self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") self.symptom2.setText('') self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") self.symptom3.setText('') self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") self.symptom4.setText('') self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") self.symptom5.setText('') self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") self.symptom6.setText('') self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") elif symptom_db[0] == 1: self.symptom_name.setText('Diagnosis Result : Ab21-01 Pressurizer pressure channel failure "High" → Symptoms : 6') self.symptom1.setText("채널 고장으로 인한 가압기 '고' 압력 지시") if symptom_db[1].iloc[1]['PPRZN'] > symptom_db[1].iloc[1]['CPPRZH']: self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}") else: self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") self.symptom2.setText("가압기 살수밸브 '열림' 지시") if symptom_db[1].iloc[1]['BPRZSP'] > 0: self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}") else: self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") self.symptom3.setText("가압기 비례전열기 꺼짐") if symptom_db[1].iloc[1]['QPRZP'] == 0: self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}") else: self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") self.symptom4.setText("가압기 보조전열기 꺼짐") if symptom_db[1].iloc[1]['QPRZB'] == 0: self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}") else: self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") self.symptom5.setText("실제 가압기 '저' 압력 지시") if symptom_db[1].iloc[1]['PPRZ'] < symptom_db[1].iloc[1]['CPPRZL']: self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}") else: self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") self.symptom6.setText("가압기 PORV 차단밸브 닫힘") if symptom_db[1].iloc[1]['BHV6'] == 0: self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}") else: self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") elif symptom_db[0] == 2: self.symptom_name.setText('진단 : Ab21-02 가압기 압력 채널 고장 "저" → 증상 : 5') self.symptom1.setText("채널 고장으로 인한 가압기 '저' 압력 지시") if symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CPPRZL']: self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}") else: self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") self.symptom2.setText('가압기 저압력으로 인한 보조 전열기 켜짐 지시 및 경보 발생') if (symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CQPRZB']) and (symptom_db[1].iloc[1]['KBHON'] == 1): self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}") else: self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") self.symptom3.setText("실제 가압기 '고' 압력 지시") if symptom_db[1].iloc[1]['PPRZ'] > symptom_db[1].iloc[1]['CPPRZH']: self.symptom3.setStyleSheet("QCheckBo" "x::indicator" 
"{""background-color : red;""}") else: self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") self.symptom4.setText('가압기 PORV 열림 지시 및 경보 발생') if symptom_db[1].iloc[1]['BPORV'] > 0: self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}") else: self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") self.symptom5.setText('실제 가압기 압력 감소로 가압기 PORV 닫힘') # 가압기 압력 감소에 대해 해결해야함. if symptom_db[1].iloc[1]['BPORV'] == 0 and (symptom_db[1].iloc[0]['PPRZ'] > symptom_db[1].iloc[1]['PPRZ']): self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}") else: self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") elif symptom_db[0] == 3: self.symptom_name.setText('진단 : Ab20-04 가압기 수위 채널 고장 "저" → 증상 : 5') self.symptom1.setText("채널 고장으로 인한 가압기 '저' 수위 지시") if symptom_db[1].iloc[1]['ZINST63'] < 17: # 나중에 다시 확인해야함. self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}") # else: # self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") self.symptom2.setText('"LETDN HX OUTLET FLOW LOW" 경보 발생') if symptom_db[1].iloc[1]['UNRHXUT'] > symptom_db[1].iloc[1]['CULDHX']: self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}") # else: # self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") self.symptom3.setText('"CHARGING LINE FLOW HI/LO" 경보 발생') if (symptom_db[1].iloc[1]['WCHGNO'] < symptom_db[1].iloc[1]['CWCHGL']) or (symptom_db[1].iloc[1]['WCHGNO'] > symptom_db[1].iloc[1]['CWCHGH']): self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}") # else: # self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") self.symptom4.setText('충전 유량 증가') if symptom_db[1].iloc[0]['WCHGNO'] < symptom_db[1].iloc[1]['WCHGNO']: self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}") # else: # self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") self.symptom5.setText('건전한 수위지시계의 수위 지시치 증가') if symptom_db[1].iloc[0]['ZPRZNO'] < symptom_db[1].iloc[1]['ZPRZNO']: self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}") # else: # self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") elif symptom_db[0] == 4: self.symptom_name.setText('진단 : Ab15-07 증기발생기 수위 채널 고장 "저" → 증상 : ') self.symptom1.setText('증기발생기 수위 "저" 경보 발생') if symptom_db[1].iloc[1]['ZINST78']*0.01 < symptom_db[1].iloc[1]['CZSGW'] or symptom_db[1].iloc[1]['ZINST77']*0.01 < symptom_db[1].iloc[1]['CZSGW'] or symptom_db[1].iloc[1]['ZINST76']*0.01 < symptom_db[1].iloc[1]['CZSGW']: self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}") else: self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") self.symptom2.setText('해당 SG MFCV 열림 방향으로 진행 및 해당 SG 실제 급수유량 증가') elif symptom_db[0] == 8: # self.symptom_name.setText('진단 : Ab21-12 가압기 PORV 열림 → 증상 : 5') self.symptom_name.setText('Diagnosis result : Ab21-12 Pressurizer PORV opening → Symptoms : 5') # self.symptom1.setText('가압기 PORV 열림 지시 및 경보 발생') self.symptom1.setText('Pressurizer PORV open indication and alarm') if symptom_db[1].iloc[1]['BPORV'] > 0: self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}") else: self.symptom1.setStyleSheet("QCheckBo" "x::indicator" 
"{""background-color : white;""}") # self.symptom2.setText('가압기 저압력으로 인한 보조 전열기 켜짐 지시 및 경보 발생') self.symptom2.setText('Aux. heater turn on instruction and alarm due to pressurizer low pressure') if (symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CQPRZB']) and (symptom_db[1].iloc[1]['KBHON'] == 1): self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}") else: self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") # self.symptom3.setText("가압기 '저' 압력 지시 및 경보 발생") self.symptom3.setText("pressurizer 'low' pressure indication and alarm") if symptom_db[1].iloc[1]['PPRZ'] < symptom_db[1].iloc[1]['CPPRZL'] : self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}") else: self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") # self.symptom4.setText("PRT 고온 지시 및 경보 발생") self.symptom4.setText("PRT high temperature indication and alarm") if symptom_db[1].iloc[1]['UPRT'] > symptom_db[1].iloc[1]['CUPRT'] : self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}") else: self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") # self.symptom5.setText("PRT 고압 지시 및 경보 발생") self.symptom5.setText("PRT high pressure indication and alarm") if (symptom_db[1].iloc[1]['PPRT'] - 0.98E5) > symptom_db[1].iloc[1]['CPPRT']: self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}") else: self.symptom5.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") self.symptom6.setText("Blank") self.symptom6.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") elif symptom_db[0] == 10: self.symptom_name.setText("진단 : Ab21-11 가압기 살수밸브 고장 '열림' → 증상 : 4") self.symptom1.setText("가압기 살수밸브 '열림' 지시 및 상태 표시등 점등") if symptom_db[1].iloc[1]['BPRZSP'] > 0: self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}") else: self.symptom1.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") self.symptom2.setText("가압기 보조전열기 켜짐 지시 및 경보 발생") if (symptom_db[1].iloc[1]['PPRZN'] < symptom_db[1].iloc[1]['CQPRZB']) and (symptom_db[1].iloc[1]['KBHON'] == 1): self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}") else: self.symptom2.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") self.symptom3.setText("가압기 '저' 압력 지시 및 경보 발생") if symptom_db[1].iloc[1]['PPRZ'] < symptom_db[1].iloc[1]['CPPRZL']: self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}") else: self.symptom3.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") self.symptom4.setText("가압기 수위 급격한 증가") # 급격한 증가에 대한 수정은 필요함 -> 추후 수정 if symptom_db[1].iloc[0]['ZINST63'] < symptom_db[1].iloc[1]['ZINST63']: self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : red;""}") else: self.symptom4.setStyleSheet("QCheckBo" "x::indicator" "{""background-color : white;""}") def explain_result(self, shap_add_des): ''' # shap_add_des['index'] : 변수 이름 / shap_add_des[0] : shap value # shap_add_des['describe'] : 변수에 대한 설명 / shap_add_des['probability'] : shap value를 확률로 환산한 값 ''' self.tableWidget.setRowCount(len(shap_add_des)) self.tableWidget.setColumnCount(4) self.tableWidget.setHorizontalHeaderLabels(["value_name", 'probability', 'describe', 'system']) header = self.tableWidget.horizontalHeader() header.setSectionResizeMode(QHeaderView.ResizeToContents) 
header.setSectionResizeMode(0, QHeaderView.Stretch) header.setSectionResizeMode(1, QHeaderView.Stretch) header.setSectionResizeMode(2, QHeaderView.ResizeToContents) header.setSectionResizeMode(3, QHeaderView.Stretch) [self.tableWidget.setItem(i, 0, QTableWidgetItem(f"{shap_add_des['index'][i]}")) for i in range(len(shap_add_des['index']))] [self.tableWidget.setItem(i, 1, QTableWidgetItem(f"{round(shap_add_des['probability'][i],2)}%")) for i in range(len(shap_add_des['probability']))] [self.tableWidget.setItem(i, 2, QTableWidgetItem(f"{shap_add_des['describe'][i]}")) for i in range(len(shap_add_des['describe']))] [self.tableWidget.setItem(i, 3, QTableWidgetItem(f"{shap_add_des['system'][i]}")) for i in range(len(shap_add_des['system']))] delegate = AlignDelegate(self.tableWidget) self.tableWidget.setItemDelegate(delegate) def show_table(self): self.worker.shap.connect(self.explain_result) # 클릭시 Thread를 통해 신호를 전달하기 때문에 버퍼링이 발생함. 2초 정도? 이 부분은 나중에 생각해서 초기에 불러올지 고민해봐야할듯. self.tableWidget.show() def plotting(self, symptom_db): # symptom_db[0] : liner : appended time (axis-x) / symptom_db[1].iloc[1] : check_db (:line,2222)[1] # -- scatter -- # time = [] # value1, value2, value3 = [], [], [] # time.append(symptom_db[0]) # value1.append(round(symptom_db[1].iloc[1]['ZVCT'],2)) # value2.append(round(symptom_db[1].iloc[1]['BPORV'],2)) # value3.append(round(symptom_db[1].iloc[1]['UPRZ'],2)) # self.plotting_1 = self.plot_1.plot(pen=None, symbol='o', symbolBrush='w', symbolPen='w', symbolSize=5) # self.plotting_2 = self.plot_2.plot(pen=None, symbol='o', symbolBrush='w', symbolPen='w', symbolSize=5) # self.plotting_3 = self.plot_3.plot(pen=None, symbol='o', symbolBrush='w', symbolPen='w', symbolSize=5) # -- Line plotting -- # self.plotting_1 = self.plot_1.plot(pen='w') # self.plotting_2 = self.plot_2.plot(pen='w') # self.plotting_3 = self.plot_3.plot(pen='w') # self.plotting_4 = self.plot_4.plot(pen='w') self.plot_1.showGrid(x=True, y=True, alpha=0.3) self.plot_2.showGrid(x=True, y=True, alpha=0.3) self.plot_3.showGrid(x=True, y=True, alpha=0.3) self.plot_4.showGrid(x=True, y=True, alpha=0.3) self.plotting_1 = self.plot_1.plot(pen=pyqtgraph.mkPen('k',width=3)) self.plotting_2 = self.plot_2.plot(pen=pyqtgraph.mkPen('k',width=3)) self.plotting_3 = self.plot_3.plot(pen=pyqtgraph.mkPen('k',width=3)) self.plotting_4 = self.plot_4.plot(pen=pyqtgraph.mkPen('k',width=3)) self.plotting_1.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['BPORV']) self.plot_1.setTitle('PORV open state') self.plotting_2.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['PPRZN']) self.plot_2.setTitle('Pressurizer pressure') self.plotting_3.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['UPRT']) self.plot_3.setTitle('PRT temperature') self.plotting_4.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])['PPRT']) self.plot_4.setTitle('PRT pressure') # red_range = display_db[display_db['probability'] >= 10] # 10% 이상의 확률을 가진 변수 # # print(bool(red_range["describe"].iloc[3])) # try : # self.plotting_1.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[0]]) # if red_range["describe"].iloc[0] == None: # self.plot_1.setTitle(self) # else: # self.plot_1.setTitle(f'{red_range["describe"].iloc[0]}') # # self.plot_1.clear() # except: # print('plot1 fail') # try: # self.plotting_2.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[1]]) # if red_range["describe"].iloc[1] == None: # self.plot_2.setTitle(self) # else: # self.plot_2.setTitle(f'{red_range["describe"].iloc[1]}') 
# # self.plot_2.clear() # except: # print('plot2 fail') # try: # self.plotting_3.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[2]]) # if red_range["describe"].iloc[2] == None: # self.plot_3.setTitle(self) # else: # self.plot_3.setTitle(f'{red_range["describe"].iloc[2]}') # # self.plot_3.clear() # except: # print('plot3 fail') # try: # self.plotting_4.setData(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[3]]) # if red_range["describe"].iloc[3] == None: # self.plot_4.setTitle(self) # else: # self.plot_4.setTitle(f'{red_range["describe"].iloc[3]}') # # self.plot_4.clear() # except: # print('plot4 fail') def display_explain(self, display_db, symptom_db, normal_db): ''' # display_db['index'] : 변수 이름 / display_db[0] : shap value # display_db['describe'] : 변수에 대한 설명 / display_db['probability'] : shap value를 확률로 환산한 값 # symptom_db[0] : liner : appended time (axis-x) / symptom_db[1].iloc[1] : check_db (:line,2222)[1] ''' red_range = display_db[display_db['probability'] >=10] orange_range = display_db[[display_db['probability'].iloc[i]<10 and display_db['probability'].iloc[i]>1 for i in range(len(display_db['probability']))]] convert_red = {0: self.red1, 1: self.red2, 2: self.red3, 3: self.red4} convert_orange = {0: self.orange1, 1: self.orange2, 2: self.orange3, 3: self.orange4, 4: self.orange5, 5: self.orange6, 6: self.orange7, 7: self.orange8, 8: self.orange9, 9: self.orange10, 10: self.orange11, 11: self.orange12} if 4-len(red_range) == 0: red_del = [] elif 4-len(red_range) == 1: red_del = [3] elif 4-len(red_range) == 2: red_del = [2,3] elif 4-len(red_range) == 3: red_del = [1,2,3] elif 4-len(red_range) == 4: red_del = [0,1,2,3] if 12-len(orange_range) == 0: orange_del = [] elif 12-len(orange_range) == 1: orange_del = [11] elif 12-len(orange_range) == 2: orange_del = [10,11] elif 12-len(orange_range) == 3: orange_del = [9,10,11] elif 12-len(orange_range) == 4: orange_del = [8,9,10,11] elif 12-len(orange_range) == 5: orange_del = [7,8,9,10,11] elif 12-len(orange_range) == 6: orange_del = [6,7,8,9,10,11] elif 12-len(orange_range) == 7: orange_del = [5,6,7,8,9,10,11] elif 12-len(orange_range) == 8: orange_del = [4,5,6,7,8,9,10,11] elif 12-len(orange_range) == 9: orange_del = [3,4,5,6,7,8,9,10,11] elif 12-len(orange_range) == 10: orange_del = [2,3,4,5,6,7,8,9,10,11] elif 12-len(orange_range) == 11: orange_del = [1,2,3,4,5,6,7,8,9,10,11] elif 12-len(orange_range) == 12: orange_del = [0,1,2,3,4,5,6,7,8,9,10,11] [convert_red[i].setText(f'{red_range["describe"].iloc[i]} \n[{round(red_range["probability"].iloc[i],2)}%]') for i in range(len(red_range))] [convert_red[i].setText('None\nParameter') for i in red_del] [convert_red[i].setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: blue;') for i in range(len(red_range))] [convert_red[i].setStyleSheet('color : black;' 'background-color: light gray;') for i in red_del] [convert_orange[i].setText(f'{orange_range["describe"].iloc[i]} \n[{round(orange_range["probability"].iloc[i],2)}%]') for i in range(len(orange_range))] [convert_orange[i].setText('None\nParameter') for i in orange_del] # [convert_orange[i].setStyleSheet('color : white;' 'font-weight: bold;' 'background-color: orange;') for i in range(len(orange_range))] # [convert_orange[i].setStyleSheet('color : black;' 'background-color: light gray;') for i in orange_del] # 각 Button에 호환되는 Plotting 데이터 구축 # Red1 Button if self.red1.text().split()[0] != 'None': self.red_plot_1.clear() 
self.red_plot_1.setTitle(red_range['describe'].iloc[0]) self.red_plot_1.addLegend(offset=(-30,20)) self.red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('b', width=3), name = 'Real Data') self.red_plot_1.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[0]], pen=pyqtgraph.mkPen('k', width=3), name = 'Normal Data') # Red2 Button if self.red2.text().split()[0] != 'None': self.red_plot_2.clear() self.red_plot_2.setTitle(red_range['describe'].iloc[1]) self.red_plot_2.addLegend(offset=(-30, 20)) self.red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(symptom_db[1])[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('b', width=3), name='Real Data') self.red_plot_2.plot(x=symptom_db[0], y=pd.DataFrame(normal_db)[red_range['index'].iloc[1]], pen=pyqtgraph.mkPen('k', width=3), name='Normal Data') # Red3 Button if self.red3.text().split()[0] != 'None': self.red_plot_3.clear() self.red_plot_3.setTitle(red_range['describe'].iloc[2]) self.red_plot_3.addLegend(offset=(-30, 20)) self.red_plot_3.plot(x=symptom_db[0], y=
pd.DataFrame(symptom_db[1])
pandas.DataFrame
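# Illustrative sketch (not part of the dataset row above): a minimal, hedged example of the
# pandas.DataFrame call shown in the completion. "records" and "frame" are hypothetical
# stand-ins for symptom_db[1] and the frame whose columns are plotted.
import pandas as pd

records = [{"BPORV": 0.0, "PPRZN": 155.6}, {"BPORV": 1.0, "PPRZN": 151.2}]
frame = pd.DataFrame(records)   # list of dicts -> DataFrame with columns BPORV, PPRZN
print(frame["BPORV"].tolist())  # column selection as used for the y-values of the plots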
def CCA_analysis(TaXon_table_xlsx, meta_data_to_test, taxonomic_level, width, height, cca_scatter_size, path_to_outdirs, template, font_size, color_discrete_sequence):
    import pandas as pd
    import numpy as np
    from skbio.diversity import beta_diversity
    from skbio.stats.ordination import cca
    from skbio.stats.distance import anosim
    import plotly.graph_objects as go
    from plotly.subplots import make_subplots
    import plotly.express as px
    from pathlib import Path
    import PySimpleGUI as sg
    import os, webbrowser
    from itertools import combinations

    TaXon_table_xlsx = Path(TaXon_table_xlsx)
    Meta_data_table_xlsx = Path(str(path_to_outdirs) + "/" + "Meta_data_table" + "/" + TaXon_table_xlsx.stem + "_metadata.xlsx")
    TaXon_table_df =
pd.read_excel(TaXon_table_xlsx, header=0)
pandas.read_excel
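# Illustrative sketch: a minimal, hedged example of the pandas.read_excel call in the
# completion above. "TaXon_table.xlsx" is a hypothetical path; reading .xlsx files requires
# an installed engine such as openpyxl.
import pandas as pd

TaXon_table_df = pd.read_excel("TaXon_table.xlsx", header=0)  # row 0 supplies the column names
print(TaXon_table_df.columns.tolist())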
import pandas as pd import pytest import featuretools as ft from featuretools.entityset import EntitySet, Relationship from featuretools.utils.cudf_utils import pd_to_cudf_clean from featuretools.utils.gen_utils import import_or_none cudf = import_or_none('cudf') # TODO: Fix vjawa @pytest.mark.skipif('not cudf') def test_create_entity_from_cudf_df(pd_es): cleaned_df = pd_to_cudf_clean(pd_es["log"].df) log_cudf = cudf.from_pandas(cleaned_df) print(pd_es["log"].variable_types) cudf_es = EntitySet(id="cudf_es") cudf_es = cudf_es.entity_from_dataframe( entity_id="log_cudf", dataframe=log_cudf, index="id", time_index="datetime", variable_types=pd_es["log"].variable_types ) pd.testing.assert_frame_equal(cleaned_df, cudf_es["log_cudf"].df.to_pandas(), check_like=True) @pytest.mark.skipif('not cudf') def test_create_entity_with_non_numeric_index(pd_es, cudf_es): df = pd.DataFrame({"id": ["A_1", "A_2", "C", "D"], "values": [1, 12, -34, 27]}) cudf_df = cudf.from_pandas(df) pd_es.entity_from_dataframe( entity_id="new_entity", dataframe=df, index="id") cudf_es.entity_from_dataframe( entity_id="new_entity", dataframe=cudf_df, index="id", variable_types={"id": ft.variable_types.Id, "values": ft.variable_types.Numeric}) pd.testing.assert_frame_equal(pd_es['new_entity'].df.reset_index(drop=True), cudf_es['new_entity'].df.to_pandas()) @pytest.mark.skipif('not cudf') def test_create_entityset_with_mixed_dataframe_types(pd_es, cudf_es): df = pd.DataFrame({"id": [0, 1, 2, 3], "values": [1, 12, -34, 27]}) cudf_df = cudf.from_pandas(df) # Test error is raised when trying to add Koalas entity to entitset with existing pandas entities err_msg = "All entity dataframes must be of the same type. " \ "Cannot add entity of type {} to an entityset with existing entities " \ "of type {}".format(type(cudf_df), type(pd_es.entities[0].df)) with pytest.raises(ValueError, match=err_msg): pd_es.entity_from_dataframe( entity_id="new_entity", dataframe=cudf_df, index="id") # Test error is raised when trying to add pandas entity to entitset with existing cudf entities err_msg = "All entity dataframes must be of the same type. 
" \ "Cannot add entity of type {} to an entityset with existing entities " \ "of type {}".format(type(df), type(cudf_es.entities[0].df)) with pytest.raises(ValueError, match=err_msg): cudf_es.entity_from_dataframe( entity_id="new_entity", dataframe=df, index="id") @pytest.mark.skipif('not cudf') def test_add_last_time_indexes(): pd_es = EntitySet(id="pd_es") cudf_es = EntitySet(id="cudf_es") sessions = pd.DataFrame({"id": [0, 1, 2, 3], "user": [1, 2, 1, 3], "time": [pd.to_datetime('2019-01-10'), pd.to_datetime('2019-02-03'), pd.to_datetime('2019-01-01'), pd.to_datetime('2017-08-25')], "strings": ["I am a string", "23", "abcdef ghijk", ""]}) sessions_cudf = cudf.from_pandas(sessions) sessions_vtypes = { "id": ft.variable_types.Id, "user": ft.variable_types.Id, "time": ft.variable_types.DatetimeTimeIndex, "strings": ft.variable_types.NaturalLanguage } transactions = pd.DataFrame({"id": [0, 1, 2, 3, 4, 5], "session_id": [0, 0, 1, 2, 2, 3], "amount": [1.23, 5.24, 123.52, 67.93, 40.34, 50.13], "time": [pd.to_datetime('2019-01-10 03:53'), pd.to_datetime('2019-01-10 04:12'), pd.to_datetime('2019-02-03 10:34'), pd.to_datetime('2019-01-01 12:35'), pd.to_datetime('2019-01-01 12:49'), pd.to_datetime('2017-08-25 04:53')]}) transactions_cudf = cudf.from_pandas(transactions) transactions_vtypes = { "id": ft.variable_types.Id, "session_id": ft.variable_types.Id, "amount": ft.variable_types.Numeric, "time": ft.variable_types.DatetimeTimeIndex, } pd_es.entity_from_dataframe(entity_id="sessions", dataframe=sessions, index="id", time_index="time") cudf_es.entity_from_dataframe(entity_id="sessions", dataframe=sessions_cudf, index="id", time_index="time", variable_types=sessions_vtypes) pd_es.entity_from_dataframe(entity_id="transactions", dataframe=transactions, index="id", time_index="time") cudf_es.entity_from_dataframe(entity_id="transactions", dataframe=transactions_cudf, index="id", time_index="time", variable_types=transactions_vtypes) new_rel = Relationship(pd_es["sessions"]["id"], pd_es["transactions"]["session_id"]) cudf_rel = Relationship(cudf_es["sessions"]["id"], cudf_es["transactions"]["session_id"]) pd_es = pd_es.add_relationship(new_rel) cudf_es = cudf_es.add_relationship(cudf_rel) assert pd_es['sessions'].last_time_index is None assert cudf_es['sessions'].last_time_index is None pd_es.add_last_time_indexes() cudf_es.add_last_time_indexes() pd.testing.assert_series_equal(pd_es['sessions'].last_time_index.sort_index(), cudf_es['sessions'].last_time_index.to_pandas().sort_index(), check_names=False) @pytest.mark.skipif('not cudf') def test_create_entity_with_make_index(): values = [1, 12, -23, 27] df = pd.DataFrame({"values": values}) cudf_df = cudf.from_pandas(df) cudf_es = EntitySet(id="cudf_es") vtypes = {"values": ft.variable_types.Numeric} cudf_es.entity_from_dataframe(entity_id="new_entity", dataframe=cudf_df, make_index=True, index="new_index", variable_types=vtypes) expected_df = pd.DataFrame({"new_index": range(len(values)), "values": values}) pd.testing.assert_frame_equal(expected_df, cudf_es['new_entity'].df.to_pandas().sort_index()) @pytest.mark.skipif('not cudf') def test_single_table_cudf_entityset(): primitives_list = ['absolute', 'is_weekend', 'year', 'day', 'num_characters', 'num_words'] cudf_es = EntitySet(id="cudf_es") df = pd.DataFrame({"id": [0, 1, 2, 3], "values": [1, 12, -34, 27], "dates": [pd.to_datetime('2019-01-10'), pd.to_datetime('2019-02-03'), pd.to_datetime('2019-01-01'), pd.to_datetime('2017-08-25')], "strings": ["I am a string", "23", "abcdef ghijk", ""]}) 
values_dd = cudf.from_pandas(df) vtypes = { "id": ft.variable_types.Id, "values": ft.variable_types.Numeric, "dates": ft.variable_types.Datetime, "strings": ft.variable_types.NaturalLanguage } cudf_es.entity_from_dataframe(entity_id="data", dataframe=values_dd, index="id", variable_types=vtypes) cudf_fm, _ = ft.dfs(entityset=cudf_es, target_entity="data", trans_primitives=primitives_list) pd_es = ft.EntitySet(id="pd_es") pd_es.entity_from_dataframe(entity_id="data", dataframe=df, index="id", variable_types={"strings": ft.variable_types.NaturalLanguage}) fm, _ = ft.dfs(entityset=pd_es, target_entity="data", trans_primitives=primitives_list) cudf_computed_fm = cudf_fm.to_pandas().set_index('id').loc[fm.index][fm.columns] # NUM_WORDS(strings) is int32 in koalas for some reason pd.testing.assert_frame_equal(fm, cudf_computed_fm, check_dtype=False) @pytest.mark.skipif('not cudf') def test_single_table_cudf_entityset_ids_not_sorted(): primitives_list = ['absolute', 'is_weekend', 'year', 'day', 'num_characters', 'num_words'] cudf_es = EntitySet(id="cudf_es") df = pd.DataFrame({"id": [2, 0, 1, 3], "values": [1, 12, -34, 27], "dates": [pd.to_datetime('2019-01-10'), pd.to_datetime('2019-02-03'), pd.to_datetime('2019-01-01'), pd.to_datetime('2017-08-25')], "strings": ["I am a string", "23", "abcdef ghijk", ""]}) values_dd = cudf.from_pandas(df) vtypes = { "id": ft.variable_types.Id, "values": ft.variable_types.Numeric, "dates": ft.variable_types.Datetime, "strings": ft.variable_types.NaturalLanguage } cudf_es.entity_from_dataframe(entity_id="data", dataframe=values_dd, index="id", variable_types=vtypes) cudf_fm, _ = ft.dfs(entityset=cudf_es, target_entity="data", trans_primitives=primitives_list) pd_es = ft.EntitySet(id="pd_es") pd_es.entity_from_dataframe(entity_id="data", dataframe=df, index="id", variable_types={"strings": ft.variable_types.NaturalLanguage}) fm, _ = ft.dfs(entityset=pd_es, target_entity="data", trans_primitives=primitives_list) # Make sure both indexes are sorted the same pd.testing.assert_frame_equal(fm, cudf_fm.to_pandas().set_index('id').loc[fm.index], check_dtype=False) @pytest.mark.skipif('not cudf') def test_single_table_cudf_entityset_with_instance_ids(): primitives_list = ['absolute', 'is_weekend', 'year', 'day', 'num_characters', 'num_words'] instance_ids = [0, 1, 3] cudf_es = EntitySet(id="cudf_es") df = pd.DataFrame({"id": [0, 1, 2, 3], "values": [1, 12, -34, 27], "dates": [pd.to_datetime('2019-01-10'), pd.to_datetime('2019-02-03'), pd.to_datetime('2019-01-01'), pd.to_datetime('2017-08-25')], "strings": ["I am a string", "23", "abcdef ghijk", ""]}) values_dd = cudf.from_pandas(df) vtypes = { "id": ft.variable_types.Id, "values": ft.variable_types.Numeric, "dates": ft.variable_types.Datetime, "strings": ft.variable_types.NaturalLanguage } cudf_es.entity_from_dataframe(entity_id="data", dataframe=values_dd, index="id", variable_types=vtypes) cudf_fm, _ = ft.dfs(entityset=cudf_es, target_entity="data", trans_primitives=primitives_list, instance_ids=instance_ids) pd_es = ft.EntitySet(id="pd_es") pd_es.entity_from_dataframe(entity_id="data", dataframe=df, index="id", variable_types={"strings": ft.variable_types.NaturalLanguage}) fm, _ = ft.dfs(entityset=pd_es, target_entity="data", trans_primitives=primitives_list, instance_ids=instance_ids) print(fm) # # Make sure both indexes are sorted the same pd.testing.assert_frame_equal(fm, cudf_fm.to_pandas().set_index('id').loc[fm.index], check_dtype=False) @pytest.mark.skipif('not cudf') def 
test_single_table_cudf_entityset_single_cutoff_time(): primitives_list = ['absolute', 'is_weekend', 'year', 'day', 'num_characters', 'num_words'] cudf_es = EntitySet(id="cudf_es") df = pd.DataFrame({"id": [0, 1, 2, 3], "values": [1, 12, -34, 27], "dates": [pd.to_datetime('2019-01-10'), pd.to_datetime('2019-02-03'), pd.to_datetime('2019-01-01'), pd.to_datetime('2017-08-25')], "strings": ["I am a string", "23", "abcdef ghijk", ""]}) values_dd = cudf.from_pandas(df) vtypes = { "id": ft.variable_types.Id, "values": ft.variable_types.Numeric, "dates": ft.variable_types.Datetime, "strings": ft.variable_types.NaturalLanguage } cudf_es.entity_from_dataframe(entity_id="data", dataframe=values_dd, index="id", variable_types=vtypes) cudf_fm, _ = ft.dfs(entityset=cudf_es, target_entity="data", trans_primitives=primitives_list, cutoff_time=pd.Timestamp("2019-01-05 04:00")) pd_es = ft.EntitySet(id="pd_es") pd_es.entity_from_dataframe(entity_id="data", dataframe=df, index="id", variable_types={"strings": ft.variable_types.NaturalLanguage}) fm, _ = ft.dfs(entityset=pd_es, target_entity="data", trans_primitives=primitives_list, cutoff_time=pd.Timestamp("2019-01-05 04:00")) # Make sure both indexes are sorted the same pd.testing.assert_frame_equal(fm, cudf_fm.to_pandas().set_index('id').loc[fm.index], check_dtype=False) @pytest.mark.skipif('not cudf') def test_single_table_cudf_entityset_cutoff_time_df(): primitives_list = ['absolute', 'is_weekend', 'year', 'day', 'num_characters', 'num_words'] cudf_es = EntitySet(id="cudf_es") df = pd.DataFrame({"id": [0, 1, 2], "values": [1, 12, -34], "dates": [pd.to_datetime('2019-01-10'), pd.to_datetime('2019-02-03'), pd.to_datetime('2019-01-01')], "strings": ["I am a string", "23", "abcdef ghijk"]}) values_dd = cudf.from_pandas(df) vtypes = { "id": ft.variable_types.Id, "values": ft.variable_types.Numeric, "dates": ft.variable_types.Datetime, "strings": ft.variable_types.NaturalLanguage } cudf_es.entity_from_dataframe(entity_id="data", dataframe=values_dd, index="id", time_index="dates", variable_types=vtypes) ids = [0, 1, 2, 0] times = [pd.Timestamp("2019-01-05 04:00"), pd.Timestamp("2019-01-05 04:00"),
pd.Timestamp("2019-01-05 04:00")
pandas.Timestamp
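# Illustrative sketch: a minimal, hedged example of pandas.Timestamp as used for the
# single cutoff_time value in the test above.
import pandas as pd

cutoff = pd.Timestamp("2019-01-05 04:00")  # parse a string into one point in time
print(cutoff.year, cutoff.hour)            # 2019 4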
import numpy as np
import pandas as pd
from collections import OrderedDict
from pandas.api.types import is_numeric_dtype, is_object_dtype, is_categorical_dtype
from typing import List, Optional, Tuple, Callable


def inspect_df(df: pd.DataFrame) -> pd.DataFrame:
    """ Show column types and null values in DataFrame df """
    resdict = OrderedDict()

    # Inspect nulls
    null_series = df.isnull().sum()
    resdict["column"] = null_series.index
    resdict["null_fraction"] = np.round(null_series.values / len(df), 3)
    resdict["nulls"] = null_series.values

    # Inspect types
    types = df.dtypes.values
    type_names = [t.name for t in types]
    resdict["type"] = type_names

    # Is numeric?
    is_numeric = []
    for col in df.columns:
        is_numeric.append(
is_numeric_dtype(df[col])
pandas.api.types.is_numeric_dtype
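# Illustrative sketch: a minimal, hedged example of pandas.api.types.is_numeric_dtype,
# the per-column check appended inside inspect_df above.
import pandas as pd
from pandas.api.types import is_numeric_dtype

df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
print([is_numeric_dtype(df[col]) for col in df.columns])  # [True, False]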
# Copyright (c) 2020 Google LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Builds a table with SMILES and yield information."""

import collections
import dataclasses

import pandas as pd
from rdkit import Chem
from rdkit.Chem import AllChem


def location_to_row_col(location, block, plate):
    """Converts a block location to (row, col) on the plate.

    Args:
        location: Text location in the block, e.g. "1:A".
        block: Integer block number.
        plate: Integer plate number.

    Returns:
        Tuple of (row, col) integers; the location on the plate.
    """
    if plate == 3:
        row_letter, col = location.split(':')
    else:
        col, row_letter = location.split(':')
    col = int(col)
    row = ord(row_letter) - 64
    if block == 2:
        col += 24
    elif block == 3:
        row += 16
    elif block == 4:
        row += 16
        col += 24
    return row, col


def read_yield_data():
    """Reads location/yield data from the yield_data/ directory.

    Returns:
        DataFrame with the following columns:
            * plate
            * row
            * col
            * yield
    """
    data = []
    for plate in [1, 2, 3]:
        for block in [1, 2, 3, 4]:
            filename = f'yield_data/plate{plate}.{block}.csv'
            print(filename)
            df = pd.read_csv(filename)
            mask = df.Location.isna()
            if mask.any():
                print(df[mask])
            df = df[~mask]
            locations = df.apply(
                lambda x: location_to_row_col(x.Location, block, plate),
                axis=1,
                result_type='expand')
            locations.rename(columns={0: 'row', 1: 'col'}, inplace=True)
            df = pd.concat([df, locations], axis=1)
            df['plate'] = plate
            df = df[['plate', 'row', 'col', 'product_scaled']]
            df.rename(columns={'product_scaled': 'yield'}, inplace=True)
            data.append(df)
    return
pd.concat(data, ignore_index=True)
pandas.concat
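# Illustrative sketch: a minimal, hedged example of pandas.concat with ignore_index=True,
# which stacks the per-plate frames collected above and renumbers the row index.
import pandas as pd

part1 = pd.DataFrame({"plate": [1, 1], "yield": [0.2, 0.4]})
part2 = pd.DataFrame({"plate": [2], "yield": [0.9]})
combined = pd.concat([part1, part2], ignore_index=True)
print(combined.index.tolist())  # [0, 1, 2]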
# -*- coding: utf-8 -*- # Data Science Packages import pandas as pd import numpy as np import scipy.stats as stats # Useful from datetime import datetime, timedelta import math # Operations import os # Extra import ast import warnings warnings.filterwarnings('ignore') class wcwh(): """ Class dedicated to processing data from the wcwh studies """ def __init__(self,study,suffix,data_dir="../../data"): # study specifics self.study = study self.suffix = suffix self.data_dir = data_dir # participant and device ids self.id_crossover = pd.read_excel(f'{self.data_dir}/raw/{self.study}/admin/id_crossover.xlsx',sheet_name='id') self.beacon_id = pd.read_excel(f'{self.data_dir}/raw/{self.study}/admin/id_crossover.xlsx',sheet_name='beacon') # beacon correction factors self.correction = {} for file in os.listdir(f"{self.data_dir}/interim/"): file_info = file.split("-") if len(file_info) == 3: if file_info[1] == "linear_model" and file_info[-1] == self.suffix+".csv": try: self.correction[file_info[0]] = pd.read_csv(f'{self.data_dir}/interim/{file}',index_col=0) except FileNotFoundError: print(f"Missing offset for {file_info[0]} - padding with zeros") self.correction[file_info[0]] = pd.DataFrame(data={"beacon":np.arange(1,51),"constant":np.zeros(51),"coefficient":np.ones(51)}).set_index("beacon") # EMA Attributes self.ema_start = datetime(2020,6,1) self.ema_end = datetime(2020,9,1) def move_to_purgatory(self,path_to_file,path_to_destination): ''' Moves problematic file to the purgatory data directory Returns void ''' print('\t\tMoving to purgatory...') os.replace(path_to_file, path_to_destination) def process_beacon(self,extreme=''): ''' Combines data from all sensors on all beacons Returns True if able to save one dataframe that contains all the data at regular intervals in /data/processed directory ''' beacon_data = pd.DataFrame() # dataframe to hold the final set of data beacons_folder=f"{self.data_dir}/raw/{self.study}/beacon" print('\tProcessing beacon data...\n\t\tReading for beacon:') for beacon in self.beacon_id["beacon"].to_list(): # correcting the number since the values <10 have leading zero in directory number = f'{beacon:02}' print(f'\t\t{number}') beacon_folder=f'{beacons_folder}/B{number}' beacon_df = pd.DataFrame() # dataframe specific to the beacon # getting other ids beacon_crossover_info = self.id_crossover.loc[self.id_crossover['beacon'] == beacon].reset_index(drop=True) beiwe = beacon_crossover_info['beiwe'][0] fitbit = beacon_crossover_info['fitbit'][0] redcap = beacon_crossover_info['redcap'][0] del beacon_crossover_info def import_and_merge(csv_dir,number,resample_rate=2): df_list = [] for file in os.listdir(csv_dir+'/'): try: # reading in raw data (csv for one day at a time) and appending it to the overal dataframe day_df = pd.read_csv(f'{csv_dir}/{file}', index_col='Timestamp',parse_dates=True, infer_datetime_format=True) df_list.append(day_df) except Exception: # for whatever reason, some files have header issues - these are moved to purgatory to undergo triage print(f'Issue encountered while importing {csv_dir}/{file}, skipping...') self.move_to_purgatory(f'{csv_dir}/{file}',f'{self.data_dir}/purgatory/B{number}-py3-{file}-{self.suffix}') try: df = pd.concat(df_list).resample(f'{resample_rate}T').mean() # resampling to 5 minute intervals (raw data is at about 1 min) return df except ValueError: return pd.DataFrame() # empty dataframe # Python3 Sensors # --------------- py3_df = import_and_merge(f'{beacon_folder}/adafruit', number) if len(py3_df) == 0: continue # Changing NO2 
readings on beacons without NO2 readings to CO (wiring issues - see Hagen) if int(number) >= 28: print('\t\t\tNo NO2 sensor - removing values') py3_df[['CO','T_CO','RH_CO']] = py3_df[['NO2','T_NO2','RH_NO2']] py3_df[['NO2','T_NO2','RH_NO2']] = np.nan py3_df['CO'] /= 1000 # converting ppb measurements to ppm # Python2 Sensors # --------------- py2_df = import_and_merge(f'{beacon_folder}/sensirion', number) if len(py2_df) == 0: continue # Cleaning # -------- # merging python2 and 3 sensor dataframes beacon_df = py3_df.merge(right=py2_df,left_index=True,right_index=True,how='outer') # getting relevant data only start_date = self.beacon_id[self.beacon_id['beiwe'] == beiwe]['start_date'].values[0] end_date = self.beacon_id[self.beacon_id['beiwe'] == beiwe]['end_date'].values[0] beacon_df = beacon_df[start_date:end_date] # combing T/RH readings and dropping the bad ones beacon_df['temperature_c'] = beacon_df[['T_CO','T_NO2']].mean(axis=1) beacon_df['rh'] = beacon_df[['RH_CO','RH_NO2']].mean(axis=1) beacon_df.drop(["T_NO2","T_CO","RH_NO2","RH_CO","Temperature [C]","Relative Humidity"],axis=1,inplace=True) # dropping unecessary columns beacon_df.drop(["Visible","Infrared","eCO2","PM_N_0p5","PM_N_4","PM_C_4"],axis=1,inplace=True) # renaming columns beacon_df.rename(columns={"TVOC":"tvoc","Lux":"lux","NO2":"no2","CO":"co","CO2":"co2", "PM_N_1":"pm1_number","PM_N_2p5":"pm2p5_number","PM_N_10":"pm10_number", "PM_C_1":"pm1_mass","PM_C_2p5":"pm2p5_mass","PM_C_10":"pm10_mass"},inplace=True) beacon_df.index.rename("timestamp",inplace=True) # offsetting measurements with linear model for var in self.correction.keys(): print() beacon_df[var] = beacon_df[var] * self.correction[var].loc[beacon,"coefficient"] + self.correction[var].loc[beacon,"constant"] # variables that should never have anything less than zero for var in ["lux",'temperature_c','rh']: beacon_df[var].mask(beacon_df[var] < 0, np.nan, inplace=True) # variables that should never be less than a certain limit #for var, threshold in zip(['co2'],[100]): # beacon_df[var].mask(beacon_df[var] < threshold, np.nan, inplace=True) # removing extreme values if extreme == 'zscore': # zscore greater than 2.5 for var in beacon_df.columns: beacon_df['z'] = abs(beacon_df[var] - beacon_df[var].mean()) / beacon_df[var].std(ddof=0) beacon_df.loc[beacon_df['z'] > 2.5, var] = np.nan beacon_df.drop(['z'],axis=1,inplace=True) elif extreme == 'iqr': for var in beacon_df.columns: # Computing IQR Q1 = beacon_df[var].quantile(0.25) Q3 = beacon_df[var].quantile(0.75) IQR = Q3 - Q1 # Filtering Values between Q1-1.5IQR and Q3+1.5IQR beacon_df[var].mask(beacon_df[var]<Q1-1.5*IQR,np.nan,inplace=True) beacon_df[var].mask(beacon_df[var]>Q3+1.5*IQR,np.nan,inplace=True) else: print('\t\t\tExtreme values retained') # dropping NaN values that get in beacon_df.dropna(how='all',inplace=True) # adding columns for the pt details beacon_df['beacon'] = beacon beacon_df['beiwe'] = beiwe beacon_df['fitbit'] = fitbit beacon_df['redcap'] = redcap beacon_data = pd.concat([beacon_data,beacon_df]) # saving try: beacon_data.to_csv(f'{self.data_dir}/processed/beacon-{self.suffix}.csv') except: return False return True def process_gps(self, data_dir='/Volumes/HEF_Dissertation_Research/utx000/data/raw/utx000/beiwe/gps/', home=False): ''' Processes the raw gps data into one csv file for each participant and saves into /data/processed/ All GPS data are recorded at 1-second intervals and stored in separate data files for every hour. 
The data are combined into one dataframe per participant, downsampled to 5-minute intervals using the mode value for those 5-minutes (after rounding coordinates to five decimal places), and combined into a final dataframe that contains all participants' data. Returns True is able to process the data, false otherwise. ''' print('\tProcessing gps data...') gps_df = pd.DataFrame() for participant in os.listdir(data_dir): if len(participant) == 8: # checking to make sure we only look for participant directories pid = participant print(f'\t\tWorking for Participant: {pid}') participant_df = pd.DataFrame() # for file in os.listdir(f'{data_dir}{pid}/gps/'): if file[-1] == 'v': # so we only import cs[v] files try: hourly_df = pd.read_csv(f'{data_dir}{pid}/gps/{file}',usecols=[1,2,3,4,5]) # all columns but UTC except KeyError: print(f'Problem with gps data for {file} for Participant {pid}') self.move_to_purgatory(f'{data_dir}{pid}/gps/{file}',f'../../data/purgatory/{pid}-gps-{file}-{self.suffix}') if len(hourly_df) > 0: # append to participant df if there were data for that hour participant_df = participant_df.append(hourly_df,ignore_index=True) # converting utc to cdt participant_df['timestamp'] = pd.to_datetime(participant_df['UTC time']) - timedelta(hours=5) participant_df.set_index('timestamp',inplace=True) # rounding gps and taking the mode for every 1-minutes participant_df = round(participant_df,5) participant_df = participant_df.resample('1T').apply({lambda x: stats.mode(x)[0]}) # converting values to numeric and removing NaN datapoints participant_df.columns = ['utc','lat','long','altitude','accuracy'] for col in ['lat','long','altitude','accuracy']: participant_df[col] = pd.to_numeric(participant_df[col],errors='coerce') participant_df.dropna(inplace=True) if home == True: # getting participant's home coordinates home_coords = self.beacon_id.set_index('beiwe') home_lat = home_coords.loc[pid,'lat'] home_long = home_coords.loc[pid,'long'] # getting distance R = 6.371*10**6 # radius of the earth in meters participant_df['x_distance'] = abs( R * (participant_df['lat'] - home_lat) * math.pi * math.cos(home_long) / 180) participant_df['y_distance'] = abs( R * (participant_df['long'] - home_long) * math.pi / 180) dist = [] for i in range(len(participant_df)): dist.append(math.sqrt(math.pow(participant_df.iloc[i,-2],2) + math.pow(participant_df.iloc[i,-1],2))) participant_df['home_distance'] = dist participant_df['beiwe'] = pid gps_df = gps_df.append(participant_df) try: gps_df.to_csv(f'../../data/processed/beiwe-gps-{self.suffix}.csv') except: return False return True def process_weekly_surveys(self, data_dir='../../data/raw/utx000/beiwe/survey_answers/'): ''' Processes raw weekly survey answers. 
The survey IDs are: - eQ2L3J08ChlsdSXXKOoOjyLJ: morning - 7TaT8zapOWO0xdtONnsY8CE0: evening Returns True if able to save two dataframes for morning/evening survey data in /data/processed directory ''' # defining some variables for ease of understanding morning_survey_id = 'eQ2L3J08ChlsdSXXKOoOjyLJ' evening_survey_id = '7TaT8zapOWO0xdtONnsY8CE0' weekly_survey_id = 'lh9veS0aSw2KfrfwSytYjxVr' # defining the final dataframes to append to evening_survey_df = pd.DataFrame() morning_survey_df = pd.DataFrame() weekly_survey_df = pd.DataFrame() # Morning Survey Data # ------------------- print('\tProcessing morning survey data...') # looping through the participants and then all their data for participant in os.listdir(data_dir): # making sure we don't read from any hidden directories/files if len(participant) == 8: pid = participant participant_df = pd.DataFrame(columns=['ID','Content','Stress','Lonely','Sad','Energy','TST','SOL','NAW','Restful']) for file in os.listdir(f'{data_dir}{participant}/survey_answers/{morning_survey_id}/'): # reading raw data df = pd.read_csv(f'{data_dir}{participant}/survey_answers/{morning_survey_id}/{file}') # adding new row try: participant_df.loc[datetime.strptime(file[:-4],'%Y-%m-%d %H_%M_%S') - timedelta(hours=5)] = [pid,df.loc[4,'answer'],df.loc[5,'answer'],df.loc[6,'answer'],df.loc[7,'answer'],df.loc[8,'answer'], df.loc[0,'answer'],df.loc[1,'answer'],df.loc[2,'answer'],df.loc[3,'answer']] except KeyError: print(f'\t\tProblem with morning survey {file} for Participant {pid} - Participant most likely did not answer a question') self.move_to_purgatory(f'{data_dir}{participant}/survey_answers/{morning_survey_id}/{file}',f'../../data/purgatory/{pid}-survey-morning-{file}-{self.suffix}') # appending participant df to overall df morning_survey_df = morning_survey_df.append(participant_df) else: print(f'\t\tDirectory {participant} is not valid') # replacing string values with numeric morning_survey_df.replace({'Not at all':0,'A little bit':1,'Quite a bit':2,'Very Much':3, 'Low energy':0,'Low Energy':0,'Somewhat low energy':1,'Neutral':2,'Somewhat high energy':3,'High energy':4,'High Energy':4, 'Not at all restful':0,'Slightly restful':1,'Somewhat restful':2,'Very restful':3, 'NO_ANSWER_SELECTED':-1,'NOT_PRESENTED':-1,'SKIP QUESTION':-1},inplace=True) # fixing any string inputs outside the above range morning_survey_df['NAW'] = pd.to_numeric(morning_survey_df['NAW'],errors='coerce') morning_survey_df.columns = ['beiwe','content','stress','lonely','sad','energy','tst','sol','naw','restful'] morning_survey_df.index.rename("timestamp",inplace=True) morning_survey_df = morning_survey_df.sort_index()[self.ema_start:self.ema_end] # Evening Survey Data # ------------------- print('\tProcessing evening survey data...') for participant in os.listdir(data_dir): if len(participant) == 8: pid = participant # less columns participant_df = pd.DataFrame(columns=['ID','Content','Stress','Lonely','Sad','Energy']) for file in os.listdir(f'{data_dir}{participant}/survey_answers/{evening_survey_id}/'): df = pd.read_csv(f'{data_dir}{participant}/survey_answers/{evening_survey_id}/{file}') try: participant_df.loc[datetime.strptime(file[:-4],'%Y-%m-%d %H_%M_%S') - timedelta(hours=5)] = [pid,df.loc[0,'answer'],df.loc[1,'answer'],df.loc[2,'answer'],df.loc[3,'answer'],df.loc[4,'answer']] except KeyError: print(f'\t\tProblem with evening survey {file} for Participant {pid} - Participant most likely did not answer a question') 
self.move_to_purgatory(f'{data_dir}{participant}/survey_answers/{evening_survey_id}/{file}',f'../../data/purgatory/{pid}-survey-evening-{file}-{self.suffix}') evening_survey_df = evening_survey_df.append(participant_df) else: print(f'\t\tDirectory {participant} is not valid') evening_survey_df.replace({'Not at all':0,'A little bit':1,'Quite a bit':2,'Very Much':3, 'Low energy':0,'Low Energy':0,'Somewhat low energy':1,'Neutral':2,'Somewhat high energy':3,'High energy':4,'High Energy':4, 'Not at all restful':0,'Slightly restful':1,'Somewhat restful':2,'Very restful':3, 'NO_ANSWER_SELECTED':-1,'NOT_PRESENTED':-1,'SKIP QUESTION':-1},inplace=True) evening_survey_df.columns = ['beiwe','content','stress','lonely','sad','energy'] evening_survey_df.index.rename("timestamp",inplace=True) evening_survey_df = evening_survey_df.sort_index()[self.ema_start:self.ema_end] # Weekly Survey Data # ------------------- print('\tProcessing weekly survey data...') for participant in os.listdir(data_dir): if len(participant) == 8: pid = participant # less columns participant_df = pd.DataFrame(columns=['ID','Upset','Unable','Stressed','Confident','Your_Way','Cope','Able','Top','Angered','Overcome']) try: for file in os.listdir(f'{data_dir}{participant}/survey_answers/{weekly_survey_id}/'): df = pd.read_csv(f'{data_dir}{participant}/survey_answers/{weekly_survey_id}/{file}') try: participant_df.loc[datetime.strptime(file[:-4],'%Y-%m-%d %H_%M_%S') - timedelta(hours=6)] = [pid,df.loc[1,'answer'],df.loc[2,'answer'],df.loc[3,'answer'],df.loc[4,'answer'],df.loc[5,'answer'],df.loc[6,'answer'],df.loc[7,'answer'],df.loc[8,'answer'],df.loc[9,'answer'],df.loc[10,'answer']] except KeyError: try: participant_df.loc[datetime.strptime(file[:-4],'%Y-%m-%d %H_%M_%S') - timedelta(hours=6)] = [pid,df.loc[0,'answer'],df.loc[1,'answer'],df.loc[2,'answer'],df.loc[3,'answer'],df.loc[4,'answer'],df.loc[5,'answer'],df.loc[6,'answer'],df.loc[7,'answer'],df.loc[8,'answer'],df.loc[9,'answer']] except: print(f'\t\tProblem with weekly survey {file} for Participant {pid} - Participant most likely did not answer a question') self.move_to_purgatory(f'{data_dir}{participant}/survey_answers/{weekly_survey_id}/{file}',f'../../data/purgatory/{pid}-survey-weekly-{file}-{self.suffix}') weekly_survey_df = weekly_survey_df.append(participant_df) except FileNotFoundError: print(f'\t\tParticipant {pid} does not seem to have submitted any weekly surveys - check directory') else: print(f'\t\tDirectory {participant} is not valid') weekly_survey_df.replace({'Not at all':0,'A little bit':1,'Quite a bit':2,'Very Much':3, 'Never':0,'Almost Never':1,'Sometimes':2,'Fairly Often':3,'Very Often':4, 'Low energy':0,'Low Energy':0,'Somewhat low energy':1,'Neutral':2,'Somewhat high energy':3,'High energy':4,'High Energy':4, 'Not at all restful':0,'Slightly restful':1,'Somewhat restful':2,'Very restful':3, 'NO_ANSWER_SELECTED':-1,'NOT_PRESENTED':-1,'SKIP QUESTION':-1},inplace=True) weekly_survey_df.columns = ['beiwe','upset','unable','stressed','confident','your_way','cope','able','top','angered','overcome'] weekly_survey_df.index.rename("timestamp",inplace=True) weekly_survey_df = weekly_survey_df.sort_index()[self.ema_start:self.ema_end] # saving try: morning_survey_df.to_csv(f'../../data/processed/beiwe-morning_ema-{self.suffix}.csv') evening_survey_df.to_csv(f'../../data/processed/beiwe-evening_ema-{self.suffix}.csv') weekly_survey_df.to_csv(f'../../data/processed/beiwe-weekly_ema-{self.suffix}.csv') except: print("Problem saving Data") return False return True def 
process_environment_survey(self, data_file='../../data/raw/utx000/surveys/EESurvey_E1_raw.csv'): ''' Processes raw environment survey (first instance) and combines relevant data into processed directory Returns True if processed, False otherwise ''' print('\tProcessing first environment survey...') ee = pd.read_csv(data_file,usecols=[0,2,4,5,6,7,8,9],parse_dates=[1]) ee.columns = ['redcap','timestamp','apartment','duplex','house','dorm','hotel','other_living'] ee.dropna(subset=['timestamp'],inplace=True) ee.set_index('timestamp',inplace=True) # saving try: ee.to_csv(f'../../data/processed/ee-survey-{self.suffix}.csv') except: return False return True def process_fitbit(self): ''' Processes fitbit data Returns True if processed, False otherwise ''' print('\tProcessing Fitbit data...') def import_fitbit(filename, data_dir=f"../../data/raw/utx000/fitbit/"): ''' Imports the specified file for each participant in the directory Inputs: - filename: string corresponding to the filename to look for for each participant Returns a dataframe with the combined data from all participants ''' print(f"\tReading from file {filename}") df = pd.DataFrame() for pt in os.listdir(data_dir): if pt[0] != ".": print(f"\t\tReading for participant {pt}") try: temp = pd.read_csv(f"{data_dir}{pt}/fitbit_{filename}.csv", index_col=0, parse_dates=True) if filename[:4] == "intr": temp = process_fitbit_intraday(temp) temp["beiwe"] = pt df = df.append(temp) except FileNotFoundError: print(f"\t\tFile {filename} not found for participant {pt}") df.index.rename("timestamp",inplace=True) return df def get_device_df(info_df): ''' Take dictionary-like entries for fitbit info dataframe for each row in a dataframe and makes a new dataframe Inputs: - info_df: the fitbit info dataframe with the dictionary-like entries Returns a dataframe for the device column ''' overall_dict = {} for row in range(len(info_df)): Dict = ast.literal_eval(info_df['devices'][row]) if type(Dict) == dict: Dict = Dict elif type(Dict) in [tuple,list] and len(Dict) > 1: Dict = Dict[0] else: continue for key in Dict.keys(): overall_dict.setdefault(key, []) overall_dict[key].append(Dict[key]) # adding in the date of recording overall_dict.setdefault('date', []) overall_dict['date'].append(info_df.index[row]) df = pd.DataFrame(overall_dict) df['timestamp'] = pd.to_datetime(df['date'],errors='coerce') df.drop("date",axis=1,inplace=True) return df.set_index('timestamp') def get_daily_sleep(daily_df): ''' Creates a dataframe with the daily sleep data summarized Inputs: - daily_df: dataframe created from the daily fitbit csv file Returns a dataframe of the daily sleep data ''' overall_dict = {} for row in range(len(daily_df)): # in case Fitbit didn't record sleep records for that night - value is NaN pt = daily_df['beiwe'][row] # pts with classic sleep data if pt in ['awa8uces','ewvz3zm1','pgvvwyvh']: continue if type(daily_df['sleep'][row]) == float: continue else: Dict = ast.literal_eval(daily_df['sleep'][row]) if type(Dict) == dict: Dict = Dict else: Dict = Dict[0] for key in Dict.keys(): overall_dict.setdefault(key, []) overall_dict[key].append(Dict[key]) # adding in the date of recording overall_dict.setdefault('date', []) overall_dict['date'].append(daily_df.index[row]) # adding beiwe id overall_dict.setdefault('beiwe', []) overall_dict['beiwe'].append(daily_df['beiwe'][row]) df = pd.DataFrame(overall_dict) df['date'] = pd.to_datetime(df['date'],errors='coerce') # removing classic sleep stage data df = df[df['type'] != 'classic'] # dropping/renaming columns 
df.drop(["dateOfSleep","infoCode","logId","type"],axis=1,inplace=True) df.columns = ["duration_ms","efficiency","end_time","main_sleep","levels","minutes_after_wakeup","minutes_asleep","minutes_awake","minutes_to_sleep","start_time","time_in_bed","date","beiwe"] df.set_index("date",inplace=True) return df def get_sleep_stages(daily_sleep): ''' Creates a dataframe for the minute sleep data Input(s): - daily_sleep: dataframe holding the daily sleep data with a column called minuteData Returns: - sleep_stages: a dataframe with sleep stage data for every stage transition - summary: a dataframe with the nightly sleep stage information ''' data_dict = {'startDate':[],'endDate':[],'dateTime':[],'level':[],'seconds':[],'beiwe':[]} summary_dict = {'start_date':[],'end_date':[],'deep_count':[],'deep_minutes':[],'light_count':[],'light_minutes':[], 'rem_count':[],'rem_minutes':[],'wake_count':[],'wake_minutes':[],'beiwe':[]} for i in range(len(daily_sleep)): d0 =
pd.to_datetime(daily_sleep.iloc[i,:]["start_time"])
pandas.to_datetime
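# Illustrative sketch: a minimal, hedged example of pandas.to_datetime on a single string,
# mirroring the start_time conversion in get_sleep_stages above. The timestamp is hypothetical.
import pandas as pd

d0 = pd.to_datetime("2020-06-02T23:15:30.000")
print(d0)  # 2020-06-02 23:15:30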
#======================================================
# General Utility Functions
#======================================================
'''
Info:    Utility functions for general applications.
Version: 2.0
Author:  <NAME>
Created: Saturday, 13 April 2019
'''

# Import modules
import os
import numpy as np
import pandas as pd
import sys
#import dill as pickle
import pickle
from datetime import datetime

#------------------------------
# Utility Functions
#------------------------------

# Set title
def set_title(string):
    # Check if string is too long
    string_size = len(string)
    max_length = 57
    if string_size > max_length:
        print('TITLE TOO LONG')
    else:
        lr_buffer_len = int((max_length - string_size) / 2)
        full_buffer_len = lr_buffer_len * 2 + string_size
        print('\n')
        print(full_buffer_len * '=')
        print(full_buffer_len * ' ')
        print(lr_buffer_len * ' ' + string + lr_buffer_len * ' ')
        print(full_buffer_len * ' ')
        print(full_buffer_len * '='+'\n\n')

# Set section
def set_section(string):
    # Check if string is too long
    string_size = len(string)
    max_length = 100
    if string_size > max_length:
        print('TITLE TOO LONG')
    else:
        full_buffer_len = string_size
        print('\n')
        print(full_buffer_len * '-')
        print(string)
        print(full_buffer_len * '-'+'\n')

# Print time taken
def print_dur(string, st):
    print(string, datetime.now() - st)

# Date conversion
def pdf_cast_date(df, date_field):
    #df.loc[:, date_field] = list(map(lambda x: pd.to_datetime(x, format='%m/%d/%Y %H:%M'), list(df.loc[:, date_field])))
    #df.loc[:, date_field] = list(map(lambda x: pd.to_datetime(x, format='%Y/%m/%d %H:%M:%S'), list(df.loc[:, date_field])))
    df.loc[:, date_field] = list(map(lambda x: pd.to_datetime(x, format='%d/%m/%Y %H:%M'), list(df.loc[:, date_field])))
    return df

# Create date table
def create_date_table(start='2000-01-01', end='2050-12-31'):
    start_ts =
pd.to_datetime(start)
pandas.to_datetime
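# Illustrative sketch: pandas.to_datetime with and without an explicit format string, as used
# in pdf_cast_date and create_date_table above. The date values are hypothetical.
import pandas as pd

print(pd.to_datetime("13/04/2019 09:30", format="%d/%m/%Y %H:%M"))  # 2019-04-13 09:30:00
print(pd.to_datetime("2000-01-01"))                                  # 2000-01-01 00:00:00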
#!/usr/bin/python3 # -*- coding: utf-8 -*- # *****************************************************************************/ # * Authors: <NAME> # *****************************************************************************/ """transformCSV.py This module contains the basic functions for creating the content of a configuration file from CSV. Args: --inFile: Path for the configuration file where the time series data values CSV --outFile: Path for the configuration file where the time series data values INI --debug: Boolean flag to activate verbose printing for debug use Example: Default usage: $ python transformCSV.py Specific usage: $ python transformCSV.py --inFile C:\raad\src\software\time-series.csv --outFile C:\raad\src\software\time-series.ini --debug True """ import sys import datetime import optparse import traceback import pandas import numpy import os import pprint import csv if sys.version_info.major > 2: import configparser as cF else: import ConfigParser as cF class TransformMetaData(object): debug = False fileName = None fileLocation = None columnsList = None analysisFrameFormat = None uniqueLists = None analysisFrame = None def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None, outFile='time-series-madness.ini'): if isinstance(debug, bool): self.debug = debug if inputFileName is None: return elif os.path.exists(os.path.abspath(inputFileName)): self.fileName = inputFileName self.fileLocation = os.path.exists(os.path.abspath(inputFileName)) (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame( inputFileName=self.fileName) self.analysisFrame = analysisFrame self.columnsList = columnNamesList self.analysisFrameFormat = analysisFrameFormat self.uniqueLists = uniqueLists if transform: passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder, outFile=outFile) print(f"Pass Status is : {passWrite}") return def getColumnList(self): return self.columnsList def getAnalysisFrameFormat(self): return self.analysisFrameFormat def getuniqueLists(self): return self.uniqueLists def getAnalysisFrame(self): return self.analysisFrame @staticmethod def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"): return (lambda x: pandas.datetime.strptime(x, formatString)) # 2020-06-09 19:14:00.000 def getHeaderFromFile(self, headerFilePath=None, method=1): if headerFilePath is None: return (None, None) if method == 1: fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist() elif method == 2: with open(headerFilePath, 'r') as infile: reader = csv.DictReader(infile) fieldnames = list(reader.fieldnames) elif method == 3: fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns) else: fieldnames = None fieldDict = {} for indexName, valueName in enumerate(fieldnames): fieldDict[valueName] = pandas.StringDtype() return (fieldnames, fieldDict) def CSVtoFrame(self, inputFileName=None): if inputFileName is None: return (None, None) # Load File print("Processing File: {0}...\n".format(inputFileName)) self.fileLocation = inputFileName # Create data frame analysisFrame = pandas.DataFrame() analysisFrameFormat = self._getDataFormat() inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName, sep='\t', names=self._getDataFormat(), # dtype=self._getDataFormat() # header=None # float_precision='round_trip' # engine='c', # parse_dates=['date_column'], # date_parser=True, # na_values=['NULL'] ) if self.debug: # Preview data. 
print(inputDataFrame.head(5)) # analysisFrame.astype(dtype=analysisFrameFormat) # Cleanup data analysisFrame = inputDataFrame.copy(deep=True) analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN) # Create lists of unique strings uniqueLists = [] columnNamesList = [] for columnName in analysisFrame.columns: if self.debug: print('Column Name : ', columnName) print('Column Contents : ', analysisFrame[columnName].values) if isinstance(analysisFrame[columnName].dtypes, str): columnUniqueList = analysisFrame[columnName].unique().tolist() else: columnUniqueList = None columnNamesList.append(columnName) uniqueLists.append([columnName, columnUniqueList]) if self.debug: # Preview data. print(analysisFrame.head(5)) return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'): if analysisFrame is None: return False try: if outFolder is None: outFolder = os.getcwd() configFilePath = os.path.join(outFolder, outFile) configINI = cF.ConfigParser() configINI.add_section(sectionName) for (columnName, columnData) in analysisFrame: if self.debug: print('Column Name : ', columnName) print('Column Contents : ', columnData.values) print("Column Contents Length:", len(columnData.values)) print("Column Contents Type", type(columnData.values)) writeList = "[" for colIndex, colValue in enumerate(columnData): writeList = f"{writeList}'{colValue}'" if colIndex < len(columnData) - 1: writeList = f"{writeList}, " writeList = f"{writeList}]" configINI.set(sectionName, columnName, writeList) if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0: with open(configFilePath, 'w') as configWritingFile: configINI.write(configWritingFile) noErrors = True except ValueError as e: errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__), framePrintNo=str( sys._getframe().f_lineno), ErrorFound=e)) print(errorString) noErrors = False return noErrors @staticmethod def _validNumericalFloat(inValue): """ Determines if the value is a valid numerical object. Args: inValue: floating-point value Returns: Value in floating-point or Not-A-Number. """ try: return numpy.float128(inValue) except ValueError: return numpy.nan @staticmethod def _calculateMean(x): """ Calculates the mean in a multiplication method since division produces an infinity or NaN Args: x: Input data set. We use a data frame. Returns: Calculated mean for a vector data frame. """ try: mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size))) except ValueError: mean = 0 pass return mean def _calculateStd(self, data): """ Calculates the standard deviation in a multiplication method since division produces a infinity or NaN Args: data: Input data set. We use a data frame. Returns: Calculated standard deviation for a vector data frame. """ sd = 0 try: n = numpy.float128(data.size) if n <= 1: return numpy.float128(0.0) # Use multiplication version of mean since numpy bug causes infinity. 
mean = self._calculateMean(data) sd = numpy.float128(mean) # Calculate standard deviation for el in data: diff = numpy.float128(el) - numpy.float128(mean) sd += (diff) ** 2 points = numpy.float128(n - 1) sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points))) except ValueError: pass return sd def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0): """ Determines stats based on a vector to get the data shape. Args: dataAnalysisFrame: Dataframe to do analysis on. columnName: Column name of the data frame. multiplierSigma: Sigma range for the stats. Returns: Set of stats. """ meanValue = 0 sigmaValue = 0 sigmaRangeValue = 0 topValue = 0 try: # Clean out anomoly due to random invalid inputs. if (columnName is not None): meanValue = self._calculateMean(dataAnalysisFrame[columnName]) if meanValue == numpy.nan: meanValue = numpy.float128(1) sigmaValue = self._calculateStd(dataAnalysisFrame[columnName]) if float(sigmaValue) is float(numpy.nan): sigmaValue = numpy.float128(1) multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7 sigmaRangeValue = (sigmaValue * multiplier) if float(sigmaRangeValue) is float(numpy.nan): sigmaRangeValue = numpy.float128(1) topValue = numpy.float128(meanValue + sigmaRangeValue) print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName, meanValue, sigmaValue, multiplier, sigmaRangeValue)) except ValueError: pass return (meanValue, sigmaValue, sigmaRangeValue, topValue) def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'): """ Cleans the data frame with data values that are invalid. I.E. inf, NaN Args: dataAnalysisFrame: Dataframe to do analysis on. columnName: Column name of the data frame. Returns: Cleaned dataframe. """ dataAnalysisCleaned = None try: # Clean out anomoly due to random invalid inputs. (meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats( dataAnalysisFrame=dataAnalysisFrame, columnName=columnName) # dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0] # When the cycles are negative or zero we missed cleaning up a row. # logicVector = (dataAnalysisFrame[columnName] != 0) # dataAnalysisCleaned = dataAnalysisFrame[logicVector] logicVector = (dataAnalysisCleaned[columnName] >= 1) dataAnalysisCleaned = dataAnalysisCleaned[logicVector] # These timed out mean + 2 * sd logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range dataAnalysisCleaned = dataAnalysisCleaned[logicVector] except ValueError: pass return dataAnalysisCleaned def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'): """ Args: dataAnalysisTemp: Dataframe to do analysis on. cleanColumn: Flag to clean the data frame. columnName: Column name of the data frame. Returns: cleaned dataframe """ try: replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL'] if cleanColumn is True: dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName) dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList, value=numpy.nan) dataAnalysisTemp = dataAnalysisTemp.dropna() except ValueError: pass return dataAnalysisTemp @staticmethod def _getDataFormat(): """ Return the dataframe setup for the CSV file generated from server. Returns: dictionary data format for pandas. 
""" dataFormat = { "Serial_Number": pandas.StringDtype(), "LogTime0": pandas.StringDtype(), # @todo force rename "Id0": pandas.StringDtype(), # @todo force rename "DriveId": pandas.StringDtype(), "JobRunId": pandas.StringDtype(), "LogTime1": pandas.StringDtype(), # @todo force rename "Comment0": pandas.StringDtype(), # @todo force rename "CriticalWarning": pandas.StringDtype(), "Temperature": pandas.StringDtype(), "AvailableSpare": pandas.StringDtype(), "AvailableSpareThreshold": pandas.StringDtype(), "PercentageUsed": pandas.StringDtype(), "DataUnitsReadL": pandas.StringDtype(), "DataUnitsReadU": pandas.StringDtype(), "DataUnitsWrittenL": pandas.StringDtype(), "DataUnitsWrittenU": pandas.StringDtype(), "HostReadCommandsL": pandas.StringDtype(), "HostReadCommandsU": pandas.StringDtype(), "HostWriteCommandsL": pandas.StringDtype(), "HostWriteCommandsU": pandas.StringDtype(), "ControllerBusyTimeL": pandas.StringDtype(), "ControllerBusyTimeU": pandas.StringDtype(), "PowerCyclesL": pandas.StringDtype(), "PowerCyclesU": pandas.StringDtype(), "PowerOnHoursL": pandas.StringDtype(), "PowerOnHoursU": pandas.StringDtype(), "UnsafeShutdownsL": pandas.StringDtype(), "UnsafeShutdownsU": pandas.StringDtype(), "MediaErrorsL": pandas.StringDtype(), "MediaErrorsU": pandas.StringDtype(), "NumErrorInfoLogsL": pandas.StringDtype(), "NumErrorInfoLogsU": pandas.StringDtype(), "ProgramFailCountN": pandas.StringDtype(), "ProgramFailCountR": pandas.StringDtype(), "EraseFailCountN": pandas.StringDtype(), "EraseFailCountR": pandas.StringDtype(), "WearLevelingCountN": pandas.StringDtype(), "WearLevelingCountR": pandas.StringDtype(), "E2EErrorDetectCountN": pandas.StringDtype(), "E2EErrorDetectCountR": pandas.StringDtype(), "CRCErrorCountN": pandas.StringDtype(), "CRCErrorCountR": pandas.StringDtype(), "MediaWearPercentageN": pandas.StringDtype(), "MediaWearPercentageR": pandas.StringDtype(), "HostReadsN": pandas.StringDtype(), "HostReadsR": pandas.StringDtype(), "TimedWorkloadN": pandas.StringDtype(), "TimedWorkloadR": pandas.StringDtype(), "ThermalThrottleStatusN": pandas.StringDtype(), "ThermalThrottleStatusR": pandas.StringDtype(), "RetryBuffOverflowCountN": pandas.StringDtype(), "RetryBuffOverflowCountR": pandas.StringDtype(), "PLLLockLossCounterN": pandas.StringDtype(), "PLLLockLossCounterR": pandas.StringDtype(), "NandBytesWrittenN": pandas.StringDtype(), "NandBytesWrittenR": pandas.StringDtype(), "HostBytesWrittenN": pandas.StringDtype(), "HostBytesWrittenR": pandas.StringDtype(), "SystemAreaLifeRemainingN": pandas.StringDtype(), "SystemAreaLifeRemainingR": pandas.StringDtype(), "RelocatableSectorCountN": pandas.StringDtype(), "RelocatableSectorCountR": pandas.StringDtype(), "SoftECCErrorRateN": pandas.StringDtype(), "SoftECCErrorRateR": pandas.StringDtype(), "UnexpectedPowerLossN": pandas.StringDtype(), "UnexpectedPowerLossR": pandas.StringDtype(), "MediaErrorCountN": pandas.StringDtype(), "MediaErrorCountR": pandas.StringDtype(), "NandBytesReadN": pandas.StringDtype(), "NandBytesReadR": pandas.StringDtype(), "WarningCompTempTime": pandas.StringDtype(), "CriticalCompTempTime": pandas.StringDtype(), "TempSensor1": pandas.StringDtype(), "TempSensor2": pandas.StringDtype(), "TempSensor3": pandas.StringDtype(), "TempSensor4": pandas.StringDtype(), "TempSensor5": pandas.StringDtype(), "TempSensor6": pandas.StringDtype(), "TempSensor7": pandas.StringDtype(), "TempSensor8": pandas.StringDtype(), "ThermalManagementTemp1TransitionCount": pandas.StringDtype(), "ThermalManagementTemp2TransitionCount": 
pandas.StringDtype(), "TotalTimeForThermalManagementTemp1": pandas.StringDtype(), "TotalTimeForThermalManagementTemp2": pandas.StringDtype(), "Core_Num": pandas.StringDtype(), "Id1": pandas.StringDtype(), # @todo force rename "Job_Run_Id": pandas.StringDtype(), "Stats_Time": pandas.StringDtype(), "HostReads": pandas.StringDtype(), "HostWrites": pandas.StringDtype(), "NandReads": pandas.StringDtype(), "NandWrites": pandas.StringDtype(), "ProgramErrors": pandas.StringDtype(), "EraseErrors": pandas.StringDtype(), "ErrorCount": pandas.StringDtype(), "BitErrorsHost1": pandas.StringDtype(), "BitErrorsHost2": pandas.StringDtype(), "BitErrorsHost3": pandas.StringDtype(), "BitErrorsHost4": pandas.StringDtype(), "BitErrorsHost5": pandas.StringDtype(), "BitErrorsHost6": pandas.StringDtype(), "BitErrorsHost7": pandas.StringDtype(), "BitErrorsHost8": pandas.StringDtype(), "BitErrorsHost9": pandas.StringDtype(), "BitErrorsHost10": pandas.StringDtype(), "BitErrorsHost11": pandas.StringDtype(), "BitErrorsHost12": pandas.StringDtype(), "BitErrorsHost13": pandas.StringDtype(), "BitErrorsHost14": pandas.StringDtype(), "BitErrorsHost15": pandas.StringDtype(), "ECCFail": pandas.StringDtype(), "GrownDefects": pandas.StringDtype(), "FreeMemory": pandas.StringDtype(), "WriteAllowance": pandas.StringDtype(), "ModelString": pandas.StringDtype(), "ValidBlocks": pandas.StringDtype(), "TokenBlocks": pandas.StringDtype(), "SpuriousPFCount": pandas.StringDtype(), "SpuriousPFLocations1": pandas.StringDtype(), "SpuriousPFLocations2": pandas.StringDtype(), "SpuriousPFLocations3": pandas.StringDtype(), "SpuriousPFLocations4": pandas.StringDtype(), "SpuriousPFLocations5": pandas.StringDtype(), "SpuriousPFLocations6": pandas.StringDtype(), "SpuriousPFLocations7": pandas.StringDtype(), "SpuriousPFLocations8": pandas.StringDtype(), "BitErrorsNonHost1": pandas.StringDtype(), "BitErrorsNonHost2": pandas.StringDtype(), "BitErrorsNonHost3": pandas.StringDtype(), "BitErrorsNonHost4": pandas.StringDtype(), "BitErrorsNonHost5": pandas.StringDtype(), "BitErrorsNonHost6": pandas.StringDtype(), "BitErrorsNonHost7": pandas.StringDtype(), "BitErrorsNonHost8": pandas.StringDtype(), "BitErrorsNonHost9": pandas.StringDtype(), "BitErrorsNonHost10": pandas.StringDtype(), "BitErrorsNonHost11": pandas.StringDtype(), "BitErrorsNonHost12": pandas.StringDtype(), "BitErrorsNonHost13": pandas.StringDtype(), "BitErrorsNonHost14": pandas.StringDtype(), "BitErrorsNonHost15": pandas.StringDtype(), "ECCFailNonHost": pandas.StringDtype(), "NSversion": pandas.StringDtype(), "numBands": pandas.StringDtype(), "minErase": pandas.StringDtype(), "maxErase": pandas.StringDtype(), "avgErase": pandas.StringDtype(), "minMVolt": pandas.StringDtype(), "maxMVolt": pandas.StringDtype(), "avgMVolt": pandas.StringDtype(), "minMAmp": pandas.StringDtype(), "maxMAmp": pandas.StringDtype(), "avgMAmp": pandas.StringDtype(), "comment1": pandas.StringDtype(), # @todo force rename "minMVolt12v": pandas.StringDtype(), "maxMVolt12v": pandas.StringDtype(), "avgMVolt12v": pandas.StringDtype(), "minMAmp12v": pandas.StringDtype(), "maxMAmp12v": pandas.StringDtype(), "avgMAmp12v": pandas.StringDtype(), "nearMissSector": pandas.StringDtype(), "nearMissDefect": pandas.StringDtype(), "nearMissOverflow": pandas.StringDtype(), "replayUNC": pandas.StringDtype(), "Drive_Id": pandas.StringDtype(), "indirectionMisses": pandas.StringDtype(), "BitErrorsHost16": pandas.StringDtype(), "BitErrorsHost17": pandas.StringDtype(), "BitErrorsHost18": pandas.StringDtype(), "BitErrorsHost19": 
pandas.StringDtype(), "BitErrorsHost20": pandas.StringDtype(), "BitErrorsHost21": pandas.StringDtype(), "BitErrorsHost22": pandas.StringDtype(), "BitErrorsHost23": pandas.StringDtype(), "BitErrorsHost24": pandas.StringDtype(), "BitErrorsHost25": pandas.StringDtype(), "BitErrorsHost26": pandas.StringDtype(), "BitErrorsHost27": pandas.StringDtype(), "BitErrorsHost28": pandas.StringDtype(), "BitErrorsHost29": pandas.StringDtype(), "BitErrorsHost30": pandas.StringDtype(), "BitErrorsHost31": pandas.StringDtype(), "BitErrorsHost32": pandas.StringDtype(), "BitErrorsHost33": pandas.StringDtype(), "BitErrorsHost34": pandas.StringDtype(), "BitErrorsHost35": pandas.StringDtype(), "BitErrorsHost36": pandas.StringDtype(), "BitErrorsHost37": pandas.StringDtype(), "BitErrorsHost38": pandas.StringDtype(), "BitErrorsHost39": pandas.StringDtype(), "BitErrorsHost40": pandas.StringDtype(), "XORRebuildSuccess": pandas.StringDtype(), "XORRebuildFail": pandas.StringDtype(), "BandReloForError": pandas.StringDtype(), "mrrSuccess": pandas.StringDtype(), "mrrFail": pandas.StringDtype(), "mrrNudgeSuccess": pandas.StringDtype(), "mrrNudgeHarmless": pandas.StringDtype(), "mrrNudgeFail": pandas.StringDtype(), "totalErases": pandas.StringDtype(), "dieOfflineCount": pandas.StringDtype(), "curtemp": pandas.StringDtype(), "mintemp": pandas.StringDtype(), "maxtemp": pandas.StringDtype(), "oventemp": pandas.StringDtype(), "allZeroSectors": pandas.StringDtype(), "ctxRecoveryEvents": pandas.StringDtype(), "ctxRecoveryErases": pandas.StringDtype(), "NSversionMinor": pandas.StringDtype(), "lifeMinTemp": pandas.StringDtype(), "lifeMaxTemp": pandas.StringDtype(), "powerCycles": pandas.StringDtype(), "systemReads": pandas.StringDtype(), "systemWrites": pandas.StringDtype(), "readRetryOverflow": pandas.StringDtype(), "unplannedPowerCycles": pandas.StringDtype(), "unsafeShutdowns": pandas.StringDtype(), "defragForcedReloCount": pandas.StringDtype(), "bandReloForBDR": pandas.StringDtype(), "bandReloForDieOffline": pandas.StringDtype(), "bandReloForPFail": pandas.StringDtype(), "bandReloForWL": pandas.StringDtype(), "provisionalDefects": pandas.StringDtype(), "uncorrectableProgErrors": pandas.StringDtype(), "powerOnSeconds": pandas.StringDtype(), "bandReloForChannelTimeout": pandas.StringDtype(), "fwDowngradeCount": pandas.StringDtype(), "dramCorrectablesTotal": pandas.StringDtype(), "hb_id": pandas.StringDtype(), "dramCorrectables1to1": pandas.StringDtype(), "dramCorrectables4to1": pandas.StringDtype(), "dramCorrectablesSram": pandas.StringDtype(), "dramCorrectablesUnknown": pandas.StringDtype(), "pliCapTestInterval": pandas.StringDtype(), "pliCapTestCount": pandas.StringDtype(), "pliCapTestResult": pandas.StringDtype(), "pliCapTestTimeStamp": pandas.StringDtype(), "channelHangSuccess": pandas.StringDtype(), "channelHangFail": pandas.StringDtype(), "BitErrorsHost41": pandas.StringDtype(), "BitErrorsHost42": pandas.StringDtype(), "BitErrorsHost43": pandas.StringDtype(), "BitErrorsHost44": pandas.StringDtype(), "BitErrorsHost45": pandas.StringDtype(), "BitErrorsHost46": pandas.StringDtype(), "BitErrorsHost47": pandas.StringDtype(), "BitErrorsHost48": pandas.StringDtype(), "BitErrorsHost49": pandas.StringDtype(), "BitErrorsHost50": pandas.StringDtype(), "BitErrorsHost51": pandas.StringDtype(), "BitErrorsHost52": pandas.StringDtype(), "BitErrorsHost53": pandas.StringDtype(), "BitErrorsHost54": pandas.StringDtype(), "BitErrorsHost55": pandas.StringDtype(), "BitErrorsHost56": pandas.StringDtype(), "mrrNearMiss": pandas.StringDtype(), 
"mrrRereadAvg": pandas.StringDtype(), "readDisturbEvictions": pandas.StringDtype(), "L1L2ParityError": pandas.StringDtype(), "pageDefects": pandas.StringDtype(), "pageProvisionalTotal": pandas.StringDtype(), "ASICTemp": pandas.StringDtype(), "PMICTemp": pandas.StringDtype(), "size": pandas.StringDtype(), "lastWrite": pandas.StringDtype(), "timesWritten": pandas.StringDtype(), "maxNumContextBands": pandas.StringDtype(), "blankCount": pandas.StringDtype(), "cleanBands": pandas.StringDtype(), "avgTprog": pandas.StringDtype(), "avgEraseCount": pandas.StringDtype(), "edtcHandledBandCnt": pandas.StringDtype(), "bandReloForNLBA": pandas.StringDtype(), "bandCrossingDuringPliCount": pandas.StringDtype(), "bitErrBucketNum": pandas.StringDtype(), "sramCorrectablesTotal": pandas.StringDtype(), "l1SramCorrErrCnt": pandas.StringDtype(), "l2SramCorrErrCnt": pandas.StringDtype(), "parityErrorValue": pandas.StringDtype(), "parityErrorType": pandas.StringDtype(), "mrr_LutValidDataSize": pandas.StringDtype(), "pageProvisionalDefects": pandas.StringDtype(), "plisWithErasesInProgress": pandas.StringDtype(), "lastReplayDebug": pandas.StringDtype(), "externalPreReadFatals": pandas.StringDtype(), "hostReadCmd": pandas.StringDtype(), "hostWriteCmd": pandas.StringDtype(), "trimmedSectors": pandas.StringDtype(), "trimTokens": pandas.StringDtype(), "mrrEventsInCodewords": pandas.StringDtype(), "mrrEventsInSectors": pandas.StringDtype(), "powerOnMicroseconds": pandas.StringDtype(), "mrrInXorRecEvents": pandas.StringDtype(), "mrrFailInXorRecEvents": pandas.StringDtype(), "mrrUpperpageEvents": pandas.StringDtype(), "mrrLowerpageEvents": pandas.StringDtype(), "mrrSlcpageEvents": pandas.StringDtype(), "mrrReReadTotal": pandas.StringDtype(), "powerOnResets": pandas.StringDtype(), "powerOnMinutes": pandas.StringDtype(), "throttleOnMilliseconds": pandas.StringDtype(), "ctxTailMagic": pandas.StringDtype(), "contextDropCount": pandas.StringDtype(), "lastCtxSequenceId": pandas.StringDtype(), "currCtxSequenceId": pandas.StringDtype(), "mbliEraseCount": pandas.StringDtype(), "pageAverageProgramCount": pandas.StringDtype(), "bandAverageEraseCount": pandas.StringDtype(), "bandTotalEraseCount": pandas.StringDtype(), "bandReloForXorRebuildFail": pandas.StringDtype(), "defragSpeculativeMiss": pandas.StringDtype(), "uncorrectableBackgroundScan": pandas.StringDtype(), "BitErrorsHost57": pandas.StringDtype(), "BitErrorsHost58": pandas.StringDtype(), "BitErrorsHost59": pandas.StringDtype(), "BitErrorsHost60": pandas.StringDtype(), "BitErrorsHost61": pandas.StringDtype(), "BitErrorsHost62": pandas.StringDtype(), "BitErrorsHost63": pandas.StringDtype(), "BitErrorsHost64": pandas.StringDtype(), "BitErrorsHost65": pandas.StringDtype(), "BitErrorsHost66": pandas.StringDtype(), "BitErrorsHost67": pandas.StringDtype(), "BitErrorsHost68": pandas.StringDtype(), "BitErrorsHost69": pandas.StringDtype(), "BitErrorsHost70": pandas.StringDtype(), "BitErrorsHost71": pandas.StringDtype(), "BitErrorsHost72": pandas.StringDtype(), "BitErrorsHost73": pandas.StringDtype(), "BitErrorsHost74": pandas.StringDtype(), "BitErrorsHost75": pandas.StringDtype(), "BitErrorsHost76": pandas.StringDtype(), "BitErrorsHost77": pandas.StringDtype(), "BitErrorsHost78": pandas.StringDtype(), "BitErrorsHost79": pandas.StringDtype(), "BitErrorsHost80": pandas.StringDtype(), "bitErrBucketArray1": pandas.StringDtype(), "bitErrBucketArray2": pandas.StringDtype(), "bitErrBucketArray3": pandas.StringDtype(), "bitErrBucketArray4": pandas.StringDtype(), "bitErrBucketArray5": 
pandas.StringDtype(), "bitErrBucketArray6": pandas.StringDtype(), "bitErrBucketArray7": pandas.StringDtype(), "bitErrBucketArray8": pandas.StringDtype(), "bitErrBucketArray9": pandas.StringDtype(), "bitErrBucketArray10": pandas.StringDtype(), "bitErrBucketArray11": pandas.StringDtype(), "bitErrBucketArray12": pandas.StringDtype(), "bitErrBucketArray13": pandas.StringDtype(), "bitErrBucketArray14": pandas.StringDtype(), "bitErrBucketArray15": pandas.StringDtype(), "bitErrBucketArray16": pandas.StringDtype(), "bitErrBucketArray17": pandas.StringDtype(), "bitErrBucketArray18": pandas.StringDtype(), "bitErrBucketArray19": pandas.StringDtype(), "bitErrBucketArray20": pandas.StringDtype(), "bitErrBucketArray21": pandas.StringDtype(), "bitErrBucketArray22": pandas.StringDtype(), "bitErrBucketArray23": pandas.StringDtype(), "bitErrBucketArray24": pandas.StringDtype(), "bitErrBucketArray25": pandas.StringDtype(), "bitErrBucketArray26": pandas.StringDtype(), "bitErrBucketArray27": pandas.StringDtype(), "bitErrBucketArray28": pandas.StringDtype(), "bitErrBucketArray29": pandas.StringDtype(), "bitErrBucketArray30": pandas.StringDtype(), "bitErrBucketArray31": pandas.StringDtype(), "bitErrBucketArray32": pandas.StringDtype(), "bitErrBucketArray33": pandas.StringDtype(), "bitErrBucketArray34": pandas.StringDtype(), "bitErrBucketArray35": pandas.StringDtype(), "bitErrBucketArray36": pandas.StringDtype(), "bitErrBucketArray37": pandas.StringDtype(), "bitErrBucketArray38": pandas.StringDtype(), "bitErrBucketArray39": pandas.StringDtype(), "bitErrBucketArray40": pandas.StringDtype(), "bitErrBucketArray41": pandas.StringDtype(), "bitErrBucketArray42": pandas.StringDtype(), "bitErrBucketArray43": pandas.StringDtype(), "bitErrBucketArray44": pandas.StringDtype(), "bitErrBucketArray45": pandas.StringDtype(), "bitErrBucketArray46": pandas.StringDtype(), "bitErrBucketArray47": pandas.StringDtype(), "bitErrBucketArray48": pandas.StringDtype(), "bitErrBucketArray49": pandas.StringDtype(), "bitErrBucketArray50": pandas.StringDtype(), "bitErrBucketArray51": pandas.StringDtype(), "bitErrBucketArray52": pandas.StringDtype(), "bitErrBucketArray53": pandas.StringDtype(), "bitErrBucketArray54": pandas.StringDtype(), "bitErrBucketArray55": pandas.StringDtype(), "bitErrBucketArray56": pandas.StringDtype(), "bitErrBucketArray57": pandas.StringDtype(), "bitErrBucketArray58": pandas.StringDtype(), "bitErrBucketArray59": pandas.StringDtype(), "bitErrBucketArray60": pandas.StringDtype(), "bitErrBucketArray61": pandas.StringDtype(), "bitErrBucketArray62": pandas.StringDtype(), "bitErrBucketArray63": pandas.StringDtype(), "bitErrBucketArray64": pandas.StringDtype(), "bitErrBucketArray65": pandas.StringDtype(), "bitErrBucketArray66": pandas.StringDtype(), "bitErrBucketArray67": pandas.StringDtype(), "bitErrBucketArray68": pandas.StringDtype(), "bitErrBucketArray69": pandas.StringDtype(), "bitErrBucketArray70": pandas.StringDtype(), "bitErrBucketArray71": pandas.StringDtype(), "bitErrBucketArray72": pandas.StringDtype(), "bitErrBucketArray73": pandas.StringDtype(), "bitErrBucketArray74": pandas.StringDtype(), "bitErrBucketArray75": pandas.StringDtype(), "bitErrBucketArray76": pandas.StringDtype(), "bitErrBucketArray77": pandas.StringDtype(), "bitErrBucketArray78": pandas.StringDtype(), "bitErrBucketArray79": pandas.StringDtype(), "bitErrBucketArray80": pandas.StringDtype(), "mrr_successDistribution1": pandas.StringDtype(), "mrr_successDistribution2": pandas.StringDtype(), "mrr_successDistribution3": pandas.StringDtype(), 
"mrr_successDistribution4": pandas.StringDtype(), "mrr_successDistribution5": pandas.StringDtype(), "mrr_successDistribution6": pandas.StringDtype(), "mrr_successDistribution7": pandas.StringDtype(), "mrr_successDistribution8": pandas.StringDtype(), "mrr_successDistribution9": pandas.StringDtype(), "mrr_successDistribution10": pandas.StringDtype(), "mrr_successDistribution11": pandas.StringDtype(), "mrr_successDistribution12": pandas.StringDtype(), "mrr_successDistribution13": pandas.StringDtype(), "mrr_successDistribution14": pandas.StringDtype(), "mrr_successDistribution15": pandas.StringDtype(), "mrr_successDistribution16": pandas.StringDtype(), "mrr_successDistribution17": pandas.StringDtype(), "mrr_successDistribution18": pandas.StringDtype(), "mrr_successDistribution19": pandas.StringDtype(), "mrr_successDistribution20": pandas.StringDtype(), "mrr_successDistribution21": pandas.StringDtype(), "mrr_successDistribution22": pandas.StringDtype(), "mrr_successDistribution23": pandas.StringDtype(), "mrr_successDistribution24": pandas.StringDtype(), "mrr_successDistribution25": pandas.StringDtype(), "mrr_successDistribution26": pandas.StringDtype(), "mrr_successDistribution27": pandas.StringDtype(), "mrr_successDistribution28": pandas.StringDtype(), "mrr_successDistribution29": pandas.StringDtype(), "mrr_successDistribution30": pandas.StringDtype(), "mrr_successDistribution31": pandas.StringDtype(), "mrr_successDistribution32": pandas.StringDtype(), "mrr_successDistribution33": pandas.StringDtype(), "mrr_successDistribution34": pandas.StringDtype(), "mrr_successDistribution35": pandas.StringDtype(), "mrr_successDistribution36": pandas.StringDtype(), "mrr_successDistribution37": pandas.StringDtype(), "mrr_successDistribution38": pandas.StringDtype(), "mrr_successDistribution39": pandas.StringDtype(), "mrr_successDistribution40": pandas.StringDtype(), "mrr_successDistribution41": pandas.StringDtype(), "mrr_successDistribution42": pandas.StringDtype(), "mrr_successDistribution43": pandas.StringDtype(), "mrr_successDistribution44": pandas.StringDtype(), "mrr_successDistribution45": pandas.StringDtype(), "mrr_successDistribution46": pandas.StringDtype(), "mrr_successDistribution47": pandas.StringDtype(), "mrr_successDistribution48": pandas.StringDtype(), "mrr_successDistribution49": pandas.StringDtype(), "mrr_successDistribution50": pandas.StringDtype(), "mrr_successDistribution51": pandas.StringDtype(), "mrr_successDistribution52": pandas.StringDtype(), "mrr_successDistribution53": pandas.StringDtype(), "mrr_successDistribution54": pandas.StringDtype(), "mrr_successDistribution55": pandas.StringDtype(), "mrr_successDistribution56": pandas.StringDtype(), "mrr_successDistribution57": pandas.StringDtype(), "mrr_successDistribution58": pandas.StringDtype(), "mrr_successDistribution59": pandas.StringDtype(), "mrr_successDistribution60": pandas.StringDtype(), "mrr_successDistribution61": pandas.StringDtype(), "mrr_successDistribution62": pandas.StringDtype(), "mrr_successDistribution63": pandas.StringDtype(), "mrr_successDistribution64": pandas.StringDtype(), "blDowngradeCount": pandas.StringDtype(), "snapReads": pandas.StringDtype(), "pliCapTestTime": pandas.StringDtype(), "currentTimeToFreeSpaceRecovery": pandas.StringDtype(), "worstTimeToFreeSpaceRecovery": pandas.StringDtype(), "rspnandReads": pandas.StringDtype(), "cachednandReads": pandas.StringDtype(), "spnandReads": pandas.StringDtype(), "dpnandReads": pandas.StringDtype(), "qpnandReads": pandas.StringDtype(), "verifynandReads": 
pandas.StringDtype(), "softnandReads": pandas.StringDtype(), "spnandWrites": pandas.StringDtype(), "dpnandWrites": pandas.StringDtype(), "qpnandWrites": pandas.StringDtype(), "opnandWrites": pandas.StringDtype(), "xpnandWrites": pandas.StringDtype(), "unalignedHostWriteCmd": pandas.StringDtype(), "randomReadCmd": pandas.StringDtype(), "randomWriteCmd": pandas.StringDtype(), "secVenCmdCount": pandas.StringDtype(), "secVenCmdCountFails": pandas.StringDtype(), "mrrFailOnSlcOtfPages": pandas.StringDtype(), "mrrFailOnSlcOtfPageMarkedAsMBPD": pandas.StringDtype(), "lcorParitySeedErrors": pandas.StringDtype(), "fwDownloadFails": pandas.StringDtype(), "fwAuthenticationFails": pandas.StringDtype(), "fwSecurityRev": pandas.StringDtype(), "isCapacitorHealthly": pandas.StringDtype(), "fwWRCounter": pandas.StringDtype(), "sysAreaEraseFailCount": pandas.StringDtype(), "iusDefragRelocated4DataRetention": pandas.StringDtype(), "I2CTemp": pandas.StringDtype(), "lbaMismatchOnNandReads": pandas.StringDtype(), "currentWriteStreamsCount": pandas.StringDtype(), "nandWritesPerStream1": pandas.StringDtype(), "nandWritesPerStream2": pandas.StringDtype(), "nandWritesPerStream3": pandas.StringDtype(), "nandWritesPerStream4": pandas.StringDtype(), "nandWritesPerStream5": pandas.StringDtype(), "nandWritesPerStream6": pandas.StringDtype(), "nandWritesPerStream7": pandas.StringDtype(), "nandWritesPerStream8": pandas.StringDtype(), "nandWritesPerStream9": pandas.StringDtype(), "nandWritesPerStream10": pandas.StringDtype(), "nandWritesPerStream11": pandas.StringDtype(), "nandWritesPerStream12": pandas.StringDtype(), "nandWritesPerStream13": pandas.StringDtype(), "nandWritesPerStream14": pandas.StringDtype(), "nandWritesPerStream15": pandas.StringDtype(), "nandWritesPerStream16": pandas.StringDtype(), "nandWritesPerStream17": pandas.StringDtype(), "nandWritesPerStream18": pandas.StringDtype(), "nandWritesPerStream19": pandas.StringDtype(), "nandWritesPerStream20": pandas.StringDtype(), "nandWritesPerStream21": pandas.StringDtype(), "nandWritesPerStream22": pandas.StringDtype(), "nandWritesPerStream23": pandas.StringDtype(), "nandWritesPerStream24": pandas.StringDtype(), "nandWritesPerStream25": pandas.StringDtype(), "nandWritesPerStream26": pandas.StringDtype(), "nandWritesPerStream27": pandas.StringDtype(), "nandWritesPerStream28": pandas.StringDtype(), "nandWritesPerStream29": pandas.StringDtype(), "nandWritesPerStream30": pandas.StringDtype(), "nandWritesPerStream31": pandas.StringDtype(), "nandWritesPerStream32": pandas.StringDtype(), "hostSoftReadSuccess": pandas.StringDtype(), "xorInvokedCount": pandas.StringDtype(), "comresets": pandas.StringDtype(), "syncEscapes": pandas.StringDtype(), "rErrHost": pandas.StringDtype(), "rErrDevice": pandas.StringDtype(), "iCrcs": pandas.StringDtype(), "linkSpeedDrops": pandas.StringDtype(), "mrrXtrapageEvents": pandas.StringDtype(), "mrrToppageEvents": pandas.StringDtype(), "hostXorSuccessCount": pandas.StringDtype(), "hostXorFailCount": pandas.StringDtype(), "nandWritesWithPreReadPerStream1": pandas.StringDtype(), "nandWritesWithPreReadPerStream2": pandas.StringDtype(), "nandWritesWithPreReadPerStream3": pandas.StringDtype(), "nandWritesWithPreReadPerStream4": pandas.StringDtype(), "nandWritesWithPreReadPerStream5": pandas.StringDtype(), "nandWritesWithPreReadPerStream6": pandas.StringDtype(), "nandWritesWithPreReadPerStream7": pandas.StringDtype(), "nandWritesWithPreReadPerStream8": pandas.StringDtype(), "nandWritesWithPreReadPerStream9": pandas.StringDtype(), 
"nandWritesWithPreReadPerStream10": pandas.StringDtype(), "nandWritesWithPreReadPerStream11": pandas.StringDtype(), "nandWritesWithPreReadPerStream12": pandas.StringDtype(), "nandWritesWithPreReadPerStream13": pandas.StringDtype(), "nandWritesWithPreReadPerStream14": pandas.StringDtype(), "nandWritesWithPreReadPerStream15": pandas.StringDtype(), "nandWritesWithPreReadPerStream16": pandas.StringDtype(), "nandWritesWithPreReadPerStream17": pandas.StringDtype(), "nandWritesWithPreReadPerStream18": pandas.StringDtype(), "nandWritesWithPreReadPerStream19": pandas.StringDtype(), "nandWritesWithPreReadPerStream20": pandas.StringDtype(), "nandWritesWithPreReadPerStream21": pandas.StringDtype(), "nandWritesWithPreReadPerStream22": pandas.StringDtype(), "nandWritesWithPreReadPerStream23": pandas.StringDtype(), "nandWritesWithPreReadPerStream24": pandas.StringDtype(), "nandWritesWithPreReadPerStream25": pandas.StringDtype(), "nandWritesWithPreReadPerStream26": pandas.StringDtype(), "nandWritesWithPreReadPerStream27": pandas.StringDtype(), "nandWritesWithPreReadPerStream28": pandas.StringDtype(), "nandWritesWithPreReadPerStream29": pandas.StringDtype(), "nandWritesWithPreReadPerStream30": pandas.StringDtype(), "nandWritesWithPreReadPerStream31": pandas.StringDtype(), "nandWritesWithPreReadPerStream32": pandas.StringDtype(), "dramCorrectables8to1": pandas.StringDtype(), "driveRecoveryCount": pandas.StringDtype(), "mprLiteReads": pandas.StringDtype(), "eccErrOnMprLiteReads": pandas.StringDtype(), "readForwardingXpPreReadCount": pandas.StringDtype(), "readForwardingUpPreReadCount": pandas.StringDtype(), "readForwardingLpPreReadCount": pandas.StringDtype(), "pweDefectCompensationCredit": pandas.StringDtype(), "planarXorRebuildFailure": pandas.StringDtype(), "itgXorRebuildFailure": pandas.StringDtype(), "planarXorRebuildSuccess": pandas.StringDtype(), "itgXorRebuildSuccess": pandas.StringDtype(), "xorLoggingSkippedSIcBand": pandas.StringDtype(), "xorLoggingSkippedDieOffline": pandas.StringDtype(), "xorLoggingSkippedDieAbsent": pandas.StringDtype(), "xorLoggingSkippedBandErased": pandas.StringDtype(), "xorLoggingSkippedNoEntry": pandas.StringDtype(), "xorAuditSuccess": pandas.StringDtype(), "maxSuspendCount":
pandas.StringDtype()
pandas.StringDtype
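# A minimal sketch of the pattern the prompt above builds toward: a dict that maps
# every column name to pandas.StringDtype() so read_csv loads everything as the
# nullable string dtype. The two column names and the inline CSV text below are
# invented stand-ins for the full telemetry format.
import io
import pandas

dataFormat = {
    "Serial_Number": pandas.StringDtype(),
    "Temperature": pandas.StringDtype(),
}
sample = "Serial_Number\tTemperature\nSN0001\t38\n"
frame = pandas.read_csv(io.StringIO(sample), sep="\t", dtype=dataFormat)
print(frame.dtypes)  # both columns report the "string" dtype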
from fastapi import FastAPI, Request
import logging
import os
import json
import sys
import re
import pandas as pd

app = FastAPI()

logging.basicConfig(stream=sys.stdout,
                    format="%(asctime)s %(levelname)s %(name)s: %(message)s")
logger = logging.getLogger("API")

if os.getenv('LOG_LEVEL') == 'INFO':
    logger.setLevel(logging.INFO)
elif os.getenv('LOG_LEVEL') == 'WARNING':
    logger.setLevel(logging.WARNING)
elif os.getenv('LOG_LEVEL') == 'DEBUG':
    logger.setLevel(logging.DEBUG)


def get_aggr_func(func_string: str):
    if func_string in ['mean', 'median', 'mode', 'sum', 'count', 'max', 'min', 'std', 'var', 'skew', 'kurt']:
        return func_string
    elif re.search(r'^p\d\d?(\.\d+)?$', func_string):  # matches strings like 'p99', 'p99.99', 'p1', 'p0.001'
        def percentile(x):
            return x.quantile(float(func_string[1:]) / 100)
        percentile.__name__ = func_string
        return percentile
    else:
        raise ValueError('Invalid function string.')


def aggr_query(values: dict, warmup: int, aggr_func):
    df = pd.DataFrame.from_dict(values)
    df.columns = ['timestamp', 'value']
    filtered = df[df['timestamp'] >= (df['timestamp'][0] + warmup)]
    filtered['value'] = filtered['value'].astype(int)
    return filtered['value'].aggregate(aggr_func)


def check_result(result, operator: str, threshold):
    if operator == 'lt':
        return result < threshold
    if operator == 'lte':
        return result <= threshold
    if operator == 'gt':
        return result > threshold
    if operator == 'gte':
        return result >= threshold
    else:
        raise ValueError('Invalid operator string.')


@app.post("/", response_model=bool)
async def check_slo(request: Request):
    data = json.loads(await request.body())
    logger.info('Received request with metadata: %s', data['metadata'])

    warmup = int(data['metadata']['warmup'])
    query_aggregation = get_aggr_func(data['metadata']['queryAggregation'])
    rep_aggregation = get_aggr_func(data['metadata']['repetitionAggregation'])
    operator = data['metadata']['operator']
    threshold = int(data['metadata']['threshold'])

    query_results = [aggr_query(r[0]["values"], warmup, query_aggregation)
                     for r in data["results"]]
    result =
pd.DataFrame(query_results)
pandas.DataFrame
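# A standalone sketch of the percentile trick used in get_aggr_func above: a
# closure over Series.quantile whose __name__ is set to the requested label so
# aggregation output is named after it. make_percentile is a hypothetical wrapper
# name and the latency numbers are invented.
import pandas as pd

def make_percentile(func_string: str):
    def percentile(x):
        return x.quantile(float(func_string[1:]) / 100)
    percentile.__name__ = func_string
    return percentile

latencies = pd.Series([12, 15, 14, 90, 13, 16])
p95 = make_percentile("p95")
print(latencies.aggregate(p95))            # single 95th-percentile value
print(latencies.aggregate(["mean", p95]))  # result labelled 'mean' and 'p95'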
#Z0096 # import standard libraries import pandas as pd import numpy as np import re # import word cleaning tools import unicodedata import nltk from sklearn.preprocessing import RobustScaler, MinMaxScaler, StandardScaler # hide warnings import warnings warnings.filterwarnings("ignore") #################### Prep MVC Data #################### def misc_prep(df): ''' Initial basic preparation of auto collision data. Remove duplicate observations and convert crash_data to approprate datatime format using 24-hour time. Drops unnecessary columns. ''' # drop duplicate observations df = df.drop_duplicates() # convert crash_date to datetime dtype and 24-hour clock df.crash_date = df.crash_date.apply(lambda row: \ pd.to_datetime(row).strftime('%m/%d/%Y %H:%M')) df.crash_date = pd.to_datetime(df.crash_date) # drop columns df = df.drop(columns=['case_id', 'crash_city', 'crash_url', 'police_dept', 'crash_location', 'driver_residence', 'driver_insured']) # remove mph and convert speed_limit column to integer data type df.speed_limit = df.speed_limit.apply(lambda row: re.search(r'(\D?\d{1,2}\s)', row)\ .group(1)).astype('int') # drop observations without latitude value df = df[df.crash_latitude != 0] # rename columns rename_dict = {'accident_factor':'fault_narrative', 'at_fault':'fault_class', 'num_of_injuries':'injury_crash_total', 'speed_limit':'factors_spd_lmt_mph', 'car_contained_injury':'injury_class', 'num_of_vehicles':'crash_vehicle_count', 'num_of_occupants':'crash_occupant_count', 'car_airbags_deployed':'damage_airbag', 'occupants_in_car':'vehicle_occupant_count'} df = df.rename(columns=rename_dict) return df #################### Prep Driver Data #################### def create_dl_classes(df): ''' Use existing "driver_license_type" column to derive boolean columns for if driver as a CDL, is unlicensed, and has a Class A, B, and or M driver's license. ''' # create cdl boolean column where DL type contains "commericial" df['dl_cdl'] = df.driver_license_type.apply(lambda row: \ 1 if str(row).lower().__contains__('commercial') else 0) # create unlicensed bool column wher DL type contains "unlicensed" df['dl_unlicensed'] = df.driver_license_type.apply(lambda row: \ 1 if str(row).lower().__contains__('unlicensed') else 0) # create bool columns where DL type contains "a", "b", and "m" df['dl_class_a'] = df.driver_license_type.apply(lambda row: \ 1 if str(row).lower().__contains__('class a') else 0) df['dl_class_b'] = df.driver_license_type.apply(lambda row: \ 1 if str(row).lower().__contains__('class b') else 0) df['dl_class_m'] = df.driver_license_type.apply(lambda row: \ 1 if str(row).lower().__contains__('class m') or \ str(row).lower().__contains__('and m') else 0) return df def clean_driver_race(df): ''' Takes existing racial data on driver and converts to a simplified US Census style abbreviation of race, replaces missing values with np.nan for future imputation or other handling ''' # replace unknown with NaN df.driver_ethnicity = np.where(df.driver_ethnicity == \ 'Amer. 
indian/alaskan native', 'indigenous', df.driver_ethnicity) df.driver_ethnicity = df.driver_ethnicity.apply( lambda row: str(row).lower().strip()) df.driver_ethnicity = df.driver_ethnicity.apply( lambda row: np.nan if any(x in row for x in ['unknown','nan']) else row) df = df.rename(columns={'driver_ethnicity':'driver_race'}) return df def clean_driver_age(df): ''' Converts driver_age column into integer data type and replaces missing or inappropriate values with -1 for future imputing or other means of handling ''' # create mask where driver's age is non-digit mask = df.driver_age.apply(lambda row: bool( re.search(r'\D*\d+\D*', str(row)))) # replace non-digit drivers age with -1 df.driver_age = np.where(df.driver_age.isin(df[mask].driver_age), df.driver_age, -1) # replace driver_age less than 6 with -1 df.driver_age = np.where(df.driver_age >= 10, df.driver_age, -1) # set driver_age data type as integer df.driver_age = df.driver_age.astype('int') return df def clean_driver_gender(df): ''' ''' # replace "Unknown" gender with NaN df.driver_gender = np.where(df.driver_gender == 'Unknown', np.nan, df.driver_gender) # replace NaN with -1 for later handling df.driver_gender = np.where(df.driver_gender.isna(), -1, df.driver_gender) # change to one-hot where male gender driver == 1 df.driver_gender = np.where(df.driver_gender == 'Male', 1, df.driver_gender) df.driver_gender = np.where(df.driver_gender == 'Female', 0, df.driver_gender) # change dtype to int and rename df.driver_gender = df.driver_gender.astype('int') df = df.rename(columns={'driver_gender':'driver_male'}) return df def prep_driver_data(df): ''' Uses functions to prepare driver data with cleaned up demographics and drivers license data encoded variables for class and type, or if the driver was confirmed as unlicensed. 
Drops unneeded column used to derive other features ''' # lowercase state value and strip terminal whitespace df.driver_license_state = df.driver_license_state\ .apply(lambda row: str(row).lower().strip()) # set DL state as Texas, Other, or NaN df.driver_license_state = np.where(df.driver_license_state.isin(\ ['unknown', 'nan']), np.nan, df.driver_license_state) df.driver_license_state = np.where(df.driver_license_state.isin(\ ['texas', np.nan]), df.driver_license_state, 'other') # rename column df = df.rename(columns={'driver_license_state':'dl_state'}) # use function to change gender to one-hot df = clean_driver_gender(df) # use function to create DL class bool columns df = create_dl_classes(df) # use function to clean driver_ethnicity column df = clean_driver_race(df) # use function to clean driver_age column df = clean_driver_age(df) # drop lengthy dl_type column used to derive DL classes df = df.drop(columns='driver_license_type') return df #################### Prep Vehicle Data #################### def clean_vin(df): ''' Takes the auto collision data and prepares VIN by repalcing existing "X"s with asterisks to improve cross referencing with NHTSA VIN decoder for additional vehicle information ''' # replace Xs for improved use with NTHSA cross reference df.car_vin = df.car_vin.apply(lambda row: re.sub(r'(\w{4})X{9}(\d{4})', r'\1*********\2', row)) # set mask for appropriate VIN values mask = df.car_vin.apply(lambda row: bool(re.search(r'(\w{4})\*{9}(\d{4})', row))) # replace inappropriate VIN values with "Unknown" df.car_vin = np.where(df.car_vin.isin(df[mask].car_vin),df.car_vin, np.nan) # rename column df = df.rename(columns={'car_vin':'vehicle_id'}) return df def clean_make(df): ''' Takes existing vehicle make data and consolidates variations on labels by striping terminal whitespace and grouping mislabeled data. Additionally takes vehicles not in the top 25 makes and consolidates into "other" group for improved label encoding. Repalces unknown makes with np.nan for late imputation or other handling. ''' # strip beginning and ending white space df.car_make = df.car_make.apply(lambda row: str(row).strip().lower()) # fix partial and model matches to consolidate make groups df.car_make = df.car_make.apply(lambda row: 'gmc' \ if str(row).lower().__contains__('gm') else row) df.car_make = df.car_make.apply(lambda row: 'dodge' \ if str(row).lower().__contains__('ram') else row) # add mislabeled data to unknown category df.car_make = df.car_make.apply(lambda row: 'unknown' \ if str(row).lower() == 'd' else row) df.car_make = df.car_make.apply(lambda row: 'unknown' \ if str(row).lower() == 'nan' else row) # set all unknowns as np.nan for later imputation or other handling df.car_make = df.car_make.apply(lambda row: np.nan \ if str(row).lower().__contains__('unk') else row) # set top makes for filtering top_makes = df.car_make.value_counts(dropna=False).head(26).index # for all makes not in top 25 occurences, set as "other" df.car_make = np.where(df.car_make.isin(top_makes), df.car_make, 'other') # rename column df = df.rename(columns={'car_make':'vehicle_make'}) return df def clean_year(df): ''' Takes existing vehicle manufactured year and removes non numerical values. Replaces value with -1 for vehicles without known manufacutring year for later imputation or other handling. 
''' # pull numerical, four digit vehicle manufacture years df.car_year = df.car_year.apply(lambda row: re.sub(r'\s?(\d+)(.0)?', r'\1', str(row))) # set mislabeled and unknown years as -1 df.car_year = df.car_year.apply(lambda row: -1 if row == 'nan' or row == '0' else row) # # convert data type to integer df.car_year = df.car_year.astype('int') # # rename column df = df.rename(columns={'car_year':'vehicle_year'}) return df def clean_color(df): ''' Takes existing vehicle color data and strips terminal whitespace, replaces unknown values with np.nan for later handling, and groups less common types into "other" category for more efficient label encoding. ''' # clean up vehicle color to contain only single word df.car_color = df[~df.car_color.isna()].car_color.apply(lambda row: \ re.search(r'\W*(\w+)[\W]*', row)\ .group(1).lower()) # convert unknowns to NaN for later imputation or other handling df.car_color = np.where(df.car_color.isin(['Unknown']), np.nan, df.car_color) # group less common colors into "other" category df.car_color = np.where(df.car_color.isin(['Copper', 'Pink', 'Teal', 'Bronze', 'Turquoise', 'Purple']), 'Other', df.car_color) # rename column df = df.rename(columns={'car_color':'vehicle_color'}) return df def clean_type(df): ''' ''' # df.car_type = df.car_type.apply(lambda row: str(row).strip().lower()) non_pass = ['incomplete', 'trailer', 'low'] df.car_type = df.car_type.apply(lambda row: row if all(x not in row for x in non_pass) else 'non-passenger') df.car_type = np.where( df.car_type == 'multipurpose passenger vehicle (mpv)', 'mpv', df.car_type) df.car_type = np.where(df.car_type == 'passenger car', 'car', df.car_type) df.car_type = np.where(df.car_type == 'nan', np.nan, df.car_type) df = df.rename(columns={'car_type':'vehicle_type'}) return df def prep_vehicle_data(df): ''' Used functions defined above to prepare data related to vehicle, removing inappropraite values and missing data for better imputing and handling, and returns vehicle data prepped for exploration. ''' # use function to prepare vin with asterisk df = clean_vin(df) # use function to consolidate and clean vehicle make df = clean_make(df) # use function to remove inappropraite values from year df = clean_year(df) # use function to consolidate and clean vehicle color df = clean_color(df) # df = clean_type(df) return df #################### Prep Damage Data #################### def make_vehicle_dmg_zone(df): ''' Takes in the auto collision DataFrame and creates vehicle_impact_zone column, where each integer value corresponds to a specific aspect of vehicle damage incurrect in incident: Zone 0 *** Undercarriage Zone 1 *** Front End Zone 2 *** Back End Zone 3 *** Driver Front Quarter Zone 4 *** Driver Side Zone 5 *** Driver Back Quarter Zone 6 *** Passenger Front Quarter Zone 7 *** Passenger Side Zone 8 *** Passenger Back Quarter Zone 9 *** Severe Damage (Burned, Rollover) Zone 10 *** Motorcycle, scooter, etc. 
''' # set pattern for regex search of damage column pattern = r'^(\S+){1,4}.+' # create series of impact types impact_type = df.driver_car_damage.apply( lambda row: re.search(pattern, row).group(1)) # create column for impact zone as described in docstring df['damage_zone'] = np.where(impact_type\ .isin(['VX', 'MC']), 0, -1) df['damage_zone'] = np.where(impact_type\ .isin(['FL','FR','FC','FD']), 1, df.damage_zone) df['damage_zone'] = np.where(impact_type\ .isin(['BL','BR','BC','BD']), 2, df.damage_zone) df['damage_zone'] = np.where(impact_type\ .isin(['LFQ']), 3, df.damage_zone) df['damage_zone'] = np.where(impact_type\ .isin(['LP', 'LD', 'L&T']), 4, df.damage_zone) df['damage_zone'] = np.where(impact_type\ .isin(['LBQ']), 5, df.damage_zone) df['damage_zone'] = np.where(impact_type\ .isin(['RFQ']), 6, df.damage_zone) df['damage_zone'] = np.where(impact_type\ .isin(['RD', 'RP', 'R&T']), 7, df.damage_zone) df['damage_zone'] = np.where(impact_type\ .isin(['RBQ']), 8, df.damage_zone) df['damage_zone'] = np.where(impact_type\ .isin(['TP','L&T','R&T','VB']), 9, df.damage_zone) df['damage_zone'] = np.where(impact_type\ .isin(['MC']), 10, df.damage_zone) df.damage_zone = df.damage_zone.astype('int') return df def make_dmg_type_columns(df): ''' Takes in auto collision DataFrame creates columns with boolean values of damage type incurred in incident, where 0 is False and 1 is True "concentrated_damage": damage caused by narrow object, e.g. tree, utility pole "distributed_damage": damage cause by broad object, e.g. building wall, another motor vehicle "rollover_damage": incident included at least partial vehicle rotation with top damage "vehicle_burned": vehicle fire occured as a result of collision ''' # set pattern for regex search of damage column pattern= r'^(\S+){1,4}.+' # create series of impact types impact_type = df.driver_car_damage.apply( lambda row: re.search(pattern, row).group(1)) # create columns for boolean of damage types df['damage_concentrated'] = np.where(impact_type.isin(['FC', 'BC']), 1, 0) df['damage_distributed'] = np.where(impact_type.isin( ['FD', 'BD', 'LD', 'RD']), 1, 0) df['damage_rollover'] = np.where(impact_type.isin( ['TP', 'L&T', 'R&T']), 1, 0) df['damage_burned'] = np.where(impact_type.isin(['VB']), 1, 0) return df def prep_damage_data(df): ''' Uses functions to create column with area of damage for the vehicle and columns of possible types of damage incurred, drops driver_car_damage column that contains lengthy string descriptor ''' # use function to make vehicle_dmg_zone column df = make_vehicle_dmg_zone(df) # use function to make damage type columns df = make_dmg_type_columns(df) # drop wordy column used to construct above df = df.drop(columns='driver_car_damage') return df #################### Language Prep Tools #################### def basic_clean(string): ''' ''' # convert applicable characters to lowercase string = string.lower() # normalize unicode characters string = unicodedata.normalize('NFKD', string)\ .encode('ascii', 'ignore')\ .decode('utf-8') # substitute non-alphanums, spaces, and # single quotes/apostrophes string = re.sub(r'[^0-9a-z\s\']', '', string) return string def tokenize(string): ''' ''' # create tokenizer object tokenizer = nltk.tokenize.ToktokTokenizer() # tokenize string and return as string string = tokenizer.tokenize(string, return_str=True) return string def remove_stopwords(string, extra_words=None, exclude_words=None): ''' ''' # create stopwords list stopword_list = nltk.corpus.stopwords.words('english') # add more stop words if 
needed if extra_words != None: stopword_list.extend(extra_words) # remove stop words if needed if exclude_words != None: if len(exclude_words) > 1: for word in exclude_words: stopword_list.remove(word) else: stopword_list.remove(exclude_words[0]) # obtain list and join filtered for stopwords string = ' '.join( [word for word in string.split() if word not in stopword_list]) return string def lemmatize(string): ''' ''' # create lemmatizer object lemmer = nltk.stem.WordNetLemmatizer() # get list of lems for words in split string string_lems = [lemmer.lemmatize(word) for word in string.split()] # join stems back as string from list string = ' '.join(string_lems) return string #################### Prep Factor Data #################### def lang_prep_factor_col(df): ''' Takes the fault_narrative column from the auto collision data and prepares it in a manner for deriving one-hot encoded features for MVC causes and future use in NLP exploration. ''' # create list of common, non-insightful words in narrative more_words = ['"', '\'', 'driver', 'explain', 'narrative', 'nan', 'undefined', 'unknown'] # perform cleaning on text in fault_narrative df.fault_narrative = df.fault_narrative.apply(lambda row: lemmatize(remove_stopwords(tokenize( basic_clean(str(row))), more_words))) # remove vehicle years from string df.fault_narrative = df.fault_narrative.apply(lambda row: re.sub(r'\d{4}\s?', '', row)) df.fault_narrative = df.fault_narrative.apply(lambda row: row.strip()) return df def create_fault_narrative_cols(df): ''' Takes the auto collision DataFrame and uses keywords from the fault_narrative column to derive one-hot encoded columns for possible fault factors, where all 0s indicates "Other" causes ''' df = lang_prep_factor_col(df) # create boolean col for "distraction" cause dist = ['inatt', 'distr', 'cell'] df['fault_distraction'] = df.apply( lambda row: 1 if any(x in row.fault_narrative for x in dist) else 0, axis=1) # create boolean col for "meaneuver" related cause manu = ['lane','turn','follo','pas','back','evas'] df['fault_maneuver'] = df.apply( lambda row: 1 if any(x in row.fault_narrative for x in manu) else 0, axis=1) # create boolean col for "speed" related cause df['fault_speed'] = df.apply( lambda row: 1 if 'speed' in row.fault_narrative else 0, axis=1) # create boolean col for intoxication realted causes intx = ['drink', 'infl', 'medi'] df['fault_intoxication'] = df.apply( lambda row: 1 if any(x in row.fault_narrative for x in intx) else 0, axis=1) # create boolean col for fatigue realted causes fati = ['sleep', 'fatig', 'ill'] df['fault_fatigue'] = df.apply( lambda row: 1 if any(x in row.fault_narrative for x in fati) else 0, axis=1) # create boolean col for failing to "yield" or stop related causes yild = ['stop', 'yiel'] df['fault_yield'] = df.apply( lambda row: 1 if any(x in row.fault_narrative for x in fati) else 0, axis=1) return df #################### Prep Road Conditions #################### def clean_traffic_cats(df): ''' ''' # turn object into string and lowercase, strip terminal whitespace df.traffic_conditions = df.traffic_conditions.apply( lambda row: str(row).lower().strip()) df.traffic_conditions = df.traffic_conditions.apply( lambda row: 'signal light' if 'signal' in row else row) df.traffic_conditions = df.traffic_conditions.apply( lambda row: 'flashing light' if 'flashing' in row else row) # convert categories no in road list into "other" road = ['marked', 'none', 'signal', 'stop', 'flashing', 'center', 'nan', 'yield', 'officer'] df.traffic_conditions = 
df.traffic_conditions.apply( lambda row: row if any(x in row for x in road) else 'other') # fill string "nan" values with np.nan for later handling df.traffic_conditions = df.traffic_conditions.apply( lambda row: np.nan if row == 'nan' else row) # rename column df = df.rename(columns={'traffic_conditions':'factors_road'}) return df def clean_weather_cats(df): ''' ''' # turn object into string and lowercase, strip terminal whitespace df.weather_conditions = df.weather_conditions.apply( lambda row: str(row).lower().strip()) # convert categories no in weather list into "other" weather = ['clear', 'cloudy', 'rain', 'sleet', 'hail', 'snow', 'nan'] df.weather_conditions = df.weather_conditions.apply( lambda row: row if any(x in row for x in weather) and 'sand' not in row else 'other') # fill string "nan" values with np.nan for later handling df.weather_conditions = df.weather_conditions.apply( lambda row: np.nan if row == 'nan' else row) # rename column df = df.rename(columns={'weather_conditions':'factors_weather'}) return df #################### Final Data Prep #################### def prep_collision_data(): ''' ''' # read in csv df =
pd.read_csv('accident_data_final.csv')
pandas.read_csv
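# A small sketch of the VIN-masking step from clean_vin above: re.sub keeps the
# first four and last four characters and swaps the nine X placeholders for
# asterisks, and anything that never matches is set to NaN. The sample VIN
# strings below are made up.
import re
import numpy as np
import pandas as pd

vins = pd.Series(["1FTFXXXXXXXXX1234", "bad-vin"])
masked = vins.apply(lambda v: re.sub(r'(\w{4})X{9}(\d{4})', r'\1*********\2', v))
valid = masked.apply(lambda v: bool(re.search(r'(\w{4})\*{9}(\d{4})', v)))
cleaned = masked.where(valid, np.nan)  # non-matching entries become NaN
print(cleaned.tolist())                # ['1FTF*********1234', nan]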
import pandas as pd
import copy

pd.options.mode.chained_assignment = None

'''
Reads a JSON-lines file from path_source. For each element of the "result"
field it stores the arguments, the displayLink, link, snippet and title. It
then merges the title and the snippet, drops rows with NaN values, splits the
arguments into two columns, and saves the result as a CSV file at path_sink.
'''
def json_to_csv(path_source, path_sink):
    json = pd.read_json(path_source, lines=True)
    df = pd.DataFrame()
    for i in range(len(json)):
        element = pd.DataFrame(json.loc[i].result)
        element['args'] = json.loc[i].args['q']
        element['date'] = pd.to_datetime(json.loc[i].date['$date']).date()
        element = element.set_index(['link'])
        temp = pd.DataFrame(element[['displayLink', 'snippet', 'title', 'args']])
        df = df.append(temp)
    df['args_split'] = df.args.str.split('+')
    args = df['args_split'].apply(pd.Series)
    df['first'] = args[0]
    df['drug'] = args[1]
    df = df[~df['title'].isnull()]
    df = df[~df['snippet'].isnull()]
    df['text'] = df['snippet'] + ' ' + df['title']
    df = df.drop(columns=['snippet', 'title', 'args', 'args_split'])
    df.to_csv(path_sink)

'''
Reads a CSV file from path_source and creates a new column called "count" that
holds the number of occurrences per displayLink. Only the last link and snippet
per displayLink is kept, displayLink becomes the index, and the now redundant
displayLink column is dropped. The result is saved to path_sink.
'''
def unique_display_link(path_source, path_sink):
    df = pd.read_csv(path_source)
    df['count'] = 1
    df['count'] = df['count'].groupby(df['displayLink']).transform('sum')
    df.drop_duplicates(subset=['displayLink'], keep='last', inplace=True)
    df.sort_values(by='count', ascending=False, inplace=True)
    df.index.names = ['index']
    df = df[['displayLink', 'text', 'first', 'drug', 'count']]
    df.index = df.displayLink
    df = df.drop(columns=['displayLink'])
    df.to_csv(path_sink)

'''
Takes the path to a CSV file with the 1M most popular websites from Alexa and
the path to the CSV source. It extracts the domain names, keeps the first 1000
that are at least 5 characters long, and joins them on '|'. It then removes
every row from the source whose displayLink contains one of those 1000 domain
names.
'''
def remove_most_popular(path_source, path_sink, most_popular):
    most_popular_websites = list(pd.read_csv(most_popular, header=None).loc[:, 1].values)
    most_popular_websites = [x.split('.')[0] for x in most_popular_websites]
    most_popular_websites = [x for x in most_popular_websites if len(x) >= 5]
    most_popular_websites = most_popular_websites[:1000]
    most_popular_websites = '|'.join(most_popular_websites)
    drug_websites =
pd.read_csv(path_source, index_col=['displayLink'])
pandas.read_csv
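A hedged usage sketch of the three helpers above. The file names are hypothetical (the source does not give them), and the Alexa file is assumed to be a headerless rank,domain csv, which is what remove_most_popular indexes with .loc[:, 1]:

# Hypothetical paths -- adjust to the real scrape output.
RAW_JSON = 'searches.json'          # one search-result document per line (read with lines=True)
FLAT_CSV = 'search_results.csv'
UNIQUE_CSV = 'unique_domains.csv'
CLEAN_CSV = 'filtered_domains.csv'
ALEXA_CSV = 'alexa_top_1m.csv'      # assumed headerless rank,domain file

json_to_csv(RAW_JSON, FLAT_CSV)                        # flatten results, merge snippet + title
unique_display_link(FLAT_CSV, UNIQUE_CSV)              # one row per displayLink with a count
remove_most_popular(UNIQUE_CSV, CLEAN_CSV, ALEXA_CSV)  # drop hits on very common domains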
#import functions
from pathlib import Path
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import matplotlib.dates as mdates

pd.options.mode.chained_assignment = None  # default='warn'


def find_folders(xpath, amode):
    result = []
    listdir = os.listdir(xpath)
    print('ListDIR', listdir)
    '''
    for filename in listdir:  # loop through all the files and folders
        if os.path.isdir(os.path.join(os.path.abspath("."), filename)):  # check whether the current object is a folder
            result.append(filename)
    '''
    for filename in listdir:  # loop through all the files and folders
        if os.path.isdir(xpath / filename):  # check whether the current object is a folder
            if filename.split('_')[0] == amode:
                result.append(filename)
    result.sort()
    return result


# Create custom reports and analysis
def create_analysis_oil():
    basepath = Path('.')
    #folder_type = str(input('Escolha a pasta para analisar: (Research ou Fleets): ')).lower()
    folder_type = 'Fleets'
    report_path = basepath / 'Reports' / folder_type
    #folders_list = functions.find_folders(report_path)
    folders_list = find_folders(report_path, 'oil')
    print(folders_list)
    print('Analysis is running... \n\n')

    for group in folders_list:
        print(f'\n\n ---------------------------- Analyzing the group: ___ {group} ___ '
              f'----------------------------')
        trip_path = report_path / group / 'viagens'
        loads_path = report_path / group / 'cargas'
        ports_path = basepath / 'Inputs'
        print(trip_path)

        trips_df = pd.read_csv(trip_path / 'Resultado.csv', sep=';', encoding='cp1252')
        #print('\n\ndescribe: \n', trips_df.describe())
        trips_df[['VLPesoCargaBruta_IN', 'VLPesoCargaBruta_OUT']] = \
            trips_df[['VLPesoCargaBruta_IN', 'VLPesoCargaBruta_OUT']].fillna(0)
        trips_df['VLPesoCargaBruta'] = trips_df['VLPesoCargaBruta_IN'] + trips_df['VLPesoCargaBruta_OUT']
        trips_df['Prancha'] = trips_df['VLPesoCargaBruta'] / trips_df['TOperacao']
        trips_df = trips_df.astype({'Prancha': float})
        #trips_df_ok = trips_df[~trips_df['Prancha'].isin([np.nan, np.inf, -np.inf, 0])]
        #print('\n\ndescribe new nok: \n', trips_df.describe())
        #prancha_mean = trips_df_ok.describe()['Prancha']['mean']
        #okrecords = trips_df_ok.describe()['Prancha']['count']

        # TODO: Adjust missing values or non operational trips and time measures!!!
        trips_df = trips_df.fillna(0)

        # Print graphics - FILAS
        trips_df['TFilas'] = trips_df['TEsperaAtracacao'] + trips_df['TEsperaInicioOp'] + trips_df['TEsperaDesatracacao']
        data_filas = trips_df['TFilas']
        std_filas = int(data_filas.std(axis=0, skipna=True))
        data_filas = [record for record in data_filas if record < 5 * std_filas]
        data_size = len(data_filas)
        nbins = 30 if data_size > 30 else 20
        hist1 = plt.figure(3)
        plt.hist(data_filas, weights=(np.ones(data_size) / data_size), bins=nbins, histtype="step")
        plt.gca().yaxis.set_major_formatter(mtick.PercentFormatter(1))
        plt.xlabel('Tempo de Fila (h)', fontsize=18)
        plt.ylabel('Ocorrência (%)', fontsize=16)
        plt.savefig(trip_path / f'Filas-Hist-{group}.pdf')
        #plt.show()

        # Print graphics - PRANCHA
        trips_df['Prancha'] = pd.to_numeric(trips_df['Prancha'].fillna(0), errors='coerce')
        trips_df['Prancha'].loc[(trips_df['Prancha'].isin([np.inf, -np.inf, np.nan, 0]))
                                & (trips_df['VLPesoCargaBruta'] > 0)] = \
            trips_df.loc[(~trips_df['Prancha'].isin([np.inf, -np.inf, np.nan, 0]))
                         & (trips_df['VLPesoCargaBruta'] > 0)].describe()['Prancha']['mean']
        trips_df_prancha = trips_df.loc[trips_df['VLPesoCargaBruta'] > 0]
        data_prancha_old = trips_df_prancha['Prancha'].tolist()
        trips_df_prancha = trips_df_prancha.loc[
            (trips_df_prancha['Prancha'] < (3 * trips_df_prancha['Prancha'].std(axis=0, skipna=True)))
            & (trips_df_prancha['Prancha'] > (0.1 * trips_df_prancha['Prancha'].mean(axis=0, skipna=True)))]
        data_prancha = trips_df_prancha['Prancha'].tolist()
        diffs = [data for data in data_prancha_old if data not in data_prancha]
        data_size = len(data_prancha)
        nbins = 30 if data_size > 30 else 20
        data_prancha.sort()
        hist1 = plt.figure(1)
        plt.hist(data_prancha, weights=(np.ones(data_size) / data_size), bins=nbins, histtype="step")
        plt.gca().yaxis.set_major_formatter(mtick.PercentFormatter(1))
        plt.xlabel('Prancha (ton/h)', fontsize=18)
        plt.ylabel('Ocorrência (%)', fontsize=16)
        plt.savefig(trip_path / f'Prancha-Hist-{group}.pdf')
        #plt.show()

        # Print graphics - OPERAÇÃO
        data_ops = trips_df['TOperacao']
        std_prancha = int(data_ops.std(axis=0, skipna=True))
        data_ops = [record for record in data_ops if record < 5 * std_prancha]
        data_size = len(data_ops)
        nbins = 30 if data_size > 30 else 20
        hist2 = plt.figure(2)
        plt.hist(data_ops, weights=(np.ones(data_size) / data_size), bins=nbins, histtype="step")
        plt.gca().yaxis.set_major_formatter(mtick.PercentFormatter(1))
        plt.xlabel('Tempo de Operação (h)', fontsize=18)
        plt.ylabel('Ocorrência (%)', fontsize=16)
        plt.savefig(trip_path / f'Operação-Hist-{group}.pdf')
        #plt.show()

        # Print graphics - ESTADIA
        data_est = trips_df['TEstadia']
        std_prancha = int(data_est.std(axis=0, skipna=True))
        data_est = [record for record in data_est if record < 5 * std_prancha]
        data_size = len(data_est)
        nbins = 30 if data_size > 30 else 20
        hist1 = plt.figure(5)
        plt.hist(data_est, weights=(np.ones(data_size) / data_size), bins=nbins, histtype="step")
        plt.gca().yaxis.set_major_formatter(mtick.PercentFormatter(1))
        plt.xlabel('Tempo de Estadia (h)', fontsize=18)
        plt.ylabel('Ocorrência (%)', fontsize=16)
        plt.savefig(trip_path / f'Estadia-Hist-{group}.pdf')
        #plt.show()

        # Print graphics - VIAGENS
        # TODO: Adjust values to hours -> bug: in days, Separate ships from files
        trips_df['Chegada'] = pd.to_datetime(trips_df['Chegada'], errors='coerce', format='%d/%m/%Y %H:%M:%S')
        trips_df = trips_df.astype({'TEstadia': 'float'})
        ships_groups = trips_df.groupby(['IMO'])
        print('keys', ships_groups.groups.keys())

        # List of accumulated data of each ship to create a table
        IMOList = []
        TEsperaAtracacao = []
        TEsperaInicioOp = []
        TOperacao = []
        TEsperaDesatracacao = []
        TAtracado = []
        TEstadia = []
        TFilas = []
        VLPesoCargaBruta_IN = []
        VLPesoCargaBruta_OUT = []
        VLPesoCargaBruta = []
        Total_atracts = []

        for name, ship_df in ships_groups:
            IMOList.append(int(name))
            print(name)
            #print(ship_df)
            ship_df = ship_df.sort_values(by=['Chegada'], ascending=True)
            ship_df['Trip_Time_aux'] = ship_df.Chegada.diff() / np.timedelta64(1, 'D')
            #ship_df['Trip_Time'] = (ship_df['Trip_Time_aux'] - (ship_df['TEstadia'].shift(1) / 24))
            ship_df['Trip_Time'] = 0
            ship_df['Trip_Time'].loc[(ship_df['Trip_Time_aux'] == 0)] = 0
            ship_df['Trip_Time'].loc[(ship_df['Trip_Time_aux'] != 0)] = (ship_df['Trip_Time_aux']
                                                                         - (ship_df['TEstadia'].shift(1) / 24))
            ship_df['Trip_Time'] = ship_df['Trip_Time'].shift(-1)

            data_trip = ship_df['TEstadia']
            std_prancha = int(data_trip.std(axis=0, skipna=True))
            data_trip = [record for record in data_trip if record < 10 * std_prancha]
            data_size = len(data_trip)
            nbins = 30 if data_size > 30 else 20
            hist1 = plt.figure(4)
            plt.hist(data_trip, weights=(np.ones(data_size) / data_size), bins=nbins, histtype="step")
            plt.gca().yaxis.set_major_formatter(mtick.PercentFormatter(1))
            plt.xlabel('Tempo de Viagem (h)', fontsize=18)
            plt.ylabel('Ocorrência (%)', fontsize=16)
            plt.savefig(trip_path / f'Viagem-Hist-{group}.pdf')
            #plt.show()
            ship_df.to_csv(trip_path / f'Resultado-{name}.csv', index=False, sep=';', encoding='cp1252')

            # Analyze load transfer and Ports movement
            loads_df = pd.read_csv(loads_path / 'Cargas.csv', sep=';', encoding='cp1252')
            loads_df = loads_df[['IDAtracacao', 'Destino', 'Origem', 'Tipo Operação da Carga', 'Natureza da Carga', 'Sentido']]
            ships_loads_df = pd.merge(ship_df, loads_df, left_on=['IDAtracacao'], right_on=['IDAtracacao'], how='left')
            ports_df =
pd.read_csv(ports_path / 'portcodes.csv', sep=';', encoding='utf-8-sig')
pandas.read_csv
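The four histogram blocks in create_analysis_oil repeat one pattern: trim outliers above a multiple of the standard deviation, then plot a percent-weighted step histogram. A small helper along these lines (not part of the original script; the name, signature and default trim factor are illustrative) would capture it:

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick

def percent_hist(values, xlabel, out_path, trim_factor=5):
    # Drop NaNs and values above trim_factor * std, mirroring the blocks above.
    values = np.asarray(values, dtype=float)
    values = values[values < trim_factor * np.nanstd(values)]
    nbins = 30 if len(values) > 30 else 20
    plt.figure()
    plt.hist(values, weights=np.ones(len(values)) / len(values), bins=nbins, histtype='step')
    plt.gca().yaxis.set_major_formatter(mtick.PercentFormatter(1))
    plt.xlabel(xlabel, fontsize=18)
    plt.ylabel('Ocorrência (%)', fontsize=16)
    plt.savefig(out_path)

# e.g. percent_hist(trips_df['TFilas'], 'Tempo de Fila (h)', trip_path / f'Filas-Hist-{group}.pdf')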
# pylint: disable-msg=E1101,W0612 from datetime import datetime, time, timedelta, date import sys import os import operator from distutils.version import LooseVersion import nose import numpy as np randn = np.random.randn from pandas import (Index, Series, TimeSeries, DataFrame, isnull, date_range, Timestamp, Period, DatetimeIndex, Int64Index, to_datetime, bdate_range, Float64Index) import pandas.core.datetools as datetools import pandas.tseries.offsets as offsets import pandas.tseries.tools as tools import pandas.tseries.frequencies as fmod import pandas as pd from pandas.util.testing import assert_series_equal, assert_almost_equal import pandas.util.testing as tm from pandas.tslib import NaT, iNaT import pandas.lib as lib import pandas.tslib as tslib import pandas.index as _index from pandas.compat import range, long, StringIO, lrange, lmap, zip, product import pandas.core.datetools as dt from numpy.random import rand from numpy.testing import assert_array_equal from pandas.util.testing import assert_frame_equal import pandas.compat as compat import pandas.core.common as com from pandas import concat from pandas import _np_version_under1p7 from numpy.testing.decorators import slow def _skip_if_no_pytz(): try: import pytz except ImportError: raise nose.SkipTest("pytz not installed") def _skip_if_has_locale(): import locale lang, _ = locale.getlocale() if lang is not None: raise nose.SkipTest("Specific locale is set {0}".format(lang)) class TestTimeSeriesDuplicates(tm.TestCase): _multiprocess_can_split_ = True def setUp(self): dates = [datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 3), datetime(2000, 1, 3), datetime(2000, 1, 3), datetime(2000, 1, 4), datetime(2000, 1, 4), datetime(2000, 1, 4), datetime(2000, 1, 5)] self.dups = Series(np.random.randn(len(dates)), index=dates) def test_constructor(self): tm.assert_isinstance(self.dups, TimeSeries) tm.assert_isinstance(self.dups.index, DatetimeIndex) def test_is_unique_monotonic(self): self.assertFalse(self.dups.index.is_unique) def test_index_unique(self): uniques = self.dups.index.unique() expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3), datetime(2000, 1, 4), datetime(2000, 1, 5)]) self.assertEqual(uniques.dtype, 'M8[ns]') # sanity self.assertTrue(uniques.equals(expected)) self.assertEqual(self.dups.index.nunique(), 4) # #2563 self.assertTrue(isinstance(uniques, DatetimeIndex)) dups_local = self.dups.index.tz_localize('US/Eastern') dups_local.name = 'foo' result = dups_local.unique() expected = DatetimeIndex(expected, tz='US/Eastern') self.assertTrue(result.tz is not None) self.assertEqual(result.name, 'foo') self.assertTrue(result.equals(expected)) # NaT arr = [ 1370745748 + t for t in range(20) ] + [iNaT] idx = DatetimeIndex(arr * 3) self.assertTrue(idx.unique().equals(DatetimeIndex(arr))) self.assertEqual(idx.nunique(), 21) arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT] idx = DatetimeIndex(arr * 3) self.assertTrue(idx.unique().equals(DatetimeIndex(arr))) self.assertEqual(idx.nunique(), 21) def test_index_dupes_contains(self): d = datetime(2011, 12, 5, 20, 30) ix = DatetimeIndex([d, d]) self.assertTrue(d in ix) def test_duplicate_dates_indexing(self): ts = self.dups uniques = ts.index.unique() for date in uniques: result = ts[date] mask = ts.index == date total = (ts.index == date).sum() expected = ts[mask] if total > 1: assert_series_equal(result, expected) else: assert_almost_equal(result, expected[0]) cp = ts.copy() cp[date] = 0 
expected = Series(np.where(mask, 0, ts), index=ts.index) assert_series_equal(cp, expected) self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6)) # new index ts[datetime(2000,1,6)] = 0 self.assertEqual(ts[datetime(2000,1,6)], 0) def test_range_slice(self): idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000', '1/4/2000']) ts = Series(np.random.randn(len(idx)), index=idx) result = ts['1/2/2000':] expected = ts[1:] assert_series_equal(result, expected) result = ts['1/2/2000':'1/3/2000'] expected = ts[1:4] assert_series_equal(result, expected) def test_groupby_average_dup_values(self): result = self.dups.groupby(level=0).mean() expected = self.dups.groupby(self.dups.index).mean() assert_series_equal(result, expected) def test_indexing_over_size_cutoff(self): import datetime # #1821 old_cutoff = _index._SIZE_CUTOFF try: _index._SIZE_CUTOFF = 1000 # create large list of non periodic datetime dates = [] sec = datetime.timedelta(seconds=1) half_sec = datetime.timedelta(microseconds=500000) d = datetime.datetime(2011, 12, 5, 20, 30) n = 1100 for i in range(n): dates.append(d) dates.append(d + sec) dates.append(d + sec + half_sec) dates.append(d + sec + sec + half_sec) d += 3 * sec # duplicate some values in the list duplicate_positions = np.random.randint(0, len(dates) - 1, 20) for p in duplicate_positions: dates[p + 1] = dates[p] df = DataFrame(np.random.randn(len(dates), 4), index=dates, columns=list('ABCD')) pos = n * 3 timestamp = df.index[pos] self.assertIn(timestamp, df.index) # it works! df.ix[timestamp] self.assertTrue(len(df.ix[[timestamp]]) > 0) finally: _index._SIZE_CUTOFF = old_cutoff def test_indexing_unordered(self): # GH 2437 rng = date_range(start='2011-01-01', end='2011-01-15') ts = Series(randn(len(rng)), index=rng) ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]]) for t in ts.index: s = str(t) expected = ts[t] result = ts2[t] self.assertTrue(expected == result) # GH 3448 (ranges) def compare(slobj): result = ts2[slobj].copy() result = result.sort_index() expected = ts[slobj] assert_series_equal(result,expected) compare(slice('2011-01-01','2011-01-15')) compare(slice('2010-12-30','2011-01-15')) compare(slice('2011-01-01','2011-01-16')) # partial ranges compare(slice('2011-01-01','2011-01-6')) compare(slice('2011-01-06','2011-01-8')) compare(slice('2011-01-06','2011-01-12')) # single values result = ts2['2011'].sort_index() expected = ts['2011'] assert_series_equal(result,expected) # diff freq rng = date_range(datetime(2005, 1, 1), periods=20, freq='M') ts = Series(np.arange(len(rng)), index=rng) ts = ts.take(np.random.permutation(20)) result = ts['2005'] for t in result.index: self.assertTrue(t.year == 2005) def test_indexing(self): idx = date_range("2001-1-1", periods=20, freq='M') ts = Series(np.random.rand(len(idx)),index=idx) # getting # GH 3070, make sure semantics work on Series/Frame expected = ts['2001'] df = DataFrame(dict(A = ts)) result = df['2001']['A'] assert_series_equal(expected,result) # setting ts['2001'] = 1 expected = ts['2001'] df.loc['2001','A'] = 1 result = df['2001']['A'] assert_series_equal(expected,result) # GH3546 (not including times on the last day) idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H') ts = Series(lrange(len(idx)), index=idx) expected = ts['2013-05'] assert_series_equal(expected,ts) idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S') ts = Series(lrange(len(idx)), index=idx) expected = ts['2013-05'] assert_series_equal(expected,ts) idx = [ Timestamp('2013-05-31 00:00'), 
Timestamp(datetime(2013,5,31,23,59,59,999999))] ts = Series(lrange(len(idx)), index=idx) expected = ts['2013'] assert_series_equal(expected,ts) # GH 3925, indexing with a seconds resolution string / datetime object df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s')) expected = df.loc[[df.index[2]]] result = df['2012-01-02 18:01:02'] assert_frame_equal(result,expected) # this is a single date, so will raise self.assertRaises(KeyError, df.__getitem__, df.index[2],) def test_recreate_from_data(self): if _np_version_under1p7: freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H'] else: freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C'] for f in freqs: org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1) idx = DatetimeIndex(org, freq=f) self.assertTrue(idx.equals(org)) # unbale to create tz-aware 'A' and 'C' freq if _np_version_under1p7: freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H'] else: freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N'] for f in freqs: org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1) idx = DatetimeIndex(org, freq=f, tz='US/Pacific') self.assertTrue(idx.equals(org)) def assert_range_equal(left, right): assert(left.equals(right)) assert(left.freq == right.freq) assert(left.tz == right.tz) class TestTimeSeries(tm.TestCase): _multiprocess_can_split_ = True def test_is_(self): dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M') self.assertTrue(dti.is_(dti)) self.assertTrue(dti.is_(dti.view())) self.assertFalse(dti.is_(dti.copy())) def test_dti_slicing(self): dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M') dti2 = dti[[1, 3, 5]] v1 = dti2[0] v2 = dti2[1] v3 = dti2[2] self.assertEqual(v1, Timestamp('2/28/2005')) self.assertEqual(v2, Timestamp('4/30/2005')) self.assertEqual(v3, Timestamp('6/30/2005')) # don't carry freq through irregular slicing self.assertIsNone(dti2.freq) def test_pass_datetimeindex_to_index(self): # Bugs in #1396 rng = date_range('1/1/2000', '3/1/2000') idx = Index(rng, dtype=object) expected = Index(rng.to_pydatetime(), dtype=object) self.assert_numpy_array_equal(idx.values, expected.values) def test_contiguous_boolean_preserve_freq(self): rng = date_range('1/1/2000', '3/1/2000', freq='B') mask = np.zeros(len(rng), dtype=bool) mask[10:20] = True masked = rng[mask] expected = rng[10:20] self.assertIsNotNone(expected.freq) assert_range_equal(masked, expected) mask[22] = True masked = rng[mask] self.assertIsNone(masked.freq) def test_getitem_median_slice_bug(self): index = date_range('20090415', '20090519', freq='2B') s = Series(np.random.randn(13), index=index) indexer = [slice(6, 7, None)] result = s[indexer] expected = s[indexer[0]] assert_series_equal(result, expected) def test_series_box_timestamp(self): rng = date_range('20090415', '20090519', freq='B') s = Series(rng) tm.assert_isinstance(s[5], Timestamp) rng = date_range('20090415', '20090519', freq='B') s = Series(rng, index=rng) tm.assert_isinstance(s[5], Timestamp) tm.assert_isinstance(s.iget_value(5), Timestamp) def test_date_range_ambiguous_arguments(self): # #2538 start = datetime(2011, 1, 1, 5, 3, 40) end = datetime(2011, 1, 1, 8, 9, 40) self.assertRaises(ValueError, date_range, start, end, freq='s', periods=10) def test_timestamp_to_datetime(self): _skip_if_no_pytz() rng = date_range('20090415', '20090519', tz='US/Eastern') stamp = rng[0] dtval = stamp.to_pydatetime() self.assertEqual(stamp, 
dtval) self.assertEqual(stamp.tzinfo, dtval.tzinfo) def test_index_convert_to_datetime_array(self): _skip_if_no_pytz() def _check_rng(rng): converted = rng.to_pydatetime() tm.assert_isinstance(converted, np.ndarray) for x, stamp in zip(converted, rng): tm.assert_isinstance(x, datetime) self.assertEqual(x, stamp.to_pydatetime()) self.assertEqual(x.tzinfo, stamp.tzinfo) rng = date_range('20090415', '20090519') rng_eastern = date_range('20090415', '20090519', tz='US/Eastern') rng_utc = date_range('20090415', '20090519', tz='utc') _check_rng(rng) _check_rng(rng_eastern) _check_rng(rng_utc) def test_ctor_str_intraday(self): rng = DatetimeIndex(['1-1-2000 00:00:01']) self.assertEqual(rng[0].second, 1) def test_series_ctor_plus_datetimeindex(self): rng = date_range('20090415', '20090519', freq='B') data = dict((k, 1) for k in rng) result = Series(data, index=rng) self.assertIs(result.index, rng) def test_series_pad_backfill_limit(self): index = np.arange(10) s = Series(np.random.randn(10), index=index) result = s[:2].reindex(index, method='pad', limit=5) expected = s[:2].reindex(index).fillna(method='pad') expected[-3:] = np.nan assert_series_equal(result, expected) result = s[-2:].reindex(index, method='backfill', limit=5) expected = s[-2:].reindex(index).fillna(method='backfill') expected[:3] = np.nan assert_series_equal(result, expected) def test_series_fillna_limit(self): index = np.arange(10) s = Series(np.random.randn(10), index=index) result = s[:2].reindex(index) result = result.fillna(method='pad', limit=5) expected = s[:2].reindex(index).fillna(method='pad') expected[-3:] = np.nan assert_series_equal(result, expected) result = s[-2:].reindex(index) result = result.fillna(method='bfill', limit=5) expected = s[-2:].reindex(index).fillna(method='backfill') expected[:3] = np.nan assert_series_equal(result, expected) def test_frame_pad_backfill_limit(self): index = np.arange(10) df = DataFrame(np.random.randn(10, 4), index=index) result = df[:2].reindex(index, method='pad', limit=5) expected = df[:2].reindex(index).fillna(method='pad') expected.values[-3:] = np.nan tm.assert_frame_equal(result, expected) result = df[-2:].reindex(index, method='backfill', limit=5) expected = df[-2:].reindex(index).fillna(method='backfill') expected.values[:3] = np.nan tm.assert_frame_equal(result, expected) def test_frame_fillna_limit(self): index = np.arange(10) df = DataFrame(np.random.randn(10, 4), index=index) result = df[:2].reindex(index) result = result.fillna(method='pad', limit=5) expected = df[:2].reindex(index).fillna(method='pad') expected.values[-3:] = np.nan tm.assert_frame_equal(result, expected) result = df[-2:].reindex(index) result = result.fillna(method='backfill', limit=5) expected = df[-2:].reindex(index).fillna(method='backfill') expected.values[:3] = np.nan tm.assert_frame_equal(result, expected) def test_frame_setitem_timestamp(self): # 2155 columns = DatetimeIndex(start='1/1/2012', end='2/1/2012', freq=datetools.bday) index = lrange(10) data = DataFrame(columns=columns, index=index) t = datetime(2012, 11, 1) ts = Timestamp(t) data[ts] = np.nan # works def test_sparse_series_fillna_limit(self): index = np.arange(10) s = Series(np.random.randn(10), index=index) ss = s[:2].reindex(index).to_sparse() result = ss.fillna(method='pad', limit=5) expected = ss.fillna(method='pad', limit=5) expected = expected.to_dense() expected[-3:] = np.nan expected = expected.to_sparse() assert_series_equal(result, expected) ss = s[-2:].reindex(index).to_sparse() result = ss.fillna(method='backfill', 
limit=5) expected = ss.fillna(method='backfill') expected = expected.to_dense() expected[:3] = np.nan expected = expected.to_sparse() assert_series_equal(result, expected) def test_sparse_series_pad_backfill_limit(self): index = np.arange(10) s = Series(np.random.randn(10), index=index) s = s.to_sparse() result = s[:2].reindex(index, method='pad', limit=5) expected = s[:2].reindex(index).fillna(method='pad') expected = expected.to_dense() expected[-3:] = np.nan expected = expected.to_sparse() assert_series_equal(result, expected) result = s[-2:].reindex(index, method='backfill', limit=5) expected = s[-2:].reindex(index).fillna(method='backfill') expected = expected.to_dense() expected[:3] = np.nan expected = expected.to_sparse() assert_series_equal(result, expected) def test_sparse_frame_pad_backfill_limit(self): index = np.arange(10) df = DataFrame(np.random.randn(10, 4), index=index) sdf = df.to_sparse() result = sdf[:2].reindex(index, method='pad', limit=5) expected = sdf[:2].reindex(index).fillna(method='pad') expected = expected.to_dense() expected.values[-3:] = np.nan expected = expected.to_sparse() tm.assert_frame_equal(result, expected) result = sdf[-2:].reindex(index, method='backfill', limit=5) expected = sdf[-2:].reindex(index).fillna(method='backfill') expected = expected.to_dense() expected.values[:3] = np.nan expected = expected.to_sparse() tm.assert_frame_equal(result, expected) def test_sparse_frame_fillna_limit(self): index = np.arange(10) df = DataFrame(np.random.randn(10, 4), index=index) sdf = df.to_sparse() result = sdf[:2].reindex(index) result = result.fillna(method='pad', limit=5) expected = sdf[:2].reindex(index).fillna(method='pad') expected = expected.to_dense() expected.values[-3:] = np.nan expected = expected.to_sparse() tm.assert_frame_equal(result, expected) result = sdf[-2:].reindex(index) result = result.fillna(method='backfill', limit=5) expected = sdf[-2:].reindex(index).fillna(method='backfill') expected = expected.to_dense() expected.values[:3] = np.nan expected = expected.to_sparse() tm.assert_frame_equal(result, expected) def test_pad_require_monotonicity(self): rng = date_range('1/1/2000', '3/1/2000', freq='B') rng2 = rng[::2][::-1] self.assertRaises(ValueError, rng2.get_indexer, rng, method='pad') def test_frame_ctor_datetime64_column(self): rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s') dates = np.asarray(rng) df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates}) self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]'))) def test_frame_add_datetime64_column(self): rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s') df = DataFrame(index=np.arange(len(rng))) df['A'] = rng self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]'))) def test_frame_datetime64_pre1900_repr(self): df = DataFrame({'year': date_range('1/1/1700', periods=50, freq='A-DEC')}) # it works! 
repr(df) def test_frame_add_datetime64_col_other_units(self): n = 100 units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y'] ns_dtype = np.dtype('M8[ns]') for unit in units: dtype = np.dtype('M8[%s]' % unit) vals = np.arange(n, dtype=np.int64).view(dtype) df = DataFrame({'ints': np.arange(n)}, index=np.arange(n)) df[unit] = vals ex_vals = to_datetime(vals.astype('O')) self.assertEqual(df[unit].dtype, ns_dtype) self.assertTrue((df[unit].values == ex_vals).all()) # Test insertion into existing datetime64 column df = DataFrame({'ints': np.arange(n)}, index=np.arange(n)) df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype) for unit in units: dtype = np.dtype('M8[%s]' % unit) vals = np.arange(n, dtype=np.int64).view(dtype) tmp = df.copy() tmp['dates'] = vals ex_vals = to_datetime(vals.astype('O')) self.assertTrue((tmp['dates'].values == ex_vals).all()) def test_to_datetime_unit(self): epoch = 1370745748 s = Series([ epoch + t for t in range(20) ]) result = to_datetime(s,unit='s') expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ]) assert_series_equal(result,expected) s = Series([ epoch + t for t in range(20) ]).astype(float) result = to_datetime(s,unit='s') expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ]) assert_series_equal(result,expected) s = Series([ epoch + t for t in range(20) ] + [iNaT]) result = to_datetime(s,unit='s') expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]) assert_series_equal(result,expected) s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float) result = to_datetime(s,unit='s') expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]) assert_series_equal(result,expected) s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True) result = to_datetime(s,unit='s') expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]) assert_series_equal(result,expected) def test_series_ctor_datetime64(self): rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s') dates = np.asarray(rng) series = Series(dates) self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]'))) def test_index_cast_datetime64_other_units(self): arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]') idx = Index(arr) self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all()) def test_index_astype_datetime64(self): idx = Index([datetime(2012, 1, 1)], dtype=object) if not _np_version_under1p7: raise nose.SkipTest("test only valid in numpy < 1.7") casted = idx.astype(np.dtype('M8[D]')) expected = DatetimeIndex(idx.values) tm.assert_isinstance(casted, DatetimeIndex) self.assertTrue(casted.equals(expected)) def test_reindex_series_add_nat(self): rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s') series = Series(rng) result = series.reindex(lrange(15)) self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]'))) mask = result.isnull() self.assertTrue(mask[-5:].all()) self.assertFalse(mask[:-5].any()) def test_reindex_frame_add_nat(self): rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s') df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng}) result = df.reindex(lrange(15)) self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]'))) mask = com.isnull(result)['B'] self.assertTrue(mask[-5:].all()) self.assertFalse(mask[:-5].any()) def test_series_repr_nat(self): 
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]') result = repr(series) expected = ('0 1970-01-01 00:00:00\n' '1 1970-01-01 00:00:00.000001\n' '2 1970-01-01 00:00:00.000002\n' '3 NaT\n' 'dtype: datetime64[ns]') self.assertEqual(result, expected) def test_fillna_nat(self): series = Series([0, 1, 2, iNaT], dtype='M8[ns]') filled = series.fillna(method='pad') filled2 = series.fillna(value=series.values[2]) expected = series.copy() expected.values[3] = expected.values[2] assert_series_equal(filled, expected) assert_series_equal(filled2, expected) df = DataFrame({'A': series}) filled = df.fillna(method='pad') filled2 = df.fillna(value=series.values[2]) expected = DataFrame({'A': expected}) assert_frame_equal(filled, expected) assert_frame_equal(filled2, expected) series = Series([iNaT, 0, 1, 2], dtype='M8[ns]') filled = series.fillna(method='bfill') filled2 = series.fillna(value=series[1]) expected = series.copy() expected[0] = expected[1] assert_series_equal(filled, expected) assert_series_equal(filled2, expected) df = DataFrame({'A': series}) filled = df.fillna(method='bfill') filled2 = df.fillna(value=series[1]) expected = DataFrame({'A': expected}) assert_frame_equal(filled, expected) assert_frame_equal(filled2, expected) def test_string_na_nat_conversion(self): # GH #999, #858 from pandas.compat import parse_date strings = np.array(['1/1/2000', '1/2/2000', np.nan, '1/4/2000, 12:34:56'], dtype=object) expected = np.empty(4, dtype='M8[ns]') for i, val in enumerate(strings): if com.isnull(val): expected[i] = iNaT else: expected[i] = parse_date(val) result = tslib.array_to_datetime(strings) assert_almost_equal(result, expected) result2 = to_datetime(strings) tm.assert_isinstance(result2, DatetimeIndex) assert_almost_equal(result, result2) malformed = np.array(['1/100/2000', np.nan], dtype=object) result = to_datetime(malformed) assert_almost_equal(result, malformed) self.assertRaises(ValueError, to_datetime, malformed, errors='raise') idx = ['a', 'b', 'c', 'd', 'e'] series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan, '1/5/2000'], index=idx, name='foo') dseries = Series([to_datetime('1/1/2000'), np.nan, to_datetime('1/3/2000'), np.nan, to_datetime('1/5/2000')], index=idx, name='foo') result = to_datetime(series) dresult = to_datetime(dseries) expected = Series(np.empty(5, dtype='M8[ns]'), index=idx) for i in range(5): x = series[i] if isnull(x): expected[i] = iNaT else: expected[i] = to_datetime(x) assert_series_equal(result, expected) self.assertEqual(result.name, 'foo') assert_series_equal(dresult, expected) self.assertEqual(dresult.name, 'foo') def test_to_datetime_iso8601(self): result = to_datetime(["2012-01-01 00:00:00"]) exp = Timestamp("2012-01-01 00:00:00") self.assertEqual(result[0], exp) result = to_datetime(['20121001']) # bad iso 8601 exp = Timestamp('2012-10-01') self.assertEqual(result[0], exp) def test_to_datetime_default(self): rs = to_datetime('2001') xp = datetime(2001, 1, 1) self.assertTrue(rs, xp) #### dayfirst is essentially broken #### to_datetime('01-13-2012', dayfirst=True) #### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True)) def test_to_datetime_on_datetime64_series(self): # #2699 s = Series(date_range('1/1/2000', periods=10)) result = to_datetime(s) self.assertEqual(result[0], s[0]) def test_to_datetime_with_apply(self): # this is only locale tested with US/None locales _skip_if_has_locale() # GH 5195 # with a format and coerce a single item to_datetime fails td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3]) expected = 
pd.to_datetime(td, format='%b %y') result = td.apply(pd.to_datetime, format='%b %y') assert_series_equal(result, expected) td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3]) self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y')) self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y')) expected = pd.to_datetime(td, format='%b %y', coerce=True) result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True)) assert_series_equal(result, expected) def test_nat_vector_field_access(self): idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000']) fields = ['year', 'quarter', 'month', 'day', 'hour', 'minute', 'second', 'microsecond', 'nanosecond', 'week', 'dayofyear'] for field in fields: result = getattr(idx, field) expected = [getattr(x, field) if x is not NaT else -1 for x in idx] self.assert_numpy_array_equal(result, expected) def test_nat_scalar_field_access(self): fields = ['year', 'quarter', 'month', 'day', 'hour', 'minute', 'second', 'microsecond', 'nanosecond', 'week', 'dayofyear'] for field in fields: result = getattr(NaT, field) self.assertEqual(result, -1) self.assertEqual(NaT.weekday(), -1) def test_to_datetime_types(self): # empty string result = to_datetime('') self.assertIs(result, NaT) result = to_datetime(['', '']) self.assertTrue(isnull(result).all()) # ints result = Timestamp(0) expected = to_datetime(0) self.assertEqual(result, expected) # GH 3888 (strings) expected = to_datetime(['2012'])[0] result = to_datetime('2012') self.assertEqual(result, expected) ### array = ['2012','20120101','20120101 12:01:01'] array = ['20120101','20120101 12:01:01'] expected = list(to_datetime(array)) result = lmap(Timestamp,array) tm.assert_almost_equal(result,expected) ### currently fails ### ### result = Timestamp('2012') ### expected = to_datetime('2012') ### self.assertEqual(result, expected) def test_to_datetime_unprocessable_input(self): # GH 4928 self.assert_numpy_array_equal( to_datetime([1, '1']), np.array([1, '1'], dtype='O') ) self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise') def test_to_datetime_other_datetime64_units(self): # 5/25/2012 scalar = np.int64(1337904000000000).view('M8[us]') as_obj = scalar.astype('O') index = DatetimeIndex([scalar]) self.assertEqual(index[0], scalar.astype('O')) value = Timestamp(scalar) self.assertEqual(value, as_obj) def test_to_datetime_list_of_integers(self): rng = date_range('1/1/2000', periods=20) rng = DatetimeIndex(rng.values) ints = list(rng.asi8) result = DatetimeIndex(ints) self.assertTrue(rng.equals(result)) def test_to_datetime_dt64s(self): in_bound_dts = [ np.datetime64('2000-01-01'), np.datetime64('2000-01-02'), ] for dt in in_bound_dts: self.assertEqual( pd.to_datetime(dt), Timestamp(dt) ) oob_dts = [ np.datetime64('1000-01-01'), np.datetime64('5000-01-02'), ] for dt in oob_dts: self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise') self.assertRaises(ValueError, tslib.Timestamp, dt) self.assertIs(pd.to_datetime(dt, coerce=True), NaT) def test_to_datetime_array_of_dt64s(self): dts = [ np.datetime64('2000-01-01'), np.datetime64('2000-01-02'), ] # Assuming all datetimes are in bounds, to_datetime() returns # an array that is equal to Timestamp() parsing self.assert_numpy_array_equal( pd.to_datetime(dts, box=False), np.array([Timestamp(x).asm8 for x in dts]) ) # A list of datetimes where the last one is out of bounds dts_with_oob = dts + [np.datetime64('9999-01-01')] self.assertRaises( ValueError, pd.to_datetime, dts_with_oob, coerce=False, errors='raise' 
) self.assert_numpy_array_equal( pd.to_datetime(dts_with_oob, box=False, coerce=True), np.array( [ Timestamp(dts_with_oob[0]).asm8, Timestamp(dts_with_oob[1]).asm8, iNaT, ], dtype='M8' ) ) # With coerce=False and errors='ignore', out of bounds datetime64s # are converted to their .item(), which depending on the version of # numpy is either a python datetime.datetime or datetime.date self.assert_numpy_array_equal( pd.to_datetime(dts_with_oob, box=False, coerce=False), np.array( [dt.item() for dt in dts_with_oob], dtype='O' ) ) def test_index_to_datetime(self): idx = Index(['1/1/2000', '1/2/2000', '1/3/2000']) result = idx.to_datetime() expected = DatetimeIndex(datetools.to_datetime(idx.values)) self.assertTrue(result.equals(expected)) today = datetime.today() idx = Index([today], dtype=object) result = idx.to_datetime() expected = DatetimeIndex([today]) self.assertTrue(result.equals(expected)) def test_to_datetime_freq(self): xp = bdate_range('2000-1-1', periods=10, tz='UTC') rs = xp.to_datetime() self.assertEqual(xp.freq, rs.freq) self.assertEqual(xp.tzinfo, rs.tzinfo) def test_range_misspecified(self): # GH #1095 self.assertRaises(ValueError, date_range, '1/1/2000') self.assertRaises(ValueError, date_range, end='1/1/2000') self.assertRaises(ValueError, date_range, periods=10) self.assertRaises(ValueError, date_range, '1/1/2000', freq='H') self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H') self.assertRaises(ValueError, date_range, periods=10, freq='H') def test_reasonable_keyerror(self): # GH #1062 index = DatetimeIndex(['1/3/2000']) try: index.get_loc('1/1/2000') except KeyError as e: self.assertIn('2000', str(e)) def test_reindex_with_datetimes(self): rng = date_range('1/1/2000', periods=20) ts = Series(np.random.randn(20), index=rng) result = ts.reindex(list(ts.index[5:10])) expected = ts[5:10] tm.assert_series_equal(result, expected) result = ts[list(ts.index[5:10])] tm.assert_series_equal(result, expected) def test_promote_datetime_date(self): rng = date_range('1/1/2000', periods=20) ts = Series(np.random.randn(20), index=rng) ts_slice = ts[5:] ts2 = ts_slice.copy() ts2.index = [x.date() for x in ts2.index] result = ts + ts2 result2 = ts2 + ts expected = ts + ts[5:] assert_series_equal(result, expected) assert_series_equal(result2, expected) # test asfreq result = ts2.asfreq('4H', method='ffill') expected = ts[5:].asfreq('4H', method='ffill') assert_series_equal(result, expected) result = rng.get_indexer(ts2.index) expected = rng.get_indexer(ts_slice.index) self.assert_numpy_array_equal(result, expected) def test_asfreq_normalize(self): rng = date_range('1/1/2000 09:30', periods=20) norm = date_range('1/1/2000', periods=20) vals = np.random.randn(20) ts = Series(vals, index=rng) result = ts.asfreq('D', normalize=True) norm = date_range('1/1/2000', periods=20) expected = Series(vals, index=norm) assert_series_equal(result, expected) vals = np.random.randn(20, 3) ts = DataFrame(vals, index=rng) result = ts.asfreq('D', normalize=True) expected = DataFrame(vals, index=norm) assert_frame_equal(result, expected) def test_date_range_gen_error(self): rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min') self.assertEqual(len(rng), 4) def test_first_subset(self): ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h') result = ts.first('10d') self.assertEqual(len(result), 20) ts = _simple_ts('1/1/2000', '1/1/2010') result = ts.first('10d') self.assertEqual(len(result), 10) result = ts.first('3M') expected = ts[:'3/31/2000'] assert_series_equal(result, expected) result = 
ts.first('21D') expected = ts[:21] assert_series_equal(result, expected) result = ts[:0].first('3M') assert_series_equal(result, ts[:0]) def test_last_subset(self): ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h') result = ts.last('10d') self.assertEqual(len(result), 20) ts = _simple_ts('1/1/2000', '1/1/2010') result = ts.last('10d') self.assertEqual(len(result), 10) result = ts.last('21D') expected = ts['12/12/2009':] assert_series_equal(result, expected) result = ts.last('21D') expected = ts[-21:] assert_series_equal(result, expected) result = ts[:0].last('3M') assert_series_equal(result, ts[:0]) def test_add_offset(self): rng = date_range('1/1/2000', '2/1/2000') result = rng + offsets.Hour(2) expected = date_range('1/1/2000 02:00', '2/1/2000 02:00') self.assertTrue(result.equals(expected)) def test_format_pre_1900_dates(self): rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC') rng.format() ts = Series(1, index=rng) repr(ts) def test_repeat(self): rng = date_range('1/1/2000', '1/1/2001') result = rng.repeat(5) self.assertIsNone(result.freq) self.assertEqual(len(result), 5 * len(rng)) def test_at_time(self): rng = date_range('1/1/2000', '1/5/2000', freq='5min') ts = Series(np.random.randn(len(rng)), index=rng) rs = ts.at_time(rng[1]) self.assertTrue((rs.index.hour == rng[1].hour).all()) self.assertTrue((rs.index.minute == rng[1].minute).all()) self.assertTrue((rs.index.second == rng[1].second).all()) result = ts.at_time('9:30') expected = ts.at_time(time(9, 30)) assert_series_equal(result, expected) df = DataFrame(np.random.randn(len(rng), 3), index=rng) result = ts[time(9, 30)] result_df = df.ix[time(9, 30)] expected = ts[(rng.hour == 9) & (rng.minute == 30)] exp_df = df[(rng.hour == 9) & (rng.minute == 30)] # expected.index = date_range('1/1/2000', '1/4/2000') assert_series_equal(result, expected) tm.assert_frame_equal(result_df, exp_df) chunk = df.ix['1/4/2000':] result = chunk.ix[time(9, 30)] expected = result_df[-1:] tm.assert_frame_equal(result, expected) # midnight, everything rng = date_range('1/1/2000', '1/31/2000') ts = Series(np.random.randn(len(rng)), index=rng) result = ts.at_time(time(0, 0)) assert_series_equal(result, ts) # time doesn't exist rng = date_range('1/1/2012', freq='23Min', periods=384) ts = Series(np.random.randn(len(rng)), rng) rs = ts.at_time('16:00') self.assertEqual(len(rs), 0) def test_at_time_frame(self): rng = date_range('1/1/2000', '1/5/2000', freq='5min') ts = DataFrame(np.random.randn(len(rng), 2), index=rng) rs = ts.at_time(rng[1]) self.assertTrue((rs.index.hour == rng[1].hour).all()) self.assertTrue((rs.index.minute == rng[1].minute).all()) self.assertTrue((rs.index.second == rng[1].second).all()) result = ts.at_time('9:30') expected = ts.at_time(time(9, 30)) assert_frame_equal(result, expected) result = ts.ix[time(9, 30)] expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)] assert_frame_equal(result, expected) # midnight, everything rng = date_range('1/1/2000', '1/31/2000') ts = DataFrame(np.random.randn(len(rng), 3), index=rng) result = ts.at_time(time(0, 0)) assert_frame_equal(result, ts) # time doesn't exist rng = date_range('1/1/2012', freq='23Min', periods=384) ts = DataFrame(np.random.randn(len(rng), 2), rng) rs = ts.at_time('16:00') self.assertEqual(len(rs), 0) def test_between_time(self): rng = date_range('1/1/2000', '1/5/2000', freq='5min') ts = Series(np.random.randn(len(rng)), index=rng) stime = time(0, 0) etime = time(1, 0) close_open = product([True, False], [True, False]) for inc_start, inc_end in close_open: filtered = 
ts.between_time(stime, etime, inc_start, inc_end) exp_len = 13 * 4 + 1 if not inc_start: exp_len -= 5 if not inc_end: exp_len -= 4 self.assertEqual(len(filtered), exp_len) for rs in filtered.index: t = rs.time() if inc_start: self.assertTrue(t >= stime) else: self.assertTrue(t > stime) if inc_end: self.assertTrue(t <= etime) else: self.assertTrue(t < etime) result = ts.between_time('00:00', '01:00') expected = ts.between_time(stime, etime) assert_series_equal(result, expected) # across midnight rng = date_range('1/1/2000', '1/5/2000', freq='5min') ts = Series(np.random.randn(len(rng)), index=rng) stime = time(22, 0) etime = time(9, 0) close_open = product([True, False], [True, False]) for inc_start, inc_end in close_open: filtered = ts.between_time(stime, etime, inc_start, inc_end) exp_len = (12 * 11 + 1) * 4 + 1 if not inc_start: exp_len -= 4 if not inc_end: exp_len -= 4 self.assertEqual(len(filtered), exp_len) for rs in filtered.index: t = rs.time() if inc_start: self.assertTrue((t >= stime) or (t <= etime)) else: self.assertTrue((t > stime) or (t <= etime)) if inc_end: self.assertTrue((t <= etime) or (t >= stime)) else: self.assertTrue((t < etime) or (t >= stime)) def test_between_time_frame(self): rng = date_range('1/1/2000', '1/5/2000', freq='5min') ts = DataFrame(np.random.randn(len(rng), 2), index=rng) stime = time(0, 0) etime = time(1, 0) close_open = product([True, False], [True, False]) for inc_start, inc_end in close_open: filtered = ts.between_time(stime, etime, inc_start, inc_end) exp_len = 13 * 4 + 1 if not inc_start: exp_len -= 5 if not inc_end: exp_len -= 4 self.assertEqual(len(filtered), exp_len) for rs in filtered.index: t = rs.time() if inc_start: self.assertTrue(t >= stime) else: self.assertTrue(t > stime) if inc_end: self.assertTrue(t <= etime) else: self.assertTrue(t < etime) result = ts.between_time('00:00', '01:00') expected = ts.between_time(stime, etime) assert_frame_equal(result, expected) # across midnight rng = date_range('1/1/2000', '1/5/2000', freq='5min') ts = DataFrame(np.random.randn(len(rng), 2), index=rng) stime = time(22, 0) etime = time(9, 0) close_open = product([True, False], [True, False]) for inc_start, inc_end in close_open: filtered = ts.between_time(stime, etime, inc_start, inc_end) exp_len = (12 * 11 + 1) * 4 + 1 if not inc_start: exp_len -= 4 if not inc_end: exp_len -= 4 self.assertEqual(len(filtered), exp_len) for rs in filtered.index: t = rs.time() if inc_start: self.assertTrue((t >= stime) or (t <= etime)) else: self.assertTrue((t > stime) or (t <= etime)) if inc_end: self.assertTrue((t <= etime) or (t >= stime)) else: self.assertTrue((t < etime) or (t >= stime)) def test_dti_constructor_preserve_dti_freq(self): rng = date_range('1/1/2000', '1/2/2000', freq='5min') rng2 = DatetimeIndex(rng) self.assertEqual(rng.freq, rng2.freq) def test_normalize(self): rng = date_range('1/1/2000 9:30', periods=10, freq='D') result = rng.normalize() expected = date_range('1/1/2000', periods=10, freq='D') self.assertTrue(result.equals(expected)) rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]")) rng_ns_normalized = rng_ns.normalize() expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]")) self.assertTrue(rng_ns_normalized.equals(expected)) self.assertTrue(result.is_normalized) self.assertFalse(rng.is_normalized) def test_to_period(self): from pandas.tseries.period import period_range ts = _simple_ts('1/1/2000', '1/1/2001') pts = ts.to_period() exp = 
ts.copy() exp.index = period_range('1/1/2000', '1/1/2001') assert_series_equal(pts, exp) pts = ts.to_period('M') self.assertTrue(pts.index.equals(exp.index.asfreq('M'))) def create_dt64_based_index(self): data = [Timestamp('2007-01-01 10:11:12.123456Z'), Timestamp('2007-01-01 10:11:13.789123Z')] index = DatetimeIndex(data) return index def test_to_period_millisecond(self): index = self.create_dt64_based_index() period = index.to_period(freq='L') self.assertEqual(2, len(period)) self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L')) self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L')) def test_to_period_microsecond(self): index = self.create_dt64_based_index() period = index.to_period(freq='U') self.assertEqual(2, len(period)) self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U')) self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U')) def test_to_period_tz(self): _skip_if_no_pytz() from dateutil.tz import tzlocal from pytz import utc as UTC xp = date_range('1/1/2000', '4/1/2000').to_period() ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern') result = ts.to_period()[0] expected = ts[0].to_period() self.assertEqual(result, expected) self.assertTrue(ts.to_period().equals(xp)) ts = date_range('1/1/2000', '4/1/2000', tz=UTC) result = ts.to_period()[0] expected = ts[0].to_period() self.assertEqual(result, expected) self.assertTrue(ts.to_period().equals(xp)) ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal()) result = ts.to_period()[0] expected = ts[0].to_period() self.assertEqual(result, expected) self.assertTrue(ts.to_period().equals(xp)) def test_frame_to_period(self): K = 5 from pandas.tseries.period import period_range dr = date_range('1/1/2000', '1/1/2001') pr = period_range('1/1/2000', '1/1/2001') df = DataFrame(randn(len(dr), K), index=dr) df['mix'] = 'a' pts = df.to_period() exp = df.copy() exp.index = pr assert_frame_equal(pts, exp) pts = df.to_period('M') self.assertTrue(pts.index.equals(exp.index.asfreq('M'))) df = df.T pts = df.to_period(axis=1) exp = df.copy() exp.columns = pr assert_frame_equal(pts, exp) pts = df.to_period('M', axis=1) self.assertTrue(pts.columns.equals(exp.columns.asfreq('M'))) self.assertRaises(ValueError, df.to_period, axis=2) def test_timestamp_fields(self): # extra fields from DatetimeIndex like quarter and week idx = tm.makeDateIndex(100) fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end'] for f in fields: expected = getattr(idx, f)[-1] result = getattr(Timestamp(idx[-1]), f) self.assertEqual(result, expected) self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq) self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr) def test_woy_boundary(self): # make sure weeks at year boundaries are correct d = datetime(2013,12,31) result = Timestamp(d).week expected = 1 # ISO standard self.assertEqual(result, expected) d = datetime(2008,12,28) result = Timestamp(d).week expected = 52 # ISO standard self.assertEqual(result, expected) d = datetime(2009,12,31) result = Timestamp(d).week expected = 53 # ISO standard self.assertEqual(result, expected) d = datetime(2010,1,1) result = Timestamp(d).week expected = 53 # ISO standard self.assertEqual(result, expected) d = datetime(2010,1,3) result = Timestamp(d).week expected = 53 # ISO standard self.assertEqual(result, expected) result = np.array([Timestamp(datetime(*args)).week for args in 
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]]) self.assertTrue((result == [52, 52, 53, 53]).all()) def test_timestamp_date_out_of_range(self): self.assertRaises(ValueError, Timestamp, '1676-01-01') self.assertRaises(ValueError, Timestamp, '2263-01-01') # 1475 self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01']) self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)]) def test_timestamp_repr(self): # pre-1900 stamp = Timestamp('1850-01-01', tz='US/Eastern') repr(stamp) iso8601 = '1850-01-01 01:23:45.012345' stamp = Timestamp(iso8601, tz='US/Eastern') result = repr(stamp) self.assertIn(iso8601, result) def test_timestamp_from_ordinal(self): # GH 3042 dt = datetime(2011, 4, 16, 0, 0) ts = Timestamp.fromordinal(dt.toordinal()) self.assertEqual(ts.to_pydatetime(), dt) # with a tzinfo stamp = Timestamp('2011-4-16', tz='US/Eastern') dt_tz = stamp.to_pydatetime() ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern') self.assertEqual(ts.to_pydatetime(), dt_tz) def test_datetimeindex_integers_shift(self): rng = date_range('1/1/2000', periods=20) result = rng + 5 expected = rng.shift(5) self.assertTrue(result.equals(expected)) result = rng - 5 expected = rng.shift(-5) self.assertTrue(result.equals(expected)) def test_astype_object(self): # NumPy 1.6.1 weak ns support rng = date_range('1/1/2000', periods=20) casted = rng.astype('O') exp_values = list(rng) self.assert_numpy_array_equal(casted, exp_values) def test_catch_infinite_loop(self): offset = datetools.DateOffset(minute=5) # blow up, don't loop forever self.assertRaises(Exception, date_range, datetime(2011, 11, 11), datetime(2011, 11, 12), freq=offset) def test_append_concat(self): rng = date_range('5/8/2012 1:45', periods=10, freq='5T') ts = Series(np.random.randn(len(rng)), rng) df = DataFrame(np.random.randn(len(rng), 4), index=rng) result = ts.append(ts) result_df = df.append(df) ex_index = DatetimeIndex(np.tile(rng.values, 2)) self.assertTrue(result.index.equals(ex_index)) self.assertTrue(result_df.index.equals(ex_index)) appended = rng.append(rng) self.assertTrue(appended.equals(ex_index)) appended = rng.append([rng, rng]) ex_index = DatetimeIndex(np.tile(rng.values, 3)) self.assertTrue(appended.equals(ex_index)) # different index names rng1 = rng.copy() rng2 = rng.copy() rng1.name = 'foo' rng2.name = 'bar' self.assertEqual(rng1.append(rng1).name, 'foo') self.assertIsNone(rng1.append(rng2).name) def test_append_concat_tz(self): #GH 2938 _skip_if_no_pytz() rng = date_range('5/8/2012 1:45', periods=10, freq='5T', tz='US/Eastern') rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T', tz='US/Eastern') rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T', tz='US/Eastern') ts = Series(np.random.randn(len(rng)), rng) df = DataFrame(np.random.randn(len(rng), 4), index=rng) ts2 = Series(np.random.randn(len(rng2)), rng2) df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2) result = ts.append(ts2) result_df = df.append(df2) self.assertTrue(result.index.equals(rng3)) self.assertTrue(result_df.index.equals(rng3)) appended = rng.append(rng2) self.assertTrue(appended.equals(rng3)) def test_set_dataframe_column_ns_dtype(self): x = DataFrame([datetime.now(), datetime.now()]) self.assertEqual(x[0].dtype, np.dtype('M8[ns]')) def test_groupby_count_dateparseerror(self): dr = date_range(start='1/1/2012', freq='5min', periods=10) # BAD Example, datetimes first s = Series(np.arange(10), index=[dr, lrange(10)]) grouped = s.groupby(lambda x: x[1] % 2 == 0) result = grouped.count() s = Series(np.arange(10), 
index=[lrange(10), dr]) grouped = s.groupby(lambda x: x[0] % 2 == 0) expected = grouped.count() assert_series_equal(result, expected) def test_datetimeindex_repr_short(self): dr = date_range(start='1/1/2012', periods=1) repr(dr) dr = date_range(start='1/1/2012', periods=2) repr(dr) dr = date_range(start='1/1/2012', periods=3) repr(dr) def test_constructor_int64_nocopy(self): # #1624 arr = np.arange(1000, dtype=np.int64) index = DatetimeIndex(arr) arr[50:100] = -1 self.assertTrue((index.asi8[50:100] == -1).all()) arr = np.arange(1000, dtype=np.int64) index = DatetimeIndex(arr, copy=True) arr[50:100] = -1 self.assertTrue((index.asi8[50:100] != -1).all()) def test_series_interpolate_method_values(self): # #1646 ts = _simple_ts('1/1/2000', '1/20/2000') ts[::2] = np.nan result = ts.interpolate(method='values') exp = ts.interpolate() assert_series_equal(result, exp) def test_frame_datetime64_handling_groupby(self): # it works! df = DataFrame([(3, np.datetime64('2012-07-03')), (3, np.datetime64('2012-07-04'))], columns=['a', 'date']) result = df.groupby('a').first() self.assertEqual(result['date'][3], Timestamp('2012-07-03')) def test_series_interpolate_intraday(self): # #1698 index = pd.date_range('1/1/2012', periods=4, freq='12D') ts = pd.Series([0, 12, 24, 36], index) new_index = index.append(index + pd.DateOffset(days=1)).order() exp = ts.reindex(new_index).interpolate(method='time') index = pd.date_range('1/1/2012', periods=4, freq='12H') ts = pd.Series([0, 12, 24, 36], index) new_index = index.append(index + pd.DateOffset(hours=1)).order() result = ts.reindex(new_index).interpolate(method='time') self.assert_numpy_array_equal(result.values, exp.values) def test_frame_dict_constructor_datetime64_1680(self): dr = date_range('1/1/2012', periods=10) s = Series(dr, index=dr) # it works! DataFrame({'a': 'foo', 'b': s}, index=dr) DataFrame({'a': 'foo', 'b': s.values}, index=dr) def test_frame_datetime64_mixed_index_ctor_1681(self): dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI') ts = Series(dr) # it works! d = DataFrame({'A': 'foo', 'B': ts}, index=dr) self.assertTrue(d['B'].isnull().all()) def test_frame_timeseries_to_records(self): index = date_range('1/1/2000', periods=10) df = DataFrame(np.random.randn(10, 3), index=index, columns=['a', 'b', 'c']) result = df.to_records() result['index'].dtype == 'M8[ns]' result = df.to_records(index=False) def test_frame_datetime64_duplicated(self): dates = date_range('2010-07-01', end='2010-08-05') tst = DataFrame({'symbol': 'AAA', 'date': dates}) result = tst.duplicated(['date', 'symbol']) self.assertTrue((-result).all()) tst = DataFrame({'date': dates}) result = tst.duplicated() self.assertTrue((-result).all()) def test_timestamp_compare_with_early_datetime(self): # e.g. 
datetime.min stamp = Timestamp('2012-01-01') self.assertFalse(stamp == datetime.min) self.assertFalse(stamp == datetime(1600, 1, 1)) self.assertFalse(stamp == datetime(2700, 1, 1)) self.assertNotEqual(stamp, datetime.min) self.assertNotEqual(stamp, datetime(1600, 1, 1)) self.assertNotEqual(stamp, datetime(2700, 1, 1)) self.assertTrue(stamp > datetime(1600, 1, 1)) self.assertTrue(stamp >= datetime(1600, 1, 1)) self.assertTrue(stamp < datetime(2700, 1, 1)) self.assertTrue(stamp <= datetime(2700, 1, 1)) def test_to_html_timestamp(self): rng = date_range('2000-01-01', periods=10) df = DataFrame(np.random.randn(10, 4), index=rng) result = df.to_html() self.assertIn('2000-01-01', result) def test_to_csv_numpy_16_bug(self): frame = DataFrame({'a': date_range('1/1/2000', periods=10)}) buf = StringIO() frame.to_csv(buf) result = buf.getvalue() self.assertIn('2000-01-01', result) def test_series_map_box_timestamps(self): # #2689, #2627 s = Series(date_range('1/1/2000', periods=10)) def f(x): return (x.hour, x.day, x.month) # it works! s.map(f) s.apply(f) DataFrame(s).applymap(f) def test_concat_datetime_datetime64_frame(self): # #2624 rows = [] rows.append([datetime(2010, 1, 1), 1]) rows.append([datetime(2010, 1, 2), 'hi']) df2_obj = DataFrame.from_records(rows, columns=['date', 'test']) ind = date_range(start="2000/1/1", freq="D", periods=10) df1 = DataFrame({'date': ind, 'test':lrange(10)}) # it works! pd.concat([df1, df2_obj]) def test_period_resample(self): # GH3609 s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float') s[10:30] = np.nan expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')]) result = s.to_period().resample('T', kind='period') assert_series_equal(result, expected) result2 = s.resample('T', kind='period') assert_series_equal(result2, expected) def test_period_resample_with_local_timezone(self): # GH5430 _skip_if_no_pytz() import pytz local_timezone = pytz.timezone('America/Los_Angeles') start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc) # 1 day later end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc) index = pd.date_range(start, end, freq='H') series = pd.Series(1, index=index) series = series.tz_convert(local_timezone) result = series.resample('D', kind='period') # Create the expected series expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific expected = pd.Series(1, index=expected_index) assert_series_equal(result, expected) def test_pickle(self): #GH4606 from pandas.compat import cPickle import pickle for pick in [pickle, cPickle]: p = pick.loads(pick.dumps(NaT)) self.assertTrue(p is NaT) idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06']) idx_p = pick.loads(pick.dumps(idx)) self.assertTrue(idx_p[0] == idx[0]) self.assertTrue(idx_p[1] is NaT) self.assertTrue(idx_p[2] == idx[2]) def _simple_ts(start, end, freq='D'): rng = date_range(start, end, freq=freq) return Series(np.random.randn(len(rng)), index=rng) class TestDatetimeIndex(tm.TestCase): _multiprocess_can_split_ = True def test_hash_error(self): index = date_range('20010101', periods=10) with tm.assertRaisesRegexp(TypeError, "unhashable type: %r" % type(index).__name__): hash(index) def test_stringified_slice_with_tz(self): #GH2658 import datetime start=datetime.datetime.now() idx=DatetimeIndex(start=start,freq="1d",periods=10) df=DataFrame(lrange(10),index=idx) df["2013-01-14 
23:44:34.437768-05:00":] # no exception here def test_append_join_nondatetimeindex(self): rng = date_range('1/1/2000', periods=10) idx = Index(['a', 'b', 'c', 'd']) result = rng.append(idx) tm.assert_isinstance(result[0], Timestamp) # it works rng.join(idx, how='outer') def test_astype(self): rng = date_range('1/1/2000', periods=10) result = rng.astype('i8') self.assert_numpy_array_equal(result, rng.asi8) def test_to_period_nofreq(self): idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04']) self.assertRaises(ValueError, idx.to_period) idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'], freq='infer') idx.to_period() def test_000constructor_resolution(self): # 2252 t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1) idx = DatetimeIndex([t1]) self.assertEqual(idx.nanosecond[0], t1.nanosecond) def test_constructor_coverage(self): rng = date_range('1/1/2000', periods=10.5) exp = date_range('1/1/2000', periods=10) self.assertTrue(rng.equals(exp)) self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000', periods='foo', freq='D') self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000', end='1/10/2000') self.assertRaises(ValueError, DatetimeIndex, '1/1/2000') # generator expression gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10)) result = DatetimeIndex(gen) expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i) for i in range(10)]) self.assertTrue(result.equals(expected)) # NumPy string array strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03']) result = DatetimeIndex(strings) expected = DatetimeIndex(strings.astype('O')) self.assertTrue(result.equals(expected)) from_ints = DatetimeIndex(expected.asi8) self.assertTrue(from_ints.equals(expected)) # non-conforming self.assertRaises(ValueError, DatetimeIndex, ['2000-01-01', '2000-01-02', '2000-01-04'], freq='D') self.assertRaises(ValueError, DatetimeIndex, start='2011-01-01', freq='b') self.assertRaises(ValueError, DatetimeIndex, end='2011-01-01', freq='B') self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D') def test_constructor_name(self): idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A', name='TEST') self.assertEqual(idx.name, 'TEST') def test_comparisons_coverage(self): rng = date_range('1/1/2000', periods=10) # raise TypeError for now self.assertRaises(TypeError, rng.__lt__, rng[3].value) result = rng == list(rng) exp = rng == rng self.assert_numpy_array_equal(result, exp) def test_map(self): rng = date_range('1/1/2000', periods=10) f = lambda x: x.strftime('%Y%m%d') result = rng.map(f) exp = [f(x) for x in rng] self.assert_numpy_array_equal(result, exp) def test_add_union(self): rng = date_range('1/1/2000', periods=5) rng2 = date_range('1/6/2000', periods=5) result = rng + rng2 expected = rng.union(rng2) self.assertTrue(result.equals(expected)) def test_misc_coverage(self): rng = date_range('1/1/2000', periods=5) result = rng.groupby(rng.day) tm.assert_isinstance(list(result.values())[0][0], Timestamp) idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02']) self.assertTrue(idx.equals(list(idx))) non_datetime = Index(list('abc')) self.assertFalse(idx.equals(list(non_datetime))) def test_union_coverage(self): idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02']) ordered = DatetimeIndex(idx.order(), freq='infer') result = ordered.union(idx) self.assertTrue(result.equals(ordered)) result = ordered[:0].union(ordered) self.assertTrue(result.equals(ordered)) self.assertEqual(result.freq, ordered.freq) def test_union_bug_1730(self): rng_a 
= date_range('1/1/2012', periods=4, freq='3H') rng_b = date_range('1/1/2012', periods=4, freq='4H') result = rng_a.union(rng_b) exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b)))) self.assertTrue(result.equals(exp)) def test_union_bug_1745(self): left = DatetimeIndex(['2012-05-11 15:19:49.695000']) right = DatetimeIndex(['2012-05-29 13:04:21.322000', '2012-05-11 15:27:24.873000', '2012-05-11 15:31:05.350000']) result = left.union(right) exp = DatetimeIndex(sorted(set(list(left)) | set(list(right)))) self.assertTrue(result.equals(exp)) def test_union_bug_4564(self): from pandas import DateOffset left = date_range("2013-01-01", "2013-02-01") right = left + DateOffset(minutes=15) result = left.union(right) exp = DatetimeIndex(sorted(set(list(left)) | set(list(right)))) self.assertTrue(result.equals(exp)) def test_intersection_bug_1708(self): from pandas import DateOffset index_1 = date_range('1/1/2012', periods=4, freq='12H') index_2 = index_1 + DateOffset(hours=1) result = index_1 & index_2 self.assertEqual(len(result), 0) # def test_add_timedelta64(self): # rng = date_range('1/1/2000', periods=5) # delta = rng.values[3] - rng.values[1] # result = rng + delta # expected = rng + timedelta(2) # self.assertTrue(result.equals(expected)) def test_get_duplicates(self): idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02', '2000-01-03', '2000-01-03', '2000-01-04']) result = idx.get_duplicates() ex = DatetimeIndex(['2000-01-02', '2000-01-03']) self.assertTrue(result.equals(ex)) def test_argmin_argmax(self): idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02']) self.assertEqual(idx.argmin(), 1) self.assertEqual(idx.argmax(), 0) def test_order(self): idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02']) ordered = idx.order() self.assertTrue(ordered.is_monotonic) ordered = idx.order(ascending=False) self.assertTrue(ordered[::-1].is_monotonic) ordered, dexer = idx.order(return_indexer=True) self.assertTrue(ordered.is_monotonic) self.assert_numpy_array_equal(dexer, [1, 2, 0]) ordered, dexer = idx.order(return_indexer=True, ascending=False) self.assertTrue(ordered[::-1].is_monotonic) self.assert_numpy_array_equal(dexer, [0, 2, 1]) def test_insert(self): idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02']) result = idx.insert(2, datetime(2000, 1, 5)) exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05', '2000-01-02']) self.assertTrue(result.equals(exp)) # insertion of non-datetime should coerce to object index result = idx.insert(1, 'inserted') expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1), datetime(2000, 1, 2)]) self.assertNotIsInstance(result, DatetimeIndex) tm.assert_index_equal(result, expected) idx = date_range('1/1/2000', periods=3, freq='M') result = idx.insert(3, datetime(2000, 4, 30)) self.assertEqual(result.freqstr, 'M') def test_map_bug_1677(self): index = DatetimeIndex(['2012-04-25 09:30:00.393000']) f = index.asof result = index.map(f) expected = np.array([f(index[0])]) self.assert_numpy_array_equal(result, expected) def test_groupby_function_tuple_1677(self): df = DataFrame(np.random.rand(100), index=date_range("1/1/2000", periods=100)) monthly_group = df.groupby(lambda x: (x.year, x.month)) result = monthly_group.mean() tm.assert_isinstance(result.index[0], tuple) def test_append_numpy_bug_1681(self): # another datetime64 bug dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI') a = DataFrame() c = DataFrame({'A': 'foo', 'B': dr}, index=dr) result = a.append(c) self.assertTrue((result['B'] == dr).all()) 
def test_isin(self): index = tm.makeDateIndex(4) result = index.isin(index) self.assertTrue(result.all()) result = index.isin(list(index)) self.assertTrue(result.all()) assert_almost_equal(index.isin([index[2], 5]), [False, False, True, False]) def test_union(self): i1 = Int64Index(np.arange(0, 20, 2)) i2 = Int64Index(np.arange(10, 30, 2)) result = i1.union(i2) expected = Int64Index(np.arange(0, 30, 2)) self.assert_numpy_array_equal(result, expected) def test_union_with_DatetimeIndex(self): i1 = Int64Index(np.arange(0, 20, 2)) i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D') i1.union(i2) # Works i2.union(i1) # Fails with "AttributeError: can't set attribute" def test_time(self): rng = pd.date_range('1/1/2000', freq='12min', periods=10) result = pd.Index(rng).time expected = [t.time() for t in rng] self.assertTrue((result == expected).all()) def test_date(self): rng = pd.date_range('1/1/2000', freq='12H', periods=10) result = pd.Index(rng).date expected = [t.date() for t in rng] self.assertTrue((result == expected).all()) def test_does_not_convert_mixed_integer(self): df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs: randn(), r_idx_type='i', c_idx_type='dt') cols = df.columns.join(df.index, how='outer') joined = cols.join(df.columns) self.assertEqual(cols.dtype, np.dtype('O')) self.assertEqual(cols.dtype, joined.dtype) assert_array_equal(cols.values, joined.values) def test_slice_keeps_name(self): # GH4226 st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles') et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles') dr = pd.date_range(st, et, freq='H', name='timebucket') self.assertEqual(dr[1:].name, dr.name) def test_join_self(self): index = date_range('1/1/2000', periods=10) kinds = 'outer', 'inner', 'left', 'right' for kind in kinds: joined = index.join(index, how=kind) self.assertIs(index, joined) def assert_index_parameters(self, index): assert index.freq == '40960N' assert index.inferred_freq == '40960N' def test_ns_index(self): if _np_version_under1p7: raise nose.SkipTest nsamples = 400 ns = int(1e9 / 24414) dtstart = np.datetime64('2012-09-20T00:00:00') dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns') freq = ns * pd.datetools.Nano() index = pd.DatetimeIndex(dt, freq=freq, name='time') self.assert_index_parameters(index) new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq) self.assert_index_parameters(new_index) def test_join_with_period_index(self): df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args: np.random.randint(2), c_idx_type='p', r_idx_type='dt') s = df.iloc[:5, 0] joins = 'left', 'right', 'inner', 'outer' for join in joins: with tm.assertRaisesRegexp(ValueError, 'can only call with other ' 'PeriodIndex-ed objects'): df.columns.join(s.index, how=join) def test_factorize(self): idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02', '2014-02', '2014-03', '2014-03']) exp_arr = np.array([0, 0, 1, 1, 2, 2]) exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03']) arr, idx = idx1.factorize() self.assert_numpy_array_equal(arr, exp_arr) self.assertTrue(idx.equals(exp_idx)) arr, idx = idx1.factorize(sort=True) self.assert_numpy_array_equal(arr, exp_arr) self.assertTrue(idx.equals(exp_idx)) # tz must be preserved idx1 = idx1.tz_localize('Asia/Tokyo') exp_idx = exp_idx.tz_localize('Asia/Tokyo') arr, idx = idx1.factorize() self.assert_numpy_array_equal(arr, exp_arr) self.assertTrue(idx.equals(exp_idx)) idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01', 
'2014-03', '2014-01']) exp_arr = np.array([2, 2, 1, 0, 2, 0]) exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03']) arr, idx = idx2.factorize(sort=True) self.assert_numpy_array_equal(arr, exp_arr) self.assertTrue(idx.equals(exp_idx)) exp_arr = np.array([0, 0, 1, 2, 0, 2]) exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01']) arr, idx = idx2.factorize() self.assert_numpy_array_equal(arr, exp_arr) self.assertTrue(idx.equals(exp_idx)) # freq must be preserved idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo') exp_arr = np.array([0, 1, 2, 3]) arr, idx = idx3.factorize() self.assert_numpy_array_equal(arr, exp_arr) self.assertTrue(idx.equals(idx3)) class TestDatetime64(tm.TestCase): """ Also test support for datetime64[ns] in Series / DataFrame """ def setUp(self): dti = DatetimeIndex(start=datetime(2005, 1, 1), end=datetime(2005, 1, 10), freq='Min') self.series = Series(rand(len(dti)), dti) def test_datetimeindex_accessors(self): dti = DatetimeIndex( freq='D', start=datetime(1998, 1, 1), periods=365) self.assertEqual(dti.year[0], 1998) self.assertEqual(dti.month[0], 1) self.assertEqual(dti.day[0], 1) self.assertEqual(dti.hour[0], 0) self.assertEqual(dti.minute[0], 0) self.assertEqual(dti.second[0], 0) self.assertEqual(dti.microsecond[0], 0) self.assertEqual(dti.dayofweek[0], 3) self.assertEqual(dti.dayofyear[0], 1) self.assertEqual(dti.dayofyear[120], 121) self.assertEqual(dti.weekofyear[0], 1) self.assertEqual(dti.weekofyear[120], 18) self.assertEqual(dti.quarter[0], 1) self.assertEqual(dti.quarter[120], 2) self.assertEqual(dti.is_month_start[0], True) self.assertEqual(dti.is_month_start[1], False) self.assertEqual(dti.is_month_start[31], True) self.assertEqual(dti.is_quarter_start[0], True) self.assertEqual(dti.is_quarter_start[90], True) self.assertEqual(dti.is_year_start[0], True) self.assertEqual(dti.is_year_start[364], False) self.assertEqual(dti.is_month_end[0], False) self.assertEqual(dti.is_month_end[30], True) self.assertEqual(dti.is_month_end[31], False) self.assertEqual(dti.is_month_end[364], True) self.assertEqual(dti.is_quarter_end[0], False) self.assertEqual(dti.is_quarter_end[30], False) self.assertEqual(dti.is_quarter_end[89], True) self.assertEqual(dti.is_quarter_end[364], True) self.assertEqual(dti.is_year_end[0], False) self.assertEqual(dti.is_year_end[364], True) self.assertEqual(len(dti.year), 365) self.assertEqual(len(dti.month), 365) self.assertEqual(len(dti.day), 365) self.assertEqual(len(dti.hour), 365) self.assertEqual(len(dti.minute), 365) self.assertEqual(len(dti.second), 365) self.assertEqual(len(dti.microsecond), 365) self.assertEqual(len(dti.dayofweek), 365) self.assertEqual(len(dti.dayofyear), 365) self.assertEqual(len(dti.weekofyear), 365) self.assertEqual(len(dti.quarter), 365) self.assertEqual(len(dti.is_month_start), 365) self.assertEqual(len(dti.is_month_end), 365) self.assertEqual(len(dti.is_quarter_start), 365) self.assertEqual(len(dti.is_quarter_end), 365) self.assertEqual(len(dti.is_year_start), 365) self.assertEqual(len(dti.is_year_end), 365) dti = DatetimeIndex( freq='BQ-FEB', start=datetime(1998, 1, 1), periods=4) self.assertEqual(sum(dti.is_quarter_start), 0) self.assertEqual(sum(dti.is_quarter_end), 4) self.assertEqual(sum(dti.is_year_start), 0) self.assertEqual(sum(dti.is_year_end), 1) # Ensure is_start/end accessors throw ValueError for CustomBusinessDay, CBD requires np >= 1.7 if not _np_version_under1p7: bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu') dti = date_range(datetime(2013, 4, 30), 
periods=5, freq=bday_egypt) self.assertRaises(ValueError, lambda: dti.is_month_start) dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03']) self.assertEqual(dti.is_month_start[0], 1) tests = [ (Timestamp('2013-06-01', offset='M').is_month_start, 1), (Timestamp('2013-06-01', offset='BM').is_month_start, 0), (Timestamp('2013-06-03', offset='M').is_month_start, 0), (Timestamp('2013-06-03', offset='BM').is_month_start, 1), (Timestamp('2013-02-28', offset='Q-FEB').is_month_end, 1), (Timestamp('2013-02-28', offset='Q-FEB').is_quarter_end, 1), (Timestamp('2013-02-28', offset='Q-FEB').is_year_end, 1), (Timestamp('2013-03-01', offset='Q-FEB').is_month_start, 1), (Timestamp('2013-03-01', offset='Q-FEB').is_quarter_start, 1), (Timestamp('2013-03-01', offset='Q-FEB').is_year_start, 1), (Timestamp('2013-03-31', offset='QS-FEB').is_month_end, 1), (Timestamp('2013-03-31', offset='QS-FEB').is_quarter_end, 0), (Timestamp('2013-03-31', offset='QS-FEB').is_year_end, 0), (Timestamp('2013-02-01', offset='QS-FEB').is_month_start, 1), (Timestamp('2013-02-01', offset='QS-FEB').is_quarter_start, 1), (Timestamp('2013-02-01', offset='QS-FEB').is_year_start, 1), (Timestamp('2013-06-30', offset='BQ').is_month_end, 0), (Timestamp('2013-06-30', offset='BQ').is_quarter_end, 0), (Timestamp('2013-06-30', offset='BQ').is_year_end, 0), (Timestamp('2013-06-28', offset='BQ').is_month_end, 1), (Timestamp('2013-06-28', offset='BQ').is_quarter_end, 1), (Timestamp('2013-06-28', offset='BQ').is_year_end, 0), (Timestamp('2013-06-30', offset='BQS-APR').is_month_end, 0), (Timestamp('2013-06-30', offset='BQS-APR').is_quarter_end, 0), (Timestamp('2013-06-30', offset='BQS-APR').is_year_end, 0), (Timestamp('2013-06-28', offset='BQS-APR').is_month_end, 1), (Timestamp('2013-06-28', offset='BQS-APR').is_quarter_end, 1), (Timestamp('2013-03-29', offset='BQS-APR').is_year_end, 1), (Timestamp('2013-11-01', offset='AS-NOV').is_year_start, 1), (Timestamp('2013-10-31', offset='AS-NOV').is_year_end, 1)] for ts, value in tests: self.assertEqual(ts, value) def test_nanosecond_field(self): dti = DatetimeIndex(np.arange(10)) self.assert_numpy_array_equal(dti.nanosecond, np.arange(10)) def test_datetimeindex_diff(self): dti1 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31), periods=100) dti2 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31), periods=98) self.assertEqual(len(dti1.diff(dti2)), 2) def test_fancy_getitem(self): dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1), end=datetime(2010, 1, 1)) s = Series(np.arange(len(dti)), index=dti) self.assertEqual(s[48], 48) self.assertEqual(s['1/2/2009'], 48) self.assertEqual(s['2009-1-2'], 48) self.assertEqual(s[datetime(2009, 1, 2)], 48) self.assertEqual(s[lib.Timestamp(datetime(2009, 1, 2))], 48) self.assertRaises(KeyError, s.__getitem__, '2009-1-3') assert_series_equal(s['3/6/2009':'2009-06-05'], s[datetime(2009, 3, 6):datetime(2009, 6, 5)]) def test_fancy_setitem(self): dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1), end=datetime(2010, 1, 1)) s = Series(np.arange(len(dti)), index=dti) s[48] = -1 self.assertEqual(s[48], -1) s['1/2/2009'] = -2 self.assertEqual(s[48], -2) s['1/2/2009':'2009-06-05'] = -3 self.assertTrue((s[48:54] == -3).all()) def test_datetimeindex_constructor(self): arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04'] self.assertRaises(Exception, DatetimeIndex, arr) arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'] idx1 = DatetimeIndex(arr) arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04'] idx2 = 
DatetimeIndex(arr) arr = [lib.Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005', '2005-01-04'] idx3 = DatetimeIndex(arr) arr = np.array(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'], dtype='O') idx4 = DatetimeIndex(arr) arr = to_datetime(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']) idx5 = DatetimeIndex(arr) arr = to_datetime( ['1/1/2005', '1/2/2005', 'Jan 3, 2005', '2005-01-04']) idx6 = DatetimeIndex(arr) idx7 = DatetimeIndex(['12/05/2007', '25/01/2008'], dayfirst=True) idx8 = DatetimeIndex(['2007/05/12', '2008/01/25'], dayfirst=False, yearfirst=True) self.assertTrue(idx7.equals(idx8)) for other in [idx2, idx3, idx4, idx5, idx6]: self.assertTrue((idx1.values == other.values).all()) sdate = datetime(1999, 12, 25) edate = datetime(2000, 1, 1) idx = DatetimeIndex(start=sdate, freq='1B', periods=20) self.assertEqual(len(idx), 20) self.assertEqual(idx[0], sdate + 0 * dt.bday) self.assertEqual(idx.freq, 'B') idx = DatetimeIndex(end=edate, freq=('D', 5), periods=20) self.assertEqual(len(idx), 20) self.assertEqual(idx[-1], edate) self.assertEqual(idx.freq, '5D') idx1 = DatetimeIndex(start=sdate, end=edate, freq='W-SUN') idx2 = DatetimeIndex(start=sdate, end=edate, freq=dt.Week(weekday=6)) self.assertEqual(len(idx1), len(idx2)) self.assertEqual(idx1.offset, idx2.offset) idx1 = DatetimeIndex(start=sdate, end=edate, freq='QS') idx2 = DatetimeIndex(start=sdate, end=edate, freq=
dt.QuarterBegin(startingMonth=1)
pandas.core.datetools.QuarterBegin
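The completion above uses the legacy pandas.core.datetools namespace; in current pandas the same offset is exposed as pd.offsets.QuarterBegin. A minimal, self-contained sketch (not part of the original test) of the equivalence the constructor test relies on, namely that freq='QS' and an explicit QuarterBegin(startingMonth=1) produce the same index:

import pandas as pd

# 'QS' defaults to quarter starts anchored in January, i.e. QuarterBegin(startingMonth=1).
idx_alias = pd.date_range('1999-12-25', '2000-06-01', freq='QS')
idx_offset = pd.date_range('1999-12-25', '2000-06-01',
                           freq=pd.offsets.QuarterBegin(startingMonth=1))
assert idx_alias.equals(idx_offset)
print(idx_alias)   # DatetimeIndex(['2000-01-01', '2000-04-01'], ...)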
import pandas as pd import numpy as np data_notes = pd.read_pickle('out/annotated_note_all.pkl') file1 = 'AP_SURGERY_PROCEDURE.xlsx' x1 = pd.ExcelFile(file1) data_surgeries = x1.parse('Sheet1') print(data_notes.shape) #(145556, 8) print(data_surgeries.shape) #(105976, 16) dep_affirmed = data_notes.loc[data_notes['STATUS_FINAL'].isin(['Affirmed'])] print(dep_affirmed.shape) #(138233, 8) dep_negated = data_notes.loc[data_notes['STATUS_FINAL'].isin(['Negated'])] print(dep_negated.shape) #(7323, 8) data_surgeries_required = data_surgeries[['PAT_DEID','START_DATE']] dep_affirmed_surgeries = pd.merge(dep_affirmed, data_surgeries_required, on='PAT_DEID', how='inner') print(dep_affirmed_surgeries.shape) #(338977, 9) print(dep_affirmed.PAT_DEID.nunique()) #5530 print(dep_affirmed_surgeries.PAT_DEID.nunique()) #4501 dep_affirmed_surgeries1 = dep_affirmed_surgeries.copy() dep_affirmed_surgeries1['START_DATE_start'] = pd.to_datetime(dep_affirmed_surgeries1['START_DATE'],format='%d-%b-%y') dep_affirmed_surgeries1['START_DATE_end'] = dep_affirmed_surgeries1['START_DATE_start'] - pd.to_timedelta(365,unit='d') dep_affirmed_surgeries1['NOTE_DATE_dt'] = pd.to_datetime(dep_affirmed_surgeries1['NOTE_DATE'],format='%d-%b-%y') dep_affirmed_surgeries1['ENCOUNTER_DATE_dt'] = pd.to_datetime(dep_affirmed_surgeries1['ENCOUNTER_DATE'],format='%d-%b-%y') dep_aff_sur_1year = dep_affirmed_surgeries.loc[(dep_affirmed_surgeries1['ENCOUNTER_DATE_dt'] <= dep_affirmed_surgeries1['START_DATE_start']) & (dep_affirmed_surgeries1['ENCOUNTER_DATE_dt'] >= dep_affirmed_surgeries1['START_DATE_end'])] print(dep_aff_sur_1year.shape) #(86248, 9) print(dep_aff_sur_1year.PAT_DEID.nunique()) #3663 dep_aff_sur_1year.to_pickle("out/depression_affirmed_1yr.pkl") writer =
pd.ExcelWriter('out/depression_affirmed_1yr.xlsx')
pandas.ExcelWriter
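A short illustrative sketch of the pd.ExcelWriter call completed above, showing how several frames can share one workbook. The frames and file name here are toy stand-ins for the real cohorts, and an Excel engine such as openpyxl is assumed to be installed:

import pandas as pd

notes = pd.DataFrame({'PAT_DEID': [1, 2], 'STATUS_FINAL': ['Affirmed', 'Negated']})
surgeries = pd.DataFrame({'PAT_DEID': [1], 'START_DATE': ['01-Jan-20']})
with pd.ExcelWriter('example_cohort.xlsx') as writer:   # hypothetical output file
    notes.to_excel(writer, sheet_name='notes', index=False)
    surgeries.to_excel(writer, sheet_name='surgeries', index=False)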
# -*- coding: utf-8 -*- """ Main. """ import logging import re import os import sys import gc import tqdm import pandas as pd import numpy as np from scipy import optimize from functools import partial import matplotlib.pyplot as plt from riana import accmass, constants, models, params, __version__ def strip_concat(sequence: str, ) -> str: """ Cleans up concat sequences (peptide_charge) and remove modifications to return peptide string for labeling site calculations :param sequence: concat sequence containing charge and modificaitons :return: """ # 2021-05-18 strip all N-terminal n from Comet sequence = re.sub('^n', '', sequence) # Strip all modifications sequence = re.sub('\\[.*?\\]', '', sequence) # Strip the underscore and charge sequence = re.sub('_[0-9]+', '', sequence) return sequence def calculate_a0(sequence: str, label: str, ) -> float: """ Calculates the initial isotope enrichment of a peptide prior to heavy water labeling :param sequence: str: concat sequences :param label: str: aa, hw, or o18, if aa, return 1 assuming no heavy prior to labeling :return: float: mi at time 0 """ if label == 'aa': return 1 else: sequence = strip_concat(sequence) res_atoms = accmass.count_atoms(sequence) a0 = np.product([np.power(constants.iso_abundances[i], res_atoms[i]) for i, v in enumerate(res_atoms)]) return a0 def calculate_label_n(sequence: str, label: str, ) -> float: """ Calculates labeling sites of the peptide sequence in heavy water or amino acid labeling :param sequence: the peptide sequence :param label: aa, hw, or o18; if aa, only return the labelable residues :return: """ # strip modification site and charge from concat sequence sequence = strip_concat(sequence) # if amino acid labeling, return number of labeled residues if label == 'aa': return sequence.count(params.labeled_residue) # if d2o, return the number of labeling site in heavy water labeling elif label == 'hw': return sum([constants.label_hydrogens.get(char) for char in sequence]) # else if o18, return the number of labeling sites for o18 else: return sum([constants.label_oxygens.get(char) for char in sequence]) - 1. 
def calculate_fs(a, a_0, a_max): """ Calculates fractional synthesis based on a_t, a_0 (initial), and a_max (asymptote) :param a: mi at a particular time :param a_0: initial mi value before label onset :param a_max: final mi value at plateau based on labeling site and precursor RIA :return: """ # catch errors from no ria or no labeling site if a_max - a_0 == 0: # repeat an array of 0 if the input is an ndarray, otherwise return 0 return np.repeat(0, len(a)) if isinstance(a, np.ndarray) else 0 else: return (a-a_0)/(a_max-a_0) def fit_all(args): """ Performs kinetic curve-fitting of integration output using a kinetics model returns a csv file containing the peptide concatamer, k_deg, sd, and r2 :param args: :return: """ # # parse arguments # riana_list = args.riana_path # list of integration output model_pars = {'k_p': args.kp, 'k_r': args.kr, 'r_p': args.rp} q_threshold = args.q_value # peptide q value threshold t_threshold = args.depth # minimal number of time points threshold ria_max = args.ria # final isotope enrichment level (e.g., 0.046) outdir = args.out # output directory label_ = args.label # select model if args.model == 'simple': model = models.one_exponent elif args.model == 'guan': model = models.two_compartment_guan elif args.model == 'fornasiero': model = models.two_compartment_fornasiero else: raise Exception('Unknown kinetics model.') # if not os.path.exists(outdir): # os.makedirs(outdir) if args.thread: try: num_threads = min(os.cpu_count() * 4, int(args.thread)) except ValueError or TypeError: num_threads = 1 # os.cpu_count() * 4 else: num_threads = 1 # os.cpu_count() * 4 # # logging # fit_log = logging.getLogger('riana.fit') fit_log.setLevel(logging.DEBUG) # create file handler which logs even debug messages fh = logging.FileHandler(os.path.join(outdir, 'riana_fit.log')) fh.setLevel(logging.INFO) # create console handler with a higher log level ch = logging.StreamHandler() ch.setLevel(logging.ERROR) # create formatter and add it to the handlers formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') fh.setFormatter(formatter) ch.setFormatter(formatter) # add the handlers to the logger fit_log.addHandler(fh) fit_log.addHandler(ch) fit_log.info(args) fit_log.info(__version__) # # read the integration output files in # rdf =
pd.DataFrame()
pandas.DataFrame
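The excerpt stops at rdf = pd.DataFrame(). A common follow-up pattern (assumed here for illustration, not taken from riana itself) is to accumulate each integration output into that frame; toy in-memory blocks stand in for the real tab-separated files:

import pandas as pd

rdf = pd.DataFrame()                                        # empty frame, as in the excerpt
toy_outputs = [{'concat': ['PEPTIDEK_2'], 'm0': [0.62]},    # stand-ins for real output files
               {'concat': ['PEPTIDEK_2'], 'm0': [0.55]}]
frames = []
for sample_id, block in enumerate(toy_outputs):
    df = pd.DataFrame(block)
    df['sample'] = sample_id                                # tag rows with their source
    frames.append(df)
if frames:
    rdf = pd.concat(frames, ignore_index=True)
print(rdf)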
# module model import pandas as pd from fbprophet import Prophet import matplotlib.pyplot as plt from sklearn import metrics, ensemble, model_selection from sklearn.preprocessing import MinMaxScaler from math import sqrt import numpy as np import datetime from dateutil import relativedelta import os import io import json import base64 from xgboost import XGBRegressor import tensorflow as tf from tensorflow import keras from statsmodels.tsa.ar_model import AutoReg np.random.seed(42) tf.random.set_seed(42) def buildProphet(train_data_path, test_data_path): print("\nBuilding Prophet model ...") df =
pd.read_csv(train_data_path)
pandas.read_csv
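A minimal sketch of the pd.read_csv call completed above, using synthetic in-memory data rather than the project's CSV. Prophet expects a 'ds' datetime column and a 'y' value column, so the loader typically parses dates on read:

import io
import pandas as pd

csv_text = "ds,y\n2021-01-01,10\n2021-01-02,12\n2021-01-03,11\n"   # toy training data
df = pd.read_csv(io.StringIO(csv_text), parse_dates=['ds'])
print(df.dtypes)   # 'ds' comes back as datetime64[ns]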
# -*- coding: utf-8 -*- import pytest import numpy as np import pandas as pd from pandas import Timestamp def create_dataframe(tuple_data): """Create pandas df from tuple data with a header.""" return pd.DataFrame.from_records(tuple_data[1:], columns=tuple_data[0]) ### REUSABLE FIXTURES -------------------------------------------------------- @pytest.fixture() def indices_3years(): """Three indices over 3 years.""" return pd.DataFrame.from_records( [ (Timestamp('2012-01-01 00:00:00'), 100.0, 100.0, 100.0), (Timestamp('2012-02-01 00:00:00'), 101.239553643, 96.60525323799999, 97.776838217), (Timestamp('2012-03-01 00:00:00'), 102.03030533, 101.450821724, 96.59101862), (Timestamp('2012-04-01 00:00:00'), 104.432402661, 98.000263617, 94.491213369), (Timestamp('2012-05-01 00:00:00'), 105.122830333, 95.946873831, 93.731891785), (Timestamp('2012-06-01 00:00:00'), 103.976692567, 97.45914568100001, 90.131064035), (Timestamp('2012-07-01 00:00:00'), 106.56768678200001, 94.788761174, 94.53487522), (Timestamp('2012-08-01 00:00:00'), 106.652151036, 98.478217946, 92.56165627700001), (Timestamp('2012-09-01 00:00:00'), 108.97290730799999, 99.986521241, 89.647230903), (Timestamp('2012-10-01 00:00:00'), 106.20124385700001, 99.237117891, 92.27819603799999), (Timestamp('2012-11-01 00:00:00'), 104.11913898700001, 100.993436318, 95.758970985), (Timestamp('2012-12-01 00:00:00'), 107.76600978, 99.60424011299999, 95.697091336), (Timestamp('2013-01-01 00:00:00'), 98.74350698299999, 100.357120656, 100.24073830200001), (Timestamp('2013-02-01 00:00:00'), 100.46305431100001, 99.98213513200001, 99.499007278), (Timestamp('2013-03-01 00:00:00'), 101.943121499, 102.034291064, 96.043392231), (Timestamp('2013-04-01 00:00:00'), 99.358987741, 106.513055039, 97.332012817), (Timestamp('2013-05-01 00:00:00'), 97.128074038, 106.132168479, 96.799806436), (Timestamp('2013-06-01 00:00:00'), 94.42944162, 106.615734964, 93.72086654600001), (Timestamp('2013-07-01 00:00:00'), 94.872365481, 103.069773446, 94.490515359), (Timestamp('2013-08-01 00:00:00'), 98.239415397, 105.458081805, 93.57271149299999), (Timestamp('2013-09-01 00:00:00'), 100.36774827100001, 106.144579258, 90.314524375), (Timestamp('2013-10-01 00:00:00'), 100.660205114, 101.844838294, 88.35136848399999), (Timestamp('2013-11-01 00:00:00'), 101.33948384799999, 100.592230114, 93.02874928899999), (Timestamp('2013-12-01 00:00:00'), 101.74876982299999, 102.709038791, 93.38277933200001), (Timestamp('2014-01-01 00:00:00'), 101.73439491, 99.579700011, 104.755837919), (Timestamp('2014-02-01 00:00:00'), 100.247760523, 100.76732961, 100.197855834), (Timestamp('2014-03-01 00:00:00'), 102.82080245600001, 99.763171909, 100.252537549), (Timestamp('2014-04-01 00:00:00'), 104.469889684, 96.207920184, 98.719797067), (Timestamp('2014-05-01 00:00:00'), 105.268899775, 99.357641836, 99.99786671), (Timestamp('2014-06-01 00:00:00'), 107.41649204299999, 100.844974811, 96.463821506), (Timestamp('2014-07-01 00:00:00'), 110.146087435, 102.01075029799999, 94.332755083), (Timestamp('2014-08-01 00:00:00'), 109.17068484100001, 101.562418115, 91.15410351700001), (Timestamp('2014-09-01 00:00:00'), 109.872892919, 101.471759564, 90.502291475), (Timestamp('2014-10-01 00:00:00'), 108.508436998, 98.801947543, 93.97423224399999), (
Timestamp('2014-11-01 00:00:00')
pandas.Timestamp
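A small self-contained sketch of the pattern used by the fixture above: records keyed by pd.Timestamp fed through DataFrame.from_records, then indexed by the timestamp column. Column names and values here are illustrative only:

import pandas as pd

records = [
    (pd.Timestamp('2012-01-01'), 100.0, 100.0, 100.0),
    (pd.Timestamp('2012-02-01'), 101.2, 96.6, 97.8),
]
df = pd.DataFrame.from_records(records, columns=['date', 'idx1', 'idx2', 'idx3'])
df = df.set_index('date')
print(df.index.dtype)   # datetime64[ns]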
# -*- coding: utf-8 -*- """ pytests for resource extractors """ from click.testing import CliRunner import numpy as np import os import pandas as pd from pandas.testing import assert_frame_equal import pytest import tempfile import traceback from rex.resource_extraction.wind_cli import main from rex.resource_extraction.resource_extraction import (MultiFileWindX, MultiFileNSRDBX, MultiTimeWindX, MultiTimeNSRDBX, MultiYearWindX, MultiYearNSRDBX, NSRDBX, WindX, WaveX) from rex.resource_extraction.resource_extraction import TREE_DIR from rex.utilities.exceptions import ResourceValueError from rex.utilities.loggers import LOGGERS from rex import TESTDATADIR @pytest.fixture(scope="module") def runner(): """ cli runner """ return CliRunner() @pytest.fixture def NSRDBX_cls(): """ Init NSRDB resource handler """ path = os.path.join(TESTDATADIR, 'nsrdb/ri_100_nsrdb_2012.h5') return NSRDBX(path) @pytest.fixture def MultiFileNSRDBX_cls(): """ Init MultiFileNSRDB resource handler """ path = os.path.join(TESTDATADIR, 'nsrdb', 'nsrdb*2018.h5') return MultiFileNSRDBX(path) @pytest.fixture def MultiYearNSRDBX_cls(): """ Init MultiYearNSRDB resource handler """ path = os.path.join(TESTDATADIR, 'nsrdb/ri_100_nsrdb_*.h5') return MultiYearNSRDBX(path) @pytest.fixture def MultiTimeNSRDBX_cls(): """ Init MulitTimeNSRDB resource handler """ path = os.path.join(TESTDATADIR, 'nsrdb/ri_100_nsrdb_*.h5') return MultiTimeNSRDBX(path) @pytest.fixture def WindX_cls(): """ Init WindResource resource handler """ path = os.path.join(TESTDATADIR, 'wtk/ri_100_wtk_2012.h5') return WindX(path) @pytest.fixture def MultiFileWindX_cls(): """ Init WindResource resource handler """ path = os.path.join(TESTDATADIR, 'wtk', 'wtk*m.h5') return MultiFileWindX(path) @pytest.fixture def MultiYearWindX_cls(): """ Init MultiYearWindResource resource handler """ path = os.path.join(TESTDATADIR, 'wtk/ri_100_wtk_*.h5') return MultiYearWindX(path) @pytest.fixture def MultiTimeWindX_cls(): """ Init MultiTimeWindResource resource handler """ path = os.path.join(TESTDATADIR, 'wtk/ri_100_wtk_*.h5') return MultiTimeWindX(path) def check_props(res_cls): """ Test extraction class properties """ time_index = res_cls.time_index meta = res_cls.meta res_shape = (len(time_index), len(meta)) assert len(res_cls) == len(time_index) assert res_cls.shape == res_shape assert np.all(np.isin(['meta', 'time_index'], res_cls.datasets)) assert np.all(~np.isin(['meta', 'time_index', 'coordinates'], res_cls.resource_datasets)) assert np.all(np.in1d(res_cls.countries, meta['country'].unique())) assert np.all(np.in1d(res_cls.states, meta['state'].unique())) assert np.all(np.in1d(res_cls.counties, meta['county'].unique())) def extract_site(res_cls, ds_name): """ Run tests extracting a single site """ time_index = res_cls.time_index meta = res_cls.meta site = np.random.choice(len(meta), 1)[0] lat_lon = meta.loc[site, ['latitude', 'longitude']].values truth_ts = res_cls[ds_name, :, site] truth_df = pd.DataFrame(truth_ts, columns=[site], index=
pd.Index(time_index, name='time_index')
pandas.Index
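A hedged sketch of the pd.Index usage completed above: wrapping an array in pd.Index with a name (here 'time_index', as in the test) keeps that label on the resulting frame. The data below is synthetic:

import numpy as np
import pandas as pd

time_index = pd.date_range('2012-01-01', periods=4, freq='h')
site = 0
truth_df = pd.DataFrame(np.arange(4.0), columns=[site],
                        index=pd.Index(time_index, name='time_index'))
print(truth_df.index.name)   # 'time_index'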
#!/usr/bin/env python import asyncio import aiohttp import logging import pandas as pd from typing import ( Any, AsyncIterable, Dict, List, Optional, ) import time import ujson import websockets from websockets.exceptions import ConnectionClosed from hummingbot.core.data_type.order_book import OrderBook from hummingbot.connector.exchange.eterbase.eterbase_order_book import EterbaseOrderBook from hummingbot.core.data_type.order_book_tracker_data_source import OrderBookTrackerDataSource from hummingbot.core.utils import async_ttl_cache from hummingbot.logger import HummingbotLogger from hummingbot.core.data_type.order_book_message import OrderBookMessage from hummingbot.connector.exchange.eterbase.eterbase_active_order_tracker import EterbaseActiveOrderTracker import hummingbot.connector.exchange.eterbase.eterbase_constants as constants from hummingbot.connector.exchange.eterbase.eterbase_utils import ( convert_to_exchange_trading_pair, convert_from_exchange_trading_pair) MAX_RETRIES = 20 NaN = float("nan") class EterbaseAPIOrderBookDataSource(OrderBookTrackerDataSource): MESSAGE_TIMEOUT = 30.0 PING_TIMEOUT = 10.0 API_CALL_TIMEOUT = 30.0 _eaobds_logger: Optional[HummingbotLogger] = None @classmethod def logger(cls) -> HummingbotLogger: if cls._eaobds_logger is None: cls._eaobds_logger = logging.getLogger(__name__) return cls._eaobds_logger def __init__(self, trading_pairs: List[str]): super().__init__(trading_pairs) self._tp_map_mrktid: Dict[str, str] = None @classmethod async def get_last_traded_prices(cls, trading_pairs: List[str]) -> Dict[str, float]: results = dict() async with aiohttp.ClientSession() as client: resp = await client.get(f"{constants.REST_URL}/tickers") resp_json = await resp.json() for trading_pair in trading_pairs: resp_record = [o for o in resp_json if o["symbol"] == convert_to_exchange_trading_pair(trading_pair)][0] results[trading_pair] = float(resp_record["price"]) return results @classmethod @async_ttl_cache(ttl=60 * 30, maxsize=1) async def get_active_exchange_markets(cls) -> pd.DataFrame: """ *required Returns all currently active BTC trading pairs from Eterbase, sorted by volume in descending order. """ async with aiohttp.ClientSession() as client: async with client.get(f"{constants.REST_URL}/markets") as products_response: products_response: aiohttp.ClientResponse = products_response if products_response.status != 200: raise IOError(f"Error fetching active Eterbase markets. HTTP status is {products_response.status}.") data = await products_response.json() for pair in data: pair["symbol"] = convert_from_exchange_trading_pair(pair["symbol"]) all_markets: pd.DataFrame = pd.DataFrame.from_records(data=data, index="id") all_markets.rename({"base": "baseAsset", "quote": "quoteAsset"}, axis="columns", inplace=True) all_markets = all_markets[(all_markets.state == 'Trading')] ids: List[str] = list(all_markets.index) volumes: List[float] = [] prices: List[float] = [] tickers = None async with client.get(f"{constants.REST_URL}/tickers") as tickers_response: tickers_response: aiohttp.ClientResponse = tickers_response if tickers_response.status == 200: data = await tickers_response.json() tickers: pd.DataFrame = pd.DataFrame.from_records(data=data, index="marketId") else: raise IOError(f"Error fetching tickers on Eterbase. 
" f"HTTP status is {tickers_response.status}.") for product_id in ids: volumes.append(float(tickers.loc[product_id].volume)) prices.append(float(tickers.loc[product_id].price)) all_markets["volume"] = volumes all_markets["price"] = prices cross_rates = None async with client.get(f"{constants.REST_URL}/tickers/cross-rates") as crossrates_response: crossrates_response: aiohttp.ClientResponse = crossrates_response if crossrates_response.status == 200: data = await crossrates_response.json() cross_rates: pd.DataFrame = pd.json_normalize(data, record_path ='rates', meta = ['base']) else: raise IOError(f"Error fetching cross-rates on Eterbase. " f"HTTP status is {crossrates_response.status}.") usd_volume: List[float] = [] cross_rates_ids: List[str] = list(cross_rates.base) for row in all_markets.itertuples(): quote_name: str = row.quoteAsset quote_volume: float = row.volume quote_price: float = row.price found = False for product_id in cross_rates_ids: if quote_name == product_id: rate: float = cross_rates.loc[(cross_rates['base'] == product_id) & (cross_rates['quote'].str.startswith("USDT"))].iat[0, 1] usd_volume.append(quote_volume * quote_price * rate) found = True break if found is False: usd_volume.append(NaN) cls.logger().error(f"Unable to convert volume to USD for market - {quote_name}.") all_markets["USDVolume"] = usd_volume return all_markets.sort_values(by = ["USDVolume"], ascending = False) async def get_map_marketid(self) -> Dict[str, str]: """ Get a list of active trading pairs (if the market class already specifies a list of trading pairs, returns that list instead of all active trading pairs) :returns: A list of trading pairs defined by the market class, or all active trading pairs from the rest API """ if not self._tp_map_mrktid: try: active_markets: pd.DataFrame = await self.get_active_exchange_markets() active_markets['id'] = active_markets.index self._tp_map_mrktid = dict(zip(active_markets.symbol, active_markets.id)) except Exception: self._tp_map_mrktid = None self.logger().network( "Error getting active exchange information.", exc_info=True, app_warning_msg="Error getting active exchange information. Check network connection." ) return self._tp_map_mrktid @staticmethod async def get_map_market_id() -> Dict[str, str]: """ """ tp_map_mid: Dict[str, str] = {} async with aiohttp.ClientSession() as client: async with client.get(f"{constants.REST_URL}/markets") as products_response: products_response: aiohttp.ClientResponse = products_response if products_response.status != 200: raise IOError(f"Error fetching active Eterbase markets. HTTP status is {products_response.status}.") data = await products_response.json() for dt in data: tp_map_mid[convert_from_exchange_trading_pair(dt['symbol'])] = dt['id'] return tp_map_mid @staticmethod async def get_snapshot(client: aiohttp.ClientSession, trading_pair: str) -> Dict[str, any]: """ Fetches order book snapshot for a particular trading pair from the rest API :returns: Response from the rest API """ map_market = await EterbaseAPIOrderBookDataSource.get_map_market_id() market_id = map_market[trading_pair] product_order_book_url: str = f"{constants.REST_URL}/markets/{market_id}/order-book" async with client.get(product_order_book_url) as response: response: aiohttp.ClientResponse = response if response.status != 200: raise IOError(f"Error fetching Eterbase market snapshot for marketId: {market_id}. 
" f"HTTP status is {response.status}.") data: Dict[str, Any] = await response.json() return data async def get_new_order_book(self, trading_pair: str) -> OrderBook: async with aiohttp.ClientSession() as client: td_map_id: Dict[str, str] = await self.get_map_marketid() snapshot: Dict[str, any] = await self.get_snapshot(client, trading_pair) snapshot_timestamp: float = time.time() snapshot_msg: OrderBookMessage = EterbaseOrderBook.snapshot_message_from_exchange( snapshot, snapshot_timestamp, metadata={"trading_pair": trading_pair, "market_id": td_map_id[trading_pair]} ) order_book: OrderBook = self.order_book_create_function() active_order_tracker: EterbaseActiveOrderTracker = EterbaseActiveOrderTracker() bids, asks = active_order_tracker.convert_snapshot_message_to_order_book_row(snapshot_msg) order_book.apply_snapshot(bids, asks, snapshot_msg.update_id) return order_book async def _inner_messages(self, ws: websockets.WebSocketClientProtocol) -> AsyncIterable[str]: """ Generator function that returns messages from the web socket stream :param ws: current web socket connection :returns: message in AsyncIterable format """ # Terminate the recv() loop as soon as the next message timed out, so the outer loop can reconnect. try: while True: try: msg: str = await asyncio.wait_for(ws.recv(), timeout = self.MESSAGE_TIMEOUT) yield msg except asyncio.TimeoutError: try: await ws.send('{"type": "ping"}') except asyncio.TimeoutError: raise except asyncio.TimeoutError: self.logger().warning("WebSocket ping timed out. Going to reconnect...") return except ConnectionClosed: return finally: await ws.close() async def listen_for_trades(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue): # Trade messages are received from the order book web socket pass async def listen_for_order_book_diffs(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue): """ *required Subscribe to diff channel via web socket, and keep the connection open for incoming messages :param ev_loop: ev_loop to execute this function in :param output: an async queue where the incoming messages are stored """ while True: try: trading_pairs = self._trading_pairs tp_map_mrktid = await self.get_map_marketid() marketsDict = dict(zip(tp_map_mrktid.values(), tp_map_mrktid.keys())) marketIds = [] for tp in trading_pairs: marketIds.append(tp_map_mrktid[tp]) async with websockets.connect(constants.WSS_URL) as ws: ws: websockets.WebSocketClientProtocol = ws subscribe_request: Dict[str, Any] = { "type": "subscribe", "channelId": "order_book", "marketIds": marketIds, } await ws.send(ujson.dumps(subscribe_request)) async for raw_msg in self._inner_messages(ws): msg = ujson.loads(raw_msg) msg_type: str = msg.get("type", None) if msg_type is None: raise ValueError(f"Eterbase Websocket message does not contain a type - {msg}") elif msg_type == "error": raise ValueError(f"Eterbase Websocket received error message - {msg['message']}") elif msg_type == "pong": self.logger().debug("Eterbase websocket received event pong - {msg}") elif msg_type == "ob_snapshot": order_book_message: OrderBookMessage = EterbaseOrderBook.snapshot_message_from_exchange(msg,
pd.Timestamp.now("UTC")
pandas.Timestamp.now
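A minimal sketch of the completion above: pd.Timestamp.now("UTC") returns a timezone-aware timestamp, which is what the order-book snapshot message is stamped with.

import pandas as pd

ts = pd.Timestamp.now("UTC")
print(ts.tz, ts.isoformat())   # tz-aware value in UTC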
import pandas as pd import math from sklearn.preprocessing import MinMaxScaler class DataProcessor: def __init__(self): self.df_train = None self.df_test = None self.df_store = None self.scale_y = None '''importing data''' def load_data(self, path): self.df_train = pd.read_csv(path+'/train.csv', parse_dates=['Date'], dtype={'StateHoliday': 'category'}) self.df_test = pd.read_csv(path+'/test.csv', parse_dates=['Date'], dtype={'StateHoliday': 'category'}) self.df_store = pd.read_csv(path+'/store.csv') '''sorting data to prepare a timeseries''' def preprocessing(self, n_input): self.df_train.sort_values('Date', ascending=True, inplace=True) self.df_test.sort_values('Date', ascending=True, inplace=True) '''joining train and test data to manipulate them together''' train_test = pd.concat([self.df_train, self.df_test]) '''to keep the shape same between train and test, filling in na for all test rows''' train_test.loc[train_test['Sales'].isna(), 'Sales'] = -1 '''Splitting date into day, month, year and weekofyear''' train_test['Month'] = train_test['Date'].dt.month train_test['Year'] = train_test['Date'].dt.year train_test['Day'] = train_test['Date'].dt.day train_test['WeekOfYear'] = train_test['Date'].dt.weekofyear ''' # df_open = self.df_train.loc[df_train['Open'] == 0] # df_open.groupby('DayOfWeek')['Open'].describe() # df_open = self.df_test.loc[df_test['Open'] == 0] # df_open.groupby('DayOfWeek')['Open'].describe() In test set, values are missing for Column = Open, based on the trend uing train set, it's concluded that shops remain open on week days mostly, hence filling these missing values with 1 ''' train_test['Open'].fillna(1, inplace=True) '''store file has 3 missing values for CompetitionDistance, filling in these with the median''' self.df_store['CompetitionDistance'].fillna(self.df_store['CompetitionDistance'].median(), inplace=True) '''merging store data with train and test concatenated dataset''' train_test_merged = pd.merge(train_test, self.df_store, on='Store', how='left') '''Evaluating CompetitionOpenMonths and PromoOpenMonths''' train_test_merged['CompetitionOpenMonths'] = 12 * ( train_test_merged['Year'] - train_test_merged['CompetitionOpenSinceYear']) + train_test_merged[ 'Month'] - \ train_test_merged['CompetitionOpenSinceMonth'] train_test_merged['PromoOpenMonths'] = 12 * ( train_test_merged['Year'] - train_test_merged['Promo2SinceYear']) + ( train_test_merged['WeekOfYear'] - train_test_merged[ 'Promo2SinceWeek']) / 4.0 train_test_merged['CompetitionOpenSinceMonth'].fillna(0, inplace=True) train_test_merged['CompetitionOpenSinceMonth'].fillna(0, inplace=True) train_test_merged['CompetitionOpenSinceYear'].fillna(0, inplace=True) train_test_merged['Promo2SinceWeek'].fillna(0, inplace=True) train_test_merged['Promo2SinceYear'].fillna(0, inplace=True) train_test_merged['PromoInterval'].fillna(0, inplace=True) train_test_merged['CompetitionOpenMonths'].fillna(0, inplace=True) train_test_merged['PromoOpenMonths'].fillna(0, inplace=True) '''Splitting train and test for separate evaluation and processing''' train_data = train_test_merged.loc[:self.df_train.index.size - 1, :] test_data = train_test_merged.loc[self.df_train.index.size:, :] ''' #train_data[train_data['Customers'] != 0].groupby(['StoreType', 'DayOfWeek'])['Sales', 'Customers'].sum() Based on the above result, finding 1. average Sales per storetype per dayofweek 2. 
average number of customers per storetype per dayofweek ''' df_avg = pd.DataFrame(train_data[train_data['Customers'] != 0].groupby(['StoreType', 'DayOfWeek']).apply( lambda x: x['Sales'].sum() / x['Customers'].sum())) df_avg_cust = pd.DataFrame( train_data[train_data['Customers'] != 0].groupby(['StoreType', 'DayOfWeek'])['Customers'].mean()) df_avg_cust.columns = ['AvgCustomer'] df_avg.columns = ['AvgSalesPCustomer'] train_data = train_data.merge(df_avg, on=['StoreType', 'DayOfWeek'], how='left') train_data = train_data.merge(df_avg_cust, on=['StoreType', 'DayOfWeek'], how='left') test_data = test_data.merge(df_avg, on=['StoreType', 'DayOfWeek'], how='left') test_data = test_data.merge(df_avg_cust, on=['StoreType', 'DayOfWeek'], how='left') '''Filling Na''' test_data['AvgCustomer'].fillna(0, inplace=True) test_data['AvgSalesPCustomer'].fillna(0, inplace=True) train_data['AvgCustomer'].fillna(0, inplace=True) train_data['AvgSalesPCustomer'].fillna(0, inplace=True) '''With the help of a key map for the months, finding out those months in which promo was active''' month2str = {1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun', 7: 'Jul', 8: 'Aug', 9: 'Sept', 10: 'Oct', 11: 'Nov', 12: 'Dec'} train_data['monthStr'] = train_data.Month.map(month2str) test_data['monthStr'] = test_data.Month.map(month2str) train_data['IsPromoMonth'] = 0 for interval in train_data.PromoInterval.unique(): interval = str(interval) if interval != '': for month in interval.split(','): train_data.loc[ (train_data.monthStr == month) & (train_data.PromoInterval == interval), 'IsPromoMonth'] = 1 test_data['IsPromoMonth'] = 0 for interval in test_data.PromoInterval.unique(): interval = str(interval) if interval != '': for month in interval.split(','): test_data.loc[ (test_data.monthStr == month) & (test_data.PromoInterval == interval), 'IsPromoMonth'] = 1 '''Checking data types at this state to make sure everything in float for model to process #test_data.dtypes In case of StateHoliday one values is numeric and others are string. In order to get dummies, changing numeric to string ''' train_data.loc[train_data['StateHoliday'] == 0, 'StateHoliday'] = 'd' test_data.loc[test_data['StateHoliday'] == 0, 'StateHoliday'] = 'd' train_data = pd.get_dummies(train_data, columns=["StateHoliday", "StoreType", "Assortment"], drop_first=False) test_data =
pd.get_dummies(test_data, columns=["StateHoliday", "StoreType", "Assortment"], drop_first=False)
pandas.get_dummies
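An illustrative sketch of the pd.get_dummies call completed above, on toy data: each listed categorical column is expanded into indicator columns, mirroring what the preprocessing does for StateHoliday, StoreType and Assortment:

import pandas as pd

toy = pd.DataFrame({'StateHoliday': ['a', 'd', 'b'],
                    'StoreType': ['c', 'a', 'c'],
                    'Sales': [100, 200, 150]})
encoded = pd.get_dummies(toy, columns=['StateHoliday', 'StoreType'], drop_first=False)
print(list(encoded.columns))   # indicator columns plus the untouched 'Sales'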
from typing import Union import pandas as pd import matplotlib.pyplot as plt from pandas.core.frame import DataFrame from pandas.core.series import Series import requests as req import numpy as np from math import floor from termcolor import colored as cl plt.style.use('fivethirtyeight') plt.rcParams['figure.figsize'] = (20, 10) def compute_ewm_rsi(close: Series, period: int=14) -> DataFrame: delta = close.diff() upwards = [] downwards = [] for i in range(len(delta)): if delta[i] < 0: upwards.append(0) downwards.append(delta[i]) else: upwards.append(delta[i]) downwards.append(0) up_series =
pd.Series(upwards)
pandas.Series
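A hedged sketch of what typically follows the pd.Series wrapping in an EWM-based RSI (the continuation is assumed here, not taken from the original function): the gain/loss lists become Series, an exponentially weighted mean over the period is taken, and the ratio is mapped to the 0-100 scale.

import pandas as pd

upwards = [0.0, 1.5, 0.0, 2.0, 0.5]          # toy gains
downwards = [-1.0, 0.0, -0.5, 0.0, 0.0]      # toy losses (negative, as in the loop above)
period = 3
up_series = pd.Series(upwards)
down_series = pd.Series(downwards).abs()
avg_gain = up_series.ewm(com=period - 1, adjust=False).mean()
avg_loss = down_series.ewm(com=period - 1, adjust=False).mean()
rsi = 100 - (100 / (1 + avg_gain / avg_loss))
print(rsi.round(2).tolist())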
# -*- coding: utf-8 -*- """ Created on Sun May 22 10:25:19 2016 Process signups from google drive @author: tkc """ #%% import pandas as pd import os import pkg.SC_signup_functions as SC import pkg.SC_config as cnf import pkg.SC_signup_google_API_functions as SCapi # /from pandas_ods_reader import read_ods # too slow #%% from importlib import reload reload(SC) reload(cnf) #%% Load and process raw signup file from Fall2016_signups.xlsx # raw tab contains unprocessed google drive signups plus selected info from paper os.chdir('C:\\Users\\kevin\\Documents\\Python_Scripts\\SC\\') os.chdir(cnf._OUTPUT_DIR) signupfile='Winter2017_signups.xlsx' signupfile='Spring2019_signups.xlsx' signupfile='Fall2018_signups.xlsx' signupfile=cnf._INPUT_DIR +'\\Fall2019_signups.csv' signupfile=cnf._INPUT_DIR +'\\Fall2019_signups.xlsx' signupfile=cnf._INPUT_DIR +'Spring2019_signups.xlsx' #%% Testing new google sheets API download # ID and range of Fall 2020 (up to Gkey) sheetID = '1mexU5HW8Va1QXN43eN2zvHQysJINw6tdwJ7psOKmQng' rangeName = 'Form Responses!A:AX' # get allinclude plakey/famkey manual mode' # ID and range of Winter 2019 basketball sheetID = '182QFOXdz0cjQCTlxl2Gb9b_oEqInH93Peo6EKkKod-g' rangeName = 'Form Responses 1!A:AC' # include plakey/famkey manual mode' # spring signups sheetID='1lppbr8srsVbN48RYrfRr58sd7yfUnJM21sSSx2C0mG8' rangeName = 'Form Responses!A:Z' # include plakey/famkey manual mode' gsignups = SCapi.downloadSignups(sheetID, rangeName) # TODO write unique Gkey column... assign values season='Fall' year=2020 # Load signups,player and family contact info; format names/numbers, eliminate duplicates players, famcontact, gsignups = SC.loadProcessGfiles(gsignups, season, year) players, famcontact = SC.loadProcessPlayerInfo() # version w/o signup processing # Preliminary summary of signups (w/o ID or master signups assignments) coach=SC.findCoaches(gsignups, **{'gradeGenders': [ [0,'m'],[0,'f'],[1,'m'],[1,'f']] }) # Co-ed K-1 team coach=SC.findCoaches(gsignups) # coach candidates all grade/genders #%% # Find player number and assign to signup rows # SCsignup, players, famcontact =SC.findplayers(SCsignup, players, famcontact) # test w/ gsignups gsignups, players, famcontact =SC.findplayers(gsignups, players, famcontact, year) # Save SC signups back to xls file (incl. altered names) SC.writetoxls(SCsignup,'Raw', signupfile) os.chdir(cnf._INPUT_DIR) SCsignup.to_csv(signupfile,index=False) # CSV version #TODO save method back to google signups? # Update missing info for manually entered players (no full google drive entry info) SCsignup = SC.findmissinginfo(gsignups, players, famcontact) SCsignup = findmissinginfo(SCsignup, players, famcontact) unmatch=gsignups[pd.isnull(gsignups['Plakey'])] #%% Process data changes from google drive info... works but check/correct using log # email, phone, parent names, address changes (house # detection) players, famcontact=SC.processdatachanges(gsignups, players, famcontact, year) players, famcontact=processdatachanges(gsignups, players, famcontact, year) # load Mastersignups and add signups to master signups list (duplicates eliminated so no danger with re-run) Mastersignups = pd.read_csv(cnf._INPUT_DIR +'\\\master_signups.csv', encoding='cp437') Mastersignups = SC.createsignups(gsignups, Mastersignups, season, year) # new signups are auto-saved # Summarize signups by sport-gender-grade (written into signup file) # TODO fix... 
redirect to output_dir SC.summarizesignups(Mastersignups, season, year, **{'XLSpath':signupfile}) # write to tab in excel signup file SC.summarizesignups(Mastersignups, season, year, **{'saveCSV':True}) # save to season_yr_signup_summary.csv (not Excel) # gsignups version # TODO make a summary tool before/without adding to master signups # Feasibility before official signup, but needs split of multiple signups sportsumm=SC.summarizesignups(gsignups, season, year, **{'toDf':True}) SC.summarizesignups(gsignups, season, year) # save to csv SC.summarizesignups(gsignups, season, year, **{'XLSpath':signupfile}) # save to sheet in xls signup # Manually create desired teams in Teams_coaches.xlsx (teams tab should only have this sport season not older teams) # TODO really slow... find a replacement method for .ods reads teams=pd.read_csv(cnf._INPUT_DIR +'\\Teams_2019.csv', encoding='cp437') #teams=pd.read_excel('Teams_coaches.xlsx', sheetname='Teams') # # teams = read_ods(cnf._INPUT_DIR +'\\Teams_coaches.ods', 'Teams') # read ods team file #coaches = read_ods(cnf._INPUT_DIR +'\\Teams_coaches.ods', 'Coaches') # read ods team file #coaches=pd.read_excel('private\\Teams_coaches.xlsx', sheetname='Coaches') # load coach info coaches=pd.read_csv(cnf._INPUT_DIR +'\\coaches.csv', encoding='cp437') # common excel file encoding coaches.to_csv('coaches.csv', index=False) # Update teams (manual edit or using update script) teams=SC.updateoldteams(teams,year) teams.to_csv('private\\Teams_2019.csv', index=False) SC.writetoxls(teams,'Teams','teams_coaches.xlsx') # save fsupdated teams to tab in teams_coaches xls file # Now assign this season/years players to teams based on Teams xls file # Overwrite=True resets all existing custom player assignment (i.e. single 2nd grader playing on 3rd team) # Overwrite=False will not change any existing team assignments (only finds team for new signups) Mastersignups=SC.assigntoteams(Mastersignups, season, year, teams, overwrite=False) Mastersignups=assigntoteams(Mastersignups, season, year, teams, overwrite=False) temp=Mastersignups[(Mastersignups['Year']==2017) & (Mastersignups['Sport']=='Track')] # Track sub-team assigned based on DOB calculation (TEAM ASSIGNMENTS NOT AUTOSAVED) Mastersignups=SC.assigntrackgroup(Mastersignups, year, players) Mastersignups.to_csv(cnf._INPUT_DIR + '\\master_signups.csv',index=False) # if any players are playing up at different grade, just manually change team name in master_signups.csv (and use overwrite False) # also manually edit select players to open status # team contact lists to separate sport Excel tabs (Warning... this overwrites existing version) SC.writecontacts(Mastersignups, famcontact, players, season, year) # Make google compatible contacts list for all Cabrini teams (auto-save to csv) SC.makegoogcont(Mastersignups, famcontact, players, season, year) # Find missing players and add to recruits tab of signupfile # after all new signups added, just looks for those signed up last year but not this SC.findrecruits(Mastersignups, players, famcontact, season, year, signupfile) # TODO fix countteamplayers for co-ed teams teams=SC.countteamplayers(Mastersignups, teams, season, year) # summarizes players assigned to teams, autosaved to teams tab Mastersignups.to_csv('master_signups.csv', index=False) # Create 5 separate rosters (Cabrini CYC soccer & VB, soccer & VB transfers, junior teams (incl. age) incl. coaches acronyms=
pd.read_csv(cnf._INPUT_DIR+'\\acronyms.csv')
pandas.read_csv
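A small self-contained sketch of the pd.read_csv call completed above. The scripts read several CSVs with encoding='cp437'; the same call works on any byte stream, shown here with an in-memory buffer and toy contents instead of the real acronyms file:

import io
import pandas as pd

raw = "Acronym,Note\nCYC,example league name\n".encode('cp437')   # hypothetical contents
acronyms = pd.read_csv(io.BytesIO(raw), encoding='cp437')
print(acronyms.iloc[0].tolist())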
import numpy as np import pandas as pd from spandex import TableLoader, TableFrame from spandex.io import df_to_db import urbansim.sim.simulation as sim import utils loader = TableLoader() staging = loader.tables.staging ## Assumptions. # Use codes were classified manually because the assessor classifications # are meant for property tax purposes. These classifications should be # reviewed and revised. res_codes = {'single': ([1100] + range(1120, 1151) + range(1200, 1501) + range(1900, 2000)), 'multi': (range(600, 1100) + [1700] + range(2000, 3000) + range(5000, 5300) + range(7000, 7701) + [7800]), 'mixed': (range(3900, 4000) + [4101] + [4191] + [4240] + [9401] + [9491])} exempt_codes = range(1, 1000) ## Register input tables. tf = TableFrame(staging.parcels_ala, index_col='apn_sort') sim.add_table('parcels_in', tf, copy_col=False) @sim.table(cache=True) def ie670(): filepath = \ loader.get_path('built/parcel/2010/ala/assessor_nov10/IE670c.txt') df = pd.read_table(filepath, sep='\t', index_col=False, low_memory=False) df.set_index("Assessor's Parcel Number (APN) sort format", inplace=True) assert df.index.is_unique assert not df.index.hasnans() return df @sim.table(cache=True) def ie673(): filepath = \ loader.get_path('built/parcel/2010/ala/assessor_nov10/IE673c.txt') df = pd.read_table(filepath, sep='\t', index_col=False) df.set_index('APNsort', inplace=True) assert df.index.is_unique assert not df.index.hasnans() return df ## Register output table. @sim.table(cache=True) def parcels_out(parcels_in): index =
pd.Series(parcels_in.index)
pandas.Series
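The completion here wraps an existing index in a Series (pd.Series(parcels_in.index)). A minimal sketch of that pattern follows; the APN-like labels are invented stand-ins for the parcels_in index, and the string split is only there to show why having the labels as Series values is useful.

# Minimal sketch of turning an Index into a Series, as in the completion above.
# The 'apn_sort' labels are hypothetical.
import pandas as pd

idx = pd.Index(["001-0001", "001-0002", "002-0001"], name="apn_sort")
index_as_series = pd.Series(idx)               # values are the index labels
print(index_as_series.str.split("-").str[0])   # e.g. derive a book prefix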
# from datetime import datetime import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib.dates as mdates from matplotlib.dates import DateFormatter, WeekdayLocator, DayLocator, MONDAY, date2num from mpl_finance import candlestick_ohlc #from pylab import mpl from arch.unitroot import ADF import statsmodels.api as sm from apps.tp.pair_trading import PairTrading from apps.tp.tp_quotation import TpQuotation class TpEngine(object): def __init__(self): self.name = 'apps.tp.TpEngine' self._create_stock_pool() def draw_daily_k_line(self, stock_code, start_date, end_date): raw_datas = pd.read_csv('./data/tp/sh{0}.csv'.format(stock_code)) datas = raw_datas datas['date2'] = datas['date'].map(lambda d: mdates.date2num(datetime.strptime(d, "%Y-%m-%d"))) start_date_val = mdates.date2num(datetime.strptime(start_date, "%Y-%m-%d")) end_date_val = mdates.date2num(datetime.strptime(end_date, "%Y-%m-%d")) recs = list() for i in range(len(datas)): rec = datas.iloc[i, :] if rec['date2']>= start_date_val and rec['date2']<=end_date_val: recs.append([rec['date2'], rec['open'], rec['high'], rec['low'], rec['close']]) ax = plt.subplot() mondays = WeekdayLocator(MONDAY) weekFormatter = DateFormatter('%y %b %d') ax.xaxis.set_major_locator(mondays) ax.xaxis.set_minor_locator(DayLocator()) ax.xaxis.set_major_formatter(weekFormatter) plt.rcParams['font.family'] = 'sans-serif' plt.rcParams['font.sans-serif'] = ['SimHei'] plt.rcParams['axes.unicode_minus'] = False ax.set_title(u'上证综指kline picture') candlestick_ohlc(ax, recs, width=0.7, colorup='r', colordown='g') plt.setp(plt.gca().get_xticklabels(), rotation=50, horizontalalignment='center') plt.show() def draw_close_price_graph(self, stock_code, start_date, end_date): raw_datas = pd.read_csv('./data/tp/sh{0}.csv'.format(stock_code)) datas = raw_datas datas['date2'] = datas['date'].map(lambda d: mdates.date2num(datetime.strptime(d, "%Y-%m-%d"))) start_date_val = mdates.date2num(datetime.strptime(start_date, "%Y-%m-%d")) end_date_val = mdates.date2num(datetime.strptime(end_date, "%Y-%m-%d")) datas = datas[(datas.date2>=start_date_val) & (datas.date2<=end_date_val)] datas.plot(x='date', y='close') plt.setp(plt.gca().get_xticklabels(), rotation=50, horizontalalignment='center') plt.grid(b=True, which='both', axis='x') plt.show() def check_cointegration(self, stock_x, stock_y, form_start, form_end): ''' 检查协整模型 ''' p_x = self.get_stock_df(stock_x, form_start, form_end) log_p_x = np.log(p_x) adf_status = self.check_adf(log_p_x.diff()[1:]) if not adf_status: print('{0}不是单阶平稳信号'.format(stock_x)) return False, 0.0, 0.0, 0.0, 0.0 p_y = self.get_stock_df(stock_y, form_start, form_end) log_p_y = np.log(p_y) adf_status = self.check_adf(log_p_y.diff()[1:]) if not adf_status: print('{0}不是单阶平稳信号'.format(stock_y)) return False, 0.0, 0.0, 0.0, 0.0 model = sm.OLS(log_p_y, sm.add_constant(log_p_x)).fit() alpha = model.params[0] beta = model.params[1] spreadf = log_p_y - beta*log_p_x - alpha mu = np.mean(spreadf) sd = np.std(spreadf) adf_status = self.check_adf(spreadf) if not adf_status: print('协整模型不具有平稳性') return False, 0.0, 0.0, 0.0, 0.0 return True, alpha, beta, mu, sd def check_adf(self, diff_val): adf_val = ADF(diff_val) return adf_val.pvalue < 0.05 def calculate_trading_pairs(self): stocks = [] need_quotation = False for key in self.stock_pool.keys(): stocks.append(key) if need_quotation: print('获取{0}股票行情数据...'.format(key)) tp_quotation = TpQuotation() tp_quotation.get_quotation(key) stocks_len = len(stocks) form_start = '2018-11-01' form_end = 
'2019-11-01' tpc = {} sum = 0 for i in range(stocks_len): p_x = self.get_stock_df(stocks[i], form_start, form_end) for j in range(i+1, stocks_len): print('trading_pair: {0}-{1}'.format(stocks[i], stocks[j])) p_y = self.get_stock_df(stocks[j], form_start, form_end) ssd = self._calculate_SSD(p_x, p_y) tpc['{0}-{1}'.format(stocks[i], stocks[j])] = ssd sum += 1 print('sum={0}'.format(sum)) self.tpc = sorted(tpc.items(), key=lambda x: x[1]) for itr in self.tpc: stock_items = itr[0].split('-') print('{0}({3})-{1}({4})={2};'.format( self.stock_pool[stock_items[0]], self.stock_pool[stock_items[1]], itr[1], stock_items[0], stock_items[1] )) def get_stock_df(self, stock_code, form_start, form_end): stock_df = pd.read_csv('./data/tp/sh{0}.csv'.format(stock_code), index_col='date') stock_df.index = pd.to_datetime(stock_df.index) return stock_df['close'][form_start:form_end] def _calculate_SSD(self, price_x, price_y): if price_x is None or price_y is None: print('缺少价格序列') return r_x = (price_x - price_x.shift(1)) / price_x.shift(1) [1:] r_y = (price_y - price_y.shift(1)) / price_y.shift(1) [1:] #hat_p_x = (r_x + 1).cumsum() hat_p_x = (r_x + 1).cumprod() #hat_p_y = (r_y + 1).cumsum() hat_p_y = (r_y + 1).cumprod() return np.sum( (hat_p_x - hat_p_y)**2 ) # 上证50股票池 def _create_stock_pool(self): self.stock_pool = { '600036': '招商银行', '601318': '中国平安', '600016': '民生银行', '601328': '交通银行', '600000': '浦发银行', '601166': '兴业银行', '601088': '中国神华', '600030': '中信证券', '600519': '贵州茅台', '600837': '海通证券', '601601': '中国太保', '601398': '工商银行', '601668': '中国建筑', '600031': '三一重工', '600585': '海螺水泥', '600111': '包钢稀土', '601006': '大秦铁路', '601899': '紫金矿业', '601939': '建设银行', '600050': '中国联通', '601169': '北京银行', '601288': '农业银行', '601857': '中国石油', '600048': '保利地产', '601989': '中国重工', '600547': '山东黄金', '600900': '长江电力', '600028': '中国石化', '600348': '国阳新能', '600104': '上海汽车', '600089': '特变电工', '601699': '潞安环能', '600019': '宝钢股份', '600362': '江西铜业', '601600': '中国铝业', '600015': '华夏银行', '600383': '金地集团', '601168': '西部矿业', '600489': '中金黄金', '601628': '中国人寿', '601766': '中国南车', '600518': '康美药业', '600999': '招商证券', '601688': '华泰证券', '601958': '金钼股份', '601390': '中国中铁', '601919': '中国远洋', '601111': '中国国航', '601818': '光大银行', '601118': '海南橡胶' } ''' ****************************************************************** ******************************************************************** ''' def startup(self): #self.test_pair_trading() self.do_pair_trading() def form_pair_trading(self, df, form_start, form_end, stock_x, stock_y): p_x = df[stock_x][form_start:form_end] log_p_x = np.log(p_x) adf_status = self.check_adf(log_p_x.diff()[1:]) if not adf_status: print('{0}不是单阶平稳信号'.format(stock_x)) return False, 0.0, 0.0, 0.0, 0.0 p_y = df[stock_y][form_start:form_end] log_p_y = np.log(p_y) adf_status = self.check_adf(log_p_y.diff()[1:]) if not adf_status: print('{0}不是单阶平稳信号'.format(stock_y)) return False, 0.0, 0.0, 0.0, 0.0 model = sm.OLS(log_p_y, sm.add_constant(log_p_x)).fit() alpha = model.params[0] beta = model.params[1] spreadf = log_p_y - beta*log_p_x - alpha mu = np.mean(spreadf) sd = np.std(spreadf) #adfSpread = ADF(spreadf) adf_status = self.check_adf(spreadf) if not adf_status: print('协整模型不具有平稳性') return False, 0.0, 0.0, 0.0, 0.0 return True, alpha, beta, mu, sd def do_pair_trading(self): sh=pd.read_csv('./data/sh50p.csv',index_col='Trddt') sh.index=pd.to_datetime(sh.index) #配对交易实测 #提取形成期数据 formStart='2014-01-01' formEnd='2015-01-01' PA=sh['601988'] PB=sh['600000'] PAf=PA[formStart:formEnd] PBf=PB[formStart:formEnd] #形成期协整关系检验 #一阶单整检验 log_PAf=np.log(PAf) 
adfA=ADF(log_PAf) print(adfA.summary().as_text()) adfAd=ADF(log_PAf.diff()[1:]) print(adfAd.summary().as_text()) # B股票平稳性检查 log_PBf=np.log(PBf) adfB=ADF(log_PBf) print(adfB.summary().as_text()) adfBd=ADF(log_PBf.diff()[1:]) print(adfBd.summary().as_text()) # #协整关系检验 model=sm.OLS(log_PBf,sm.add_constant(log_PAf)).fit() print('model:\r\n{0}'.format(model.summary())) alpha=model.params[0] print('alpha={0};'.format(alpha)) beta=model.params[1] print('beta={0}'.format(beta)) #残差单位根检验 spreadf = log_PBf-beta*log_PAf-alpha adfSpread = ADF(spreadf) print('残差单位根检验:{0}; v={1};'.format(adfSpread.summary().as_text(), adfSpread.critical_values['5%'])) # mu = np.mean(spreadf) sd = np.std(spreadf) # #设定交易期 tradeStart='2015-01-01' tradeEnd='2015-06-30' PAt=PA[tradeStart:tradeEnd] PBt=PB[tradeStart:tradeEnd] CoSpreadT=np.log(PBt)-beta*np.log(PAt)-alpha print('CoSpreadT: {0};'.format(CoSpreadT.describe())) plt.rcParams['font.family'] = 'sans-serif' plt.rcParams['font.sans-serif'] = ['SimHei'] plt.rcParams['axes.unicode_minus'] = False CoSpreadT.plot() plt.title('交易期价差序列(协整配对)') plt.axhline(y=mu,color='black') plt.axhline(y=mu+0.2*sd,color='blue',ls='-',lw=2) plt.axhline(y=mu-0.2*sd,color='blue',ls='-',lw=2) plt.axhline(y=mu+1.5*sd,color='green',ls='--',lw=2.5) plt.axhline(y=mu-1.5*sd,color='green',ls='--',lw=2.5) plt.axhline(y=mu+2.5*sd,color='red',ls='-.',lw=3) plt.axhline(y=mu-2.5*sd,color='red',ls='-.',lw=3) plt.show() # level = (float('-inf'),mu-2.5*sd,mu-1.5*sd,mu-0.2*sd,mu+0.2*sd,mu+1.5*sd,mu+2.5*sd,float('inf')) print('!!!!!! level: {0}={1}'.format(type(level), level)) # prcLevel=pd.cut(CoSpreadT,level,labels=False)-3 #print('prcLevel: {0}'.format(prcLevel.head())) pl = prcLevel.to_numpy() print('priceLevel:{0}'.format(pl)) signal = self.trade_signal(prcLevel) print('signal: {0}={1}'.format(signal.shape, signal)) # position position=[signal[0]] ns=len(signal) for i in range(1,ns): position.append(position[-1]) if signal[i]==1: position[i]=1 elif signal[i]==-2: position[i]=-1 elif signal[i]==-1 and position[i-1]==1: position[i]=0 elif signal[i]==2 and position[i-1]==-1: position[i]=0 elif signal[i]==3: position[i]=0 elif signal[i]==-3: position[i]=0 print('raw position: {0}; {1};'.format(len(position), position)) position=pd.Series(position,index=CoSpreadT.index) print('position: {0}'.format(position.tail())) # account = self.trade_simulate(alpha, beta, PAt, PBt, position) print('account: {0}'.format(account.tail())) # account.iloc[:,[0,1,4]].plot(style=['--','-',':']) plt.title('配对交易账户') plt.show() def trade_signal(self, prcLevel): n=len(prcLevel) signal=np.zeros(n) for i in range(1,n): if prcLevel[i-1]==1 and prcLevel[i]==2: signal[i]=-2 elif prcLevel[i-1]==1 and prcLevel[i]==0: signal[i]=2 elif prcLevel[i-1]==2 and prcLevel[i]==3: signal[i]=3 elif prcLevel[i-1]==-1 and prcLevel[i]==-2: signal[i]=1 elif prcLevel[i-1]==-1 and prcLevel[i]==0: signal[i]=-1 elif prcLevel[i-1]==-2 and prcLevel[i]==-3: signal[i]=-3 return(signal) def trade_simulate(self, alpha, beta, priceX,priceY,position): n=len(position) size=1000 shareY=size*position shareX=[(-beta)*shareY[0]*priceY[0]/priceX[0]] cash=[2000] for i in range(1,n): shareX.append(shareX[i-1]) cash.append(cash[i-1]) if position[i-1]==0 and position[i]==1: shareX[i]=(-beta)*shareY[i]*priceY[i]/priceX[i] cash[i]=cash[i-1]-(shareY[i]*priceY[i]+shareX[i]*priceX[i]) elif position[i-1]==0 and position[i]==-1: shareX[i]=(-beta)*shareY[i]*priceY[i]/priceX[i] cash[i]=cash[i-1]-(shareY[i]*priceY[i]+shareX[i]*priceX[i]) elif position[i-1]==1 and position[i]==0: 
shareX[i]=0 cash[i]=cash[i-1]+(shareY[i-1]*priceY[i]+shareX[i-1]*priceX[i]) elif position[i-1]==-1 and position[i]==0: shareX[i]=0 cash[i]=cash[i-1]+(shareY[i-1]*priceY[i]+shareX[i-1]*priceX[i]) cash = pd.Series(cash,index=position.index) shareY=pd.Series(shareY,index=position.index) shareX=pd.Series(shareX,index=position.index) asset=cash+shareY*priceY+shareX*priceX account=pd.DataFrame({'Position':position,'ShareY':shareY,'ShareX':shareX,'Cash':cash,'Asset':asset}) return(account) def test_pair_trading(self): sh=pd.read_csv('./data/sh50p.csv',index_col='Trddt') sh.index=pd.to_datetime(sh.index) # 定义区间 formPeriod='2014-01-01:2015-01-01' tradePeriod='2015-01-01:2015-06-30' # 计算价格 priceA=sh['601988'] priceB=sh['600000'] priceAf=priceA[formPeriod.split(':')[0]:formPeriod.split(':')[1]] priceBf=priceB[formPeriod.split(':')[0]:formPeriod.split(':')[1]] priceAt=priceA[tradePeriod.split(':')[0]:tradePeriod.split(':')[1]] priceBt=priceB[tradePeriod.split(':')[0]:tradePeriod.split(':')[1]] # SSD pt = PairTrading() SSD = pt.SSD(priceAf,priceBf) print('SSD: {0}'.format(SSD)) # 形成期 SSDspread=pt.SSDSpread(priceAf,priceBf) print(SSDspread.describe()) print(SSDspread.head()) # 形成期协整模型 coefficients=pt.cointegration(priceAf,priceBf) print('coeffients:{0};'.format(coefficients)) # CoSpreadF=pt.CointegrationSpread(priceA,priceB,formPeriod,formPeriod) print('CoSpreadF: {0}'.format(CoSpreadF.head())) # CoSpreadTr=pt.CointegrationSpread(priceA,priceB,formPeriod,tradePeriod) print('CoSpread: {0};'.format(CoSpreadTr.describe())) # bound=pt.calBound(priceA,priceB,'Cointegration',formPeriod,width=1.2) print('bound: {0};'.format(bound)) def do_pair_trading_org(self): sh=pd.read_csv('./data/sh50p.csv',index_col='Trddt') sh.index=pd.to_datetime(sh.index) #配对交易实测 #提取形成期数据 formStart='2014-01-01' formEnd='2015-01-01' PA=sh['601988'] PB=sh['600000'] PAf=PA[formStart:formEnd] PBf=PB[formStart:formEnd] #形成期协整关系检验 #一阶单整检验 log_PAf=np.log(PAf) adfA=ADF(log_PAf) print(adfA.summary().as_text()) adfAd=ADF(log_PAf.diff()[1:]) print(adfAd.summary().as_text()) # B股票平稳性检查 log_PBf=np.log(PBf) adfB=ADF(log_PBf) print(adfB.summary().as_text()) adfBd=ADF(log_PBf.diff()[1:]) print(adfBd.summary().as_text()) # #协整关系检验 model=sm.OLS(log_PBf,sm.add_constant(log_PAf)).fit() print('model:\r\n{0}'.format(model.summary())) alpha=model.params[0] print('alpha={0};'.format(alpha)) beta=model.params[1] print('beta={0}'.format(beta)) #残差单位根检验 spreadf = log_PBf-beta*log_PAf-alpha adfSpread = ADF(spreadf) print(adfSpread.summary().as_text()) # mu = np.mean(spreadf) sd = np.std(spreadf) # #设定交易期 tradeStart='2015-01-01' tradeEnd='2015-06-30' PAt=PA[tradeStart:tradeEnd] PBt=PB[tradeStart:tradeEnd] CoSpreadT=np.log(PBt)-beta*np.log(PAt)-alpha print('CoSpreadT: {0};'.format(CoSpreadT.describe())) plt.rcParams['font.family'] = 'sans-serif' plt.rcParams['font.sans-serif'] = ['SimHei'] plt.rcParams['axes.unicode_minus'] = False CoSpreadT.plot() plt.title('交易期价差序列(协整配对)') plt.axhline(y=mu,color='black') plt.axhline(y=mu+0.2*sd,color='blue',ls='-',lw=2) plt.axhline(y=mu-0.2*sd,color='blue',ls='-',lw=2) plt.axhline(y=mu+1.5*sd,color='green',ls='--',lw=2.5) plt.axhline(y=mu-1.5*sd,color='green',ls='--',lw=2.5) plt.axhline(y=mu+2.5*sd,color='red',ls='-.',lw=3) plt.axhline(y=mu-2.5*sd,color='red',ls='-.',lw=3) plt.show() # level = (float('-inf'),mu-2.5*sd,mu-1.5*sd,mu-0.2*sd,mu+0.2*sd,mu+1.5*sd,mu+2.5*sd,float('inf')) print('level: {0}={1}'.format(type(level), level)) # prcLevel=pd.cut(CoSpreadT,level,labels=False)-3 print('prcLevel: {0}'.format(prcLevel.head())) 
signal = self.TradeSig(prcLevel) print('signal: {0}={1}'.format(type(signal), signal)) # position position=[signal[0]] ns=len(signal) for i in range(1,ns): position.append(position[-1]) if signal[i]==1: position[i]=1 elif signal[i]==-2: position[i]=-1 elif signal[i]==-1 and position[i-1]==1: position[i]=0 elif signal[i]==2 and position[i-1]==-1: position[i]=0 elif signal[i]==3: position[i]=0 elif signal[i]==-3: position[i]=0 position=pd.Series(position,index=CoSpreadT.index) print('position: {0}'.format(position.tail())) # account = self.TradeSim(alpha, beta, PAt, PBt, position) print('account: {0}'.format(account.tail())) # account.iloc[:,[0,1,4]].plot(style=['--','-',':']) plt.title('配对交易账户') plt.show() def TradeSig(self, prcLevel): n=len(prcLevel) signal=np.zeros(n) for i in range(1,n): if prcLevel[i-1]==1 and prcLevel[i]==2: signal[i]=-2 elif prcLevel[i-1]==1 and prcLevel[i]==0: signal[i]=2 elif prcLevel[i-1]==2 and prcLevel[i]==3: signal[i]=3 elif prcLevel[i-1]==-1 and prcLevel[i]==-2: signal[i]=1 elif prcLevel[i-1]==-1 and prcLevel[i]==0: signal[i]=-1 elif prcLevel[i-1]==-2 and prcLevel[i]==-3: signal[i]=-3 return(signal) def TradeSim(self, alpha, beta, priceX,priceY,position): n=len(position) size=1000 shareY=size*position shareX=[(-beta)*shareY[0]*priceY[0]/priceX[0]] cash=[2000] for i in range(1,n): shareX.append(shareX[i-1]) cash.append(cash[i-1]) if position[i-1]==0 and position[i]==1: shareX[i]=(-beta)*shareY[i]*priceY[i]/priceX[i] cash[i]=cash[i-1]-(shareY[i]*priceY[i]+shareX[i]*priceX[i]) elif position[i-1]==0 and position[i]==-1: shareX[i]=(-beta)*shareY[i]*priceY[i]/priceX[i] cash[i]=cash[i-1]-(shareY[i]*priceY[i]+shareX[i]*priceX[i]) elif position[i-1]==1 and position[i]==0: shareX[i]=0 cash[i]=cash[i-1]+(shareY[i-1]*priceY[i]+shareX[i-1]*priceX[i]) elif position[i-1]==-1 and position[i]==0: shareX[i]=0 cash[i]=cash[i-1]+(shareY[i-1]*priceY[i]+shareX[i-1]*priceX[i]) cash = pd.Series(cash,index=position.index) shareY=
pd.Series(shareY,index=position.index)
pandas.Series
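In this row the completion converts the raw shareY position array into a Series aligned on the trading-period dates (pd.Series(shareY, index=position.index)), which is what makes the later cash + shareY*priceY + shareX*priceX arithmetic index-aligned. The sketch below illustrates that alignment on invented dates, prices, and positions; it is not the row's pair-trading backtest, only the re-indexing step.

# Minimal sketch of aligning share/position arrays to a date index,
# mirroring pd.Series(shareY, index=position.index) in the completion above.
# All values are hypothetical; the original derives them from sh50p.csv.
import pandas as pd

dates = pd.date_range("2015-01-05", periods=5, freq="B")
position = pd.Series([0, 1, 1, 0, -1], index=dates)
share_y = 1000 * position                               # aligned to dates
price_y = pd.Series([10.0, 10.2, 10.1, 10.4, 10.3], index=dates)
holdings_value = share_y * price_y                      # index-aligned product
print(holdings_value)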
import __main__ as main import sys import geopandas as gpd import pandas as pd import numpy as np if not hasattr(main, '__file__'): argv = ['code', 'data/processed/geo/tiles.shp', 'data/processed/census/oa_tile_reference.csv', 'data/raw/census_lookups/engwal_OA_lsoa.csv', 'data/raw/census_lookups/OA_to_DZ.csv', 'data/raw/census/NI_SA_Centroids.shp', 'data/raw/ethnicity_data/bulk.csv', 'data/raw/ethnicity_data/KS201SC.csv', 'data/raw/ethnicity_data/DT201NI (s).csv', 'data/raw/census/Eng_Wal_OA_Mid_Pop.csv', 'data/raw/census/simd2020_withinds.csv', 'data/raw/census/NI_Mid_Pop.csv', 'data/processed/census/quadkey_mean_perc_white.csv'] else: argv = sys.argv #%% tiles = gpd.read_file(argv[1]) tiles.crs = 4326 #%% oa_tile_lookup = pd.read_csv(argv[2]) #%% oa_lus = {'england': pd.read_csv(argv[3]), 'scotland': pd.read_csv(argv[4]), 'ni': gpd.read_file(argv[5])} #%% oa_lus['ni'] = oa_lus['ni'].loc[:, ['SA2011', 'SOA2011']] #%% eth_data = {'england': pd.read_csv(argv[6]), 'scotland': pd.read_csv(argv[7]), 'ni': pd.read_csv(argv[8])} #%% scotland_imd = pd.read_csv(argv[10]) #%% #check that the admin code is in the lookups ''' england: lsoa level Scotland: data zone level NI: SOA level ''' pop_data = {'england': pd.read_csv(argv[9]), 'scotland': pd.read_csv(argv[10]), 'ni': pd.read_csv(argv[11])} # Handle scotland population peculiarities scotland_n_oas = oa_lus['scotland'].groupby('DataZone2011Code').count().reset_index()[['DataZone2011Code', 'OutputArea2011Code']].rename(columns = {'DataZone2011Code':'DZ', 'OutputArea2011Code':'n_oas'}) scotland_pop = pd.merge(scotland_imd, scotland_n_oas)[['DZ', 'Total_population', 'n_oas']] scotland_pop = pd.merge(oa_lus['scotland'][['OutputArea2011Code', 'DataZone2011Code']].rename(columns={'OutputArea2011Code':'OA', 'DataZone2011Code':'DZ'}), scotland_pop) scotland_pop['Total_population'] = scotland_pop['Total_population'] / scotland_pop['n_oas'] scotland_pop = scotland_pop.drop(columns = ['n_oas', 'DZ']).rename(columns = {'Total_population':'pop'}) ''' England ''' eth_data['england'] = pd.melt(eth_data['england'], id_vars = ['geography code'], value_vars = eth_data['england'].columns[3:]) eth_data['england']['variable'] = [x.split('.')[0] for x in eth_data['england']['variable']] eth_data['england']['white'] = [x == 'White' for x in eth_data['england']['variable']] eth_data['england']['value'] = [str(x).replace(',', '') for x in eth_data['england']['value']] eth_data['england']['value'] = pd.to_numeric(eth_data['england']['value'], errors = 'coerce') eth_data['england'] = eth_data['england'][['geography code', 'white', 'value']].groupby(['geography code', 'white']).sum().reset_index() eth_data['england'] = eth_data['england'].pivot(index = 'geography code', columns = 'white').reset_index() eth_data['england'].columns = eth_data['england'].columns.droplevel() eth_data['england']['perc_white'] = eth_data['england'][True] / (eth_data['england'][True] + eth_data['england'][False]) '''Scotland''' eth_data['scotland'] =
pd.melt(eth_data['scotland'], id_vars = ['Area'], value_vars = eth_data['scotland'].columns[2:])
pandas.melt
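The completion reshapes the Scotland ethnicity table from wide to long with pd.melt, keeping Area as the identifier. Below is a minimal sketch of that wide-to-long step on a tiny invented frame; the column names and counts are placeholders for the KS201SC.csv contents.

# Minimal sketch of the wide-to-long reshape done by the completion above.
# Data and column names are hypothetical.
import pandas as pd

wide = pd.DataFrame({
    "Area": ["S00000001", "S00000002"],
    "White": [120, 95],
    "Asian": [10, 22],
})
long = pd.melt(wide, id_vars=["Area"], value_vars=wide.columns[1:])
print(long)  # one row per (Area, variable) pair with a 'value' column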
# -*- coding: utf-8 -*- # ***************************************************************************** # Copyright (c) 2020, Intel Corporation All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ***************************************************************************** import numpy as np import pandas as pd import platform import unittest from itertools import combinations, combinations_with_replacement, product from numba.core.config import IS_32BITS from numba.core.errors import TypingError from sdc.tests.test_base import TestCase from sdc.tests.test_utils import (skip_numba_jit, _make_func_from_text, gen_frand_array) def _make_func_use_binop1(operator): func_text = "def test_impl(A, B):\n" func_text += " return A {} B\n".format(operator) return _make_func_from_text(func_text) def _make_func_use_binop2(operator): func_text = "def test_impl(A, B):\n" func_text += " A {} B\n".format(operator) func_text += " return A\n" return _make_func_from_text(func_text) def _make_func_use_method_arg1(method): func_text = "def test_impl(A, B):\n" func_text += " return A.{}(B)\n".format(method) return _make_func_from_text(func_text) class TestSeries_ops(TestCase): def test_series_operators_int(self): """Verifies using all various Series arithmetic binary operators on two integer Series with default indexes""" n = 11 np.random.seed(0) data_to_test = [np.arange(-5, -5 + n, dtype=np.int32), np.ones(n + 3, dtype=np.int32), np.random.randint(-5, 5, n + 7)] arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**') for operator in arithmetic_binops: test_impl = _make_func_use_binop1(operator) hpat_func = self.jit(test_impl) for data_left, data_right in combinations_with_replacement(data_to_test, 2): # integers to negative powers are not allowed if (operator == '**' and np.any(data_right < 0)): data_right = np.abs(data_right) with self.subTest(left=data_left, right=data_right, operator=operator): S1 = pd.Series(data_left) S2 = pd.Series(data_right) # check_dtype=False because SDC implementation always returns float64 Series pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2), check_dtype=False) def test_series_operators_int_scalar(self): """Verifies using all various Series arithmetic binary operators on an integer Series with default index and a scalar value""" n = 11 
np.random.seed(0) data_to_test = [np.arange(-5, -5 + n, dtype=np.int32), np.ones(n + 3, dtype=np.int32), np.random.randint(-5, 5, n + 7)] scalar_values = [1, -1, 0, 3, 7, -5] arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**') for operator in arithmetic_binops: test_impl = _make_func_use_binop1(operator) hpat_func = self.jit(test_impl) for data, scalar, swap_operands in product(data_to_test, scalar_values, (False, True)): S = pd.Series(data) left, right = (S, scalar) if swap_operands else (scalar, S) # integers to negative powers are not allowed if (operator == '**' and np.any(right < 0)): right = abs(right) with self.subTest(left=left, right=right, operator=operator): # check_dtype=False because SDC implementation always returns float64 Series pd.testing.assert_series_equal(hpat_func(left, right), test_impl(left, right), check_dtype=False) def test_series_operators_float(self): """Verifies using all various Series arithmetic binary operators on two float Series with default indexes""" n = 11 np.random.seed(0) data_to_test = [np.arange(-5, -5 + n, dtype=np.float32), np.ones(n + 3, dtype=np.float32), np.random.ranf(n + 7)] arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**') for operator in arithmetic_binops: test_impl = _make_func_use_binop1(operator) hpat_func = self.jit(test_impl) for data_left, data_right in combinations_with_replacement(data_to_test, 2): with self.subTest(left=data_left, right=data_right, operator=operator): S1 = pd.Series(data_left) S2 = pd.Series(data_right) # check_dtype=False because SDC implementation always returns float64 Series pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2), check_dtype=False) def test_series_operators_float_scalar(self): """Verifies using all various Series arithmetic binary operators on a float Series with default index and a scalar value""" n = 11 np.random.seed(0) data_to_test = [np.arange(-5, -5 + n, dtype=np.float32), np.ones(n + 3, dtype=np.float32), np.random.ranf(n + 7)] scalar_values = [1., -1., 0., -0., 7., -5.] 
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**') for operator in arithmetic_binops: test_impl = _make_func_use_binop1(operator) hpat_func = self.jit(test_impl) for data, scalar, swap_operands in product(data_to_test, scalar_values, (False, True)): S = pd.Series(data) left, right = (S, scalar) if swap_operands else (scalar, S) with self.subTest(left=left, right=right, operator=operator): pd.testing.assert_series_equal(hpat_func(S, scalar), test_impl(S, scalar), check_dtype=False) @skip_numba_jit('Not implemented in new-pipeline yet') def test_series_operators_inplace(self): arithmetic_binops = ('+=', '-=', '*=', '/=', '//=', '%=', '**=') for operator in arithmetic_binops: test_impl = _make_func_use_binop2(operator) hpat_func = self.jit(test_impl) # TODO: extend to test arithmetic operations between numeric Series of different dtypes n = 11 A1 = pd.Series(np.arange(1, n, dtype=np.float64), name='A') A2 = A1.copy(deep=True) B = pd.Series(np.ones(n - 1), name='B') hpat_func(A1, B) test_impl(A2, B) pd.testing.assert_series_equal(A1, A2) @skip_numba_jit('Not implemented in new-pipeline yet') def test_series_operators_inplace_scalar(self): arithmetic_binops = ('+=', '-=', '*=', '/=', '//=', '%=', '**=') for operator in arithmetic_binops: test_impl = _make_func_use_binop2(operator) hpat_func = self.jit(test_impl) # TODO: extend to test arithmetic operations between numeric Series of different dtypes n = 11 S1 = pd.Series(np.arange(1, n, dtype=np.float64), name='A') S2 = S1.copy(deep=True) hpat_func(S1, 1) test_impl(S2, 1) pd.testing.assert_series_equal(S1, S2) @skip_numba_jit('operator.neg for SeriesType is not implemented in yet') def test_series_operator_neg(self): def test_impl(A): return -A hpat_func = self.jit(test_impl) n = 11 A = pd.Series(np.arange(n)) pd.testing.assert_series_equal(hpat_func(A), test_impl(A)) def test_series_operators_comp_numeric(self): """Verifies using all various Series comparison binary operators on two integer Series with various indexes""" n = 11 data_left = [1, 2, -1, 3, 4, 2, -3, 5, 6, 6, 0] data_right = [3, 2, -2, 1, 4, 1, -5, 6, 6, 3, -1] dtype_to_index = {'None': None, 'int': np.arange(n, dtype='int'), 'float': np.arange(n, dtype='float'), 'string': ['aa', 'aa', '', '', 'b', 'b', 'cccc', None, 'dd', 'ddd', None]} comparison_binops = ('<', '>', '<=', '>=', '!=', '==') for operator in comparison_binops: test_impl = _make_func_use_binop1(operator) hpat_func = self.jit(test_impl) for dtype, index_data in dtype_to_index.items(): with self.subTest(operator=operator, index_dtype=dtype, index=index_data): A = pd.Series(data_left, index=index_data) B = pd.Series(data_right, index=index_data) pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B)) def test_series_operators_comp_numeric_scalar(self): """Verifies using all various Series comparison binary operators on an integer Series and scalar values""" S = pd.Series([1, 2, -1, 3, 4, 2, -3, 5, 6, 6, 0]) scalar_values = [2, 2.0, -3, np.inf, -np.inf, np.PZERO, np.NZERO] comparison_binops = ('<', '>', '<=', '>=', '!=', '==') for operator in comparison_binops: test_impl = _make_func_use_binop1(operator) hpat_func = self.jit(test_impl) for scalar in scalar_values: with self.subTest(left=S, right=scalar, operator=operator): pd.testing.assert_series_equal(hpat_func(S, scalar), test_impl(S, scalar)) def test_series_operators_comp_str_scalar(self): """Verifies using all various Series comparison binary operators on an string Series and scalar values""" S = pd.Series(['aa', 'aa', '', '', 'b', 'b', 'cccc', None, 
'dd', 'ddd', None]) scalar_values = ['a', 'aa', 'ab', 'ba', ''] comparison_binops = ('<', '>', '<=', '>=', '!=', '==') for operator in comparison_binops: test_impl = _make_func_use_binop1(operator) hpat_func = self.jit(test_impl) for scalar in scalar_values: with self.subTest(left=S, right=scalar, operator=operator): pd.testing.assert_series_equal(hpat_func(S, scalar), test_impl(S, scalar)) @skip_numba_jit def test_series_operators_inplace_array(self): def test_impl(A, B): A += B return A hpat_func = self.jit(test_impl) n = 11 A = np.arange(n)**2.0 # TODO: use 2 for test int casting B = pd.Series(np.ones(n)) np.testing.assert_array_equal(hpat_func(A.copy(), B), test_impl(A, B)) @skip_numba_jit('Functionally test passes, but in old-style it checked fusion of parfors.\n' 'TODO: implement the same checks in new-pipeline') def test_series_fusion1(self): def test_impl(A, B): return A + B + 1 hpat_func = self.jit(test_impl) n = 11 A = pd.Series(np.arange(n)) B = pd.Series(np.arange(n)**2) pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False) # self.assertEqual(count_parfor_REPs(), 1) @skip_numba_jit('Functionally test passes, but in old-style it checked fusion of parfors.\n' 'TODO: implement the same checks in new-pipeline') def test_series_fusion2(self): def test_impl(A, B): S = B + 2 if A.iat[0] == 0: S = A + 1 return S + B hpat_func = self.jit(test_impl) n = 11 A = pd.Series(np.arange(n)) B = pd.Series(np.arange(n)**2) pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False) # self.assertEqual(count_parfor_REPs(), 3) def test_series_operator_add_numeric_scalar(self): """Verifies Series.operator.add implementation for numeric series and scalar second operand""" def test_impl(A, B): return A + B hpat_func = self.jit(test_impl) n = 7 dtype_to_index = {'None': None, 'int': np.arange(n, dtype='int'), 'float': np.arange(n, dtype='float'), 'string': ['aa', 'aa', 'b', 'b', 'cccc', 'dd', 'ddd']} int_scalar = 24 for dtype, index_data in dtype_to_index.items(): with self.subTest(index_dtype=dtype, index=index_data): if platform.system() == 'Windows' and not IS_32BITS: A = pd.Series(np.arange(n, dtype=np.int64), index=index_data) else: A = pd.Series(np.arange(n), index=index_data) result = hpat_func(A, int_scalar) result_ref = test_impl(A, int_scalar) pd.testing.assert_series_equal(result, result_ref, check_dtype=False, check_names=False) float_scalar = 24.0 for dtype, index_data in dtype_to_index.items(): with self.subTest(index_dtype=dtype, index=index_data): if platform.system() == 'Windows' and not IS_32BITS: A = pd.Series(np.arange(n, dtype=np.int64), index=index_data) else: A = pd.Series(np.arange(n), index=index_data) ref_result = test_impl(A, float_scalar) result = hpat_func(A, float_scalar) pd.testing.assert_series_equal(result, ref_result, check_dtype=False, check_names=False) def test_series_operator_add_numeric_same_index_default(self): """Verifies implementation of Series.operator.add between two numeric Series with default indexes and same size""" def test_impl(A, B): return A + B hpat_func = self.jit(test_impl) n = 7 dtypes_to_test = (np.int32, np.int64, np.float32, np.float64) for dtype_left, dtype_right in combinations(dtypes_to_test, 2): with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right): A = pd.Series(np.arange(n), dtype=dtype_left) B = pd.Series(np.arange(n)**2, dtype=dtype_right) pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False) @skip_numba_jit def 
test_series_operator_add_numeric_same_index_numeric(self): """Verifies implementation of Series.operator.add between two numeric Series with the same numeric indexes of different dtypes""" def test_impl(A, B): return A + B hpat_func = self.jit(test_impl) n = 7 dtypes_to_test = (np.int32, np.int64, np.float32, np.float64) for dtype_left, dtype_right in combinations(dtypes_to_test, 2): with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right): A = pd.Series(np.arange(n), index=np.arange(n, dtype=dtype_left)) B = pd.Series(np.arange(n)**2, index=np.arange(n, dtype=dtype_right)) pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False) def test_series_operator_add_numeric_same_index_numeric_fixme(self): """ Same as test_series_operator_add_same_index_numeric but with w/a for the problem. Can be deleted when the latter is fixed """ def test_impl(A, B): return A + B hpat_func = self.jit(test_impl) n = 7 index_dtypes_to_test = (np.int32, np.int64, np.float32, np.float64) for dtype_left, dtype_right in combinations(index_dtypes_to_test, 2): # FIXME: skip the sub-test if one of the dtypes is float and the other is integer if not (np.issubdtype(dtype_left, np.integer) and np.issubdtype(dtype_right, np.integer) or np.issubdtype(dtype_left, np.float) and np.issubdtype(dtype_right, np.float)): continue with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right): A = pd.Series(np.arange(n), index=np.arange(n, dtype=dtype_left)) B = pd.Series(np.arange(n)**2, index=np.arange(n, dtype=dtype_right)) pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False) def test_series_operator_add_numeric_same_index_str(self): """Verifies implementation of Series.operator.add between two numeric Series with the same string indexes""" def test_impl(A, B): return A + B hpat_func = self.jit(test_impl) n = 7 A = pd.Series(np.arange(n), index=['a', 'c', 'e', 'c', 'b', 'a', 'o']) B = pd.Series(np.arange(n)**2, index=['a', 'c', 'e', 'c', 'b', 'a', 'o']) pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False) def test_series_operator_add_numeric_align_index_int(self): """Verifies implementation of Series.operator.add between two numeric Series with non-equal integer indexes""" def test_impl(A, B): return A + B hpat_func = self.jit(test_impl) n = 11 index_A = [0, 1, 1, 2, 3, 3, 3, 4, 6, 8, 9] index_B = [0, 1, 1, 3, 4, 4, 5, 5, 6, 6, 9] np.random.shuffle(index_A) np.random.shuffle(index_B) A = pd.Series(np.arange(n), index=index_A) B = pd.Series(np.arange(n)**2, index=index_B) pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False) def test_series_operator_add_numeric_align_index_str(self): """Verifies implementation of Series.operator.add between two numeric Series with non-equal string indexes""" def test_impl(A, B): return A + B hpat_func = self.jit(test_impl) n = 11 index_A = ['', '', 'aa', 'aa', 'ae', 'ae', 'b', 'ccc', 'cccc', 'oo', 's'] index_B = ['', '', 'aa', 'aa', 'cc', 'cccc', 'e', 'f', 'h', 'oo', 's'] np.random.shuffle(index_A) np.random.shuffle(index_B) A = pd.Series(np.arange(n), index=index_A) B = pd.Series(np.arange(n)**2, index=index_B) pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False) @skip_numba_jit('TODO: fix Series.sort_values to handle both None and '' in string series') def test_series_operator_add_numeric_align_index_str_fixme(self): """Same as 
test_series_operator_add_align_index_str but with None values in string indexes""" def test_impl(A, B): return A + B hpat_func = self.jit(test_impl) n = 11 index_A = ['', '', 'aa', 'aa', 'ae', 'b', 'ccc', 'cccc', 'oo', None, None] index_B = ['', '', 'aa', 'aa', 'cccc', 'f', 'h', 'oo', 's', None, None] np.random.shuffle(index_A) np.random.shuffle(index_B) A = pd.Series(np.arange(n), index=index_A) B = pd.Series(np.arange(n)**2, index=index_B) pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False) def test_series_operator_add_numeric_align_index_other_dtype(self): """Verifies implementation of Series.operator.add between two numeric Series with non-equal integer indexes of different dtypes""" def test_impl(A, B): return A + B hpat_func = self.jit(test_impl) n = 7 A = pd.Series(np.arange(3*n), index=np.arange(-n, 2*n, 1, dtype=np.int64)) B = pd.Series(np.arange(3*n)**2, index=np.arange(0, 3*n, 1, dtype=np.float64)) pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False) def test_series_operator_add_numeric_diff_series_sizes(self): """Verifies implementation of Series.operator.add between two numeric Series with different sizes""" def test_impl(A, B): return A + B hpat_func = self.jit(test_impl) size_A, size_B = 7, 25 A = pd.Series(np.arange(size_A)) B = pd.Series(np.arange(size_B)**2) result = hpat_func(A, B) result_ref = test_impl(A, B) pd.testing.assert_series_equal(result, result_ref, check_dtype=False, check_names=False) def test_series_operator_add_align_index_int_capacity(self): """Verifies implementation of Series.operator.add and alignment of numeric indexes of large size""" def test_impl(A, B): return A + B hpat_func = self.jit(test_impl) n = 20000 np.random.seed(0) index1 = np.random.randint(-30, 30, n) index2 = np.random.randint(-30, 30, n) A = pd.Series(np.random.ranf(n), index=index1) B = pd.Series(np.random.ranf(n), index=index2) pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False) def test_series_operator_add_align_index_str_capacity(self): """Verifies implementation of Series.operator.add and alignment of string indexes of large size""" def test_impl(A, B): return A + B hpat_func = self.jit(test_impl) n = 2000 np.random.seed(0) valid_ids = ['', 'aaa', 'a', 'b', 'ccc', 'ef', 'ff', 'fff', 'fa', 'dddd'] index1 = [valid_ids[i] for i in np.random.randint(0, len(valid_ids), n)] index2 = [valid_ids[i] for i in np.random.randint(0, len(valid_ids), n)] A = pd.Series(np.random.ranf(n), index=index1) B = pd.Series(np.random.ranf(n), index=index2) pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False) def test_series_operator_add_str_same_index_default(self): """Verifies implementation of Series.operator.add between two string Series with default indexes and same size""" def test_impl(A, B): return A + B hpat_func = self.jit(test_impl) A = pd.Series(['a', '', 'ae', 'b', 'cccc', 'oo', None]) B = pd.Series(['b', 'aa', '', 'b', 'o', None, 'oo']) pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False) def test_series_operator_add_str_align_index_int(self): """Verifies implementation of Series.operator.add between two string Series with non-equal integer indexes""" def test_impl(A, B): return A + B hpat_func = self.jit(test_impl) np.random.seed(0) index_A = [0, 1, 1, 2, 3, 3, 3, 4, 6, 8, 9] index_B = [0, 1, 1, 3, 4, 4, 5, 5, 6, 6, 9] 
np.random.shuffle(index_A) np.random.shuffle(index_B) data = ['', '', 'aa', 'aa', None, 'ae', 'b', 'ccc', 'cccc', None, 'oo'] A = pd.Series(data, index=index_A) B = pd.Series(data, index=index_B) pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False) def test_series_operator_add_result_name1(self): """Verifies name of the Series resulting from appying Series.operator.add to different arguments""" def test_impl(A, B): return A + B hpat_func = self.jit(test_impl) n = 7 series_names = ['A', '', None, 'B'] for left_name, right_name in combinations(series_names, 2): S1 = pd.Series(np.arange(n), name=left_name) S2 = pd.Series(np.arange(n, 0, -1), name=right_name) with self.subTest(left_series_name=left_name, right_series_name=right_name): # check_dtype=False because SDC implementation always returns float64 Series pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2), check_dtype=False) # also verify case when second operator is scalar scalar = 3.0 with self.subTest(scalar=scalar): S1 = pd.Series(np.arange(n), name='A') pd.testing.assert_series_equal(hpat_func(S1, scalar), test_impl(S1, scalar), check_dtype=False) @unittest.expectedFailure def test_series_operator_add_result_name2(self): """Verifies implementation of Series.operator.add differs from Pandas in returning unnamed Series when both operands are named Series with the same name""" def test_impl(A, B): return A + B hpat_func = self.jit(test_impl) n = 7 S1 = pd.Series(np.arange(n), name='A') S2 = pd.Series(np.arange(n, 0, -1), name='A') result = hpat_func(S1, S2) result_ref = test_impl(S1, S2) # check_dtype=False because SDC implementation always returns float64 Series pd.testing.assert_series_equal(result, result_ref, check_dtype=False) @unittest.expectedFailure def test_series_operator_add_series_dtype_promotion(self): """Verifies implementation of Series.operator.add differs from Pandas in dtype of resulting Series that is fixed to float64""" def test_impl(A, B): return A + B hpat_func = self.jit(test_impl) n = 7 dtypes_to_test = (np.int32, np.int64, np.float32, np.float64) for dtype_left, dtype_right in combinations(dtypes_to_test, 2): with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right): A = pd.Series(np.array(np.arange(n), dtype=dtype_left)) B = pd.Series(np.array(np.arange(n)**2, dtype=dtype_right)) pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B)) def test_series_operator_add_str_scalar(self): def test_impl(A, B): return A + B hpat_func = self.jit(test_impl) series_data = ['a', '', 'ae', 'b', 'cccc', 'oo', None] S = pd.Series(series_data) values_to_test = [' ', 'wq', '', '23'] for scalar in values_to_test: with self.subTest(left=series_data, right=scalar): result_ref = test_impl(S, scalar) result = hpat_func(S, scalar)
pd.testing.assert_series_equal(result, result_ref)
pandas.testing.assert_series_equal
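The completion compares the jitted result against the reference with pd.testing.assert_series_equal. The sketch below shows that comparison on two invented Series, including the check_dtype=False relaxation that many of the surrounding assertions use because the SDC implementation returns float64.

# Minimal sketch of the comparison used throughout the test row above.
# The two Series are hypothetical stand-ins for the jitted and reference results.
import numpy as np
import pandas as pd

result = pd.Series(np.arange(5, dtype=np.float64))   # e.g. jitted path gives float64
result_ref = pd.Series(np.arange(5))                 # reference path gives int64
pd.testing.assert_series_equal(result, result_ref, check_dtype=False)
print("series match (ignoring dtype)")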
import pandas as pd cate_headers = [ "Action", "Adventure", "Animation", "Children's", "Comedy", "Crime", "Documentary", "Drama", "Fantasy", "Film-Noir", "Horror", "Musical", "Mystery", "Romance", "Sci-Fi", "Thriller", "War", "Western" ] cont_headers = [ 'UserNum','UserScore','MovieNum','MovieScore' ] useless_headers =[ "UserID", "MovieID", "timestamp" ] def read_data(path, csv_header=0): data = pd.read_csv(path, header= csv_header) cont = data[['UserNum','UserScore','MovieNum','MovieScore']] cate = data[cate_headers] label = data['label'] useless = data[useless_headers] # print(cont.head()) # print(cate.head()) return cont, cate, label, useless def divide_data(data): cont = data[cont_headers] cate = data[cate_headers] label = data['label'] return cont, cate, label def cate_cal(cate, label): cate1_label1 = {} cate0_label1 = {} cate1_label0 = {} cate0_label0 = {} sum_score_list = {} for item in cate_headers: cate1_label1[item] = 0 cate0_label1[item] = 0 cate1_label0[item] = 0 cate0_label0[item] = 0 for index, row in cate.iterrows(): for item in cate_headers: if label[index]==1: if row[item]==1: cate1_label1[item] = cate1_label1[item] + 1 else: cate0_label1[item] = cate0_label1[item] + 1 else: if row[item]==1: cate1_label0[item] = cate1_label0[item] + 1 else: cate0_label0[item] = cate0_label0[item] + 1 # print(cate1_label1) # print(cate0_label1) # print("总条数",cate0_label0['Action']+cate0_label1['Action']+cate1_label0['Action']+cate1_label1['Action']) # print("Action中label为1",cate1_label0['Action']+cate1_label1['Action']) for item in cate_headers: if cate0_label0[item]+cate0_label1[item] == 0: c0 = -1 else: c0 = float(cate0_label1[item])/(cate0_label0[item]+cate0_label1[item]) if cate1_label0[item]+cate1_label1[item] == 0: c1 = -1 else: c1 = float(cate1_label1[item])/(cate1_label0[item]+cate1_label1[item]) sum_score = (1-c0)*(1-c0)*cate0_label1[item] + (0-c0)*(0-c0)*cate0_label0[item] + (1-c1)*(1-c1)*cate1_label1[item] + (0-c1)*(0-c1)*cate1_label1[item] sum_score_list[item] = sum_score # print(sum_score_list) return sum_score_list def cont_cal(cont, label, sum_score_list): item_label0_map = {} item_label1_map = {} for item in cont_headers: item_value_label0_map = {} item_value_label1_map = {} item_label0_map[item] = item_value_label0_map item_label1_map[item] = item_value_label1_map for index, row in cont.iterrows(): for item in cont_headers: if row[item] not in item_label0_map[item]: item_label0_map[item][row[item]] = 0 item_label1_map[item][row[item]] = 0 if label[index] == 0: item_label0_map[item][row[item]] = item_label0_map[item][row[item]] + 1 else: item_label1_map[item][row[item]] = item_label1_map[item][row[item]] + 1 # print(item_label0_map['UserNum'][232]+item_label1_map['UserNum'][232]) # print(item_label1_map['UserNum'][232]) least_ans_list={} for item in cont_headers: temp_least_score = -1 temp_value = 0 for keys in item_label0_map[item]: numleft_label1=0 numright_label1=0 numleft_label0=0 numright_label0=0 for keys2 in item_label0_map[item]: if keys<keys2: numleft_label1 = numleft_label1+item_label1_map[item][keys2] numleft_label0 = numleft_label0+item_label0_map[item][keys2] else: numright_label1 = numright_label1+item_label1_map[item][keys2] numright_label0 = numright_label0+item_label0_map[item][keys2] if numleft_label0+numleft_label1 == 0: cleft = -1 else: cleft = float(numleft_label1)/(numleft_label0+numleft_label1) if numright_label0+numright_label1 == 0: cright = -1 else: cright = float(numright_label1)/(numright_label0+numright_label1) sum_score = 
(1-cleft)*(1-cleft)*numleft_label1 + (0-cleft)*(0-cleft)*numleft_label0 +(1-cright)*(1-cright)*numright_label1 + (0-cright)*(0-cright)*numright_label0 if temp_least_score==-1 or sum_score<temp_least_score: temp_least_score = sum_score temp_value = keys least_ans_list[item]=temp_value sum_score_list[item]=temp_least_score # print(least_ans_list) # print(sum_score_list) return least_ans_list, sum_score_list def find_divide_point(sum_score_list, least_ans_list): count = 0 minitem = '' minscore = -1 for item in sum_score_list: if sum_score_list[item]<minscore or minscore==-1: minitem = item minscore = sum_score_list[item] # print(minitem) if minitem in cont_headers: divide_score = least_ans_list[minitem] else: divide_score = 1 # print(divide_score) sum_score_list.pop(minitem) # print(sum_score_list) return minitem, divide_score, sum_score_list cont, cate, label, useless = read_data('./process_labeled.csv') sum_score_list = cate_cal(cate, label) least_ans_list, sum_score_list = cont_cal(cont, label, sum_score_list) num = 0 df = pd.concat([cont,cate,label], axis=1) while num < 6: min_item, divide_score, sum_score_list = find_divide_point(sum_score_list, least_ans_list) print(num, "mid-区分特征:", min_item, "区分点:", divide_score) df_left = df[df[min_item]<divide_score] df_right = df[df[min_item]>=divide_score] cont, cate, label = divide_data(df_left) sum_score_list_left = cate_cal(cate, label) least_ans_list_left, sum_score_list_left = cont_cal(cont, label, sum_score_list_left) min_item_left, divide_score_left, sum_score_list_left = find_divide_point(sum_score_list_left, least_ans_list_left) if min_item_left == min_item: min_item_left, divide_score_left, sum_score_list_left = find_divide_point(sum_score_list_left, least_ans_list_left) print(num, "left-区分特征:", min_item_left, "区分点:", divide_score_left) df_left_left = df_left[df_left[min_item_left]<divide_score_left] df_left_right = df_left[df_left[min_item_left]>=divide_score_left] cont, cate, label = divide_data(df_right) sum_score_list_right = cate_cal(cate, label) least_ans_list_right, sum_score_list_right = cont_cal(cont, label, sum_score_list_right) min_item_right, divide_score_right, sum_score_list_right = find_divide_point(sum_score_list_right, least_ans_list_right) if min_item_right == min_item: min_item_right, divide_score_right, sum_score_list_right = find_divide_point(sum_score_list_right, least_ans_list_right) print(num, "right-区分特征:", min_item_right, "区分点:", divide_score_right) df_right_left = df_right[df_right[min_item_right]<divide_score_right] df_right_right = df_right[df_right[min_item_right]>=divide_score_right] col_name0 = 'gbdt_' + str(num) +'0' col_name1 = 'gbdt_' + str(num) +'1' df_left_left.insert(len(df_left_left.columns), col_name0, 0) df_left_left.insert(len(df_left_left.columns), col_name1, 0) df_left_right.insert(len(df_left_right.columns), col_name0, 0) df_left_right.insert(len(df_left_right.columns), col_name1, 1) df_right_left.insert(len(df_right_left.columns), col_name0, 1) df_right_left.insert(len(df_right_left.columns), col_name1, 0) df_right_right.insert(len(df_right_right.columns), col_name0, 1) df_right_right.insert(len(df_right_right.columns), col_name1, 1) df = pd.concat([df_left_left, df_left_right, df_right_left, df_right_right]) num = num + 1 df.sort_index(inplace=True) df =
pd.concat([df, useless], axis=1)
pandas.concat
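Here the completion re-attaches the held-out identifier columns to the engineered frame column-wise with pd.concat([df, useless], axis=1), relying on the shared row index (the row sorts by index just before this). A minimal sketch of that axis=1 alignment follows; the frames and column names are invented placeholders for the feature frame and the UserID/MovieID/timestamp columns.

# Minimal sketch of the column-wise concat in the completion above.
# Frames are hypothetical; the original re-attaches UserID/MovieID/timestamp.
import pandas as pd

features = pd.DataFrame({"gbdt_00": [0, 1, 1]}, index=[0, 1, 2])
ids = pd.DataFrame({"UserID": [11, 12, 13]}, index=[0, 1, 2])
combined = pd.concat([features, ids], axis=1)  # aligns rows on the shared index
print(combined)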
""" test fancy indexing & misc """ from datetime import datetime import re import weakref import numpy as np import pytest import pandas.util._test_decorators as td from pandas.core.dtypes.common import ( is_float_dtype, is_integer_dtype, ) import pandas as pd from pandas import ( DataFrame, Index, NaT, Series, date_range, offsets, timedelta_range, ) import pandas._testing as tm from pandas.core.api import Float64Index from pandas.tests.indexing.common import _mklbl from pandas.tests.indexing.test_floats import gen_obj # ------------------------------------------------------------------------ # Indexing test cases class TestFancy: """pure get/set item & fancy indexing""" def test_setitem_ndarray_1d(self): # GH5508 # len of indexer vs length of the 1d ndarray df = DataFrame(index=Index(np.arange(1, 11))) df["foo"] = np.zeros(10, dtype=np.float64) df["bar"] = np.zeros(10, dtype=complex) # invalid msg = "Must have equal len keys and value when setting with an iterable" with pytest.raises(ValueError, match=msg): df.loc[df.index[2:5], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0]) # valid df.loc[df.index[2:6], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0]) result = df.loc[df.index[2:6], "bar"] expected = Series( [2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6], name="bar" ) tm.assert_series_equal(result, expected) def test_setitem_ndarray_1d_2(self): # GH5508 # dtype getting changed? df = DataFrame(index=Index(np.arange(1, 11))) df["foo"] = np.zeros(10, dtype=np.float64) df["bar"] = np.zeros(10, dtype=complex) msg = "Must have equal len keys and value when setting with an iterable" with pytest.raises(ValueError, match=msg): df[2:5] = np.arange(1, 4) * 1j def test_getitem_ndarray_3d( self, index, frame_or_series, indexer_sli, using_array_manager ): # GH 25567 obj = gen_obj(frame_or_series, index) idxr = indexer_sli(obj) nd3 = np.random.randint(5, size=(2, 2, 2)) msgs = [] if frame_or_series is Series and indexer_sli in [tm.setitem, tm.iloc]: msgs.append(r"Wrong number of dimensions. 
values.ndim > ndim \[3 > 1\]") if using_array_manager: msgs.append("Passed array should be 1-dimensional") if frame_or_series is Series or indexer_sli is tm.iloc: msgs.append(r"Buffer has wrong number of dimensions \(expected 1, got 3\)") if using_array_manager: msgs.append("indexer should be 1-dimensional") if indexer_sli is tm.loc or ( frame_or_series is Series and indexer_sli is tm.setitem ): msgs.append("Cannot index with multidimensional key") if frame_or_series is DataFrame and indexer_sli is tm.setitem: msgs.append("Index data must be 1-dimensional") if isinstance(index, pd.IntervalIndex) and indexer_sli is tm.iloc: msgs.append("Index data must be 1-dimensional") if isinstance(index, (pd.TimedeltaIndex, pd.DatetimeIndex, pd.PeriodIndex)): msgs.append("Data must be 1-dimensional") if len(index) == 0 or isinstance(index, pd.MultiIndex): msgs.append("positional indexers are out-of-bounds") msg = "|".join(msgs) potential_errors = (IndexError, ValueError, NotImplementedError) with pytest.raises(potential_errors, match=msg): idxr[nd3] def test_setitem_ndarray_3d(self, index, frame_or_series, indexer_sli): # GH 25567 obj = gen_obj(frame_or_series, index) idxr = indexer_sli(obj) nd3 = np.random.randint(5, size=(2, 2, 2)) if indexer_sli is tm.iloc: err = ValueError msg = f"Cannot set values with ndim > {obj.ndim}" else: err = ValueError msg = "|".join( [ r"Buffer has wrong number of dimensions \(expected 1, got 3\)", "Cannot set values with ndim > 1", "Index data must be 1-dimensional", "Data must be 1-dimensional", "Array conditional must be same shape as self", ] ) with pytest.raises(err, match=msg): idxr[nd3] = 0 def test_getitem_ndarray_0d(self): # GH#24924 key = np.array(0) # dataframe __getitem__ df = DataFrame([[1, 2], [3, 4]]) result = df[key] expected = Series([1, 3], name=0) tm.assert_series_equal(result, expected) # series __getitem__ ser = Series([1, 2]) result = ser[key] assert result == 1 def test_inf_upcast(self): # GH 16957 # We should be able to use np.inf as a key # np.inf should cause an index to convert to float # Test with np.inf in rows df = DataFrame(columns=[0]) df.loc[1] = 1 df.loc[2] = 2 df.loc[np.inf] = 3 # make sure we can look up the value assert df.loc[np.inf, 0] == 3 result = df.index expected = Float64Index([1, 2, np.inf]) tm.assert_index_equal(result, expected) def test_setitem_dtype_upcast(self): # GH3216 df = DataFrame([{"a": 1}, {"a": 3, "b": 2}]) df["c"] = np.nan assert df["c"].dtype == np.float64 df.loc[0, "c"] = "foo" expected = DataFrame( [{"a": 1, "b": np.nan, "c": "foo"}, {"a": 3, "b": 2, "c": np.nan}] ) tm.assert_frame_equal(df, expected) @pytest.mark.parametrize("val", [3.14, "wxyz"]) def test_setitem_dtype_upcast2(self, val): # GH10280 df = DataFrame( np.arange(6, dtype="int64").reshape(2, 3), index=list("ab"), columns=["foo", "bar", "baz"], ) left = df.copy() left.loc["a", "bar"] = val right = DataFrame( [[0, val, 2], [3, 4, 5]], index=list("ab"), columns=["foo", "bar", "baz"], ) tm.assert_frame_equal(left, right) assert is_integer_dtype(left["foo"]) assert is_integer_dtype(left["baz"]) def test_setitem_dtype_upcast3(self): left = DataFrame( np.arange(6, dtype="int64").reshape(2, 3) / 10.0, index=list("ab"), columns=["foo", "bar", "baz"], ) left.loc["a", "bar"] = "wxyz" right = DataFrame( [[0, "wxyz", 0.2], [0.3, 0.4, 0.5]], index=list("ab"), columns=["foo", "bar", "baz"], ) tm.assert_frame_equal(left, right) assert is_float_dtype(left["foo"]) assert is_float_dtype(left["baz"]) def test_dups_fancy_indexing(self): # GH 3455 df = 
tm.makeCustomDataframe(10, 3) df.columns = ["a", "a", "b"] result = df[["b", "a"]].columns expected = Index(["b", "a", "a"]) tm.assert_index_equal(result, expected) def test_dups_fancy_indexing_across_dtypes(self): # across dtypes df = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]], columns=list("aaaaaaa")) df.head() str(df) result = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]]) result.columns = list("aaaaaaa") # TODO(wesm): unused? df_v = df.iloc[:, 4] # noqa res_v = result.iloc[:, 4] # noqa tm.assert_frame_equal(df, result) def test_dups_fancy_indexing_not_in_order(self): # GH 3561, dups not in selected order df = DataFrame( {"test": [5, 7, 9, 11], "test1": [4.0, 5, 6, 7], "other": list("abcd")}, index=["A", "A", "B", "C"], ) rows = ["C", "B"] expected = DataFrame( {"test": [11, 9], "test1": [7.0, 6], "other": ["d", "c"]}, index=rows ) result = df.loc[rows] tm.assert_frame_equal(result, expected) result = df.loc[Index(rows)] tm.assert_frame_equal(result, expected) rows = ["C", "B", "E"] with pytest.raises(KeyError, match="not in index"): df.loc[rows] # see GH5553, make sure we use the right indexer rows = ["F", "G", "H", "C", "B", "E"] with pytest.raises(KeyError, match="not in index"): df.loc[rows] def test_dups_fancy_indexing_only_missing_label(self): # List containing only missing label dfnu = DataFrame(np.random.randn(5, 3), index=list("AABCD")) with pytest.raises( KeyError, match=re.escape( "\"None of [Index(['E'], dtype='object')] are in the [index]\"" ), ): dfnu.loc[["E"]] # ToDo: check_index_type can be True after GH 11497 @pytest.mark.parametrize("vals", [[0, 1, 2], list("abc")]) def test_dups_fancy_indexing_missing_label(self, vals): # GH 4619; duplicate indexer with missing label df = DataFrame({"A": vals}) with pytest.raises(KeyError, match="not in index"): df.loc[[0, 8, 0]] def test_dups_fancy_indexing_non_unique(self): # non unique with non unique selector df = DataFrame({"test": [5, 7, 9, 11]}, index=["A", "A", "B", "C"]) with pytest.raises(KeyError, match="not in index"): df.loc[["A", "A", "E"]] def test_dups_fancy_indexing2(self): # GH 5835 # dups on index and missing values df = DataFrame(np.random.randn(5, 5), columns=["A", "B", "B", "B", "A"]) with pytest.raises(KeyError, match="not in index"): df.loc[:, ["A", "B", "C"]] def test_dups_fancy_indexing3(self): # GH 6504, multi-axis indexing df = DataFrame( np.random.randn(9, 2), index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=["a", "b"] ) expected = df.iloc[0:6] result = df.loc[[1, 2]] tm.assert_frame_equal(result, expected) expected = df result = df.loc[:, ["a", "b"]] tm.assert_frame_equal(result, expected) expected = df.iloc[0:6, :] result = df.loc[[1, 2], ["a", "b"]] tm.assert_frame_equal(result, expected) def test_duplicate_int_indexing(self, indexer_sl): # GH 17347 ser = Series(range(3), index=[1, 1, 3]) expected = Series(range(2), index=[1, 1]) result = indexer_sl(ser)[[1]] tm.assert_series_equal(result, expected) def test_indexing_mixed_frame_bug(self): # GH3492 df = DataFrame( {"a": {1: "aaa", 2: "bbb", 3: "ccc"}, "b": {1: 111, 2: 222, 3: 333}} ) # this works, new column is created correctly df["test"] = df["a"].apply(lambda x: "_" if x == "aaa" else x) # this does not work, ie column test is not changed idx = df["test"] == "_" temp = df.loc[idx, "a"].apply(lambda x: "-----" if x == "aaa" else x) df.loc[idx, "test"] = temp assert df.iloc[0, 2] == "-----" def test_multitype_list_index_access(self): # GH 10610 df = DataFrame(np.random.random((10, 5)), columns=["a"] + [20, 21, 22, 23]) with pytest.raises(KeyError, 
match=re.escape("'[26, -8] not in index'")): df[[22, 26, -8]] assert df[21].shape[0] == df.shape[0] def test_set_index_nan(self): # GH 3586 df = DataFrame( { "PRuid": { 17: "nonQC", 18: "nonQC", 19: "nonQC", 20: "10", 21: "11", 22: "12", 23: "13", 24: "24", 25: "35", 26: "46", 27: "47", 28: "48", 29: "59", 30: "10", }, "QC": { 17: 0.0, 18: 0.0, 19: 0.0, 20: np.nan, 21: np.nan, 22: np.nan, 23: np.nan, 24: 1.0, 25: np.nan, 26: np.nan, 27: np.nan, 28: np.nan, 29: np.nan, 30: np.nan, }, "data": { 17: 7.9544899999999998, 18: 8.0142609999999994, 19: 7.8591520000000008, 20: 0.86140349999999999, 21: 0.87853110000000001, 22: 0.8427041999999999, 23: 0.78587700000000005, 24: 0.73062459999999996, 25: 0.81668560000000001, 26: 0.81927080000000008, 27: 0.80705009999999999, 28: 0.81440240000000008, 29: 0.80140849999999997, 30: 0.81307740000000006, }, "year": { 17: 2006, 18: 2007, 19: 2008, 20: 1985, 21: 1985, 22: 1985, 23: 1985, 24: 1985, 25: 1985, 26: 1985, 27: 1985, 28: 1985, 29: 1985, 30: 1986, }, } ).reset_index() result = ( df.set_index(["year", "PRuid", "QC"]) .reset_index() .reindex(columns=df.columns) ) tm.assert_frame_equal(result, df) def test_multi_assign(self): # GH 3626, an assignment of a sub-df to a df df = DataFrame( { "FC": ["a", "b", "a", "b", "a", "b"], "PF": [0, 0, 0, 0, 1, 1], "col1": list(range(6)), "col2": list(range(6, 12)), } ) df.iloc[1, 0] = np.nan df2 = df.copy() mask = ~df2.FC.isna() cols = ["col1", "col2"] dft = df2 * 2 dft.iloc[3, 3] = np.nan expected = DataFrame( { "FC": ["a", np.nan, "a", "b", "a", "b"], "PF": [0, 0, 0, 0, 1, 1], "col1": Series([0, 1, 4, 6, 8, 10]), "col2": [12, 7, 16, np.nan, 20, 22], } ) # frame on rhs df2.loc[mask, cols] = dft.loc[mask, cols] tm.assert_frame_equal(df2, expected) # with an ndarray on rhs # coerces to float64 because values has float64 dtype # GH 14001 expected = DataFrame( { "FC": ["a", np.nan, "a", "b", "a", "b"], "PF": [0, 0, 0, 0, 1, 1], "col1": [0.0, 1.0, 4.0, 6.0, 8.0, 10.0], "col2": [12, 7, 16, np.nan, 20, 22], } ) df2 = df.copy() df2.loc[mask, cols] = dft.loc[mask, cols].values tm.assert_frame_equal(df2, expected) def test_multi_assign_broadcasting_rhs(self): # broadcasting on the rhs is required df = DataFrame( { "A": [1, 2, 0, 0, 0], "B": [0, 0, 0, 10, 11], "C": [0, 0, 0, 10, 11], "D": [3, 4, 5, 6, 7], } ) expected = df.copy() mask = expected["A"] == 0 for col in ["A", "B"]: expected.loc[mask, col] = df["D"] df.loc[df["A"] == 0, ["A", "B"]] = df["D"] tm.assert_frame_equal(df, expected) # TODO(ArrayManager) setting single item with an iterable doesn't work yet # in the "split" path @td.skip_array_manager_not_yet_implemented def test_setitem_list(self): # GH 6043 # iloc with a list df = DataFrame(index=[0, 1], columns=[0]) df.iloc[1, 0] = [1, 2, 3] df.iloc[1, 0] = [1, 2] result = DataFrame(index=[0, 1], columns=[0]) result.iloc[1, 0] = [1, 2] tm.assert_frame_equal(result, df) def test_string_slice(self): # GH 14424 # string indexing against datetimelike with object # dtype should properly raises KeyError df = DataFrame([1], Index([pd.Timestamp("2011-01-01")], dtype=object)) assert df.index._is_all_dates with pytest.raises(KeyError, match="'2011'"): df["2011"] with pytest.raises(KeyError, match="'2011'"): df.loc["2011", 0] def test_string_slice_empty(self): # GH 14424 df = DataFrame() assert not df.index._is_all_dates with pytest.raises(KeyError, match="'2011'"): df["2011"] with pytest.raises(KeyError, match="^0$"): df.loc["2011", 0] def test_astype_assignment(self): # GH4312 (iloc) df_orig = DataFrame( [["1", "2", "3", ".4", 5, 
6.0, "foo"]], columns=list("ABCDEFG") ) df = df_orig.copy() df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64) expected = DataFrame( [[1, 2, "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG") ) tm.assert_frame_equal(df, expected) df = df_orig.copy() df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True) expected = DataFrame( [[1, 2, "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG") ) tm.assert_frame_equal(df, expected) # GH5702 (loc) df = df_orig.copy() df.loc[:, "A"] = df.loc[:, "A"].astype(np.int64) expected = DataFrame( [[1, "2", "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG") ) tm.assert_frame_equal(df, expected) df = df_orig.copy() df.loc[:, ["B", "C"]] = df.loc[:, ["B", "C"]].astype(np.int64) expected = DataFrame( [["1", 2, 3, ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG") ) tm.assert_frame_equal(df, expected) def test_astype_assignment_full_replacements(self): # full replacements / no nans df = DataFrame({"A": [1.0, 2.0, 3.0, 4.0]}) df.iloc[:, 0] = df["A"].astype(np.int64) expected = DataFrame({"A": [1, 2, 3, 4]}) tm.assert_frame_equal(df, expected) df = DataFrame({"A": [1.0, 2.0, 3.0, 4.0]}) df.loc[:, "A"] = df["A"].astype(np.int64) expected = DataFrame({"A": [1, 2, 3, 4]}) tm.assert_frame_equal(df, expected) @pytest.mark.parametrize("indexer", [tm.getitem, tm.loc]) def test_index_type_coercion(self, indexer): # GH 11836 # if we have an index type and set it with something that looks # to numpy like the same, but is actually, not # (e.g. setting with a float or string '0') # then we need to coerce to object # integer indexes for s in [Series(range(5)), Series(range(5), index=range(1, 6))]: assert s.index.is_integer() s2 = s.copy() indexer(s2)[0.1] = 0 assert s2.index.is_floating() assert indexer(s2)[0.1] == 0 s2 = s.copy() indexer(s2)[0.0] = 0 exp = s.index if 0 not in s: exp = Index(s.index.tolist() + [0]) tm.assert_index_equal(s2.index, exp) s2 = s.copy() indexer(s2)["0"] = 0 assert s2.index.is_object() for s in [Series(range(5), index=np.arange(5.0))]: assert s.index.is_floating() s2 = s.copy() indexer(s2)[0.1] = 0 assert s2.index.is_floating() assert indexer(s2)[0.1] == 0 s2 = s.copy() indexer(s2)[0.0] = 0 tm.assert_index_equal(s2.index, s.index) s2 = s.copy() indexer(s2)["0"] = 0 assert s2.index.is_object() class TestMisc: def test_float_index_to_mixed(self): df = DataFrame({0.0: np.random.rand(10), 1.0: np.random.rand(10)}) df["a"] = 10 expected = DataFrame({0.0: df[0.0], 1.0: df[1.0], "a": [10] * 10}) tm.assert_frame_equal(expected, df) def test_float_index_non_scalar_assignment(self): df = DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]}, index=[1.0, 2.0, 3.0]) df.loc[df.index[:2]] = 1 expected = DataFrame({"a": [1, 1, 3], "b": [1, 1, 5]}, index=df.index) tm.assert_frame_equal(expected, df) def test_loc_setitem_fullindex_views(self): df =
DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]}, index=[1.0, 2.0, 3.0])
pandas.DataFrame
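# A minimal standalone sketch (not taken from the pandas test suite above) of the pattern
# that this row's completion and its `pandas.DataFrame` label point at: building a small
# float-indexed frame and checking that a full-index .loc assignment leaves it unchanged.
# The assertion helper used here is an illustrative assumption, not the original test body.
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]}, index=[1.0, 2.0, 3.0])
expected = df.copy()

# Assigning a full .loc slice back onto itself should not change the values.
df.loc[:, :] = df.loc[:, :]

pd.testing.assert_frame_equal(df, expected)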
# -*- coding: utf-8 -*-
"""arima_btc_monthly.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1UD3t9q6t9vDNTdDfGC1NrS46XNE15xvi
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.graph_objects as go
from matplotlib.pyplot import figure
from datetime import datetime

from google.colab import drive
drive.mount('/content/drive')

BTC_monthly =
pd.read_csv('/content/drive/Shareddrives/Crypto SP500 /data for arima/BTC-USD-monthly.csv')
pandas.read_csv
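# For readers without Colab/Drive access, a hedged sketch of the same load against a local
# copy of the file; the file name and the "Date"/"Close" column names are assumptions based
# on a typical Yahoo Finance monthly export, not something taken from the notebook itself.
import pandas as pd

BTC_monthly_local = pd.read_csv(
    "BTC-USD-monthly.csv",   # assumed local copy of the Drive file
    parse_dates=["Date"],    # assumed date column
    index_col="Date",
)
print(BTC_monthly_local["Close"].describe())  # assumed closing-price column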
#!/usr/bin/env python
# coding: utf-8

import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import rcParams

# Japanese-capable font fallbacks for matplotlib labels
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Hiragino Maru Gothic Pro', 'Yu Gothic', 'Meiryo', 'Takao',
                               'IPAexGothic', 'IPAPGothic', 'VL PGothic', 'Noto Sans CJK JP']

# Preprocessing
df_google = pd.read_csv("https://storage.googleapis.com/covid-external/forecast_JAPAN_PREFECTURE_28.csv")
df_google['target_prediction_date'] = pd.to_datetime(df_google['target_prediction_date'])
df_real = df_google[df_google['hospitalized_patients'].isnull()]
df_google = df_google[~df_google['hospitalized_patients'].isnull()]

# Severe-case ratio
df_ratio =
pd.read_csv('data_severe_ratio/ratio.csv', index_col=0)
pandas.read_csv
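# Self-contained sketch of the null-based split used in the script above: rows whose
# hospitalized_patients value is null go into df_real, the rest stay in df_google.
# The frame here is synthetic; only the column names mirror the script.
import numpy as np
import pandas as pd

df_google_demo = pd.DataFrame({
    "target_prediction_date": pd.to_datetime(["2021-05-01", "2021-05-02", "2021-05-03"]),
    "hospitalized_patients": [np.nan, 120.0, 130.0],
})

df_real_demo = df_google_demo[df_google_demo["hospitalized_patients"].isnull()]
df_forecast_demo = df_google_demo[~df_google_demo["hospitalized_patients"].isnull()]
print(len(df_real_demo), len(df_forecast_demo))  # 1 2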
#! /usr/bin/env python3
# coding=utf-8

import pandas as pd
import joblib
import numpy as np
import logging
import sys
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.model_selection import train_test_split, RandomizedSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import make_scorer
from sklearn.metrics import accuracy_score

df = pd.read_csv("Data/Customer-Churn.csv")


def remove_cols(df):
    '''
    Check and remove the unwanted columns
    input: df
    output: cleaned dataframe
    '''
    Constant_Values = df.columns[df.eq(df.iloc[0]).all()].tolist()
    Duplicate_Columns = df.columns[df.T.duplicated(keep='first').T]  # Only report second column as duplicate
    df = df.drop(Constant_Values, axis=1)
    df = df.drop(Duplicate_Columns, axis=1)
    df = df.drop(['customerID', 'Dependents', 'PhoneService', 'MultipleLines',
                  'PaperlessBilling', 'PaymentMethod'], axis=1)
    return df


df = remove_cols(df)


def missing_values_table(df):
    '''
    Check the missing values in the data columns
    input: df
    output: Dataframe of columns and their missing value percent
    '''
    df['TotalCharges'] = df['TotalCharges'].replace(' ', np.nan)
    missing_val = df.isnull().sum()
    missing_val_percent = 100 * missing_val / len(df)
    mis_val_table =
pd.concat([missing_val, missing_val_percent], axis=1)
pandas.concat
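# Hedged sketch of where the pd.concat completion above leads: stacking the missing-value
# counts and percentages side by side for a toy frame. Naming the result's columns is an
# assumed readability step, not something shown in the row itself.
import numpy as np
import pandas as pd

toy = pd.DataFrame({"a": [1.0, np.nan, 3.0], "b": [np.nan, np.nan, 6.0]})

missing_val = toy.isnull().sum()
missing_val_percent = 100 * missing_val / len(toy)

mis_val_table = pd.concat([missing_val, missing_val_percent], axis=1)
mis_val_table.columns = ["missing_count", "missing_percent"]  # assumed column names
print(mis_val_table)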
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/04_Create_Acs_Indicators_Original.ipynb (unless otherwise specified). __all__ = ['racdiv', 'pasi', 'elheat', 'empl', 'fam', 'female', 'femhhs', 'heatgas', 'hh40inc', 'hh60inc', 'hh75inc', 'hhchpov', 'hhm75', 'hhpov', 'hhs', 'hsdipl', 'lesshs', 'male', 'nilf', 'othrcom', 'p2more', 'pubtran', 'age5', 'age24', 'age64', 'age18', 'age65', 'affordm', 'affordr', 'bahigher', 'carpool', 'drvalone', 'hh25inc', 'mhhi', 'nohhint', 'novhcl', 'paa', 'ppac', 'phisp', 'pwhite', 'sclemp', 'tpop', 'trav14', 'trav29', 'trav45', 'trav44', 'unempl', 'unempr', 'walked'] # Cell #File: racdiv.py #Author: <NAME> #Date: 4/16/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B02001 - Race # Universe: Total Population # Uses ACS Table B03002 - HISPANIC OR LATINO ORIGIN BY RACE # Universe: Total Population # Table Creates: racdiv, paa, pwhite, pasi, phisp, p2more, ppac #purpose: #input: Year #output: import pandas as pd import glob def racdiv( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def addKey(df, fi, col): key = getColName(df, col) val = getColByName(df, col) fi[key] = val return fi def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. #~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B02001*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) fileName = '' for name in glob.glob('AcsDataClean/B03002*5y'+str(year)+'_est.csv'): fileName = name df_hisp = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') df_hisp = df_hisp.groupby('CSA') # Aggregate Numeric Values by Sum df = df.sum(numeric_only=True) df_hisp = df_hisp.sum(numeric_only=True) # Append the one column from the other ACS Table df['B03002_012E_Total_Hispanic_or_Latino'] = df_hisp['B03002_012E_Total_Hispanic_or_Latino'] df1 = pd.DataFrame() df1['CSA'] = df.index df1.set_index('CSA', drop = True, inplace = True) df1['African-American%'] = df[ 'B02001_003E_Total_Black_or_African_American_alone' ] / df[ 'B02001_001E_Total' ] * 100 df1['White%'] = df[ 'B02001_002E_Total_White_alone' ] / df[ 'B02001_001E_Total' ] * 100 df1['American Indian%'] = df[ 'B02001_004E_Total_American_Indian_and_Alaska_Native_alone' ]/ df[ 'B02001_001E_Total' ] * 100 df1['Asian%'] = df[ 'B02001_005E_Total_Asian_alone' ] / df[ 'B02001_001E_Total' ] * 100 df1['Native Hawaii/Pac Islander%'] = df[ 'B02001_006E_Total_Native_Hawaiian_and_Other_Pacific_Islander_alone'] / df[ 'B02001_001E_Total' ] * 100 df1['Hisp %'] = df['B03002_012E_Total_Hispanic_or_Latino'] / df[ 'B02001_001E_Total' ] * 100 # =1-(POWER(%AA/100,2)+POWER(%White/100,2)+POWER(%AmerInd/100,2)+POWER(%Asian/100,2) + POWER(%NativeAm/100,2))*(POWER(%Hispanci/100,2) + POWER(1-(%Hispanic/100),2)) df1['Diversity_index'] = ( 1- ( ( df1['African-American%'] /100 )**2 +( df1['White%'] /100 )**2 +( df1['American Indian%'] /100 )**2 +( df1['Asian%'] /100 )**2 +( df1['Native Hawaii/Pac Islander%'] /100 )**2 )*( ( df1['Hisp %'] /100 )**2 +(1-( df1['Hisp %'] /100) )**2 ) ) * 100 return df1['Diversity_index'] # Cell #File: pasi.py #Author: <NAME> #Date: 4/16/19 #Section: Bnia #Email: <EMAIL> 
#Description: # Uses ACS Table B03002 - HISPANIC OR LATINO ORIGIN BY RACE # Universe: Total Population # Table Creates: racdiv, paa, pwhite, pasi, phisp, p2more, ppac #purpose: #input: Year #output: import pandas as pd import glob def pasi( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def addKey(df, fi, col): key = getColName(df, col) val = getColByName(df, col) fi[key] = val return fi def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. #~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B03002*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = df.sum(numeric_only=True) # Append the one column from the other ACS Table df['B03002_012E_Total_Hispanic_or_Latino'] df1 = pd.DataFrame() df1['CSA'] = df.index df1.set_index('CSA', drop = True, inplace = True) tot = df[ 'B03002_001E_Total' ] df1['Asian%NH'] = df[ 'B03002_006E_Total_Not_Hispanic_or_Latino_Asian_alone' ]/ tot * 100 return df1['Asian%NH'] # Cell #File: elheat.py #Author: <NAME> #Date: 1/17/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B25040 - HOUSE HEATING FUEL # Universe - Occupied housing units # Table Creates: elheat, heatgas #purpose: Produce Sustainability - Percent of Residences Heated by Electricity Indicator #input: Year #output: import pandas as pd import glob def elheat( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def addKey(df, fi, col): key = getColName(df, col) val = getColByName(df, col) fi[key] = val return fi def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. #~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B25040*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = sumInts(df) # Add 'BALTIMORE' which is the SUM of all the CSAs #~~~~~~~~~~~~~~~ # Step 2) # Prepare the columns #~~~~~~~~~~~~~~~ # Final Dataframe fi = pd.DataFrame() columns = ['B25040_004E','B25040_001E'] for col in columns: fi = addKey(df, fi, col) # Numerators numerators = pd.DataFrame() columns = ['B25040_004E'] for col in columns: numerators = addKey(df, numerators, col) # Denominators denominators = pd.DataFrame() columns = ['B25040_001E'] for col in columns: denominators = addKey(df, denominators, col) # construct the denominator, returns 0 iff the other two rows are equal. 
#~~~~~~~~~~~~~~~ # Step 3) # Run the Calculation + final mods # ( value[1] / nullif(value[2],0) )*100 #~~~~~~~~~~~~~~~ fi['numerator'] = numerators.sum(axis=1) fi['denominator'] = denominators.sum(axis=1) fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0 fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100 return fi['final'] """ /* <elheat_14> */ -- WITH tbl AS ( select csa, ( value[1] / nullif(value[2],0) )*100::numeric as result from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B25040_004E','B25040_001E']) ) update vital_signs.data set elheat = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014'; """ # Cell #File: empl.py #Author: <NAME> #Date: 1/17/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B23001 - SEX BY AGE BY EMPLOYMENT STATUS FOR THE POPULATION 16 YEARS AND OVER # Universe - Population 16 years and over # Table Creates: empl, unempl, unempr, nilf #purpose: Produce Workforce and Economic Development - Percent Population 16-64 Employed Indicator #input: Year #output: import pandas as pd import glob def empl( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def addKey(df, fi, col): key = getColName(df, col) val = getColByName(df, col) fi[key] = val return fi def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. #~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B23001*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = sumInts(df) # Add 'BALTIMORE' which is the SUM of all the CSAs #~~~~~~~~~~~~~~~ # Step 2) # Prepare the columns #~~~~~~~~~~~~~~~ # Final Dataframe fi = pd.DataFrame() columns = ['B23001_003E', 'B23001_010E', 'B23001_017E', 'B23001_024E', 'B23001_031E', 'B23001_038E', 'B23001_045E', 'B23001_052E', 'B23001_059E', 'B23001_066E', 'B23001_089E', 'B23001_096E', 'B23001_103E', 'B23001_110E', 'B23001_117E', 'B23001_124E', 'B23001_131E', 'B23001_138E', 'B23001_145E', 'B23001_152E', 'B23001_007E', 'B23001_014E', 'B23001_021E', 'B23001_028E', 'B23001_035E', 'B23001_042E', 'B23001_049E', 'B23001_056E', 'B23001_063E', 'B23001_070E', 'B23001_093E', 'B23001_100E', 'B23001_107E', 'B23001_114E', 'B23001_121E', 'B23001_128E', 'B23001_135E', 'B23001_142E', 'B23001_149E', 'B23001_156E'] for col in columns: fi = addKey(df, fi, col) # Numerators numerators = pd.DataFrame() columns = ['B23001_007E', 'B23001_014E', 'B23001_021E', 'B23001_028E', 'B23001_035E', 'B23001_042E', 'B23001_049E', 'B23001_056E', 'B23001_063E', 'B23001_070E', 'B23001_093E', 'B23001_100E', 'B23001_107E', 'B23001_114E', 'B23001_121E', 'B23001_128E', 'B23001_135E', 'B23001_142E', 'B23001_149E', 'B23001_156E'] for col in columns: numerators = addKey(df, numerators, col) # Denominators denominators = pd.DataFrame() columns = ['B23001_003E', 'B23001_010E', 'B23001_017E', 'B23001_024E', 'B23001_031E', 'B23001_038E', 'B23001_045E', 'B23001_052E', 'B23001_059E', 'B23001_066E', 'B23001_089E', 'B23001_096E', 'B23001_103E', 'B23001_110E', 'B23001_117E', 'B23001_124E', 'B23001_131E', 'B23001_138E', 
'B23001_145E', 'B23001_152E'] for col in columns: denominators = addKey(df, denominators, col) # construct the denominator, returns 0 iff the other two rows are equal. #~~~~~~~~~~~~~~~ # Step 3) # Run the Calculation # (value[21]+value[22]+value[23]+value[24]+value[25]+value[26]+value[27]+value[28]+value[29]+value[30]+value[31]+value[32]+value[33]+value[34]+value[35]+value[36]+value[37]+value[38]+value[39]+value[40]) --civil labor force empl 16-64 #/ #nullif( (value[1]+value[2]+value[3]+value[4]+value[5]+value[6]+value[7]+value[8]+value[9]+value[10]+value[11]+value[12]+value[13]+value[14]+value[15]+value[16]+value[17]+value[18]+value[19]+value[20]) -- population 16 to 64 ,0) )*100 #~~~~~~~~~~~~~~~ fi['numerator'] = numerators.sum(axis=1) fi['denominator'] = denominators.sum(axis=1) fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0 fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100 return fi['final'] """ /* <empl_14> */ -- WITH tbl AS ( select csa, ( ( value[21]+value[22]+value[23]+value[24]+value[25]+value[26]+value[27]+value[28]+value[29]+value[30]+value[31]+value[32]+value[33]+value[34]+value[35]+value[36]+value[37]+value[38]+value[39]+value[40]) --civil labor force empl 16-64 / nullif( (value[1]+value[2]+value[3]+value[4]+value[5]+value[6]+value[7]+value[8]+value[9]+value[10]+value[11]+value[12]+value[13]+value[14]+value[15]+value[16]+value[17]+value[18]+value[19]+value[20]) -- population 16 to 64 ,0) )*100::numeric as result from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY[ 'B23001_003E','B23001_010E','B23001_017E','B23001_024E','B23001_031E','B23001_038E','B23001_045E','B23001_052E','B23001_059E','B23001_066E','B23001_089E','B23001_096E','B23001_103E','B23001_110E','B23001_117E','B23001_124E','B23001_131E','B23001_138E','B23001_145E','B23001_152E','B23001_007E','B23001_014E','B23001_021E','B23001_028E','B23001_035E','B23001_042E','B23001_049E','B23001_056E','B23001_063E','B23001_070E','B23001_093E','B23001_100E','B23001_107E','B23001_114E','B23001_121E','B23001_128E','B23001_135E','B23001_142E','B23001_149E','B23001_156E']) ) update vital_signs.data set empl = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014'; """ # Cell #File: fam.py #Author: <NAME> #Date: 4/16/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B11005 - HOUSEHOLDS BY PRESENCE OF PEOPLE UNDER 18 YEARS BY HOUSEHOLD TYPE # Universe: Households # Table Creates: hhs, fam, femhhs #purpose: #input: Year #output: import pandas as pd import glob def fam( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def addKey(df, fi, col): key = getColName(df, col) val = getColByName(df, col) fi[key] = val return fi def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. 
#~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B11005*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = df.sum(numeric_only=True) df1 = pd.DataFrame() df1['CSA'] = df.index df1.set_index('CSA', drop = True, inplace = True) # DIFFERENCES IN TABLE NAMES EXIST BETWEEN 16 and 17. 17 has no comma. rootStr = 'B11005_007E_Total_Households_with_one_or_more_people_under_18_years_Family_households_Other_family_Female_householder' str16 = rootStr + ',_no_husband_present' str17 = rootStr + '_no_husband_present' # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = df.sum(numeric_only=True) # Delete Unassigned--Jail df = df[df.index != 'Unassigned--Jail'] # Move Baltimore to Bottom bc = df.loc[ 'Baltimore City' ] df = df.drop( df.index[1] ) df.loc[ 'Baltimore City' ] = bc df1 = pd.DataFrame() df1['CSA'] = df.index df1.set_index('CSA', drop = True, inplace = True) # Actually produce the data df1['total'] = df[ 'B11005_001E_Total' ] df1['18Under'] = df[ 'B11005_002E_Total_Households_with_one_or_more_people_under_18_years' ] / df1['total'] * 100 return df1['18Under'] # Cell #File: female.py #Author: <NAME> #Date: 4/16/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B01001 - SEX BY AGE # Universe: Total population # Table Creates: tpop, female, male, age5 age18 age24 age64 age65 #purpose: #input: Year #output: import pandas as pd import glob def female( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def addKey(df, fi, col): key = getColName(df, col) val = getColByName(df, col) fi[key] = val return fi def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. 
#~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B01001*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = df.sum(numeric_only=True) # df.columns total = df['B01001_001E_Total'] df1 = pd.DataFrame() df1['CSA'] = df.index df1.set_index('CSA', drop = True, inplace = True) df1['onlyTheLadies'] = df[ 'B01001_026E_Total_Female' ] return df1['onlyTheLadies'] # Cell #File: femhhs.py #Author: <NAME> #Date: 4/16/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B11005 - HOUSEHOLDS BY PRESENCE OF PEOPLE UNDER 18 YEARS BY HOUSEHOLD TYPE # Universe: Households # Table Creates: male, hhs, fam, femhhs #purpose: #input: Year #output: import pandas as pd import glob def femhhs( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def addKey(df, fi, col): key = getColName(df, col) val = getColByName(df, col) fi[key] = val return fi def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. #~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B11005*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = df.sum(numeric_only=True) df1 = pd.DataFrame() df1['CSA'] = df.index df1.set_index('CSA', drop = True, inplace = True) # DIFFERENCES IN TABLE NAMES EXIST BETWEEN 16 and 17. 17 has no comma. 
rootStr = 'B11005_007E_Total_Households_with_one_or_more_people_under_18_years_Family_households_Other_family_Female_householder' str16 = rootStr + ',_no_husband_present' str17 = rootStr + '_no_husband_present' str19 = rootStr + ',_no_spouse_present' femhh = str17 if year == '17' else str19 if year == '19' else str16 # Actually produce the data df1['total'] = df[ 'B11005_001E_Total' ] df1['18Under'] = df[ 'B11005_002E_Total_Households_with_one_or_more_people_under_18_years' ] / df1['total'] * 100 df1['FemaleHH'] = df[ femhh ] / df['B11005_002E_Total_Households_with_one_or_more_people_under_18_years'] * 100 df1['FamHHChildrenUnder18'] = df['B11005_003E_Total_Households_with_one_or_more_people_under_18_years_Family_households'] df1['FamHHChildrenOver18'] = df['B11005_012E_Total_Households_with_no_people_under_18_years_Family_households'] df1['FamHH'] = df1['FamHHChildrenOver18'] + df1['FamHHChildrenUnder18'] return df1['FemaleHH'] # Cell #File: heatgas.py #Author: <NAME> #Date: 1/17/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B25040 - HOUSE HEATING FUEL # Universe - Occupied housing units # Table Creates: elheat, heatgas #purpose: Produce Sustainability - Percent of Residences Heated by Electricity Indicator #input: Year #output: import pandas as pd import glob def heatgas( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def addKey(df, fi, col): key = getColName(df, col) val = getColByName(df, col) fi[key] = val return fi def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. #~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B25040*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = sumInts(df) # Add 'BALTIMORE' which is the SUM of all the CSAs #~~~~~~~~~~~~~~~ # Step 2) # Prepare the columns #~~~~~~~~~~~~~~~ # Final Dataframe fi = pd.DataFrame() columns = ['B25040_002E','B25040_001E'] for col in columns: fi = addKey(df, fi, col) # Numerators numerators = pd.DataFrame() columns = ['B25040_002E'] for col in columns: numerators = addKey(df, numerators, col) # Denominators denominators = pd.DataFrame() columns = ['B25040_001E'] for col in columns: denominators = addKey(df, denominators, col) # construct the denominator, returns 0 iff the other two rows are equal. 
#~~~~~~~~~~~~~~~ # Step 3) # Run the Calculation # ( value[1] / nullif(value[2],0) )*100 #~~~~~~~~~~~~~~~ fi['numerator'] = numerators.sum(axis=1) fi['denominator'] = denominators.sum(axis=1) fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0 fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100 return fi['final'] """ /* <heatgas_14> */ -- WITH tbl AS ( select csa, ( value[1] / nullif(value[2],0) )*100::numeric as result from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B25040_002E','B25040_001E']) ) update vital_signs.data set heatgas = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014'; """ # Cell #File: hh40inc.py #Author: <NAME> #Date: 1/17/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B19001 - HOUSEHOLD INCOME V # HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS) # Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi #purpose: Produce Household Income 25K-40K Indicator #input: Year #output: import pandas as pd import glob def hh40inc( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. #~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B19001*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = sumInts(df) # Add 'BALTIMORE' which is the SUM of all the CSAs #~~~~~~~~~~~~~~~ # Step 2) # Prepare the columns #~~~~~~~~~~~~~~~ # val1.__class__.__name__ # # create a new dataframe for giggles fi = pd.DataFrame() # append into that dataframe col 001 key = getColName(df, '001') val = getColByName(df, '001') fi[key] = val # append into that dataframe col 006 key = getColName(df, '006') val = getColByName(df, '006') fi[key] = val # append into that dataframe col 007 key = getColName(df, '007') val = getColByName(df, '007') fi[key] = val # append into that dataframe col 008 key = getColName(df, '008') val = getColByName(df, '008') fi[key] = val # Delete Rows where the 'denominator' column is 0 -> like the Jail fi = fi[fi[fi.columns[0]] != 0] #~~~~~~~~~~~~~~~ # Step 3) # Run the Calculation #~~~~~~~~~~~~~~~ return fi.apply(lambda x: ( ( x[fi.columns[1] ]+ x[fi.columns[2] ]+ x[fi.columns[3] ] ) / x[fi.columns[0]])*100, axis=1) """ /* hh40inc */ -- WITH tbl AS ( select csa, ( (value[1] + value[2] + value[3]) / value[4] )*100 as result from vital_signs.get_acs_vars_csa_and_bc('2013',ARRAY['B19001_006E','B19001_007E','B19001_008E','B19001_001E']) ) UPDATE vital_signs.data set hh40inc = result from tbl where data.csa = tbl.csa and data_year = '2013'; """ # Cell #File: hh60inc.py #Author: <NAME> #Date: 1/17/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B19001 - HOUSEHOLD INCOME V # HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS) # Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi #purpose: Produce Household 45-60K Indicator #input: Year #output: import pandas as pd import glob def hh60inc( year ): def getColName (df, col): return 
df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. #~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B19001*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = sumInts(df) # Add 'BALTIMORE' which is the SUM of all the CSAs #~~~~~~~~~~~~~~~ # Step 2) # Prepare the columns #~~~~~~~~~~~~~~~ # val1.__class__.__name__ # # create a new dataframe for giggles fi = pd.DataFrame() # append into that dataframe col 001 key = getColName(df, '001') val = getColByName(df, '001') fi[key] = val # append into that dataframe col 009 key = getColName(df, '009') val = getColByName(df, '009') fi[key] = val # append into that dataframe col 010 key = getColName(df, '010') val = getColByName(df, '010') fi[key] = val # append into that dataframe col 011 key = getColName(df, '011') val = getColByName(df, '011') fi[key] = val # Delete Rows where the 'denominator' column is 0 -> like the Jail fi = fi[fi[fi.columns[0]] != 0] #~~~~~~~~~~~~~~~ # Step 3) # Run the Calculation #~~~~~~~~~~~~~~~ return fi.apply(lambda x: ( ( x[fi.columns[1] ]+ x[fi.columns[2] ]+ x[fi.columns[3] ] ) / x[fi.columns[0]])*100, axis=1) """ /* hh60inc */ -- WITH tbl AS ( select csa, ( (value[1] + value[2] + value[3]) / value[4] )*100 as result from vital_signs.get_acs_vars_csa_and_bc('2013',ARRAY['B19001_009E','B19001_010E','B19001_011E','B19001_001E']) ) UPDATE vital_signs.data set hh60inc = result from tbl where data.csa = tbl.csa and data_year = '2013'; """ # Cell #File: hh75inc.py #Author: <NAME> #Date: 1/17/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B19001 - HOUSEHOLD INCOME V # HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS) # Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi #purpose: Produce Household Income 60-70K Indicator #input: Year #output: import pandas as pd import glob def hh75inc( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. 
#~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B19001*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = sumInts(df) # Add 'BALTIMORE' which is the SUM of all the CSAs #~~~~~~~~~~~~~~~ # Step 2) # Prepare the columns #~~~~~~~~~~~~~~~ # val1.__class__.__name__ # # create a new dataframe for giggles fi = pd.DataFrame() # append into that dataframe col 001 key = getColName(df, '001') val = getColByName(df, '001') fi[key] = val # append into that dataframe col 012 key = getColName(df, '012') val = getColByName(df, '012') fi[key] = val # Delete Rows where the 'denominator' column is 0 -> like the Jail fi = fi[fi[fi.columns[0]] != 0] #~~~~~~~~~~~~~~~ # Step 3) # Run the Calculation #~~~~~~~~~~~~~~~ #12/1 return fi.apply(lambda x: ( x[fi.columns[1] ] / x[fi.columns[0]])*100, axis=1) """ /* hh75inc */ -- WITH tbl AS ( select csa, ( value[1] / value[2] )*100 as result from vital_signs.get_acs_vars_csa_and_bc('2013',ARRAY['B19001_012E','B19001_001E']) ) UPDATE vital_signs.data set hh75inc = result from tbl where data.csa = tbl.csa and data_year = '2013'; """ # Cell #File: hhchpov.py #Author: <NAME> #Date: 1/17/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B17001 - POVERTY STATUS IN THE PAST 12 MONTHS BY SEX BY AGE # Universe: Population for whom poverty status is determined more information #purpose: Produce Household Poverty Indicator #input: Year #output: import pandas as pd import glob def hhchpov( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def addKey(df, fi, col): key = getColName(df, col) val = getColByName(df, col) fi[key] = val return fi def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. 
#~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B17001*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = sumInts(df) # Add 'BALTIMORE' which is the SUM of all the CSAs #~~~~~~~~~~~~~~~ # Step 2) # Prepare the columns #~~~~~~~~~~~~~~~ # Final Dataframe fi = pd.DataFrame() columns = ['B17001_004E', 'B17001_005E', 'B17001_006E', 'B17001_007E', 'B17001_008E', 'B17001_009E', 'B17001_018E', 'B17001_019E', 'B17001_020E', 'B17001_021E', 'B17001_022E', 'B17001_023E', 'B17001_033E', 'B17001_034E', 'B17001_035E', 'B17001_036E', 'B17001_037E', 'B17001_038E', 'B17001_047E', 'B17001_048E', 'B17001_049E', 'B17001_050E', 'B17001_051E', 'B17001_052E'] for col in columns: fi = addKey(df, fi, col) # Numerators numerators = pd.DataFrame() columns = ['B17001_004E', 'B17001_005E', 'B17001_006E', 'B17001_007E', 'B17001_008E', 'B17001_009E', 'B17001_018E', 'B17001_019E', 'B17001_020E', 'B17001_021E', 'B17001_022E', 'B17001_023E'] for col in columns: numerators = addKey(df, numerators, col) # Denominators denominators = pd.DataFrame() columns = ['B17001_004E', 'B17001_005E', 'B17001_006E', 'B17001_007E', 'B17001_008E', 'B17001_009E', 'B17001_018E', 'B17001_019E', 'B17001_020E', 'B17001_021E', 'B17001_022E', 'B17001_023E', 'B17001_033E', 'B17001_034E', 'B17001_035E', 'B17001_036E', 'B17001_037E', 'B17001_038E', 'B17001_047E', 'B17001_048E', 'B17001_049E', 'B17001_050E', 'B17001_051E', 'B17001_052E'] for col in columns: denominators = addKey(df, denominators, col) #~~~~~~~~~~~~~~~ # Step 3) # Run the Calculation #~~~~~~~~~~~~~~~ fi['numerator'] = numerators.sum(axis=1) fi['denominator'] = denominators.sum(axis=1) fi = fi[fi['denominator'] != 0] #Delete Rows where the 'denominator' column is 0 fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100 #~~~~~~~~~~~~~~~ # Step 4) # Add Special Baltimore City Data #~~~~~~~~~~~~~~~ url = 'https://api.census.gov/data/20'+str(year)+'/acs/acs5/subject?get=NAME,S1701_C03_002E&for=county%3A510&in=state%3A24&key=<KEY>' table = pd.read_json(url, orient='records') fi['final']['Baltimore City'] = float(table.loc[1, table.columns[1]]) return fi['final'] """ /* <hhchpov_14> */ WITH tbl AS ( select csa, ( (value[1] + value[2] + value[3] + value[4] + value[5] + value[6] + value[7] + value[8] + value[9] + value[10] + value[11] + value[12]) / nullif( (value[1] + value[2] + value[3] + value[4] + value[5] + value[6] + value[7] + value[8] + value[9] + value[10] + value[11] + value[12] + value[13] + value[14] + value[15] + value[16] + value[17] + value[18] + value[19] + value[20] + value[21] + value[22] + value[23] + value[24] ), 0) ) * 100::numeric as result from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B17001_004E','B17001_005E','B17001_006E','B17001_007E','B17001_008E','B17001_009E','B17001_018E','B17001_019E','B17001_020E','B17001_021E','B17001_022E','B17001_023E','B17001_033E','B17001_034E','B17001_035E','B17001_036E','B17001_037E','B17001_038E','B17001_047E','B17001_048E','B17001_049E','B17001_050E','B17001_051E','B17001_052E']) ) update vital_signs.data set hhchpov = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014'; """ # Cell #File: hhm75.py #Author: <NAME> #Date: 1/17/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B19001 - HOUSEHOLD INCOME V # HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS) # Table 
Creates: hh25 hh40 hh60 hh75 hhm75, mhhi #purpose: Produce Household Income Over 75K Indicator #input: Year #output: import pandas as pd import glob def hhm75( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. #~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B19001*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = sumInts(df) # Add 'BALTIMORE' which is the SUM of all the CSAs #~~~~~~~~~~~~~~~ # Step 2) # Prepare the columns #~~~~~~~~~~~~~~~ # val1.__class__.__name__ # # create a new dataframe for giggles fi = pd.DataFrame() # append into that dataframe col 001 key = getColName(df, '001') val = getColByName(df, '001') fi[key] = val # append into that dataframe col 002 key = getColName(df, '002') val = getColByName(df, '002') fi[key] = val # append into that dataframe col 003 key = getColName(df, '003') val = getColByName(df, '003') fi[key] = val # append into that dataframe col 004 key = getColName(df, '004') val = getColByName(df, '004') fi[key] = val # append into that dataframe col 005 key = getColName(df, '005') val = getColByName(df, '005') fi[key] = val # append into that dataframe col 006 key = getColName(df, '006') val = getColByName(df, '006') fi[key] = val # append into that dataframe col 007 key = getColName(df, '007') val = getColByName(df, '007') fi[key] = val # append into that dataframe col 008 key = getColName(df, '008') val = getColByName(df, '008') fi[key] = val # append into that dataframe col 009 key = getColName(df, '009') val = getColByName(df, '009') fi[key] = val # append into that dataframe col 010 key = getColName(df, '010') val = getColByName(df, '010') fi[key] = val # append into that dataframe col 011 key = getColName(df, '011') val = getColByName(df, '011') fi[key] = val # append into that dataframe col 012 key = getColName(df, '012') val = getColByName(df, '012') fi[key] = val # Delete Rows where the 'denominator' column is 0 -> like the Jail fi = fi[fi[fi.columns[0]] != 0] #~~~~~~~~~~~~~~~ # Step 3) # Run the Calculation #~~~~~~~~~~~~~~~ return fi.apply(lambda x: ( ( x[fi.columns[0]]-( x[fi.columns[1] ]+ x[fi.columns[2] ]+ x[fi.columns[3] ]+ x[fi.columns[4] ]+ x[fi.columns[5] ]+ x[fi.columns[6] ]+ x[fi.columns[7] ]+ x[fi.columns[8] ]+ x[fi.columns[9] ]+ x[fi.columns[10] ]+ x[fi.columns[11] ] ) ) / x[fi.columns[0]])*100, axis=1) # Cell #File: hhpov.py #Author: <NAME> #Date: 1/17/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B17017 - Household Poverty, Uses Table B17017 which includes V # Poverty Status in the Past 12 Months by Household Type by Age of Householder (Universe = households) #purpose: Produce Household Poverty Indicator #input: Year #output: import pandas as pd import glob def hhpov( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, 
c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. #~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B17017*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = sumInts(df) # Add 'BALTIMORE' which is the SUM of all the CSAs #~~~~~~~~~~~~~~~ # Step 2) # Prepare the columns #~~~~~~~~~~~~~~~ # create a new dataframe for giggles fi = pd.DataFrame() # append into that dataframe col 003 key = getColName(df, '003') val = getColByName(df, '003') fi[key] = val # append into that dataframe col 032 key = getColName(df, '032') val = getColByName(df, '032') fi[key] = val # construct the denominator, returns 0 iff the other two rows are equal. fi['denominator'] = nullIfEqual( df, '003', '032') # Delete Rows where the 'denominator' column is 0 fi = fi[fi['denominator'] != 0] #~~~~~~~~~~~~~~~ # Step 3) # Run the Calculation #~~~~~~~~~~~~~~~ return fi.apply(lambda x: (x[fi.columns[0]] / x['denominator'])*100, axis=1) # Cell #File: hhs.py #Author: <NAME> #Date: 4/16/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B11005 - HOUSEHOLDS BY PRESENCE OF PEOPLE UNDER 18 YEARS BY HOUSEHOLD TYPE # Universe: Households # Table Creates: hhs, fam, femhhs #purpose: #input: Year #output: import pandas as pd import glob def hhs( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def addKey(df, fi, col): key = getColName(df, col) val = getColByName(df, col) fi[key] = val return fi def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. 
#~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B11005*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = df.sum(numeric_only=True) df1 = pd.DataFrame() df1['CSA'] = df.index df1.set_index('CSA', drop = True, inplace = True) df1['tot'] = df[ 'B11005_001E_Total' ] return df1['tot'] # Cell #File: hsdipl.py #Author: <NAME> #Date: 1/17/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B06009 - PLACE OF BIRTH BY EDUCATIONAL ATTAINMENT IN THE UNITED STATES #purpose: Produce Workforce and Economic Development - Percent Population (25 Years and over) With High School Diploma and Some College or Associates Degree #Table Uses: B06009 - lesshs, hsdipl, bahigher #input: Year #output: import pandas as pd import glob def hsdipl( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def addKey(df, fi, col): key = getColName(df, col) val = getColByName(df, col) fi[key] = val return fi def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. #~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B06009*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = sumInts(df) # Add 'BALTIMORE' which is the SUM of all the CSAs #~~~~~~~~~~~~~~~ # Step 2) # Prepare the columns #~~~~~~~~~~~~~~~ # Final Dataframe fi = pd.DataFrame() columns = ['B06009_003E','B06009_004E','B06009_001E'] for col in columns: fi = addKey(df, fi, col) # Numerators numerators = pd.DataFrame() columns = ['B06009_003E','B06009_004E'] for col in columns: numerators = addKey(df, numerators, col) # Denominators denominators = pd.DataFrame() columns = ['B06009_001E'] for col in columns: denominators = addKey(df, denominators, col) # construct the denominator, returns 0 iff the other two rows are equal. 
#~~~~~~~~~~~~~~~ # Step 3) # Run the Calculation + final mods # ( ( value[1] + value[2] ) / nullif(value[3],0) )*100 #~~~~~~~~~~~~~~~ fi['numerator'] = numerators.sum(axis=1) fi['denominator'] = denominators.sum(axis=1) fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0 fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100 return fi['final'] """ /* <hsdipl_14> */ -- WITH tbl AS ( select csa, ( ( value[1] + value[2] ) / nullif(value[3],0) )*100::numeric as result from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B06009_003E','B06009_004E','B06009_001E']) ) update vital_signs.data set hsdipl = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014'; """ # Cell #File: lesshs.py #Author: <NAME> #Date: 1/17/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B06009 - PLACE OF BIRTH BY EDUCATIONAL ATTAINMENT IN THE UNITED STATES #purpose: Produce Workforce and Economic Development - Percent Population (25 Years and over) With Less Than a High School Diploma or GED Indicator #Table Uses: B06009 - lesshs, hsdipl, bahigher #input: Year #output: import pandas as pd import glob def lesshs( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def addKey(df, fi, col): key = getColName(df, col) val = getColByName(df, col) fi[key] = val return fi def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. #~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B06009*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = sumInts(df) # Add 'BALTIMORE' which is the SUM of all the CSAs #~~~~~~~~~~~~~~~ # Step 2) # Prepare the columns #~~~~~~~~~~~~~~~ # Final Dataframe fi = pd.DataFrame() columns = ['B06009_002E','B06009_001E'] for col in columns: fi = addKey(df, fi, col) # Numerators numerators = pd.DataFrame() columns = ['B06009_002E'] for col in columns: numerators = addKey(df, numerators, col) # Denominators denominators = pd.DataFrame() columns = ['B06009_001E'] for col in columns: denominators = addKey(df, denominators, col) # construct the denominator, returns 0 iff the other two rows are equal. 
#~~~~~~~~~~~~~~~ # Step 3) # Run the Calculation + final mods # ( value[1] / nullif(value[2],0) )*100 #~~~~~~~~~~~~~~~ fi['numerator'] = numerators.sum(axis=1) fi['denominator'] = denominators.sum(axis=1) fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0 fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100 return fi['final'] """ /* <lesshs_14> */ -- WITH tbl AS ( select csa, ( value[1] / nullif(value[2],0) )*100::numeric as result from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B06009_002E','B06009_001E']) ) update vital_signs.data set lesshs = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014'; """ # Cell #File: male.py #Author: <NAME> #Date: 4/16/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B01001 - SEX BY AGE # Universe: Total population # Table Creates: tpop, female, male, age5 age18 age24 age64 age65 #purpose: #input: Year #output: import pandas as pd import glob def male( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def addKey(df, fi, col): key = getColName(df, col) val = getColByName(df, col) fi[key] = val return fi def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. #~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B01001*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = df.sum(numeric_only=True) # df.columns total = df['B01001_001E_Total'] df1 = pd.DataFrame() df1['CSA'] = df.index df1.set_index('CSA', drop = True, inplace = True) df1['onlyTheFellas'] = df[ 'B01001_002E_Total_Male' ] return df1['onlyTheFellas'] # Cell #File: nilf.py #Author: <NAME> #Date: 1/17/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B23001 - SEX BY AGE BY EMPLOYMENT STATUS FOR THE POPULATION 16 YEARS AND OVER # Universe - Population 16 years and over # Table Creates: empl, unempl, unempr, nilf #purpose: Produce Workforce and Economic Development - Percent Population 16-64 Not in Labor Force Indicator #input: Year #output: import pandas as pd import glob def nilf( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def addKey(df, fi, col): key = getColName(df, col) val = getColByName(df, col) fi[key] = val return fi def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. 
#~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B23001*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = sumInts(df) # Add 'BALTIMORE' which is the SUM of all the CSAs #~~~~~~~~~~~~~~~ # Step 2) # Prepare the columns #~~~~~~~~~~~~~~~ # Final Dataframe fi = pd.DataFrame() columns = ['B23001_003E', 'B23001_010E', 'B23001_017E', 'B23001_024E', 'B23001_031E', 'B23001_038E', 'B23001_045E', 'B23001_052E', 'B23001_059E', 'B23001_066E', 'B23001_089E', 'B23001_096E', 'B23001_103E', 'B23001_110E', 'B23001_117E', 'B23001_124E', 'B23001_131E', 'B23001_138E', 'B23001_145E', 'B23001_152E', 'B23001_009E', 'B23001_016E', 'B23001_023E', 'B23001_030E', 'B23001_037E', 'B23001_044E', 'B23001_051E', 'B23001_058E', 'B23001_065E', 'B23001_072E', 'B23001_095E', 'B23001_102E', 'B23001_109E', 'B23001_116E', 'B23001_123E', 'B23001_130E', 'B23001_137E', 'B23001_144E', 'B23001_151E', 'B23001_158E'] for col in columns: fi = addKey(df, fi, col) # Numerators numerators = pd.DataFrame() columns = ['B23001_009E', 'B23001_016E', 'B23001_023E', 'B23001_030E', 'B23001_037E', 'B23001_044E', 'B23001_051E', 'B23001_058E', 'B23001_065E', 'B23001_072E', 'B23001_095E', 'B23001_102E', 'B23001_109E', 'B23001_116E', 'B23001_123E', 'B23001_130E', 'B23001_137E', 'B23001_144E', 'B23001_151E', 'B23001_158E'] for col in columns: numerators = addKey(df, numerators, col) # Denominators denominators = pd.DataFrame() columns = ['B23001_003E', 'B23001_010E', 'B23001_017E', 'B23001_024E', 'B23001_031E', 'B23001_038E', 'B23001_045E', 'B23001_052E', 'B23001_059E', 'B23001_066E', 'B23001_089E', 'B23001_096E', 'B23001_103E', 'B23001_110E', 'B23001_117E', 'B23001_124E', 'B23001_131E', 'B23001_138E', 'B23001_145E', 'B23001_152E'] for col in columns: denominators = addKey(df, denominators, col) # construct the denominator, returns 0 iff the other two rows are equal. 
#~~~~~~~~~~~~~~~ # Step 3) # Run the Calculation # ( ( value[21]+value[22]+value[23]+value[24]+value[25]+value[26]+value[27]+value[28]+value[29]+value[30]+value[31]+value[32]+value[33]+value[34]+value[35]+value[36]+value[37]+value[38]+value[39]+value[40]) --not in labor force 16-64 # / # nullif( (value[1]+value[2]+value[3]+value[4]+value[5]+value[6]+value[7]+value[8]+value[9]+value[10]+value[11]+value[12]+value[13]+value[14]+value[15]+value[16]+value[17]+value[18]+value[19]+value[20]) -- population 16 to 64 ,0) )*100::numeric #~~~~~~~~~~~~~~~ fi['numerator'] = numerators.sum(axis=1) fi['denominator'] = denominators.sum(axis=1) fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0 fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100 return fi['final'] """ /* <nilf_14> */ -- WITH tbl AS ( select csa, ( (value[21]+value[22]+value[23]+value[24]+value[25]+value[26]+value[27]+value[28]+value[29]+value[30]+value[31]+value[32]+value[33]+value[34]+value[35]+value[36]+value[37]+value[38]+value[39]+value[40]) --not in labor force 16-64 / nullif( (value[1]+value[2]+value[3]+value[4]+value[5]+value[6]+value[7]+value[8]+value[9]+value[10]+value[11]+value[12]+value[13]+value[14]+value[15]+value[16]+value[17]+value[18]+value[19]+value[20]) -- population 16 to 64 ,0) )*100::numeric as result from vital_signs.get_acs_vars_csa_and_bc('2014', ARRAY['B23001_003E','B23001_010E','B23001_017E','B23001_024E','B23001_031E','B23001_038E','B23001_045E','B23001_052E','B23001_059E','B23001_066E','B23001_089E','B23001_096E','B23001_103E','B23001_110E','B23001_117E','B23001_124E','B23001_131E','B23001_138E','B23001_145E','B23001_152E','B23001_009E','B23001_016E','B23001_023E','B23001_030E','B23001_037E','B23001_044E','B23001_051E','B23001_058E','B23001_065E','B23001_072E','B23001_095E','B23001_102E','B23001_109E','B23001_116E','B23001_123E','B23001_130E','B23001_137E','B23001_144E','B23001_151E','B23001_158E']) ) update vital_signs.data set nilf = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014'; """ # Cell #File: othrcom.py #Author: <NAME> #Date: 1/24/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B08101 - MEANS OF TRANSPORTATION TO WORK BY AGE # Universe: Workers 16 years and over # Table Creates: othrcom, drvalone, carpool, pubtran, walked #purpose: Produce Sustainability - Percent of Population Using Other Means to Commute to Work (Taxi, Motorcycle, Bicycle, Other) Indicator #input: Year #output: import pandas as pd import glob def othrcom( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def addKey(df, fi, col): key = getColName(df, col) val = getColByName(df, col) fi[key] = val return fi def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. 
#~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B08101*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = sumInts(df) # Add 'BALTIMORE' which is the SUM of all the CSAs #~~~~~~~~~~~~~~~ # Step 2) # Prepare the columns #~~~~~~~~~~~~~~~ # Final Dataframe fi = pd.DataFrame() columns = ['B08101_001E','B08101_049E','B08101_041E'] for col in columns: fi = addKey(df, fi, col) # Numerators numerators = pd.DataFrame() columns = ['B08101_041E'] for col in columns: numerators = addKey(df, numerators, col) # Denominators denominators = pd.DataFrame() columns = ['B08101_001E','B08101_049E'] for col in columns: denominators = addKey(df, denominators, col) # construct the denominator, returns 0 iff the other two rows are equal. #~~~~~~~~~~~~~~~ # Step 3) # Run the Calculation # ( value[3] / nullif((value[1]-value[2]),0) )*100 #~~~~~~~~~~~~~~~ fi['numerator'] = numerators.sum(axis=1) fi['denominator'] = denominators.iloc[: ,0] - denominators.iloc[: ,1] fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0 fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100 #~~~~~~~~~~~~~~~ # Step 4) # Add Special Baltimore City Data # 100- "6.7", "59.8", "9.2", "18.4", "3.7", = 2.2 # 100- (walked + drvalone + carpool + pubtran + workfromhome(13e)) #~~~~~~~~~~~~~~~ url = 'https://api.census.gov/data/20'+str(year)+'/acs/acs5/subject?get=NAME,S0801_C01_010E,S0801_C01_003E,S0801_C01_004E,S0801_C01_009E,S0801_C01_013E&for=county%3A510&in=state%3A24&key=<KEY>' table = pd.read_json(url, orient='records') walked = float(table.loc[1, table.columns[1]] ) drvalone = float(table.loc[1, table.columns[2]] ) carpool = float(table.loc[1, table.columns[3]] ) pubtran = float(table.loc[1, table.columns[4]] ) workfromhome = float(table.loc[1, table.columns[5]] ) fi['final']['Baltimore City'] = 100 - ( walked + drvalone + carpool + pubtran + workfromhome ) return fi['final'] """ /* <othrcom_14> */ -- WITH tbl AS ( select csa, ( value[3] / nullif((value[1]-value[2]),0) )*100::numeric as result from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B08101_001E','B08101_049E','B08101_041E']) ) update vital_signs.data set othrcom = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014'; """ # Cell #File: p2more.py #Author: <NAME> #Date: 4/16/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B03002 - HISPANIC OR LATINO ORIGIN BY RACE # Universe: Total Population # Table Creates: racdiv, paa, pwhite, pasi, phisp, p2more, ppac #purpose: #input: Year #output: import pandas as pd import glob def p2more( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def addKey(df, fi, col): key = getColName(df, col) val = getColByName(df, col) fi[key] = val return fi def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. 
#~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B03002*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = df.sum(numeric_only=True) # Append the one column from the other ACS Table df['B03002_012E_Total_Hispanic_or_Latino'] df1 = pd.DataFrame() df1['CSA'] = df.index df1.set_index('CSA', drop = True, inplace = True) tot = df[ 'B03002_001E_Total' ] df1['TwoOrMore%NH'] = df['B03002_009E_Total_Not_Hispanic_or_Latino_Two_or_more_races'] / tot * 100 return df1['TwoOrMore%NH'] # Cell #File: pubtran.py #Author: <NAME> #Date: 1/17/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B08101 - MEANS OF TRANSPORTATION TO WORK BY AGE # Universe: Workers 16 Years and Over # Table Creates: othrcom, drvalone, carpool, pubtran, walked #purpose: Produce Sustainability - Percent of Population that Uses Public Transportation to Get to Work Indicator #input: Year #output: import pandas as pd import glob def pubtran( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def addKey(df, fi, col): key = getColName(df, col) val = getColByName(df, col) fi[key] = val return fi def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. #~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B08101*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = sumInts(df) # Add 'BALTIMORE' which is the SUM of all the CSAs #~~~~~~~~~~~~~~~ # Step 2) # Prepare the columns #~~~~~~~~~~~~~~~ # Final Dataframe fi = pd.DataFrame() columns = ['B08101_001E','B08101_049E','B08101_025E'] for col in columns: fi = addKey(df, fi, col) # Numerators numerators = pd.DataFrame() columns = ['B08101_025E'] for col in columns: numerators = addKey(df, numerators, col) # Denominators denominators = pd.DataFrame() columns = ['B08101_001E','B08101_049E'] for col in columns: denominators = addKey(df, denominators, col) # construct the denominator, returns 0 iff the other two rows are equal. 
#~~~~~~~~~~~~~~~ # Step 3) # Run the Calculation # ( value[3] / nullif((value[1]-value[2]),0) )*100 #~~~~~~~~~~~~~~~ fi['numerator'] = numerators.sum(axis=1) fi['denominator'] = denominators.iloc[: ,0] - denominators.iloc[: ,1] fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0 fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100 #~~~~~~~~~~~~~~~ # Step 4) # Add Special Baltimore City Data #~~~~~~~~~~~~~~~ url = 'https://api.census.gov/data/20'+str(year)+'/acs/acs5/subject?get=NAME,S0801_C01_009E&for=county%3A510&in=state%3A24&key=<KEY>' table = pd.read_json(url, orient='records') fi['final']['Baltimore City'] = float(table.loc[1, table.columns[1]]) return fi['final'] """ /* <pubtran_14> */ -- WITH tbl AS ( select csa, ( value[3] / nullif((value[1]-value[2]),0) )*100::numeric as result from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B08101_001E','B08101_049E','B08101_025E']) ) update vital_signs.data set pubtran = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014'; """ # Cell #File: age5.py #Author: <NAME> #Date: 4/16/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B01001 - SEX BY AGE # Universe: Total population # Table Creates: tpop, female, male, age5 age18 age24 age64 age65 #purpose: #input: Year #output: import pandas as pd import glob def age5( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def addKey(df, fi, col): key = getColName(df, col) val = getColByName(df, col) fi[key] = val return fi def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. #~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B01001*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = df.sum(numeric_only=True) # df.columns total = df['B01001_001E_Total'] df1 = pd.DataFrame() df1['CSA'] = df.index df1.set_index('CSA', drop = True, inplace = True) # Under 5 df1['under_5'] = ( df[ 'B01001_003E_Total_Male_Under_5_years' ] + df[ 'B01001_027E_Total_Female_Under_5_years' ] ) / total * 100 return df1['under_5'] # Cell #File: age24.py #Author: <NAME> #Date: 4/16/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B01001 - SEX BY AGE # Universe: Total population # Table Creates: tpop, female, male, age5 age18 age24 age64 age65 #purpose: #input: Year #output: import pandas as pd import glob def age24( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def addKey(df, fi, col): key = getColName(df, col) val = getColByName(df, col) fi[key] = val return fi def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. 
#~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B01001*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = df.sum(numeric_only=True) # df.columns total = df['B01001_001E_Total'] df1 = pd.DataFrame() df1['CSA'] = df.index df1.set_index('CSA', drop = True, inplace = True) df1['eighteen_to_24'] = ( df[ 'B01001_007E_Total_Male_18_and_19_years' ] + df[ 'B01001_008E_Total_Male_20_years' ] + df[ 'B01001_009E_Total_Male_21_years' ] + df[ 'B01001_010E_Total_Male_22_to_24_years' ] + df[ 'B01001_031E_Total_Female_18_and_19_years' ] + df[ 'B01001_032E_Total_Female_20_years' ] + df[ 'B01001_033E_Total_Female_21_years' ] + df[ 'B01001_034E_Total_Female_22_to_24_years' ] ) / total * 100 return df1['eighteen_to_24'] # Cell #File: age64.py #Author: <NAME> #Date: 4/16/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B01001 - SEX BY AGE # Universe: Total population # Table Creates: tpop, female, male, age5 age18 age24 age64 age65 #purpose: #input: Year #output: import pandas as pd import glob def age64( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def addKey(df, fi, col): key = getColName(df, col) val = getColByName(df, col) fi[key] = val return fi def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. #~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B01001*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = df.sum(numeric_only=True) # df.columns total = df['B01001_001E_Total'] df1 = pd.DataFrame() df1['CSA'] = df.index df1.set_index('CSA', drop = True, inplace = True) df1['twentyfive_to_64'] = ( df[ 'B01001_011E_Total_Male_25_to_29_years' ] + df[ 'B01001_012E_Total_Male_30_to_34_years' ] + df[ 'B01001_013E_Total_Male_35_to_39_years' ] + df[ 'B01001_014E_Total_Male_40_to_44_years' ] + df[ 'B01001_015E_Total_Male_45_to_49_years' ] + df[ 'B01001_016E_Total_Male_50_to_54_years' ] + df[ 'B01001_017E_Total_Male_55_to_59_years' ] + df[ 'B01001_018E_Total_Male_60_and_61_years' ] + df[ 'B01001_019E_Total_Male_62_to_64_years' ] + df[ 'B01001_035E_Total_Female_25_to_29_years' ] + df[ 'B01001_036E_Total_Female_30_to_34_years' ] + df[ 'B01001_037E_Total_Female_35_to_39_years' ] + df[ 'B01001_038E_Total_Female_40_to_44_years' ] + df[ 'B01001_039E_Total_Female_45_to_49_years' ] + df[ 'B01001_040E_Total_Female_50_to_54_years' ] + df[ 'B01001_041E_Total_Female_55_to_59_years' ] + df[ 'B01001_042E_Total_Female_60_and_61_years' ] + df[ 'B01001_043E_Total_Female_62_to_64_years' ] ) / total * 100 return df1['twentyfive_to_64'] # Cell #File: age18.py #Author: <NAME> #Date: 4/16/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B01001 - SEX BY AGE # Universe: Total population # Table Creates: tpop, female, male, age5 age18 age24 age64 age65 #purpose: #input: Year #output: import pandas as pd import glob def age18( year ): def getColName (df, col): return 
df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def addKey(df, fi, col): key = getColName(df, col) val = getColByName(df, col) fi[key] = val return fi def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. #~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B01001*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = df.sum(numeric_only=True) # df.columns total = df['B01001_001E_Total'] df1 = pd.DataFrame() df1['CSA'] = df.index df1.set_index('CSA', drop = True, inplace = True) df1['five_to_17'] = ( df[ 'B01001_004E_Total_Male_5_to_9_years' ] + df[ 'B01001_005E_Total_Male_10_to_14_years' ] + df[ 'B01001_006E_Total_Male_15_to_17_years' ] + df[ 'B01001_028E_Total_Female_5_to_9_years' ] + df[ 'B01001_029E_Total_Female_10_to_14_years' ] + df[ 'B01001_030E_Total_Female_15_to_17_years' ] ) / total * 100 return df1['five_to_17'] # Cell #File: age65.py #Author: <NAME> #Date: 4/16/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B01001 - SEX BY AGE # Universe: Total population # Table Creates: tpop, female, male, age5 age18 age24 age64 age65 #purpose: #input: Year #output: import pandas as pd import glob def age65( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def addKey(df, fi, col): key = getColName(df, col) val = getColByName(df, col) fi[key] = val return fi def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. 
#~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B01001*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = df.sum(numeric_only=True) # df.columns total = df['B01001_001E_Total'] df1 = pd.DataFrame() df1['CSA'] = df.index df1.set_index('CSA', drop = True, inplace = True) df1['sixtyfive_and_up'] = ( df[ 'B01001_020E_Total_Male_65_and_66_years' ] + df[ 'B01001_021E_Total_Male_67_to_69_years' ] + df[ 'B01001_022E_Total_Male_70_to_74_years' ] + df[ 'B01001_023E_Total_Male_75_to_79_years' ] + df[ 'B01001_024E_Total_Male_80_to_84_years' ] + df[ 'B01001_025E_Total_Male_85_years_and_over' ] + df[ 'B01001_044E_Total_Female_65_and_66_years' ] + df[ 'B01001_045E_Total_Female_67_to_69_years' ] + df[ 'B01001_046E_Total_Female_70_to_74_years' ] + df[ 'B01001_047E_Total_Female_75_to_79_years' ] + df[ 'B01001_048E_Total_Female_80_to_84_years' ] + df[ 'B01001_049E_Total_Female_85_years_and_over' ] ) / total * 100 return df1['sixtyfive_and_up'] # Cell #File: affordm.py #Author: <NAME> #Date: 1/25/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B25091 - MORTGAGE STATUS BY SELECTED MONTHLY OWNER COSTS AS A PERCENTAGE OF HOUSEHOLD INCOME IN THE PAST 12 MONTHS # Universe: Owner-occupied housing units # Table Creates: #purpose: Produce Housing and Community Development - Affordability Index - Mortgage Indicator #input: Year #output: import pandas as pd import glob def affordm( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def addKey(df, fi, col): key = getColName(df, col) val = getColByName(df, col) fi[key] = val return fi def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. #~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B25091*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = sumInts(df) # Add 'BALTIMORE' which is the SUM of all the CSAs #~~~~~~~~~~~~~~~ # Step 2) # Prepare the columns #~~~~~~~~~~~~~~~ # Final Dataframe fi = pd.DataFrame() columns = ['B25091_008E','B25091_009E','B25091_010E','B25091_011E','B25091_002E'] for col in columns: fi = addKey(df, fi, col) # Numerators numerators = pd.DataFrame() columns = ['B25091_008E','B25091_009E','B25091_010E','B25091_011E'] for col in columns: numerators = addKey(df, numerators, col) # Denominators denominators = pd.DataFrame() columns = ['B25091_002E'] for col in columns: denominators = addKey(df, denominators, col) # construct the denominator, returns 0 iff the other two rows are equal. 
#~~~~~~~~~~~~~~~ # Step 3) # Run the Calculation # ( (value[1]+value[2]+value[3]+value[4]) / nullif(value[5],0) )*100 #~~~~~~~~~~~~~~~ fi['numerator'] = numerators.sum(axis=1) fi['denominator'] = denominators.sum(axis=1) fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0 fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100 return fi['final'] """ WITH tbl AS ( select csa, ( (value[1]+value[2]+value[3]+value[4]) / nullif(value[5],0) )*100::numeric as result from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B25091_008E','B25091_009E','B25091_010E','B25091_011E','B25091_002E']) ) update vital_signs.data set affordm = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014'; """ # Cell #File: affordr.py #Author: <NAME> #Date: 1/17/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B25070 - GROSS RENT AS A PERCENTAGE OF HOUSEHOLD INCOME IN THE PAST 12 MONTHS # Universe: Renter-occupied housing units #purpose: Produce Housing and Community Development - Affordability Index - Rent Indicator #input: Year #output: import pandas as pd import glob def affordr( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def addKey(df, fi, col): key = getColName(df, col) val = getColByName(df, col) fi[key] = val return fi def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. #~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B25070*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = sumInts(df) # Add 'BALTIMORE' which is the SUM of all the CSAs #~~~~~~~~~~~~~~~ # Step 2) # Prepare the columns #~~~~~~~~~~~~~~~ # Final Dataframe fi = pd.DataFrame() columns = ['B25070_007E','B25070_008E','B25070_009E','B25070_010E','B25070_001E'] for col in columns: fi = addKey(df, fi, col) # Numerators numerators = pd.DataFrame() columns = ['B25070_007E','B25070_008E','B25070_009E','B25070_010E'] for col in columns: numerators = addKey(df, numerators, col) # Denominators denominators = pd.DataFrame() columns = ['B25070_001E'] for col in columns: denominators = addKey(df, denominators, col) # construct the denominator, returns 0 iff the other two rows are equal. 
#~~~~~~~~~~~~~~~ # Step 3) # Run the Calculation # ( (value[1]+value[2]+value[3]+value[4]) / nullif(value[5],0) )*100 #~~~~~~~~~~~~~~~ fi['numerator'] = numerators.sum(axis=1) fi['denominator'] = denominators.sum(axis=1) fi = fi[fi['denominator'] != 0] # Delete Rows where the 'denominator' column is 0 fi['final'] = (fi['numerator'] / fi['denominator'] ) * 100 return fi['final'] """ WITH tbl AS ( select csa, ( (value[1]+value[2]+value[3]+value[4]) / nullif(value[5],0) )*100::numeric as result from vital_signs.get_acs_vars_csa_and_bc('2014',ARRAY['B25070_007E','B25070_008E','B25070_009E','B25070_010E','B25070_001E']) ) update vital_signs.data set affordr = result from tbl where data2.csa = tbl.csa and update_data_year = '2014' and data_year = '2014'; """ # Cell #File: bahigher.py #Author: <NAME> #Date: 1/17/19 #Section: Bnia #Email: <EMAIL> #Description: # Uses ACS Table B06009 - PLACE OF BIRTH BY EDUCATIONAL ATTAINMENT IN THE UNITED STATES #purpose: Produce Workforce and Economic Development - Percent Population (25 Years and over) with a Bachelor's Degree or Above #Table Uses: B06009 - lesshs, hsdipl, bahigher #input: Year #output: import pandas as pd import glob def bahigher( year ): def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0] def getColByName (df, col): return df[getColName(df, col)] def addKey(df, fi, col): key = getColName(df, col) val = getColByName(df, col) fi[key] = val return fi def nullIfEqual(df, c1, c2): return df.apply(lambda x: x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1) def sumInts(df): return df.sum(numeric_only=True) #~~~~~~~~~~~~~~~ # Step 1) # Fetch Tract Files w/CSA Lables by Name from the 2_cleaned folder. #~~~~~~~~~~~~~~~ fileName = '' for name in glob.glob('AcsDataClean/B06009*5y'+str(year)+'_est.csv'): fileName = name df = pd.read_csv( fileName, index_col=0 ) # Aggregate by CSA # Group By CSA so that they may be opperated on df = df.groupby('CSA') # Aggregate Numeric Values by Sum df = sumInts(df) # Add 'BALTIMORE' which is the SUM of all the CSAs #~~~~~~~~~~~~~~~ # Step 2) # Prepare the columns #~~~~~~~~~~~~~~~ # Final Dataframe fi = pd.DataFrame() columns = ['B06009_005E','B06009_006E','B06009_001E'] for col in columns: fi = addKey(df, fi, col) # Numerators numerators =
pd.DataFrame()
    # Numerator: bachelor's degree (005E) + graduate or professional degree (006E);
    # denominator: total population 25 years and over (001E) — mirrors the other
    # B06009 indicators above (lesshs / hsdipl).
    columns = ['B06009_005E','B06009_006E']
    for col in columns:
        numerators = addKey(df, numerators, col)

    # Denominators
    denominators = pd.DataFrame()
    columns = ['B06009_001E']
    for col in columns:
        denominators = addKey(df, denominators, col)

    #~~~~~~~~~~~~~~~
    # Step 3)
    # Run the Calculation
    # ( (value[1]+value[2]) / nullif(value[3],0) )*100
    #~~~~~~~~~~~~~~~
    fi['numerator'] = numerators.sum(axis=1)
    fi['denominator'] = denominators.sum(axis=1)
    fi = fi[fi['denominator'] != 0]  # Delete Rows where the 'denominator' column is 0
    fi['final'] = (fi['numerator'] / fi['denominator']) * 100

    return fi['final']
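# Cell
# Usage sketch (illustrative only — the year value, the indicator selection and the
# output filename below are assumptions, not part of the original scripts): each
# indicator function above takes a two-digit ACS 5-year vintage (e.g. 17 for the
# 2013-2017 release), reads the matching AcsDataClean/*5y<year>_est.csv extract,
# aggregates tracts to CSAs, and returns a pandas Series indexed by CSA. They can
# therefore be collected into a single CSA-level table:
import pandas as pd

def build_indicator_table(year=17):
    # Combine a handful of the indicators defined above into one DataFrame,
    # aligned on the shared CSA index.
    return pd.DataFrame({
        'male': male(year),
        'nilf': nilf(year),
        'pubtran': pubtran(year),
        'age65': age65(year),
    })

# Example (uncomment to run once the AcsDataClean extracts are in place):
# table = build_indicator_table(17)
# table.to_csv('csa_indicators_5y17.csv')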
""" October 2020 Updated: August 2021 Software version: Python 3.7 This code retrieves the calculation of building material demand and embodied greenhouse gas emissions in 26 global regions between 2020-2060. For the original code & latest updates, see: https://github.com/oucxiaoyang/GloBUME The building material model is based on the BUMA model developed by <NAME>, Leiden University, the Netherlands. For the original code & latest updates, see: https://github.com/SPDeetman/BUMA The dynamic stock model is based on the ODYM model developed by <NAME>, Uni Freiburg, Germany. For the original code & latest updates, see: https://github.com/IndEcol/ODYM @author: <NAME>; <EMAIL> <NAME>; <EMAIL> <NAME>; <EMAIL> contributions from: <NAME> *NOTE: Insert location of the GloBUME-main folder in 'dir_path' (line 28) to run the code. """ #%% GENERAL SETTING & STATEMENTS import pandas as pd import numpy as np import os import ctypes import math # set current directory dir_path = "" os.chdir(dir_path) # Set general constants regions = 26 #26 IMAGE regions res_building_types = 4 #4 residential building types: detached, semi-detached, appartments & high-rise area = 2 #2 areas: rural & urban materials = 7 #7 materials: Steel, brick, Concrete, Wood, Copper, Aluminium, Glass inflation = 1.2423 # gdp/cap inflation correction between 2005 (IMAGE data) & 2016 (commercial calibration) according to https://www.bls.gov/data/inflation_calculator.htm # Set Flags for sensitivity analysis flag_alpha = 0 # switch for the sensitivity analysis on alpha, if 1 the maximum alpha is 10% above the maximum found in the data flag_ExpDec = 0 # switch to choose between Gompertz and Exponential Decay function for commercial floorspace demand (0 = Gompertz, 1 = Expdec) flag_Normal = 0 # switch to choose between Weibull and Normal lifetime distributions (0 = Weibull, 1 = Normal) flag_Mean = 0 # switch to choose between material intensity settings (0 = regular regional, 1 = mean, 2 = high, 3 = low, 4 = median) #%%Load files & arrange tables ---------------------------------------------------- if flag_Mean == 0: file_addition = '' elif flag_Mean == 1: file_addition = '_mean' elif flag_Mean == 2: file_addition = '_high' elif flag_Mean == 3: file_addition = '_low' else: file_addition = '_median' # Load Population, Floor area, and Service value added (SVA) Database csv-files pop = pd.read_csv('files_population/pop.csv', index_col = [0]) # Pop; unit: million of people; meaning: global population (over time, by region) rurpop = pd.read_csv('files_population/rurpop.csv', index_col = [0]) # rurpop; unit: %; meaning: the share of people living in rural areas (over time, by region) housing_type = pd.read_csv('files_population\Housing_type.csv') # Housing_type; unit: %; meaning: the share of the NUMBER OF PEOPLE living in a particular building type (by region & by area) floorspace = pd.read_csv('files_floor_area/res_Floorspace.csv') # Floorspace; unit: m2/capita; meaning: the average m2 per capita (over time, by region & area) floorspace = floorspace[floorspace.Region != regions + 1] # Remove empty region 27 avg_m2_cap = pd.read_csv('files_floor_area\Average_m2_per_cap.csv') # Avg_m2_cap; unit: m2/capita; meaning: average square meters per person (by region & area (rural/urban) & building type) sva_pc_2005 = pd.read_csv('files_GDP/sva_pc.csv', index_col = [0]) sva_pc = sva_pc_2005 * inflation # we use the inflation corrected SVA to adjust for the fact that IMAGE provides gdp/cap in 2005 US$ # load material density data csv-files building_materials 
= pd.read_csv('files_material_density\Building_materials' + file_addition + '.csv') # Building_materials; unit: kg/m2; meaning: the average material use per square meter (by building type, by region & by area) materials_commercial = pd.read_csv('files_material_density\materials_commercial' + file_addition + '.csv') # 7 building materials in 4 commercial building types; unit: kg/m2; meaning: the average material use per square meter (by commercial building type) # Load fitted regression parameters for comercial floor area estimate if flag_alpha == 0: gompertz = pd.read_csv('files_floor_area//files_commercial/Gompertz_parameters.csv', index_col = [0]) else: gompertz = pd.read_csv('files_floor_area//files_commercial/Gompertz_parameters_alpha.csv', index_col = [0]) # Ensure full time series for pop & rurpop (interpolation, some years are missing) rurpop2 = rurpop.reindex(list(range(1970,2061,1))).interpolate() pop2 = pop.reindex(list(range(1970,2061,1))).interpolate() # Remove 1st year, to ensure same Table size as floorspace data (from 1971) pop2 = pop2.iloc[1:] rurpop2 = rurpop2.iloc[1:] # pre-calculate urban population urbpop = 1 - rurpop2 # urban population is 1 - the fraction of people living in rural areas (rurpop) # Restructure the tables to regions as columns; for floorspace floorspace_rur = floorspace.pivot(index = "t", columns = "Region", values = "Rural") floorspace_urb = floorspace.pivot(index = "t", columns = "Region", values = "Urban") # Restructuring for square meters (m2/cap) avg_m2_cap_urb = avg_m2_cap.loc[avg_m2_cap['Area'] == 'Urban'].drop('Area', 1).T # Remove area column & Transpose avg_m2_cap_urb.columns = list(map(int,avg_m2_cap_urb.iloc[0])) # name columns according to the row containing the region-labels avg_m2_cap_urb2 = avg_m2_cap_urb.drop(['Region']) # Remove idle row avg_m2_cap_rur = avg_m2_cap.loc[avg_m2_cap['Area'] == 'Rural'].drop('Area', 1).T # Remove area column & Transpose avg_m2_cap_rur.columns = list(map(int,avg_m2_cap_rur.iloc[0])) # name columns according to the row containing the region-labels avg_m2_cap_rur2 = avg_m2_cap_rur.drop(['Region']) # Remove idle row # Restructuring for the Housing types (% of population living in them) housing_type_urb = housing_type.loc[housing_type['Area'] == 'Urban'].drop('Area', 1).T # Remove area column & Transpose housing_type_urb.columns = list(map(int,housing_type_urb.iloc[0])) # name columns according to the row containing the region-labels housing_type_urb2 = housing_type_urb.drop(['Region']) # Remove idle row housing_type_rur = housing_type.loc[housing_type['Area'] == 'Rural'].drop('Area', 1).T # Remove area column & Transpose housing_type_rur.columns = list(map(int,housing_type_rur.iloc[0])) # name columns according to the row containing the region-labels housing_type_rur2 = housing_type_rur.drop(['Region']) # Remove idle row #%% COMMERCIAL building space demand (stock) calculated from Gomperz curve (fitted, using separate regression model) # Select gompertz curve paramaters for the total commercial m2 demand (stock) alpha = gompertz['All']['a'] if flag_ExpDec == 0 else 25.601 beta = gompertz['All']['b'] if flag_ExpDec == 0 else 28.431 gamma = gompertz['All']['c'] if flag_ExpDec == 0 else 0.0415 # find the total commercial m2 stock (in Millions of m2) commercial_m2_cap = pd.DataFrame(index = range(1971,2061), columns = range(1,27)) for year in range(1971,2061): for region in range(1,27): if flag_ExpDec == 0: commercial_m2_cap[region][year] = alpha * math.exp(-beta * math.exp((-gamma/1000) * 
sva_pc[str(region)][year])) else: commercial_m2_cap[region][year] = max(0.542, alpha - beta * math.exp((-gamma/1000) * sva_pc[str(region)][year])) # Subdivide the total across Offices, Retail+, Govt+ & Hotels+ commercial_m2_cap_office = pd.DataFrame(index = range(1971,2061), columns = range(1,27)) # Offices commercial_m2_cap_retail = pd.DataFrame(index = range(1971,2061), columns = range(1,27)) # Retail & Warehouses commercial_m2_cap_hotels = pd.DataFrame(index = range(1971,2061), columns = range(1,27)) # Hotels & Restaurants commercial_m2_cap_govern = pd.DataFrame(index = range(1971,2061), columns = range(1,27)) # Hospitals, Education, Government & Transportation minimum_com_office = 25 minimum_com_retail = 25 minimum_com_hotels = 25 minimum_com_govern = 25 for year in range(1971,2061): for region in range(1,27): # get the square meter per capita floorspace for 4 commercial applications office = gompertz['Office']['a'] * math.exp(-gompertz['Office']['b'] * math.exp((-gompertz['Office']['c']/1000) * sva_pc[str(region)][year])) retail = gompertz['Retail+']['a'] * math.exp(-gompertz['Retail+']['b'] * math.exp((-gompertz['Retail+']['c']/1000) * sva_pc[str(region)][year])) hotels = gompertz['Hotels+']['a'] * math.exp(-gompertz['Hotels+']['b'] * math.exp((-gompertz['Hotels+']['c']/1000) * sva_pc[str(region)][year])) govern = gompertz['Govt+']['a'] * math.exp(-gompertz['Govt+']['b'] * math.exp((-gompertz['Govt+']['c']/1000) * sva_pc[str(region)][year])) #calculate minimum values for later use in historic tail(Region 20: China @ 134 $/cap SVA) minimum_com_office = office if office < minimum_com_office else minimum_com_office minimum_com_retail = retail if retail < minimum_com_retail else minimum_com_retail minimum_com_hotels = hotels if hotels < minimum_com_hotels else minimum_com_hotels minimum_com_govern = govern if govern < minimum_com_govern else minimum_com_govern # Then use the ratio's to subdivide the total commercial floorspace into 4 categories commercial_sum = office + retail + hotels + govern commercial_m2_cap_office[region][year] = commercial_m2_cap[region][year] * (office/commercial_sum) commercial_m2_cap_retail[region][year] = commercial_m2_cap[region][year] * (retail/commercial_sum) commercial_m2_cap_hotels[region][year] = commercial_m2_cap[region][year] * (hotels/commercial_sum) commercial_m2_cap_govern[region][year] = commercial_m2_cap[region][year] * (govern/commercial_sum) #%% Add historic tail (1720-1970) + 100 yr initial ----------------------------------------------------------- # load historic population development hist_pop = pd.read_csv('files_initial_stock\hist_pop.csv', index_col = [0]) # initial population as a percentage of the 1970 population; unit: %; according to the Maddison Project Database (MPD) 2018 (Groningen University) # Determine the historical average global trend in floorspace/cap & the regional rural population share based on the last 10 years of IMAGE data floorspace_urb_trend_by_region = [0 for j in range(0,26)] floorspace_rur_trend_by_region = [0 for j in range(0,26)] rurpop_trend_by_region = [0 for j in range(0,26)] commercial_m2_cap_office_trend = [0 for j in range(0,26)] commercial_m2_cap_retail_trend = [0 for j in range(0,26)] commercial_m2_cap_hotels_trend = [0 for j in range(0,26)] commercial_m2_cap_govern_trend = [0 for j in range(0,26)] # For the RESIDENTIAL & COMMERCIAL floorspace: Derive the annual trend (in m2/cap) over the initial 10 years of IMAGE data for region in range(1,27): floorspace_urb_trend_by_year = [0 for i in range(0,10)] 
floorspace_rur_trend_by_year = [0 for i in range(0,10)] commercial_m2_cap_office_trend_by_year = [0 for j in range(0,10)] commercial_m2_cap_retail_trend_by_year = [0 for i in range(0,10)] commercial_m2_cap_hotels_trend_by_year = [0 for j in range(0,10)] commercial_m2_cap_govern_trend_by_year = [0 for i in range(0,10)] # Get the growth by year (for the first 10 years) for year in range(1970,1980): floorspace_urb_trend_by_year[year-1970] = floorspace_urb[region][year+1]/floorspace_urb[region][year+2] floorspace_rur_trend_by_year[year-1970] = floorspace_rur[region][year+1]/floorspace_rur[region][year+2] commercial_m2_cap_office_trend_by_year[year-1970] = commercial_m2_cap_office[region][year+1]/commercial_m2_cap_office[region][year+2] commercial_m2_cap_retail_trend_by_year[year-1970] = commercial_m2_cap_retail[region][year+1]/commercial_m2_cap_retail[region][year+2] commercial_m2_cap_hotels_trend_by_year[year-1970] = commercial_m2_cap_hotels[region][year+1]/commercial_m2_cap_hotels[region][year+2] commercial_m2_cap_govern_trend_by_year[year-1970] = commercial_m2_cap_govern[region][year+1]/commercial_m2_cap_govern[region][year+2] rurpop_trend_by_region[region-1] = ((1 - (rurpop[str(region)][1980]/rurpop[str(region)][1970]))/10)*100 floorspace_urb_trend_by_region[region-1] = sum(floorspace_urb_trend_by_year)/10 floorspace_rur_trend_by_region[region-1] = sum(floorspace_rur_trend_by_year)/10 commercial_m2_cap_office_trend[region-1] = sum(commercial_m2_cap_office_trend_by_year)/10 commercial_m2_cap_retail_trend[region-1] = sum(commercial_m2_cap_retail_trend_by_year)/10 commercial_m2_cap_hotels_trend[region-1] = sum(commercial_m2_cap_hotels_trend_by_year)/10 commercial_m2_cap_govern_trend[region-1] = sum(commercial_m2_cap_govern_trend_by_year)/10 # Average global annual decline in floorspace/cap in %, rural: 1%; urban 1.2%; commercial: 1.26-2.18% /yr floorspace_urb_trend_global = (1 - (sum(floorspace_urb_trend_by_region)/26))*100 # in % decrease per annum floorspace_rur_trend_global = (1 - (sum(floorspace_rur_trend_by_region)/26))*100 # in % decrease per annum commercial_m2_cap_office_trend_global = (1 - (sum(commercial_m2_cap_office_trend)/26))*100 # in % decrease per annum commercial_m2_cap_retail_trend_global = (1 - (sum(commercial_m2_cap_retail_trend)/26))*100 # in % decrease per annum commercial_m2_cap_hotels_trend_global = (1 - (sum(commercial_m2_cap_hotels_trend)/26))*100 # in % decrease per annum commercial_m2_cap_govern_trend_global = (1 - (sum(commercial_m2_cap_govern_trend)/26))*100 # in % decrease per annum # define historic floorspace (1820-1970) in m2/cap floorspace_urb_1820_1970 = pd.DataFrame(index = range(1820,1971), columns = floorspace_urb.columns) floorspace_rur_1820_1970 = pd.DataFrame(index = range(1820,1971), columns = floorspace_rur.columns) rurpop_1820_1970 = pd.DataFrame(index = range(1820,1971), columns = rurpop.columns) pop_1820_1970 = pd.DataFrame(index = range(1820,1971), columns = pop2.columns) commercial_m2_cap_office_1820_1970 = pd.DataFrame(index = range(1820,1971), columns = commercial_m2_cap_office.columns) commercial_m2_cap_retail_1820_1970 = pd.DataFrame(index = range(1820,1971), columns = commercial_m2_cap_retail.columns) commercial_m2_cap_hotels_1820_1970 = pd.DataFrame(index = range(1820,1971), columns = commercial_m2_cap_hotels.columns) commercial_m2_cap_govern_1820_1970 = pd.DataFrame(index = range(1820,1971), columns = commercial_m2_cap_govern.columns) # Find minumum or maximum values in the original IMAGE data (Just for residential, commercial minimum 
values have been calculated above) minimum_urb_fs = floorspace_urb.values.min() # Region 20: China minimum_rur_fs = floorspace_rur.values.min() # Region 20: China maximum_rurpop = rurpop.values.max() # Region 9 : Eastern Africa # Calculate the actual values used between 1820 & 1970, given the trends & the min/max values for region in range(1,regions+1): for year in range(1820,1971): # MAX of 1) the MINimum value & 2) the calculated value floorspace_urb_1820_1970[region][year] = max(minimum_urb_fs, floorspace_urb[region][1971] * ((100-floorspace_urb_trend_global)/100)**(1971-year)) # single global value for average annual Decrease floorspace_rur_1820_1970[region][year] = max(minimum_rur_fs, floorspace_rur[region][1971] * ((100-floorspace_rur_trend_global)/100)**(1971-year)) # single global value for average annual Decrease commercial_m2_cap_office_1820_1970[region][year] = max(minimum_com_office, commercial_m2_cap_office[region][1971] * ((100-commercial_m2_cap_office_trend_global)/100)**(1971-year)) # single global value for average annual Decrease commercial_m2_cap_retail_1820_1970[region][year] = max(minimum_com_retail, commercial_m2_cap_retail[region][1971] * ((100-commercial_m2_cap_retail_trend_global)/100)**(1971-year)) # single global value for average annual Decrease commercial_m2_cap_hotels_1820_1970[region][year] = max(minimum_com_hotels, commercial_m2_cap_hotels[region][1971] * ((100-commercial_m2_cap_hotels_trend_global)/100)**(1971-year)) # single global value for average annual Decrease commercial_m2_cap_govern_1820_1970[region][year] = max(minimum_com_govern, commercial_m2_cap_govern[region][1971] * ((100-commercial_m2_cap_govern_trend_global)/100)**(1971-year)) # single global value for average annual Decrease # MIN of 1) the MAXimum value & 2) the calculated value rurpop_1820_1970[str(region)][year] = min(maximum_rurpop, rurpop[str(region)][1970] * ((100 + rurpop_trend_by_region[region - 1])/100)**(1970 - year)) # average annual INcrease by region # just add the tail to the population (no min/max & trend is pre-calculated in hist_pop) pop_1820_1970[str(region)][year] = hist_pop[str(region)][year] * pop[str(region)][1970] urbpop_1820_1970 = 1 - rurpop_1820_1970 # To avoid full model setup in 1820 (all required stock gets built in yr 1) we assume another tail that linearly increases to the 1820 value over a 100 year time period, so 1720 = 0 floorspace_urb_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = floorspace_urb.columns) floorspace_rur_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = floorspace_rur.columns) rurpop_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = rurpop.columns) urbpop_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = urbpop.columns) pop_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = pop2.columns) commercial_m2_cap_office_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = commercial_m2_cap_office.columns) commercial_m2_cap_retail_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = commercial_m2_cap_retail.columns) commercial_m2_cap_hotels_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = commercial_m2_cap_hotels.columns) commercial_m2_cap_govern_1721_1820 = pd.DataFrame(index = range(1721,1820), columns = commercial_m2_cap_govern.columns) for region in range(1,27): for time in range(1721,1820): # MAX(0,...) 
Because of floating point deviations, leading to negative stock in some cases floorspace_urb_1721_1820[int(region)][time] = max(0.0, floorspace_urb_1820_1970[int(region)][1820] - (floorspace_urb_1820_1970[int(region)][1820]/100)*(1820-time)) floorspace_rur_1721_1820[int(region)][time] = max(0.0, floorspace_rur_1820_1970[int(region)][1820] - (floorspace_rur_1820_1970[int(region)][1820]/100)*(1820-time)) rurpop_1721_1820[str(region)][time] = max(0.0, rurpop_1820_1970[str(region)][1820] - (rurpop_1820_1970[str(region)][1820]/100)*(1820-time)) urbpop_1721_1820[str(region)][time] = max(0.0, urbpop_1820_1970[str(region)][1820] - (urbpop_1820_1970[str(region)][1820]/100)*(1820-time)) pop_1721_1820[str(region)][time] = max(0.0, pop_1820_1970[str(region)][1820] - (pop_1820_1970[str(region)][1820]/100)*(1820-time)) commercial_m2_cap_office_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_office_1820_1970[region][1820] - (commercial_m2_cap_office_1820_1970[region][1820]/100)*(1820-time)) commercial_m2_cap_retail_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_retail_1820_1970[region][1820] - (commercial_m2_cap_retail_1820_1970[region][1820]/100)*(1820-time)) commercial_m2_cap_hotels_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_hotels_1820_1970[region][1820] - (commercial_m2_cap_hotels_1820_1970[region][1820]/100)*(1820-time)) commercial_m2_cap_govern_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_govern_1820_1970[region][1820] - (commercial_m2_cap_govern_1820_1970[region][1820]/100)*(1820-time)) # combine historic with IMAGE data here rurpop_tail = rurpop_1820_1970.append(rurpop2, ignore_index = False) urbpop_tail = urbpop_1820_1970.append(urbpop, ignore_index = False) pop_tail = pop_1820_1970.append(pop2, ignore_index = False) floorspace_urb_tail = floorspace_urb_1820_1970.append(floorspace_urb, ignore_index = False) floorspace_rur_tail = floorspace_rur_1820_1970.append(floorspace_rur, ignore_index = False) commercial_m2_cap_office_tail = commercial_m2_cap_office_1820_1970.append(commercial_m2_cap_office, ignore_index = False) commercial_m2_cap_retail_tail = commercial_m2_cap_retail_1820_1970.append(commercial_m2_cap_retail, ignore_index = False) commercial_m2_cap_hotels_tail = commercial_m2_cap_hotels_1820_1970.append(commercial_m2_cap_hotels, ignore_index = False) commercial_m2_cap_govern_tail = commercial_m2_cap_govern_1820_1970.append(commercial_m2_cap_govern, ignore_index = False) rurpop_tail = rurpop_1721_1820.append(rurpop_1820_1970.append(rurpop2, ignore_index = False), ignore_index = False) urbpop_tail = urbpop_1721_1820.append(urbpop_1820_1970.append(urbpop, ignore_index = False), ignore_index = False) pop_tail = pop_1721_1820.append(pop_1820_1970.append(pop2, ignore_index = False), ignore_index = False) floorspace_urb_tail = floorspace_urb_1721_1820.append(floorspace_urb_1820_1970.append(floorspace_urb, ignore_index = False), ignore_index = False) floorspace_rur_tail = floorspace_rur_1721_1820.append(floorspace_rur_1820_1970.append(floorspace_rur, ignore_index = False), ignore_index = False) commercial_m2_cap_office_tail = commercial_m2_cap_office_1721_1820.append(commercial_m2_cap_office_1820_1970.append(commercial_m2_cap_office, ignore_index = False), ignore_index = False) commercial_m2_cap_retail_tail = commercial_m2_cap_retail_1721_1820.append(commercial_m2_cap_retail_1820_1970.append(commercial_m2_cap_retail, ignore_index = False), ignore_index = False) commercial_m2_cap_hotels_tail = 
commercial_m2_cap_hotels_1721_1820.append(commercial_m2_cap_hotels_1820_1970.append(commercial_m2_cap_hotels, ignore_index = False), ignore_index = False) commercial_m2_cap_govern_tail = commercial_m2_cap_govern_1721_1820.append(commercial_m2_cap_govern_1820_1970.append(commercial_m2_cap_govern, ignore_index = False), ignore_index = False) #%% FLOOR AREA STOCK ----------------------------------------------------------- # adjust the share for urban/rural only (shares in csv are as percantage of the total(Rur + Urb), we needed to adjust the urban shares to add up to 1, same for rural) housing_type_rur3 = housing_type_rur2/housing_type_rur2.sum() housing_type_urb3 = housing_type_urb2/housing_type_urb2.sum() # calculte the total rural/urban population (pop2 = millions of people, rurpop2 = % of people living in rural areas) people_rur = pd.DataFrame(rurpop_tail.values*pop_tail.values, columns = pop_tail.columns, index = pop_tail.index) people_urb = pd.DataFrame(urbpop_tail.values*pop_tail.values, columns = pop_tail.columns, index = pop_tail.index) # calculate the total number of people (urban/rural) BY HOUSING TYPE (the sum of det,sem,app & hig equals the total population e.g. people_rur) people_det_rur = pd.DataFrame(housing_type_rur3.iloc[0].values*people_rur.values, columns = people_rur.columns, index = people_rur.index) people_sem_rur = pd.DataFrame(housing_type_rur3.iloc[1].values*people_rur.values, columns = people_rur.columns, index = people_rur.index) people_app_rur = pd.DataFrame(housing_type_rur3.iloc[2].values*people_rur.values, columns = people_rur.columns, index = people_rur.index) people_hig_rur = pd.DataFrame(housing_type_rur3.iloc[3].values*people_rur.values, columns = people_rur.columns, index = people_rur.index) people_det_urb = pd.DataFrame(housing_type_urb3.iloc[0].values*people_urb.values, columns = people_urb.columns, index = people_urb.index) people_sem_urb = pd.DataFrame(housing_type_urb3.iloc[1].values*people_urb.values, columns = people_urb.columns, index = people_urb.index) people_app_urb = pd.DataFrame(housing_type_urb3.iloc[2].values*people_urb.values, columns = people_urb.columns, index = people_urb.index) people_hig_urb = pd.DataFrame(housing_type_urb3.iloc[3].values*people_urb.values, columns = people_urb.columns, index = people_urb.index) # calculate the total m2 (urban/rural) BY HOUSING TYPE (= nr. 
of people * OWN avg m2, so not based on IMAGE) m2_unadjusted_det_rur = pd.DataFrame(avg_m2_cap_rur2.iloc[0].values * people_det_rur.values, columns = people_det_rur.columns, index = people_det_rur.index) m2_unadjusted_sem_rur = pd.DataFrame(avg_m2_cap_rur2.iloc[1].values * people_sem_rur.values, columns = people_sem_rur.columns, index = people_sem_rur.index) m2_unadjusted_app_rur = pd.DataFrame(avg_m2_cap_rur2.iloc[2].values * people_app_rur.values, columns = people_app_rur.columns, index = people_app_rur.index) m2_unadjusted_hig_rur = pd.DataFrame(avg_m2_cap_rur2.iloc[3].values * people_hig_rur.values, columns = people_hig_rur.columns, index = people_hig_rur.index) m2_unadjusted_det_urb = pd.DataFrame(avg_m2_cap_urb2.iloc[0].values * people_det_urb.values, columns = people_det_urb.columns, index = people_det_urb.index) m2_unadjusted_sem_urb = pd.DataFrame(avg_m2_cap_urb2.iloc[1].values * people_sem_urb.values, columns = people_sem_urb.columns, index = people_sem_urb.index) m2_unadjusted_app_urb = pd.DataFrame(avg_m2_cap_urb2.iloc[2].values * people_app_urb.values, columns = people_app_urb.columns, index = people_app_urb.index) m2_unadjusted_hig_urb = pd.DataFrame(avg_m2_cap_urb2.iloc[3].values * people_hig_urb.values, columns = people_hig_urb.columns, index = people_hig_urb.index) # Define empty dataframes for m2 adjustments total_m2_adj_rur = pd.DataFrame(index = m2_unadjusted_det_rur.index, columns = m2_unadjusted_det_rur.columns) total_m2_adj_urb = pd.DataFrame(index = m2_unadjusted_det_urb.index, columns = m2_unadjusted_det_urb.columns) # Sum all square meters in Rural area for j in range(1721,2061,1): for i in range(1,27,1): total_m2_adj_rur.loc[j,str(i)] = m2_unadjusted_det_rur.loc[j,str(i)] + m2_unadjusted_sem_rur.loc[j,str(i)] + m2_unadjusted_app_rur.loc[j,str(i)] + m2_unadjusted_hig_rur.loc[j,str(i)] # Sum all square meters in Urban area for j in range(1721,2061,1): for i in range(1,27,1): total_m2_adj_urb.loc[j,str(i)] = m2_unadjusted_det_urb.loc[j,str(i)] + m2_unadjusted_sem_urb.loc[j,str(i)] + m2_unadjusted_app_urb.loc[j,str(i)] + m2_unadjusted_hig_urb.loc[j,str(i)] # average square meter per person implied by our OWN data avg_m2_cap_adj_rur = pd.DataFrame(total_m2_adj_rur.values / people_rur.values, columns = people_rur.columns, index = people_rur.index) avg_m2_cap_adj_urb = pd.DataFrame(total_m2_adj_urb.values / people_urb.values, columns = people_urb.columns, index = people_urb.index) # factor to correct square meters per capita so that we respect the IMAGE data in terms of total m2, but we use our own distinction between Building types m2_cap_adj_fact_rur = pd.DataFrame(floorspace_rur_tail.values / avg_m2_cap_adj_rur.values, columns = floorspace_rur_tail.columns, index = floorspace_rur_tail.index) m2_cap_adj_fact_urb = pd.DataFrame(floorspace_urb_tail.values / avg_m2_cap_adj_urb.values, columns = floorspace_urb_tail.columns, index = floorspace_urb_tail.index) # All m2 by region (in millions), Building_type & year (using the correction factor, to comply with IMAGE avg m2/cap) m2_det_rur = pd.DataFrame(m2_unadjusted_det_rur.values * m2_cap_adj_fact_rur.values, columns = m2_cap_adj_fact_rur.columns, index = m2_cap_adj_fact_rur.index) m2_sem_rur = pd.DataFrame(m2_unadjusted_sem_rur.values * m2_cap_adj_fact_rur.values, columns = m2_cap_adj_fact_rur.columns, index = m2_cap_adj_fact_rur.index) m2_app_rur = pd.DataFrame(m2_unadjusted_app_rur.values * m2_cap_adj_fact_rur.values, columns = m2_cap_adj_fact_rur.columns, index = m2_cap_adj_fact_rur.index) m2_hig_rur = 
pd.DataFrame(m2_unadjusted_hig_rur.values * m2_cap_adj_fact_rur.values, columns = m2_cap_adj_fact_rur.columns, index = m2_cap_adj_fact_rur.index) m2_det_urb = pd.DataFrame(m2_unadjusted_det_urb.values * m2_cap_adj_fact_urb.values, columns = m2_cap_adj_fact_urb.columns, index = m2_cap_adj_fact_urb.index) m2_sem_urb =
pd.DataFrame(m2_unadjusted_sem_urb.values * m2_cap_adj_fact_urb.values, columns = m2_cap_adj_fact_urb.columns, index = m2_cap_adj_fact_urb.index)
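# The commercial floorspace demand above is driven by a Gompertz curve in service
# value added (SVA) per capita: m2/cap = a * exp(-b * exp(-(c/1000) * SVA)), with
# (a, b, c) read from Gompertz_parameters.csv per building category. A minimal,
# vectorised restatement of that relation is sketched below; the function name and
# the example parameter values are illustrative assumptions, not part of the model.
import numpy as np

def gompertz_m2_cap(sva_per_cap, a, b, c):
    """Commercial floorspace demand (m2 per capita) as a function of SVA per capita."""
    sva = np.asarray(sva_per_cap, dtype=float)
    return a * np.exp(-b * np.exp(-(c / 1000.0) * sva))

# Example with made-up parameters: demand saturates towards 'a' as SVA grows.
# gompertz_m2_cap([500.0, 5000.0, 50000.0], a=30.0, b=5.0, c=0.05)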
import base64 import gzip import json import os import pickle from glob import glob from urllib.parse import urlparse import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import requests from requests.auth import HTTPBasicAuth from settings.basic import (CACHE_ENABLED, CACHE_PATH, DATA_PATH, intrinio_username, intrinio_password, debug) def dict_to_str(dct): return ' '.join(['%s:%s' % (k, v) for k, v in dct.items()]) def get_datasets_name(resample_period, symbols_list_name, thresholds, target_shift): normal_name = "normal_%s_%s_%s_%s_y%s" % ( resample_period, symbols_list_name, thresholds[0], thresholds[1], target_shift) z_name = "z-score_%s_%s_%s_%s_y%s" % ( resample_period, symbols_list_name, thresholds[0], thresholds[1], target_shift) return normal_name, z_name def get_headers(trading_params): header = 'dataset,period,clf,magic,model_params,' header += ','.join( [k for k in trading_params.keys() if k != 'dates']) header += ',start_trade,final_trade,time,min,max,mean,last' return header def format_line(dataset_name, clf, magic, trading_params, model_params, pfs, total_time): r = [p.total_money for p in pfs] line = '%s,%s,%s,%s,%s,' % ( dataset_name.split('_')[0], dataset_name.split('_')[1], clf, magic, dict_to_str(model_params)) line += ','.join(list([str(v) for v in trading_params.values()])[:-1]) line += ',' + trading_params['dates'][0] + ',' + \ trading_params['dates'][1] + ',' line += '%.2f,' % total_time line += '%.1f,%.1f,%.1f,%.1f' % (np.min(r), np.max(r), np.mean(r), r[-1]) return line def full_print(res): with pd.option_context('display.max_rows', None, 'display.max_columns', None): print(res) def exists_obj(name): return os.path.exists(name + '.pgz') def save_obj(obj, name): with gzip.GzipFile(name + '.pgz', 'w') as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) def load_obj(name): with gzip.GzipFile(name + '.pgz', 'r') as f: return pickle.load(f) def to_df(file: str) -> pd.DataFrame: df = pd.read_csv(file) df.set_index(['year', 'quarter'], inplace=True) df.sort_index(inplace=True) return df def plot(x, y): import matplotlib.pyplot as plt import matplotlib.dates as mdates plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d')) plt.gca().xaxis.set_major_locator(mdates.DayLocator()) plt.plot(x, y) plt.gcf().autofmt_xdate() def load_symbol_list(symbols_list_name: str) -> list: path = os.path.join(DATA_PATH, '%s_symbols.lst' % (symbols_list_name)) return open(path).read().split() def call_and_cache(url: str, cache=True) -> dict: """ Calls the URL with GET method if the url file is not cached :param url: url to retrieve :param kwargs: specify no-cache :return: json.loads of the response (or empty dict if error) """ url_parsed = urlparse(url) cached_file = os.path.join(CACHE_PATH, url_parsed.netloc + url_parsed.path + "/" + base64.standard_b64encode( url_parsed.query.encode()).decode()) if not os.path.exists(os.path.dirname(cached_file)): os.makedirs(os.path.dirname(cached_file)) data_json = {} if CACHE_ENABLED and os.path.exists(cached_file) and cache: if debug: print( "Data was present in cache and cache is enabled, loading: %s for %s" % (cached_file, url)) with open(cached_file, 'r') as f: data_json = json.loads(f.read()) else: print( "Data was either not present in cache or it was disabled calling request: %s" % url) r = requests.get(url, auth=HTTPBasicAuth(intrinio_username, intrinio_password)) if r.status_code != 200: print( "Request status was: %s for URL: %s" % (r.status_code, url)) return data_json data_json = 
json.loads(r.text) if 'data' in data_json.keys() and not len(data_json['data']) > 0: print("Data field is empty.\nRequest URL: %s" % (url)) with open(cached_file, 'w') as f: f.write(json.dumps(data_json)) print( "Successfully cached url: %s to %s" % (url, cached_file)) return data_json def plot_2_axis(): import numpy as np import matplotlib.pyplot as plt x, y = np.random.random((2, 50)) fig, ax1 = plt.subplots() ax2 = ax1.twinx() ax1.scatter(df['last'], df.C, c='b') ax2.scatter(df['last'], df.gamma, c='r') ax1.set_yscale('log') ax2.set_yscale('log') def load_trend(file, price, name): res = load_obj(file) res = [r for r in res if '%.1f' % r[1][-1].total_money == str(price)] return to_df_col(res[0][1], name) def get_trend(results, price, name): res = [] # from glob import glob # for file in glob('*/clean_results_*'): # import ipdb # ipdb.set_trace() # result = load_obj(file[:-4]) for result in results: res.extend( [r for r in result if '%.1f' % r[1][-1].total_money == str(price)]) # del result if len(res) == 0: print("No results found") return # break return to_df_col(res[0][1], name) def load_all(experiment): from glob import glob results = [] for file in glob('%s/*/clean_results_*' % experiment): print("loading: %s" % file) results.append(load_obj(file[:-4])) return results def new_plot(file, experiment): cols = ["dataset", "period", "clf", "magic", "model_params", "k", "bot_thresh", "top_thresh", "mode", "trade_frequency", "start_trade", "final_trade", "time", "min", "max", "mean", "last"] results = load_all(experiment) r1 = pd.read_csv(file, names=cols).sort_values('last').drop('time', 1).drop_duplicates() best = r1.groupby('clf')[['last']].max() sp500 = pd.read_csv('sp500.csv').set_index('Date') sp500.index = pd.to_datetime(sp500.index) sp500 = sp500[['Adj Close']].rename(columns={'Adj Close': 'S&P 500'}) ratio = 100000 / sp500.iloc[0] trends = [] names = ['AdaBoost', 'NN', 'RF', 'SVM', 'Graham', 'S&P 500'] for i, (clf, price) in enumerate(best.itertuples()): trends.append(get_trend(results, price, names[i])) sptrend = sp500 * ratio sptrend = sptrend.resample('1W').last() sptrend = sptrend[sptrend.index.isin(trends[0].index)] trends.append(sptrend) df = pd.concat(trends, axis=1).interpolate() df.index =
pd.to_datetime(df.index)
pandas.to_datetime
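# Hedged illustration (not from the original source): a minimal, runnable sketch of the
# gzip+pickle disk-cache pattern that save_obj/load_obj/exists_obj above rely on.
# The name 'demo_prices' and the toy DataFrame are made up for the example.
import gzip
import os
import pickle

import pandas as pd


def cached_compute(name, builder):
    """Return the object stored under ``name + '.pgz'``, building and caching it on a miss."""
    path = name + '.pgz'
    if os.path.exists(path):
        with gzip.GzipFile(path, 'r') as f:
            return pickle.load(f)
    obj = builder()
    with gzip.GzipFile(path, 'w') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
    return obj


if __name__ == '__main__':
    prices = cached_compute('demo_prices',
                            lambda: pd.DataFrame({'price': [1.0, 1.2, 0.9]}))
    print(prices)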
from copy import deepcopy import numpy as np import pandas as pd from .utils import isin, str_type # ============================================================================================================ # === BFS SEARCH OF ROUTES # ============================================================================================================ def backtrace(parent, start, end): path = [end] while path[-1] != start: path.append(parent[path[-1]]) path.reverse() return tuple(path) def bfs_build_route_list_slow(orig, dest, t, contact_plan, relays=None, max_speed=40.0, verbose=False): # Initialize variables cp = contact_plan.copy(deep=True) tinf = max(cp.tstart.max(), cp.tend.max()) + 100 * 365 * 24 * 3600 nodes = set(cp.orig).union(set(cp.dest)) relays = relays if relays is not None else nodes # Eliminate contact with itself if it exists (needs to be done before adding -1 and -2) cp = cp.loc[~(cp.orig == cp.dest)] # Add dummy initial and final contacts: (orig, dest, tstart, tend, duration, range, rate, capacity cp.loc[-1, :] = (orig, orig, t, t, 0.0, 0.0, np.inf, np.inf) cp.loc[-2, :] = (dest, dest, np.inf, np.inf, 0.0, 0.0, np.inf, np.inf) cp = cp.sort_values(['tstart', 'tend', 'orig', 'dest']) # Initialize necessary data structures cp['EAT'] = tinf cp['owlt'] = cp['range'] cp['margin'] = ((1.0 * max_speed / 3600) * cp.owlt) / 186282 cp.loc[-1, 'EAT'] = t # Initialize variables queue = [] paths = [] EATs = {} visited_nodes = {} found_paths = [] # Start at the root contact queue.append(-1) paths.append((-1,)) visited_nodes[(-1,)] = {orig} EATs[(-1,)] = t # While there are contacts to explore while queue: # Get the next contact current, path = cp.loc[queue.pop(), :], paths.pop() # Find path properties visited = visited_nodes[path] EAT = EATs[path] # Find the neighbors cids = (current.dest == cp.orig) & (~cp.dest.isin(visited)) & (cp.tend > EAT) & \ (cp.dest.isin(relays) | (cp.dest == dest)) # If no neighbors identified, continue if not cids.any(): continue # Compute early transmission time (ETT) and early arrival time (EAT) for neighbors ETT = cp.tstart[cids].apply(lambda x: max(x, EAT)) EAT = ETT + cp.owlt[cids] + cp.margin[cids] # Iterate over neighbors for cid, _ in cp.loc[cids, :].iterrows(): # Create a record for this path p = list(deepcopy(path)) p.append(cid) p = tuple(p) # If next neighbor is destination, save path if cid == -2: found_paths.append(p) if verbose: print('New path: ' + str(p)) continue # Store neighbors EAT EATs[p] = EAT.loc[cid] visited_nodes[p] = visited | {current.dest} # Do not use ADD, it does not return anything # Append the neighbor to continue exploring the graph queue.append(cid) paths.append(p) # Initialize variables routes = [] # Iterate over computed paths for path in found_paths: # Get the route (i.e. 
nodes traversed path1 = path[1:] rt = tuple(cp.loc[idx, 'orig'] for idx in path1) # Get path contact list and EAT path2 = path[1:-1] end = [cp.loc[idx, 'tend'] for idx in path2] ix = np.argmin(end) # Compose route data structure route = {} route['orig'] = orig route['dest'] = dest route['time'] = t route['contacts'] = path2 route['route'] = rt route['tstart'] = cp.loc[route['contacts'][0], 'tstart'] route['tend'] = end[ix] route['EAT'] = EATs[path[:-1]] route['limit_cid'] = path2[ix] route['nhops'] = len(rt)-1 routes.append(route) return routes def bfs_build_route_list_fast(orig, dest, t, contact_plan, relays=None, max_speed=40.0, verbose=False): # Initialize variables cp = contact_plan.copy(deep=True) tinf = max(cp.tstart.max(), cp.tend.max()) + 100 * 365 * 24 * 3600 nodes = set(cp.orig).union(set(cp.dest)) relays = relays if relays is not None else nodes # Eliminate contact with itself if it exists (needs to be done before adding -1 and -2) cp = cp.loc[~(cp.orig == cp.dest)] # Add dummy initial and final contacts: (orig, dest, tstart, tend, duration, range, rate, capacity) cp.loc[-1, :] = (orig, orig, t, t, 0.0, 0.0, np.inf, np.inf) cp.loc[-2, :] = (dest, dest, np.inf, np.inf, 0.0, 0.0, np.inf, np.inf) cp = cp.sort_values(['tstart', 'tend', 'orig', 'dest']) # Initialize necessary data structures cp['EAT'] = tinf cp['owlt'] = cp['range'] cp['margin'] = ((1.0 * max_speed / 3600) * cp.owlt) / 186282 cp.loc[-1, 'EAT'] = t # Transform to dict for fast processing. Format is {column -> [values]} idx = cp.index.values cp = {c: cp[c].values for c in cp.columns} cp['index'] = idx # Initialize variables queue = [] paths = [] EATs = {} visited_nodes = {} found_paths = [] # Start at the root contact queue.append(-1) paths.append((-1,)) visited_nodes[(-1,)] = {orig} EATs[(-1,)] = t # While there are contacts to explore while queue: # Get the next contact cur_idx = queue.pop() c_idx = cp['index'] == cur_idx c_dest = np.compress(c_idx, cp['dest'])[0] # Avoid fancy slow indexing path = paths.pop() # Find path properties visited = visited_nodes[path] EAT = EATs[path] # Get this contact neighbors. Neighbors meet the following criteria: # 1) The current.dest = contact.orig # 2) The contact.dest is a node that has already been visited # 3) The contact.tend > current.EAT (i.e. 
a neighboring contact does not end before data arrives to this contact) # 4) The neighbor contact's destination has relay capabilities or is the destination itself # (note: this is not present in the SABR specification or ION code) cids = (c_dest == cp['orig']) & (~isin(cp['dest'], tuple(visited))) & (cp['tend'] > EAT) & \ (isin(cp['dest'], list(relays)) | (cp['dest'] == dest)) # If no neighbors identified, continue if not cids.any(): continue # Compute early transmission time (ETT) and early arrival time (EAT) ETT = np.maximum(np.compress(cids, cp['tstart']), EAT) EAT = ETT + np.compress(cids, cp['owlt']) + np.compress(cids, cp['margin']) # Iterate over neighbors for i, cid in enumerate(np.compress(cids, cp['index'])): # Create a record for this path p = list(deepcopy(path)) p.append(cid) p = tuple(p) # If next neighbor is destination, save path if cid == -2: found_paths.append(p) if verbose: print('New path: ' + str(p)) continue # Store neighbors EAT EATs[p] = EAT[i] visited_nodes[p] = visited | {c_dest} # Do not use ADD, it does not return anything # Append the neighbor to continue exploring the graph queue.append(cid) paths.append(p) # Initialize variables routes = [] # Iterate over computed paths for path in found_paths: # Get the route (i.e. nodes traversed) path1 = path[1:] rt = tuple(np.compress(cp['index'] == idx, cp['orig'])[0] for idx in path1) # Get path contact list and EAT path2 = path[1:-1] end = [np.compress(cp['index'] == idx, cp['tend'])[0] for idx in path2] ix = np.argmin(end) # Compose route data structure route = {} route['orig'] = orig route['dest'] = dest route['time'] = t route['contacts'] = path2 route['route'] = rt route['tstart'] = np.compress(cp['index'] == route['contacts'][0], cp['tstart'])[0] route['tend'] = end[ix] route['EAT'] = EATs[path[:-1]] route['limit_cid'] = path2[ix] route['nhops'] = len(rt)-1 routes.append(route) return routes # ============================================================================================================ # === VALIDATION: COMPARE SLOW AND FAST OUTPUTS # ============================================================================================================ def validate_value(idx, v1, v2): if isinstance(v1, pd.Timestamp): eq = (v1 - v2).to_timedelta64() < np.timedelta64(1, 'ms') # Precision of 1msec elif isinstance(v1, str_type): eq = v1 == v2 elif isinstance(v1, Number): eq = np.isclose(v1, v2) elif isinstance(v1, (list, tuple)): eq = all(validate_value(idx, vv1, vv2) for vv1, vv2 in zip(v1, v2)) else: raise ValueError('Cannot validate row {} of type {}'.format(idx, type(v1))) return eq def validate_fast_cgr(cp, ri, t, orig, dest, relays, algorithm='bfs', ncpu=1): # Initialize variables diff = [] # Load old route schedule disp('='*50 + ' COMPUTING ROUTE SCHEDULE - FAST MODE ' + '='*50) with Timer(): slow_df = build_route_list(orig, dest, t, cp, ri, relays=relays, ncpu=ncpu, algorithm=algorithm, mode='fast') # Compute new route schedule disp('=' * 50 + ' COMPUTING ROUTE SCHEDULE - SLOW MODE ' + '=' * 50) with Timer(): fast_df = build_route_list(orig, dest, t, cp, ri, relays=relays, ncpu=ncpu, algorithm=algorithm, mode='slow') # Compare rows disp('='*50 + ' VALIDATING ROUTE SCHEDULE - END ' + '='*50) for slow_cid, slow_row in slow_df.iterrows(): # Get this row in the fast_df fast_row = fast_df.loc[slow_cid, :] # Compare each entry of the row equal_idx = [validate_value(idx, slow_row[idx], slow_row[idx]) for idx in slow_row.index] # Flag non-equal entries diff_tags = slow_row.index[~np.array(equal_idx)].values if 
any(diff_tags): df = pd.concat((slow_row[diff_tags], fast_row[diff_tags]), axis=1) df.columns = ['Reference', 'New'] diff.append(df) # If the fast_df has extra more rows than the slow_df, flag them slow_idx, fast_idx = set(slow_df.index), set(fast_df.index) for fast_cid in (fast_idx-slow_idx): diff.append(fast_df.loc[[fast_cid],:]) return diff, fast_df, slow_df if __name__ == '__main__': # Import for testing from . import build_route_list from simulator.utils.basic_utils import disp, read, Timer from numbers import Number from simulator.utils.time_utils import str2time # Load data contact_plan = read('.\Scenario 2 - Data\Scn2_contact_table.xlsx') range_intervals = read('.\Scenario 2 - Data\Scn2_range_intervals.xlsx') # Define inputs t = str2time('01-JAN-2034 00:00:00', fmt='%d-%b-%Y %H:%M:%S') nodes = ('MR1','DSH','MCC','PSH','HHC','MMU','EVA1','EVA2','MOI','PDSR','PEV','PTX','EDL-MAV','EDL-DS') relays = ('PSH', 'DSH', 'MR1') # TEST 1 - Validate CGR fast implementation '''diff, fast_df, slow_df = validate_fast_cgr(contact_plan, range_intervals, t, nodes, nodes, relays, ncpu=5) if not diff: print('Fast CGR implementation successfully validated.') else: print('Error in CGR fast implementation, check:\n', diff)''' # TEST 2 - Build routes between two given nodes routes = build_route_list('MCC', 'PSH', t, contact_plan, range_intervals, relays=relays, ncpu=1, algorithm='bfs', mode='slow', verbose=True)
pd.DataFrame(routes)
pandas.DataFrame
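# Hedged illustration (not from the original source): a minimal sketch of the
# earliest-arrival-time rule the BFS above applies per contact, i.e.
# ETT = max(tstart, EAT so far) and EAT = ETT + owlt + margin.
# The toy contact plan and the helper name eat_along_path are assumptions.
import numpy as np
import pandas as pd

toy_cp = pd.DataFrame({
    'orig':   ['A', 'B'],
    'dest':   ['B', 'C'],
    'tstart': [10.0, 40.0],
    'tend':   [100.0, 120.0],
    'owlt':   [5.0, 8.0],     # one-way light time
    'margin': [0.1, 0.1],
})


def eat_along_path(cp, contact_ids, t0):
    """Propagate the earliest arrival time through the listed contacts, starting at t0."""
    eat = t0
    for cid in contact_ids:
        row = cp.loc[cid]
        if row.tend <= eat:          # contact closes before the data arrives
            return np.inf
        ett = max(row.tstart, eat)   # earliest transmission time
        eat = ett + row.owlt + row.margin
    return eat


print(eat_along_path(toy_cp, [0, 1], t0=0.0))   # ~48.1 for the route A -> B -> C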
import networkx as nx import numpy as np from hiveplot import HivePlot import pandas as pd import matplotlib.pyplot as plt # load data pd_data = pd.read_csv('./kg_final_with_temporal_data_and_validated_inconsistencies.txt', sep='\t') pd_data = pd_data[['Subject', 'Predicate', 'Object']] # filter positives only neg_predicates = [ 'confers no resistance to antibiotic', 'not upregulated by antibiotic', 'no represses', 'no activates',] def _check_match(x, predicates): flag = False for predicate in predicates: if predicate in x: flag = True return flag # remove the negatives pd_data = pd_data[~pd_data['Predicate'].apply(lambda x: _check_match(x, neg_predicates))] # select data by relation type pd_cras = pd_data[pd_data['Predicate'].str.contains('resistance to antibiotic')] pd_ubas = pd_data[pd_data['Predicate'].str.contains('upregulated by antibiotic')] pd_represses = pd_data[pd_data['Predicate'].str.contains('represses')] pd_activates = pd_data[pd_data['Predicate'].str.contains('activates')] pd_has = pd_data[pd_data['Predicate'].str.contains('has')] pd_iii = pd_data[pd_data['Predicate'].str.contains('is involved in')] pd_ipo = pd_data[pd_data['Predicate'].str.contains('is part of')] pd_tb = pd_data[pd_data['Predicate'].str.contains('targeted by')] # get genes genes = [] genes.extend(pd_cras['Subject'].to_numpy().tolist()) genes.extend(pd_ubas['Subject'].to_numpy().tolist()) genes.extend(pd_represses['Subject'].to_numpy().tolist()) genes.extend(pd_represses['Object'].to_numpy().tolist()) genes.extend(pd_activates['Subject'].to_numpy().tolist()) genes.extend(pd_activates['Object'].to_numpy().tolist()) genes.extend(pd_has['Subject'].to_numpy().tolist()) genes.extend(pd_iii['Subject'].to_numpy().tolist()) genes.extend(pd_ipo['Subject'].to_numpy().tolist()) genes.extend(pd_tb['Subject'].to_numpy().tolist()) genes = list(set(genes)) pd_genes = pd.DataFrame(genes, columns=['Label']) pd_genes['Category'] = 'gene' print('gene:', pd_genes.shape) # get antibiotics antibiotics = [] antibiotics.extend(pd_cras['Object'].to_numpy().tolist()) antibiotics.extend(pd_ubas['Object'].to_numpy().tolist()) antibiotics.extend(pd_tb['Object'].to_numpy().tolist()) antibiotics = list(set(antibiotics)) pd_antibiotics = pd.DataFrame(antibiotics, columns=['Label']) pd_antibiotics['Category'] = 'antibiotic' print('antibiotic:', pd_antibiotics.shape) # get molecular_function molecular_functions = pd_has['Object'].to_numpy().tolist() molecular_functions = list(set(molecular_functions)) pd_molecular_functions = pd.DataFrame(molecular_functions, columns=['Label']) pd_molecular_functions['Category'] = 'molecular_function' print('molecular_function:', pd_molecular_functions.shape) # get biological_process biological_processes = pd_iii['Object'].to_numpy().tolist() biological_processes = list(set(biological_processes)) pd_biological_processes =
pd.DataFrame(biological_processes, columns=['Label'])
pandas.DataFrame
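# Hedged illustration (not from the original source): a small sketch of the node-table
# pattern used above -- collect the unique labels per category and attach a 'Category'
# column before concatenating. The toy triples are made up for the example.
import pandas as pd

triples = pd.DataFrame({
    'Subject':   ['geneA', 'geneB', 'geneA'],
    'Predicate': ['confers resistance to antibiotic', 'activates', 'has'],
    'Object':    ['drugX', 'geneC', 'functionY'],
})


def node_table(labels, category):
    nodes = pd.DataFrame(sorted(set(labels)), columns=['Label'])
    nodes['Category'] = category
    return nodes


genes = node_table(
    triples['Subject'].tolist()
    + triples.loc[triples['Predicate'] == 'activates', 'Object'].tolist(),
    'gene')
antibiotics = node_table(
    triples.loc[triples['Predicate'].str.contains('antibiotic'), 'Object'],
    'antibiotic')
print(pd.concat([genes, antibiotics], ignore_index=True))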
# -*- coding: utf-8 -*- """ Created on Mon Oct 7 08:09:14 2019 @author: haukeh """ import pandas as pd import numpy as np import sys import os import getpass import json import pathlib from sqlalchemy import * #%% def reeem_session(): """SQLAlchemy session object with valid connection to reeem database""" print('Please provide connection parameters to database:\n' + 'Hit [Enter] to take defaults') host = '172.16.58.3' # input('host (default 172.16.58.3): ') port = '5432' # input('port (default 5432): ') database = 'reeem' # input("database name (default 'reeem'): ") user = 'reeem_vis' # input('user (default postgres): ') # password = input('password: ') password = getpass.getpass(prompt='password: ', stream=sys.stderr) con = create_engine( 'postgresql://' + '%s:%s@%s:%s/%s' % (user, password, host, port, database)).connect() print('Password correct! Database connection established.') return con #%% def import_reeemdb(con): """This function imports data from the REEEMdb It imports the data needed to perform the calculations of scores and indicators for the the REEEMgame. Arguments --------- """ #database info schema = 'model_draft' table_in = 'reeem_osembe_input' table_out = 'reeem_osembe_output' emission = text(""" SELECT nid, pathway, version, region, year, category, indicator, value -- column FROM {0}.{1} -- table WHERE category = 'Emissions' AND indicator = 'CO2' AND version = 'DataV2' AND year = '2015' ORDER BY version, pathway, year; -- sorting """.format(schema, table_out)) cap_cost = text(""" SELECT nid, pathway, version, region, year, category, indicator, value -- column FROM {0}.{1} -- table WHERE category = 'CapitalCost' AND version = 'DataV2' AND year = '2015' ORDER BY version, pathway, year; -- sorting """.format(schema, table_in)) new_capa = text(""" SELECT nid, pathway, version, region, year, category, indicator, value -- column FROM {0}.{1} -- table WHERE (category = 'New Capacity_Coal' OR category = 'New Capacity_Oil' OR category = 'New Capacity_Natural gas / non renew.' OR category = 'New Capacity_Nuclear' OR category = 'New Capacity_Waste non renewable' OR category = 'New Capacity_Biomass solid' OR category = 'New Capacity_Biofuel liquid' OR category = 'New Capacity_Hydro' OR category = 'New Capacity_Wind' OR category = 'New Capacity_Solar' OR category = 'New Capacity_Geothermal' OR category = 'New Capacity_Ocean') AND version = 'DataV2' AND year = '2015' ORDER BY version, pathway, year; -- sorting """.format(schema, table_out)) discount_rate = text(""" SELECT nid, pathway, version, region, year, category, indicator, value -- column FROM {0}.{1} -- table WHERE category = 'DiscountRate' AND version = 'DataV2' AND region = 'EU+CH+NO' AND year = '2015' ORDER BY version, pathway, year; -- sorting """.format(schema, table_in)) oper_life = text(""" SELECT nid, pathway, version, region, year, category, indicator, value -- column FROM {0}.{1} -- table WHERE category = 'OperationalLife' AND version = 'DataV2' AND year = '2015' ORDER BY version, pathway, year; -- sorting """.format(schema, table_in)) inst_capa = text(""" SELECT nid, pathway, version, region, year, category, indicator, value -- column FROM {0}.{1} -- table WHERE (category = 'Installed Capacities Public and Industrial Power and CHP Plants by Fuel and Technology_Coal' OR category = 'Installed Capacities Public and Industrial Power and CHP Plants by Fuel and Technology_Oil' OR category = 'Installed Capacities Public and Industrial Power and CHP Plants by Fuel and Technology_Natural gas / non renew.' 
OR category = 'Installed Capacities Public and Industrial Power and CHP Plants by Fuel and Technology_Nuclear' OR category = 'Installed Capacities Public and Industrial Power and CHP Plants by Fuel and Technology_Waste non renewable' OR category = 'Installed Capacities Public and Industrial Power and CHP Plants by Fuel and Technology_Biomass solid' OR category = 'Installed Capacities Public and Industrial Power and CHP Plants by Fuel and Technology_Biofuel liquid' OR category = 'Installed Capacities Public and Industrial Power and CHP Plants by Fuel and Technology_Hydro' OR category = 'Installed Capacities Public and Industrial Power and CHP Plants by Fuel and Technology_Wind' OR category = 'Installed Capacities Public and Industrial Power and CHP Plants by Fuel and Technology_Solar' OR category = 'Installed Capacities Public and Industrial Power and CHP Plants by Fuel and Technology_Geothermal' OR category = 'Installed Capacities Public and Industrial Power and CHP Plants by Fuel and Technology_Ocean') AND (indicator = 'Heat and Power Unit' OR indicator = 'Combined Cycle' OR indicator = 'CHP' OR indicator = 'Carbon Capture and Storage' OR indicator = 'Steam Turbine' OR indicator = 'Steam Turbine small' OR indicator = 'Steam Turbine large' OR indicator = 'Conventional' OR indicator = 'Gas Turbine old' OR indicator = 'Gas Turbine new' OR indicator = 'Heat and Power Unit small' OR indicator = 'Heat and Power Unit large' OR indicator = 'Run of river' OR indicator = 'Dam <10MW' OR indicator = 'Dam 10-100MW' OR indicator = 'Dam >100MW' OR indicator = 'Pumped Storage <100MW' OR indicator = 'Pumped Storage >100MW' OR indicator = 'CHP old' OR indicator = 'CHP new' OR indicator = 'Fuel cell' OR indicator = 'Generation 2' OR indicator = 'Generation 3' OR indicator = 'Wave' OR indicator = 'Distributed PV' OR indicator = 'Utility PV' OR indicator = 'Offshore' OR indicator = 'Onshore') AND version = 'DataV2' AND year = '2015' ORDER BY version, pathway, year; -- sorting """.format(schema, table_out)) fix_cost = text(""" SELECT nid, pathway, version, region, year, category, indicator, value -- column FROM {0}.{1} -- table WHERE category = 'FixedCost' AND version = 'DataV2' AND year = '2015' ORDER BY version, pathway, year; -- sorting """.format(schema, table_in)) el_prod = text(""" SELECT nid, pathway, version, region, year, category, indicator, value -- column FROM {0}.{1} -- table WHERE (category = 'Electricity Production from Public and Industrial Power and CHP Plants by Fuel and Technology_Coal' OR category = 'Electricity Production from Public and Industrial Power and CHP Plants by Fuel and Technology_Oil' OR category = 'Electricity Production from Public and Industrial Power and CHP Plants by Fuel and Technology_Natural gas / non renew.' 
OR category = 'Electricity Production from Public and Industrial Power and CHP Plants by Fuel and Technology_Nuclear' OR category = 'Electricity Production from Public and Industrial Power and CHP Plants by Fuel and Technology_Waste non renewable' OR category = 'Electricity Production from Public and Industrial Power and CHP Plants by Fuel and Technology_Biomass solid' OR category = 'Electricity Production from Public and Industrial Power and CHP Plants by Fuel and Technology_Biofuel liquid' OR category = 'Electricity Production from Public and Industrial Power and CHP Plants by Fuel and Technology_Hydro' OR category = 'Electricity Production from Public and Industrial Power and CHP Plants by Fuel and Technology_Wind' OR category = 'Electricity Production from Public and Industrial Power and CHP Plants by Fuel and Technology_Solar' OR category = 'Electricity Production from Public and Industrial Power and CHP Plants by Fuel and Technology_Geothermal' OR category = 'Electricity Production from Public and Industrial Power and CHP Plants by Fuel and Technology_Ocean') AND (indicator = 'Heat and Power Unit' OR indicator = 'Combined Cycle' OR indicator = 'CHP' OR indicator = 'Carbon Capture and Storage' OR indicator = 'Steam Turbine' OR indicator = 'Steam Turbine small' OR indicator = 'Steam Turbine large' OR indicator = 'Conventional' OR indicator = 'Gas Turbine old' OR indicator = 'Gas Turbine new' OR indicator = 'Heat and Power Unit small' OR indicator = 'Heat and Power Unit large' OR indicator = 'Run of river' OR indicator = 'Dam <10MW' OR indicator = 'Dam 10-100MW' OR indicator = 'Dam >100MW' OR indicator = 'Pumped Storage <100MW' OR indicator = 'Pumped Storage >100MW' OR indicator = 'CHP old' OR indicator = 'CHP new' OR indicator = 'Fuel cell' OR indicator = 'Generation 2' OR indicator = 'Generation 3' OR indicator = 'Wave' OR indicator = 'Distributed PV' OR indicator = 'Utility PV' OR indicator = 'Offshore' OR indicator = 'Onshore') AND version = 'DataV2' AND year = '2015' ORDER BY version, pathway, year; -- sorting """.format(schema, table_out)) var_cost = text(""" SELECT nid, pathway, version, region, year, category, indicator, value -- column FROM {0}.{1} -- table WHERE category = 'VariableCost' AND version = 'DataV2' AND year = '2015' ORDER BY version, pathway, year; -- sorting """.format(schema, table_in)) fuel_inp = text(""" SELECT nid, pathway, version, region, year, category, indicator, value -- column FROM {0}.{1} -- table WHERE (category = 'Fuel Input to Public and Industrial Power and CHP Plants by Fuel and Technology_Coal' OR category = 'Fuel Input to Public and Industrial Power and CHP Plants by Fuel and Technology_Oil' OR category = 'Fuel Input to Public and Industrial Power and CHP Plants by Fuel and Technology_Natural gas / non renew.' 
OR category = 'Fuel Input to Public and Industrial Power and CHP Plants by Fuel and Technology_Nuclear' OR category = 'Fuel Input to Public and Industrial Power and CHP Plants by Fuel and Technology_Waste non renewable' OR category = 'Fuel Input to Public and Industrial Power and CHP Plants by Fuel and Technology_Biomass solid' OR category = 'Fuel Input to Public and Industrial Power and CHP Plants by Fuel and Technology_Biofuel liquid' OR category = 'Fuel Input to Public and Industrial Power and CHP Plants by Fuel and Technology_Hydro' OR category = 'Fuel Input to Public and Industrial Power and CHP Plants by Fuel and Technology_Wind' OR category = 'Fuel Input to Public and Industrial Power and CHP Plants by Fuel and Technology_Solar' OR category = 'Fuel Input to Public and Industrial Power and CHP Plants by Fuel and Technology_Geothermal' OR category = 'Fuel Input to Public and Industrial Power and CHP Plants by Fuel and Technology_Ocean') AND (indicator = 'Heat and Power Unit' OR indicator = 'Combined Cycle' OR indicator = 'CHP' OR indicator = 'Carbon Capture and Storage' OR indicator = 'Steam Turbine' OR indicator = 'Steam Turbine small' OR indicator = 'Steam Turbine large' OR indicator = 'Conventional' OR indicator = 'Gas Turbine old' OR indicator = 'Gas Turbine new' OR indicator = 'Heat and Power Unit small' OR indicator = 'Heat and Power Unit large' OR indicator = 'Run of river' OR indicator = 'Dam <10MW' OR indicator = 'Dam 10-100MW' OR indicator = 'Dam >100MW' OR indicator = 'Pumped Storage <100MW' OR indicator = 'Pumped Storage >100MW' OR indicator = 'CHP old' OR indicator = 'CHP new' OR indicator = 'Fuel cell' OR indicator = 'Generation 2' OR indicator = 'Generation 3' OR indicator = 'Wave' OR indicator = 'Distributed PV' OR indicator = 'Utility PV' OR indicator = 'Offshore' OR indicator = 'Onshore') AND version = 'DataV2' AND year = '2015' ORDER BY version, pathway, year; -- sorting """.format(schema, table_out)) spec_demand = text(""" SELECT nid, pathway, version, region, year, category, indicator, value -- column FROM {0}.{1} -- table WHERE category = 'SpecifiedAnnualDemand' AND version = 'DataV2' AND year = '2015' ORDER BY version, pathway, year; -- sorting """.format(schema, table_in)) el_exchange = text(""" SELECT nid, pathway, version, region, year, category, indicator, value -- column FROM {0}.{1} -- table WHERE category = 'Electricity Exchange - Net Imports' AND version = 'DataV2' AND year = '2015' ORDER BY version, pathway, year; -- sorting """.format(schema, table_out)) rawData = pd.read_sql_query(emission, con) cap_cost_df = pd.read_sql_query(cap_cost, con) new_capa_df = pd.read_sql_query(new_capa, con) discount_rate_df = pd.read_sql_query(discount_rate, con) oper_life_df = pd.read_sql_query(oper_life, con) inst_capa_df = pd.read_sql_query(inst_capa, con) fix_cost_df = pd.read_sql_query(fix_cost, con) el_prod_df = pd.read_sql_query(el_prod, con) var_cost_df = pd.read_sql_query(var_cost, con) fuel_inp_df = pd.read_sql_query(fuel_inp, con) spec_demand_df = pd.read_sql_query(spec_demand, con) el_exchange_df = pd.read_sql_query(el_exchange, con) rawData = rawData.append(cap_cost_df, ignore_index = True) rawData = rawData.append(new_capa_df, ignore_index = True) rawData = rawData.append(discount_rate_df, ignore_index = True) rawData = rawData.append(oper_life_df, ignore_index = True) rawData = rawData.append(inst_capa_df, ignore_index = True) rawData = rawData.append(fix_cost_df, ignore_index = True) rawData = rawData.append(el_prod_df, ignore_index = True) rawData = 
rawData.append(var_cost_df, ignore_index = True) rawData = rawData.append(fuel_inp_df, ignore_index = True) rawData = rawData.append(spec_demand_df, ignore_index = True) rawData = rawData.append(el_exchange_df, ignore_index = True) rawData = rawData.drop(columns = 'version') return rawData #%% def import_excel(file_name, countries): """This function imports data on the population projection It imports the population for all countries modelled in OSeMBE and returns them as dictionray with a dataframe per country. Arguments --------- file_name : str File name of the excel file that contains the population data for the EU countries. countries : list list with the country codes of all modelled countries """ pop_dic = {} non_list_countr = ['CH','NO'] for country in countries: pop_dic[country] = pd.DataFrame() pop_data = pd.DataFrame(index=pd.Series(range(2015,2051))) pop_data['population'] = np.nan if country == 'CH' or country == 'NO': raw_data = pd.read_excel('pop_projection_NEWAGE_CH_NO.xlsx','MaGe Factors',usecols="AN:BW",nrows=2) raw_data.index = non_list_countr raw_data = raw_data.transpose() raw_data.index = pd.Series(range(2015,2051)) raw_data = raw_data.multiply(1000) pop_data['population'] = raw_data[country] pop_dic[country] = pop_data else: sheet = str(country+'-A') years = pd.read_excel(file_name,sheet,usecols="E:L",nrows=1) years = years.iloc[0] years = years.tolist() pop = pd.read_excel(file_name,sheet,usecols="E:L",skiprows=2,nrows=1) pop = pop.transpose() pop.index = years for y in years: pop_data.loc[y]['population'] = pop.loc[y] pop_data = pop_data.interpolate() pop_data = pop_data.multiply(1000000) pop_dic[country] = pop_data return pop_dic #%% Calculation of CO2 intensity per citizen def co2intensity(rawData, countries, pop_data): # rawData = output #for testing # pop_data = pop_raw #for testing CO2Intensity = pd.DataFrame(columns = ['pathway', 'region', 'year', 'indicator', 'value']) emission_data = rawData[rawData['category'] == 'Emissions'] pathways = emission_data['pathway'].unique().tolist() years = emission_data['year'].unique().tolist() # emission_data.insert(7, 'population', pd.Series([np.nan]), True) for pathway in pathways: for country in countries: for year in years: # emission_data[(emission_data['pathway']==country) & (emission_data['region']==country)] = pop_data[country].loc[year] # emission_data.loc[(emission_data['region']==country) & (emission_data['year']==year),'population'] = pop_data[country].loc[year] value = emission_data.loc[(emission_data['pathway']==pathway) & (emission_data['region']==country) & (emission_data['year']==year),'value'] / pop_data[country].loc[year, 'population'] CO2Intensity = CO2Intensity.append({"pathway":pathway,"region":country,"year": year, "indicator": "Carbon intensity", "value": value.iloc[0]}, ignore_index = True) return CO2Intensity #%% Calculation of the Discounted Investment per Citizen def disc_investment(): Disc_Investment = pd.DataFrame() return Disc_Investment #%% Calculation of Capital Recovery Factor def crf(rawData): # rawData = output #for testing req_data = rawData[(rawData['category']=='DiscountRate') | (rawData['category']=='OperationalLife')] req_data = req_data.drop_duplicates('indicator') req_data = req_data.drop(['pathway', 'region', 'year'], axis=1) crf = pd.DataFrame(columns = ['category', 'indicator', 'value']) technologies = req_data['indicator'][req_data['category']=='OperationalLife'] dr = req_data.loc[req_data['category']=='DiscountRate','value'] for technology in technologies: value = 
(dr.iloc[0]*(1+dr.iloc[0])**req_data.loc[req_data['indicator']==technology,'value'])/((1+dr.iloc[0])**req_data.loc[req_data['indicator']==technology,'value']-1) crf = crf.append({"category":'CapitalRecoveryFactor',"indicator":technology,"value":value.iloc[0]}, ignore_index = True) return crf #%% Calculation of the Capital Investment per country, technology and year def ci(): rawData = output #for testing req_data = rawData[(rawData['category']=='CapitalCost') | (rawData['category'].str.contains('New Capacity_'))] cap_cost = [71,74,75,744,76,79,745,80,81,92,94,95,96,97,98,99,100,101,102,103,104,105,106,107,110,111,11,746,113,114,115,116,117,118,119,120,121,125,126,128,129,130,131,132,134,135] new_cap = [277,273,274,306,275,244,304,245,246,292,248,249,250,251,252,253,254,255,279,280,281,282,283,284,257,258,259,305,260,261,262,263,264,265,267,268,294,289,290,286,286,286,287,287,270,271] ci =pd.DataFrame(columns = ['pathway', 'region', 'year', 'indicator', 'value']) pathways = req_data['pathway'].unique().tolist() countries = req_data['region'].unique().tolist() years = req_data['year'].unique().tolist() for pathway in pathways: for country in countries: for year in years: j = 0 for i in cap_cost: value = req_data.loc[(req_data['pathway']==pathway) & (req_data['region']==country) & (req_data['year']==year) & (req_data['nid']==i), 'value'] * req_data.loc[(req_data['pathway']==pathway) & (req_data['region']==country) & (req_data['year']==year) & (req_data['nid']==new_cap[j]), 'value'] if not value.empty: ci = ci.append({"pathway":pathway,"region":country,"year":year,"indicator":'CapitalInvestment',"value":value.iloc[0]}, ignore_index = True) j += 1 print(j) return ci #%% Calculation of the Annualized Investment Cost def aic(): aic =
pd.DataFrame()
pandas.DataFrame
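# Hedged illustration (not from the original source): the capital recovery factor
# computed in crf() above follows the standard annuity formula
# CRF = r * (1 + r)**n / ((1 + r)**n - 1). The technology names and lifetimes below
# are placeholders, not values from the REEEM database.
import numpy as np
import pandas as pd


def capital_recovery_factor(discount_rate, operational_life):
    r = discount_rate
    n = np.asarray(operational_life, dtype=float)
    return r * (1.0 + r) ** n / ((1.0 + r) ** n - 1.0)


techs = pd.DataFrame({'indicator': ['Onshore', 'Utility PV', 'Combined Cycle'],
                      'OperationalLife': [25, 30, 35]})
techs['CRF'] = capital_recovery_factor(0.05, techs['OperationalLife'])
print(techs)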
""" Module to process and analyse rheology data containing stress ramps Created: March 24th, 2020 Author: <NAME> """ import pandas as pd import numpy as np import matplotlib.pyplot as plt class Rstressramp(): """ Class with the functions relevant to stress ramps Main focus on extracting data from .csv files, computing K' and data visualization """ def readcsv_full2ramp(filename, export = True, file_export = None, action_name = 'viscometry ramp', variables = ['sample des', 'stress', 'strain (sample)'], sep = ',', dec = '.'): """ Function to select the desired data from raw .csv files. TO DO: combine this and Rtimedep.readcsv_full2time to a general function INPUT filename : string, file to read export : if True, the selected data is exported to a .csv file file_export : string, name of the file where the data will be exported. if None, then attaches the suffix '_clean_stress_ramp' to the file name. action_name : string, name of the dataset where the ramp data is variables : list of strings, desired variables to be extracted. The name can be a partial match of the column name, and is case insensitive. If more than one column matches a given variable name, all the corresponding columns are included. sep : string, character used as a delimiter in the .csv file dec : string, character used as a decimal separator in the .csv file OUTPUT select_data : data frame with the selected data. Only returns the value if export = False. When export = True, the function only exports the data without returning any values. """ # Import the file as a data frame data_input = pd.read_csv(filename, sep = sep, decimal = dec) print('\n Successfully imported the file: ') print(filename) # Because there is more than one action in the file, # select only the data for the stress ramp # TO DO: make this selection optional for the user data_frame = Rstressramp.splitaction_ramp(data_input, action_name = action_name) # Find the columns that match the desired variable names # and select the data within. columns = [] for ivar in variables: print('\n Variable to search:', ivar) column_names = [x for x in data_frame.columns if ivar in x.lower()] print('Variables found:', column_names) columns.extend(column_names) select_data = data_frame[columns] # Export the data to the file specified in file_export or # return the data frame if export == False. if export == True: if file_export == None: file_export = filename.replace('.csv','_clean_stress_ramp.csv') select_data.to_csv(file_export, index=False, sep = sep, decimal = dec) print('\n Selected data exported to:', file_export) else: return select_data def splitaction_ramp(data_frame, action_header = 'Action Name', action_name = 'viscometry ramp'): """ Function to extract the stress ramp data from a file with multiple types of measurement INPUT data_frame : pandas data frame with the full data action_header : string with the name of the column containing the type of measurement, or action, action_name : string with the name of the measurement, or action. It accepts a partial match, and is case insensitive. OUTPUT select_data : pandas data frame containing only the stress ram[] data. 
""" print('\n Splitting data by action name: ', action_name) # Gets all the actions within the data frame iaction = [x for x in data_frame[action_header].unique() if action_name in x.lower()] print(iaction) data_frame.set_index(action_header, inplace = True) # Find the location of the desired action, and save to a data frame # If the action name is not found, it prints an error message try: select_data = data_frame.loc[iaction] select_data.reset_index(inplace = True) except IndexError: print('ERROR: Action name not found') select_data = None return select_data def compute_k(stress, strain, show = None, remove_neg = True): """ Function to compute the differential storage modulus from the slope of the stress vs strain curve. INPUT stress : numpy array or list, Shear Stress (in Pa) data strain : numpy array or list, Shear Strain (in %) data show : 'stress', 'strain', 'both', or None. Plots the result remove_neg : if True, removes data where strain is negative OUTPUT stress : numpy array, mean value of the stress (in Pa) where k is computed strain : numpy array, mean value of strain (in %) where k is computed k : numpy array, differential storage modulus, (in Pa) """ # Work with numpy arrays stress = np.array(stress) strain = np.array(strain) # Start by cleaning the data from any NaN value ind_nan = np.isnan(strain) | np.isnan(stress) stress = stress[~ind_nan] strain = strain[~ind_nan] # Clean the data from values after rupture, strain must be # less than 5000% ind_nonrupture = np.where(strain < 5e3)[0] stress = stress[ind_nonrupture] strain = strain[ind_nonrupture] # Remove data where strain is negative. Note that if recording # the absolute strain of the sample, strain can be negative # in the initial interval. This data is tipically not useful # and therefore not desired. if remove_neg == True: ind_positive = np.where(strain >= 0) stress = stress[ind_positive] strain = strain[ind_positive] # Compute the differential values of strain and stress diff_stress = stress[1:] - stress[:-1] diff_strain = strain[1:] - strain[:-1] # Compute k' and the mean values of stress and strain k = diff_stress / diff_strain * 100 # multiplied by 100, because strain is in % stress = (stress[1:] + stress[:-1])/2 strain = (strain[1:] + strain[:-1])/2 # Show the results if desired if show == 'stress': Rstressramp.plot_k([stress], k) elif show == 'strain': Rstressramp.plot_k([strain], k) elif show == 'both': Rstressramp.plot_k([stress, strain], k) elif show is not None: print('Error: cannot plot: ', show) return [stress, strain, k] def plot_k(x, k, linewidth = 1.5, marker = 'o', color = 'k', marker_facecolor = 'k'): """ Function to plot, in log scale, the differential storage modulus, k as a function of stress, strain, or both. 
INPUT x : list of numpy arrays of dependent variables k : numpy array, differential storage modulus linewidth : float, width of the line to plot marker : string, marker of the lineplot, needs to be compatible with matplotlib.pyplot color : color for the lineplot, and marker border, needs to be compatible with matplotlib.pyplot marker_facecolor : color of the marker, compatible with matplotlib.pyplot """ # Plot the first variable x1 = x[0] plt.figure(figsize = (9,5)) plt.plot(x1, k, c = color, lw = linewidth, marker = marker, mec = color, mfc = marker_facecolor) plt.loglog() plt.ylabel('$K\'$ (Pa)') # If there is more than one dependent variable, # Plot also the second variable in a different figure try: x2 = x[1] plt.xlabel('$\sigma$ (Pa)') plt.pause(0.1) plt.figure(figsize =(9, 5)) plt.plot(x2, k, c = color, lw = linewidth, marker = marker, mec = color, mfc = marker_facecolor) plt.loglog() plt.ylabel('$K\'$ (Pa)') plt.xlabel('$\gamma$ (%)') except IndexError: pass def export_kall(data_frame, file_export = None, remove_neg = True, group_header = 'Sample Description', subgroup_header = None, stress_header = 'Shear stress(Pa)', strain_header = 'Shear strain (sample)(%)'): """ Function to compute the differential storage modulus for all the data groups (e.g. samples, interals, experiments) within a data_frame INPUT data_frame : pandas data frame with the full data file_export : string, name of the file where data will be exported if None, it saves to 'All_k_curves.csv' remove_neg : if True, removes data where strain is negative group_header : string, name of the column where the data group label are subgroup_header : string, name of the column where the sub dataset label are stress_header : string, name of the column where the stress data is strain_header : string, name of the column where the strain data is OUTPUT all_data : data frame with the computed stress, strain, k' It also saves the data_rame to file_export. """ groups_all = [] subgroups_all = [] s_all = [] y_all = [] k_all = [] for igroup in data_frame[group_header].unique(): data_group = data_frame.loc[data_frame[group_header] == igroup] try: list_subgroups = data_group[subgroup_header].unique() subset_header = subgroup_header except KeyError: list_subgroups = [igroup] subset_header = group_header for isubset in list_subgroups: data_subgroup = data_group.loc[data_group[subset_header] == isubset] stress = np.array(data_group[stress_header]) strain = np.array(data_group[strain_header]) [s, y, k] = Rstressramp.compute_k(stress, strain, remove_neg = remove_neg) groups_all.extend([igroup]*len(s)) subgroups_all.extend([isubset]*len(s)) s_all.extend(s) y_all.extend(y) k_all.extend(k) all_data =
pd.DataFrame()
pandas.DataFrame
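# Hedged illustration (not from the original source): the core of compute_k() above is
# a finite-difference slope of the stress-strain ramp, K' = d(stress)/d(strain), with
# strain in percent (hence the factor of 100). The toy ramp below is made up.
import numpy as np

stress = np.array([1.0, 2.0, 4.0, 8.0])    # Pa
strain = np.array([0.5, 1.0, 2.5, 6.0])    # %

k_prime = np.diff(stress) / np.diff(strain) * 100.0        # Pa
mid_stress = (stress[1:] + stress[:-1]) / 2.0
mid_strain = (strain[1:] + strain[:-1]) / 2.0

for s, g, k in zip(mid_stress, mid_strain, k_prime):
    print("stress %.2f Pa, strain %.2f %%, K' = %.1f Pa" % (s, g, k))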
import pandas as pd import numpy as np import torch from scipy.spatial import distance from sklearn.metrics.pairwise import cosine_similarity from scipy.spatial.distance import pdist from sentence_transformers import util from collections import OrderedDict def get_similar_comments(embedder, dataset, corpus, sarcasm_embeddings, query, n): """ Returns the most similar comments in terms of cosine similarity of their respective embeddings Parameters: embedder (SentenceTransformer): chosen model from HuggingFace dataset (DataFrame): comment metadata corpus (list): list of all comments sarcasm_embeddings (Tensor): embeddings of all comments query (string): the text of the comment n (int): number of comments to recommend Returns: [DataFrame: Top n similar comments with metadata, DataFrame: with similarity (used for diversity computation)] """ # Find the closest k sentences of the corpus for each query sentence based on cosine similarity top_k = min(n, len(corpus)) query_embedding = embedder.encode(query, convert_to_tensor=True) similarities = [] pairs = [] # We use cosine-similarity and torch.topk to find the highest k scores cos_scores = util.pytorch_cos_sim(query_embedding.cpu(), sarcasm_embeddings)[0] top_results = torch.topk(cos_scores, k=top_k) for score, idx in zip(top_results[0], top_results[1]): pairs.append(tuple((corpus[idx], score))) recommend_frame = [] for val in pairs: recommend_frame.append({'comment':val[0],'similarity':val[1].cpu().numpy()}) df = pd.DataFrame(recommend_frame) df_sim = df.copy() df_sim = df_sim = df_sim.set_index(['comment']) df = df.join(dataset.set_index('comment'), on='comment') return df, df_sim def calculate_quality(c, R, df, df_sim): """ Returns the quality of a particular comment - The quality of an item c is proportional to the similarity between c and the current target t, and to the diversity of c relative to those items so far selected, R = {r1,...,rm}. Parameters: c (string): current comment R (list): recommendation set df (DataFrame): All comments (generated from get_similar_comments) except those already in R df_sim (DataFrame): All comments (generated from get_similar_comments) Returns: int: quality """ quality = 0 rel_diversity = 0 if len(R) == 0: rel_diversity = 1 vector = np.array(df['vector'][df['comment'] == c].to_numpy()[0]).reshape(1, -1) diversity = [] for item in R: diversity.append( 1 - cosine_similarity( vector, np.array(df_sim['vector'][df_sim['comment'] == item].to_numpy()[0]).reshape(1, -1) ) ) rel_diversity = sum(diversity)/len(R) # relative diversity similarity = df['similarity'][df['comment'] == c].to_numpy()[0] # similarity quality = rel_diversity[0][0] * similarity # quality return quality def greedy_selection(embedder, dataset, corpus, sarcasm_embeddings, query, n): """ Returns comment recommendations generated by implementing the Bounded Greedy Selection Algorithm Parameters: embedder (SentenceTransformer): chosen model from HuggingFace dataset (DataFrame): comment metadata corpus (list): list of all comments sarcasm_embeddings (Tensor): embeddings of all comments query (string): the text of the comment n (int): number of comments to recommend Returns: [DataFrame: Top diverse comments with metadata, DataFrame: with similarity (used for diversity computation)] """ # Step 1: Select the best x = 500 cases according to their similarity to the target query. 
Set C' C_prime = get_similar_comments(embedder, dataset, corpus, sarcasm_embeddings, query, 500)[0] # Step 2: Add the most similar item from C' as the first item in the result set R and drop this item from C' df_temp = C_prime.copy() recommendations = ['dummy'] recommendations[0] = C_prime["comment"][0] # first item is always the one with the highest similarity index = df_temp[(df_temp.comment == recommendations[0])].index df_temp = df_temp.drop(index) # Step 3: During each subsequent iteration, the item selected is the one with the highest quality # with respect to the set of cases selected during the previous iteration # set k = 50 to get top 50 recommendations for i in range(n): qualities = {} # Calculate the quality of each subreddit for item in df_temp['comment']: qualities[item] = calculate_quality(item, recommendations, df_temp, C_prime) highest_quality = max(qualities.values()) highest_quality_subreddit = max(qualities, key= lambda x: qualities[x]) recommendations.append(highest_quality_subreddit) index = df_temp[(df_temp.comment == recommendations[-1])].index df_temp = df_temp.drop(index) # Evaluate the recommendations similarities = [] for item in recommendations: sim = C_prime['similarity'][C_prime['comment'] == item].to_numpy()[0] similarities.append(sim) pairs = list(zip(recommendations, similarities)) recommend_frame = [] for val in pairs: recommend_frame.append({'comment':val[0],'similarity':val[1].item()}) df_sim = pd.DataFrame(recommend_frame) df = df_sim.copy() df = df.join(dataset.set_index('comment'), on='comment') df_sim = df_sim.set_index(['comment']) df = df.reset_index() df = df.drop(columns=['vector','index']) pd.set_option("display.max_colwidth", 300) return df, df_sim def topic_diversification(embedder, dataset, corpus, sarcasm_embeddings, query, n): """ Returns comment recommendations generated by implementing the Topic Diversification Algorithm Parameters: embedder (SentenceTransformer): chosen model from HuggingFace dataset (DataFrame): comment metadata corpus (list): list of all comments sarcasm_embeddings (Tensor): embeddings of all comments query (string): the text of the comment n (int): number of comments to recommend Returns: [DataFrame: Top diverse comments with metadata, DataFrame: with similarity (used for diversity computation)] """ # Step 1: Generate predictions (at least 5N for a final top-N recommendation list). N = 5 * n C_prime = get_similar_comments(embedder, dataset, corpus, sarcasm_embeddings, query, N)[0] # Step 2: For each N+1 position item calculate the ILS (diversity) if this item was part of the top-N list. # Prepare df for pairwise distance df_ils = C_prime.copy() df_ils = df_ils.set_index(['comment']) ils = {} # set ILS for first item ils[df_ils.head(1)['similarity'].index.values.item(0)] = df_ils.head(1)['similarity'].values[0].item() for i in range(2, N+1): top_n = df_ils.head(i - 1) top_n = top_n[['similarity']] bottom = df_ils.tail(len(df_ils) - i + 1) bottom = bottom[['similarity']] for item in bottom.index: row_data = bottom.loc[[item], :] top_n = top_n.append(row_data) ils[item] = sum([x for x in pdist(top_n)]) / len(top_n) # ILS Calculation top_n = top_n.drop(index=item) # Step 3: Sort the remaining items in reverse (according to ILS rank) to get their dissimilarity rank. 
# A low ILS score means a higher dissimilarity rank dissimilarity_rank = {k: v for k, v in sorted(ils.items(), key=lambda item: item[1], reverse=True)} # Step 4: Calculate new rank for each item as r = a ∗ P + b ∗ Pd, with P being the original rank, # Pd being the dissimilarity rank and a, b being constants in range [0, 1] # a,b ∈ [0,1] a = 0.01 b = 0.99 new_rank = {} dissimilarity_rank = OrderedDict(dissimilarity_rank) for item in df_ils.index: P = C_prime['similarity'][C_prime['comment'] == item].values[0] Pd = dissimilarity_rank[item] new_rank[item] = ((a * P) + (b * Pd)) # Step 5: Select the top-N items according to the newly calculated rank final_ranks = {k: v for k, v in sorted(new_rank.items(), key=lambda item: item[1], reverse=True)} data = [] for comment, score in final_ranks.items(): data.append({'comment': comment,'rank': score}) df_sim =
pd.DataFrame(data)
pandas.DataFrame
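# Hedged illustration (not from the original source): a toy version of the quality score
# used by the bounded greedy selection above, quality(c) = rel_diversity(c, R) * sim(c, query),
# where rel_diversity is the mean cosine distance to the items already selected.
# The embeddings and similarity scores below are made up.
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

candidates = {'c1': np.array([[1.0, 0.0]]),
              'c2': np.array([[0.9, 0.1]]),
              'c3': np.array([[0.0, 1.0]])}
similarity_to_query = {'c1': 0.95, 'c2': 0.93, 'c3': 0.60}


def quality(name, selected):
    if not selected:
        return similarity_to_query[name]          # relative diversity defaults to 1
    dists = [1.0 - cosine_similarity(candidates[name], candidates[s])[0][0]
             for s in selected]
    return (sum(dists) / len(selected)) * similarity_to_query[name]


selected = ['c1']                                 # the most similar item seeds R
best = max(['c2', 'c3'], key=lambda n: quality(n, selected))
print(best)                                       # 'c3' wins: less similar to the query, far more diverse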
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt


# Loads the episode lengths from the csv files into a dictionary and return the dictionary
def load_data(algpath, name='episodes'):
    Data = []
    dirFiles = os.listdir(algpath)
    # Files = np.array([i for i in dirFiles if 'episodes' in i])
    Files = np.array([i for i in dirFiles if name in i])
    for fileIndex in range(len(Files)):
        if name == "episodes":
            List = pd.read_csv(algpath+'/'+Files[fileIndex])
            Data.append(List['episode lengths'])
        elif name == "rewards":
            List = pd.read_csv(algpath+'/'+Files[fileIndex])
            Data.append(List['rewards'])
    # return np.array(Data) if len(Data) !=1 else np.array([Data])
    return np.array(Data)


def convert_data_ep(Data):
    convertedData = []
    for run in range(len(Data)):
        episodeLengthsData = Data[run].to_numpy()
        failureTimesteps = np.cumsum(episodeLengthsData)
        totalTimesteps = failureTimesteps[-1]
        # Not a failure on the last episode on the last timestep
        if episodeLengthsData[-1] != 0.0:
            failureTimesteps = failureTimesteps[:-1]
        failureTimesteps_DataFrame = pd.DataFrame({'failures': failureTimesteps})
        convertedData.append(failureTimesteps_DataFrame)
    return convertedData, totalTimesteps


def convert_data_reward(data):
    convertedData = np.zeros(data.shape)
    convertedData[:, 0] = data[:, 0]
    for s in range(1, data.shape[1]):
        convertedData[:, s] = data[:, s] + convertedData[:, s-1]
    return convertedData, data.shape[1]


def transform_data(failureTimesteps, totalTimesteps, transformation='Rewards', window=0, alpha=0.0004):
    transformedData = []
    for run in range(len(failureTimesteps)):
        # Calculate rewards from failure timesteps
        indexing = (failureTimesteps[run] - 1).to_numpy().flatten()
        rewardsList = np.zeros(totalTimesteps)
        rewardsList[indexing] = -1.0
        # Keep the data to rewards
        if transformation == 'Rewards':
            tempData = pd.DataFrame({'rewards': rewardsList})
        # Returns are equal to sum of rewards
        elif transformation == 'Returns':
            returnsList = np.cumsum(rewardsList)
            tempData = pd.DataFrame({'returns': returnsList})
        # Failures are equal to negative returns
        elif transformation == 'Failures':
            returnsList = np.cumsum(rewardsList)
            failuresList = -1 * returnsList
            tempData =
pd.DataFrame({'cumulativeFailures': failuresList})
pandas.DataFrame
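# Hedged illustration (not from the original source): a compact view of what
# convert_data_ep/transform_data above compute -- episode lengths become failure
# timesteps, failures become -1 rewards, and cumulative sums give returns and the
# running failure count. The episode lengths below are made up.
import numpy as np
import pandas as pd

episode_lengths = np.array([3, 5, 2, 4])           # steps until each failure
failure_timesteps = np.cumsum(episode_lengths)     # [3, 8, 10, 14]
total_timesteps = failure_timesteps[-1]

rewards = np.zeros(total_timesteps)
rewards[failure_timesteps - 1] = -1.0              # -1 reward at every failure step
returns = np.cumsum(rewards)
failures = -returns                                # cumulative number of failures

print(pd.DataFrame({'rewards': rewards, 'returns': returns, 'failures': failures}))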
from __future__ import annotations from linearmodels.compat.statsmodels import Summary import datetime as dt from typing import Dict, List, Optional, Union import numpy as np from pandas import DataFrame, Series, concat from property_cached import cached_property from scipy import stats from statsmodels.iolib.summary import SimpleTable, fmt_2cols, fmt_params from linearmodels.iv.results import default_txt_fmt, stub_concat, table_concat from linearmodels.shared.base import _ModelComparison, _SummaryStr from linearmodels.shared.hypotheses import WaldTestStatistic, quadratic_form_test from linearmodels.shared.io import _str, add_star, pval_format from linearmodels.shared.utility import AttrDict from linearmodels.typing import Float64Array, OptionalArrayLike __all__ = [ "PanelResults", "PanelEffectsResults", "RandomEffectsResults", "FamaMacBethResults", "compare", ] class PanelResults(_SummaryStr): """ Results container for panel data models that do not include effects """ def __init__(self, res: AttrDict): self._params = res.params.squeeze() self._deferred_cov = res.deferred_cov self._debiased = res.debiased self._df_resid = res.df_resid self._df_model = res.df_model self._nobs = res.nobs self._name = res.name self._var_names = res.var_names self._residual_ss = res.residual_ss self._total_ss = res.total_ss self._r2 = res.r2 self._r2w = res.r2w self._r2b = res.r2b self._r2o = res.r2o self._c2w = res.c2w self._c2b = res.c2b self._c2o = res.c2o self._s2 = res.s2 self._entity_info = res.entity_info self._time_info = res.time_info self.model = res.model self._cov_type = res.cov_type self._datetime = dt.datetime.now() self._resids = res.resids self._wresids = res.wresids self._index = res.index self._f_info = res.f_info self._f_stat = res.f_stat self._loglik = res.loglik self._fitted = res.fitted self._effects = res.effects self._idiosyncratic = res.idiosyncratic self._original_index = res.original_index self._not_null = res.not_null @property def params(self) -> Series: """Estimated parameters""" return Series(self._params, index=self._var_names, name="parameter") @cached_property def cov(self) -> DataFrame: """Estimated covariance of parameters""" return DataFrame( self._deferred_cov(), columns=self._var_names, index=self._var_names ) @property def std_errors(self) -> Series: """Estimated parameter standard errors""" return Series(np.sqrt(np.diag(self.cov)), self._var_names, name="std_error") @property def tstats(self) -> Series: """Parameter t-statistics""" return Series(self._params / self.std_errors, name="tstat") @cached_property def pvalues(self) -> Series: """ Parameter p-vals. Uses t(df_resid) if ``debiased`` is True, else normal """ abs_tstats = np.abs(self.tstats) if self._debiased: pv = 2 * (1 - stats.t.cdf(abs_tstats, self.df_resid)) else: pv = 2 * (1 - stats.norm.cdf(abs_tstats)) return Series(pv, index=self._var_names, name="pvalue") @property def df_resid(self) -> int: """ Residual degree of freedom Notes ----- Defined as nobs minus nvar minus the number of included effects, if any. """ return self._df_resid @property def df_model(self) -> int: """ Model degree of freedom Notes ----- Defined as nvar plus the number of included effects, if any. 
""" return self._df_model @property def nobs(self) -> int: """Number of observations used to estimate the model""" return self._nobs @property def name(self) -> str: """Model name""" return self._name @property def total_ss(self) -> float: """Total sum of squares""" return self._total_ss @property def model_ss(self) -> float: """Residual sum of squares""" return self._total_ss - self._residual_ss @property def resid_ss(self) -> float: """Residual sum of squares""" return self._residual_ss @property def rsquared(self) -> float: """Model Coefficient of determination""" return self._r2 @property def rsquared_between(self) -> float: """ Between Coefficient of determination Returns ------- float Between coefficient of determination Notes ----- The between rsquared measures the fit of the time-averaged dependent variable on the time averaged dependent variables. It accounts for the weights used in the estimation of the model. See the mathematical reference in the documentation for the formal definition of this measure. """ return self._r2b @property def rsquared_within(self) -> float: """ Within coefficient of determination Returns ------- float Within coefficient of determination Notes ----- The within rsquared measures the fit of the dependent purged of entity effects on the exogenous purged of entity effects. It accounts for the weights used in the estimation of the model. See the mathematical reference in the documentation for the formal definition of this measure. """ return self._r2w @property def rsquared_overall(self) -> float: """ Overall coefficient of determination Returns ------- float Between coefficient of determination Notes ----- The overall rsquared measures the fit of the dependent variable on the dependent variables ignoring any included effects. It accounts for the weights used in the estimation of the model. See the mathematical reference in the documentation for the formal definition of this measure. """ return self._r2o @property def corr_squared_between(self) -> float: r""" Between Coefficient of determination using squared correlation Returns ------- float Between coefficient of determination Notes ----- The between rsquared measures the fit of the time-averaged dependent variable on the time averaged dependent variables. This measure is based on the squared correlation between the entity-wise averaged dependent variables and their average predictions. .. math:: Corr[\bar{y}_i, \bar{x}_i\hat{\beta}] This measure **does not** account for weights. """ return self._c2b @property def corr_squared_within(self) -> float: r""" Within coefficient of determination using squared correlation Returns ------- float Within coefficient of determination Notes ----- The within rsquared measures the fit of the dependent purged of entity effects on the exogenous purged of entity effects. This measure is based on the squared correlation between the entity-wise demeaned dependent variables and their demeaned predictions. .. math:: Corr[y_{it}-\bar{y}_i, (x_{it}-\bar{x}_i)\hat{\beta}] This measure **does not** account for weights. """ return self._c2w @property def corr_squared_overall(self) -> float: r""" Overall coefficient of determination using squared correlation Returns ------- float Between coefficient of determination Notes ----- The overall rsquared measures the fit of the dependent variable on the dependent variables ignoring any included effects. This measure is based on the squared correlation between the dependent variables and their predictions. .. 
math:: Corr[y_{it}, x_{it}\hat{\beta}] This measure **does not** account for weights. """ return self._c2o @property def s2(self) -> float: """Residual variance estimator""" return self._s2 @property def entity_info(self) -> Series: """Statistics on observations per entity""" return self._entity_info @property def time_info(self) -> Series: """Statistics on observations per time interval""" return self._time_info def conf_int(self, level: float = 0.95) -> DataFrame: """ Confidence interval construction Parameters ---------- level : float Confidence level for interval Returns ------- DataFrame Confidence interval of the form [lower, upper] for each parameters Notes ----- Uses a t(df_resid) if ``debiased`` is True, else normal. """ ci_quantiles = [(1 - level) / 2, 1 - (1 - level) / 2] if self._debiased: q = stats.t.ppf(ci_quantiles, self.df_resid) else: q = stats.norm.ppf(ci_quantiles) q = q[None, :] params = np.asarray(self.params)[:, None] ci = params + np.asarray(self.std_errors)[:, None] * q return DataFrame(ci, index=self._var_names, columns=["lower", "upper"]) @property def summary(self) -> Summary: """ Model estimation summary. Returns ------- Summary Summary table of model estimation results Supports export to csv, html and latex using the methods ``summary.as_csv()``, ``summary.as_html()`` and ``summary.as_latex()``. """ title = self.name + " Estimation Summary" mod = self.model top_left = [ ("Dep. Variable:", mod.dependent.vars[0]), ("Estimator:", self.name), ("No. Observations:", self.nobs), ("Date:", self._datetime.strftime("%a, %b %d %Y")), ("Time:", self._datetime.strftime("%H:%M:%S")), ("Cov. Estimator:", self._cov_type), ("", ""), ("Entities:", str(int(self.entity_info["total"]))), ("Avg Obs:", _str(self.entity_info["mean"])), ("Min Obs:", _str(self.entity_info["min"])), ("Max Obs:", _str(self.entity_info["max"])), ("", ""), ("Time periods:", str(int(self.time_info["total"]))), ("Avg Obs:", _str(self.time_info["mean"])), ("Min Obs:", _str(self.time_info["min"])), ("Max Obs:", _str(self.time_info["max"])), ("", ""), ] is_invalid = np.isfinite(self.f_statistic.stat) f_stat = _str(self.f_statistic.stat) if is_invalid else "--" f_pval = pval_format(self.f_statistic.pval) if is_invalid else "--" f_dist = self.f_statistic.dist_name if is_invalid else "--" f_robust = _str(self.f_statistic_robust.stat) if is_invalid else "--" f_robust_pval = ( pval_format(self.f_statistic_robust.pval) if is_invalid else "--" ) f_robust_name = self.f_statistic_robust.dist_name if is_invalid else "--" top_right = [ ("R-squared:", _str(self.rsquared)), ("R-squared (Between):", _str(self.rsquared_between)), ("R-squared (Within):", _str(self.rsquared_within)), ("R-squared (Overall):", _str(self.rsquared_overall)), ("Log-likelihood", _str(self._loglik)), ("", ""), ("F-statistic:", f_stat), ("P-value", f_pval), ("Distribution:", f_dist), ("", ""), ("F-statistic (robust):", f_robust), ("P-value", f_robust_pval), ("Distribution:", f_robust_name), ("", ""), ("", ""), ("", ""), ("", ""), ] stubs = [] vals = [] for stub, val in top_left: stubs.append(stub) vals.append([val]) table = SimpleTable(vals, txt_fmt=fmt_2cols, title=title, stubs=stubs) # create summary table instance smry = Summary() # Top Table # Parameter table fmt = fmt_2cols fmt["data_fmts"][1] = "%18s" top_right = [("%-21s" % (" " + k), v) for k, v in top_right] stubs = [] vals = [] for stub, val in top_right: stubs.append(stub) vals.append([val]) table.extend_right(SimpleTable(vals, stubs=stubs)) smry.tables.append(table) param_data = np.c_[ 
self.params.values[:, None], self.std_errors.values[:, None], self.tstats.values[:, None], self.pvalues.values[:, None], self.conf_int(), ] data = [] for row in param_data: txt_row = [] for i, v in enumerate(row): f = _str if i == 3: f = pval_format txt_row.append(f(v)) data.append(txt_row) title = "Parameter Estimates" table_stubs = list(self.params.index) header = ["Parameter", "Std. Err.", "T-stat", "P-value", "Lower CI", "Upper CI"] table = SimpleTable( data, stubs=table_stubs, txt_fmt=fmt_params, headers=header, title=title ) smry.tables.append(table) return smry @property def resids(self) -> Series: """ Model residuals Notes ----- These residuals are from the estimated model. They will not have the same shape as the original data whenever the model is estimated on transformed data which has a different shape.""" return Series(self._resids.squeeze(), index=self._index, name="residual") def _out_of_sample( self, exog: OptionalArrayLike, data: Optional[DataFrame], missing: bool ) -> DataFrame: """Interface between model predict and predict for OOS fits""" if exog is not None and data is not None: raise ValueError( "Predictions can only be constructed using one " "of exog or data, but not both." ) pred = self.model.predict(self.params, exog=exog, data=data) if not missing: pred = pred.loc[pred.notnull().all(1)] return pred def predict( self, exog: OptionalArrayLike = None, *, data: Optional[DataFrame] = None, fitted: bool = True, effects: bool = False, idiosyncratic: bool = False, missing: bool = False, ) -> DataFrame: """ In- and out-of-sample predictions Parameters ---------- exog : array_like Exogenous values to use in out-of-sample prediction (nobs by nexog) data : DataFrame, optional DataFrame to use for out-of-sample predictions when model was constructed using a formula. fitted : bool, optional Flag indicating whether to include the fitted values effects : bool, optional Flag indicating whether to include estimated effects idiosyncratic : bool, optional Flag indicating whether to include the estimated idiosyncratic shock missing : bool, optional Flag indicating to adjust for dropped observations. if True, the values returns will have the same size as the original input data before filtering missing values Returns ------- DataFrame DataFrame containing columns for all selected output Notes ----- `data` can only be used when the model was created using the formula interface. `exog` can be used for both a model created using a formula or a model specified with dependent and exog arrays. When using `exog` to generate out-of-sample predictions, the variable order must match the variables in the original model. Idiosyncratic errors and effects are not available for out-of-sample predictions. """ if not (exog is None and data is None): return self._out_of_sample(exog, data, missing) out = [] if fitted: out.append(self.fitted_values) if effects: out.append(self.estimated_effects) if idiosyncratic: out.append(self.idiosyncratic) if len(out) == 0: raise ValueError("At least one output must be selected") out_df: DataFrame = concat(out, axis=1) if missing: index = self._original_index out_df = out_df.reindex(index) return out_df @property def fitted_values(self) -> Series: """Fitted values""" return self._fitted @property def estimated_effects(self) -> Series: """ Estimated effects Notes ----- NaN filled when models do not include effects. 
""" return self._effects @property def idiosyncratic(self) -> Series: """ Idiosyncratic error Notes ----- Differs from resids since this is the estimated idiosyncratic shock from the data. It has the same dimension as the dependent data. The shape and nature of resids depends on the model estimated. These estimates only depend on the model estimated through the estimation of parameters and inclusion of effects, if any. """ return self._idiosyncratic @property def wresids(self) -> Series: """Weighted model residuals""" return Series( self._wresids.squeeze(), index=self._index, name="weighted residual" ) @property def f_statistic_robust(self) -> WaldTestStatistic: r""" Joint test of significance for non-constant regressors Returns ------- WaldTestStatistic Statistic value, distribution and p-value Notes ----- Implemented as a Wald test using the estimated parameter covariance, and so inherits any robustness that the choice of covariance estimator provides. .. math:: W = \hat{\beta}_{-}' \hat{\Sigma}_{-}^{-1} \hat{\beta}_{-} where :math:`\hat{\beta}_{-}` does not include the model constant and :math:`\hat{\Sigma}_{-}` is the estimated covariance of the parameters, also excluding the constant. The test statistic is distributed as :math:`\chi^2_{k}` where k is the number of non- constant parameters. If ``debiased`` is True, then the Wald statistic is divided by the number of restrictions and inference is made using an :math:`F_{k,df}` distribution where df is the residual degree of freedom from the model. """ from linearmodels.panel.model import _deferred_f return _deferred_f( self.params, self.cov, self._debiased, self.df_resid, self._f_info ) @property def f_statistic(self) -> WaldTestStatistic: r""" Joint test of significance for non-constant regressors Returns ------- WaldTestStatistic Statistic value, distribution and p-value Notes ----- Classical F-stat that is only correct under an assumption of homoskedasticity. The test statistic is defined as .. math:: F = \frac{(RSS_R - RSS_U)/ k}{RSS_U / df_U} where :math:`RSS_R` is the restricted sum of squares from the model where the coefficients on all exog variables is zero, excluding a constant if one was included. :math:`RSS_U` is the unrestricted residual sum of squares. k is the number of non-constant regressors in the model and :math:`df_U` is the residual degree of freedom in the unrestricted model. The test has an :math:`F_{k,df_U}` distribution. """ return self._f_stat @property def loglik(self) -> float: """Log-likelihood of model""" return self._loglik def wald_test( self, restriction: Optional[Union[Float64Array, DataFrame]] = None, value: Optional[Union[Float64Array, Series]] = None, *, formula: Optional[Union[str, List[str]]] = None, ) -> WaldTestStatistic: r""" Test linear equality constraints using a Wald test Parameters ---------- restriction : {ndarray, DataFrame}, optional q by nvar array containing linear weights to apply to parameters when forming the restrictions. It is not possible to use both restriction and formula. value : {ndarray, Series}, optional q element array containing the restricted values. formula : Union[str, list[str]], optional patsy linear constraints. The simplest formats are one of: * A single comma-separated string such as 'x1=0, x2+x3=1' * A list of strings where each element is a single constraint such as ['x1=0', 'x2+x3=1'] * A single string without commas to test simple constraints such as 'x1=x2=x3=0' It is not possible to use both ``restriction`` and ``formula``. 
Returns ------- WaldTestStatistic Test statistic for null that restrictions are valid. Notes ----- Hypothesis test examines whether :math:`H_0:C\theta=v` where the matrix C is ``restriction`` and v is ``value``. The test statistic has a :math:`\chi^2_q` distribution where q is the number of rows in C. Examples -------- >>> from linearmodels.datasets import wage_panel >>> import statsmodels.api as sm >>> import numpy as np >>> import pandas as pd >>> data = wage_panel.load() >>> year = pd.Categorical(data.year) >>> data = data.set_index(['nr', 'year']) >>> data['year'] = year >>> from linearmodels.panel import PanelOLS >>> exog_vars = ['expersq', 'union', 'married', 'year'] >>> exog = sm.add_constant(data[exog_vars]) >>> mod = PanelOLS(data.lwage, exog, entity_effects=True) >>> fe_res = mod.fit() Test the restriction that union and married have 0 coefficients >>> restriction = np.zeros((2, 11)) >>> restriction[0, 2] = 1 >>> restriction[1, 3] = 1 >>> value = np.array([0, 0]) >>> fe_res.wald_test(restriction, value) The same test using formulas >>> formula = 'union = married = 0' >>> fe_res.wald_test(formula=formula) """ return quadratic_form_test( self.params, self.cov, restriction=restriction, value=value, formula=formula ) class PanelEffectsResults(PanelResults): """ Results container for panel data models that include effects """ def __init__(self, res: AttrDict) -> None: super(PanelEffectsResults, self).__init__(res) self._other_info = res.other_info self._f_pooled = res.f_pooled self._entity_effect = res.entity_effects self._time_effect = res.time_effects self._other_effect = res.other_effects self._rho = res.rho self._sigma2_eps = res.sigma2_eps self._sigma2_effects = res.sigma2_effects self._r2_ex_effects = res.r2_ex_effects self._effects = res.effects @property def f_pooled(self) -> WaldTestStatistic: r""" Test that included effects are jointly zero. Returns ------- WaldTestStatistic Statistic value, distribution and p-value Notes ----- Joint test that all included effects are zero. Only correct under an assumption of homoskedasticity. The test statistic is defined as .. math:: F = \frac{(RSS_{pool}-RSS_{effect})/(df_{pool}-df_{effect})}{RSS_{effect}/df_{effect}} where :math:`RSS_{pool}` is the residual sum of squares from a no- effect (pooled) model. :math:`RSS_{effect}` is the residual sum of squares from a model with effects. :math:`df_{pool}` is the residual degree of freedom in the pooled regression and :math:`df_{effect}` is the residual degree of freedom from the model with effects. The test has an :math:`F_{k,df_{effect}}` distribution where :math:`k=df_{pool}-df_{effect}`. """ return self._f_pooled @property def included_effects(self) -> List[str]: """List of effects included in the model""" entity_effect = self._entity_effect time_effect = self._time_effect other_effect = self._other_effect effects = [] if entity_effect or time_effect or other_effect: if entity_effect: effects.append("Entity") if time_effect: effects.append("Time") if other_effect: oe = self.model._other_effect_cats.dataframe for c in oe: effects.append("Other Effect (" + str(c) + ")") return effects @property def other_info(self) -> Optional[DataFrame]: """Statistics on observations per group for other effects""" return self._other_info @property def rsquared_inclusive(self) -> float: """Model Coefficient of determination including fit of included effects""" return self._r2_ex_effects @property def summary(self) -> Summary: """ Model estimation summary. 
Returns ------- Summary Summary table of model estimation results Supports export to csv, html and latex using the methods ``summary.as_csv()``, ``summary.as_html()`` and ``summary.as_latex()``. """ smry = super(PanelEffectsResults, self).summary is_invalid = np.isfinite(self.f_pooled.stat) f_pool = _str(self.f_pooled.stat) if is_invalid else "--" f_pool_pval = pval_format(self.f_pooled.pval) if is_invalid else "--" f_pool_name = self.f_pooled.dist_name if is_invalid else "--" extra_text = [] if is_invalid: extra_text.append("F-test for Poolability: {0}".format(f_pool)) extra_text.append("P-value: {0}".format(f_pool_pval)) extra_text.append("Distribution: {0}".format(f_pool_name)) extra_text.append("") if self.included_effects: effects = ", ".join(self.included_effects) extra_text.append("Included effects: " + effects) if self.other_info is not None: nrow = self.other_info.shape[0] plural = "s" if nrow > 1 else "" extra_text.append(f"Model includes {nrow} other effect{plural}") for c in self.other_info.T: col = self.other_info.T[c] extra_text.append("Other Effect {0}:".format(c)) stats = "Avg Obs: {0}, Min Obs: {1}, Max Obs: {2}, Groups: {3}" stats = stats.format( _str(col["mean"]), _str(col["min"]), _str(col["max"]), int(col["total"]), ) extra_text.append(stats) smry.add_extra_txt(extra_text) return smry @property def variance_decomposition(self) -> Series: """Decomposition of total variance into effects and residuals""" vals = [self._sigma2_effects, self._sigma2_eps, self._rho] index = ["Effects", "Residual", "Percent due to Effects"] return Series(vals, index=index, name="Variance Decomposition") class RandomEffectsResults(PanelResults): """ Results container for random effect panel data models """ def __init__(self, res: AttrDict) -> None: super(RandomEffectsResults, self).__init__(res) self._theta = res.theta self._sigma2_effects = res.sigma2_effects self._sigma2_eps = res.sigma2_eps self._rho = res.rho @property def variance_decomposition(self) -> Series: """Decomposition of total variance into effects and residuals""" vals = [self._sigma2_effects, self._sigma2_eps, self._rho] index = ["Effects", "Residual", "Percent due to Effects"] return
Series(vals, index=index, name="Variance Decomposition")
pandas.Series
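The row above stops right where `variance_decomposition` hands its three values to the `pandas.Series` constructor, which is the API the completion names. Below is a minimal sketch of that construction, with invented placeholder numbers standing in for the fitted `_sigma2_effects`, `_sigma2_eps` and `_rho` attributes (rho is taken here as the effects' share of total variance, which is how the "Percent due to Effects" entry is conventionally read):

import pandas as pd

# Invented placeholder values standing in for the quantities the results
# object stores as _sigma2_effects, _sigma2_eps and _rho.
sigma2_effects = 0.42   # variance attributed to the included effects
sigma2_eps = 1.37       # idiosyncratic (residual) variance
rho = sigma2_effects / (sigma2_effects + sigma2_eps)

# Same pattern as variance_decomposition above: a labelled pandas.Series so
# the three numbers print with self-describing row names.
decomp = pd.Series(
    [sigma2_effects, sigma2_eps, rho],
    index=["Effects", "Residual", "Percent due to Effects"],
    name="Variance Decomposition",
)
print(decomp)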
import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import os plt.rcParams["svg.hashsalt"]=0 def mkdirs(pre_path,parm_name): try: os.makedirs("../figures/"+pre_path+parm_name) except: pass try: os.makedirs("../analysed_data/"+pre_path+parm_name) except: pass def timeseries(pre_path,parm_name,parm_array,parm_format='{:.2E}',post_path='',plot_Tpos=True,plot_Tpro=True,plot_Tneg=True,plot_o2=True,plot_test=True,plot_tot=False,custom_title=None,save=True): fig,ax=plt.subplots(len(parm_array),2,sharex=True,figsize=(10,3*len(parm_array))) i=0 for parm in parm_array: if isinstance(parm,(list,pd.core.series.Series,np.ndarray)): #If the parameter explored is multidimensional string=parm_format.format(parm[0]) for pp in parm[1:]: string+='-'+parm_format.format(pp) else: string=parm_format.format(parm) #print('../raw_output/'+pre_path+parm_name+'/'+post_path+string+'.csv') df=pd.read_csv('../raw_output/'+pre_path+parm_name+'/'+post_path+string+'.csv') ## Plotting Resources if plot_o2: ax[i,1].plot(df.t/24/60,df.o2,color="tab:cyan",label='o2') if plot_test: ax[i,1].plot(df.t/24/60,df.test,color="tab:orange",label='test') ax[i,1].set_ylabel("Resource (proportion)") ax[i,1].legend() ## Plotting Cell Number if plot_Tpos: ax[i,0].plot(df.t/24/60,df.Tpos,color="tab:green",label='T+') if plot_Tpro: ax[i,0].plot(df.t/24/60,df.Tpro,color="tab:blue",label='Tp') if plot_Tneg: ax[i,0].plot(df.t/24/60,df.Tneg,color="tab:red",label='T-') if plot_tot: ax[i,0].plot(df.t/24/60,df.Tpos+df.Tpro+df.Tneg,color="tab:grey",label='Total') ax[i,0].set_ylabel("No of Cells") ax[i,0].legend() if custom_title==None: ax[i,0].set_title(parm_name+'='+string) else: ax[i,0].set_title(custom_title[i]) i+=1 ## Add Xaxis label for the last row only ax[i-1,0].set_xlabel('Time (days)') ax[i-1,1].set_xlabel('Time (days)') fig.tight_layout() if save: fig.savefig('../figures/'+pre_path+parm_name+'/'+post_path+'timeseries.svg') fig.clf() plt.close(fig) def timeseries_split(no_fig,sub_arr_len,pre_path,parm_name,parm_array,parm_format='{:.2E}',post_path='',plot_Tpos=True,plot_Tpro=True,plot_Tneg=True,plot_o2=True,plot_test=True,save=True): if (sub_arr_len*no_fig!=len(parm_array)): print("Wrong Array Length") return for i in range(no_fig): timeseries(pre_path=pre_path,parm_name=parm_name,parm_array=parm_array[i*sub_arr_len:(i+1)*sub_arr_len],parm_format=parm_format,post_path=post_path) os.rename('../figures/'+pre_path+parm_name+'/'+post_path+'timeseries.svg','../figures/'+pre_path+parm_name+'/'+post_path+'timeseries-'+str(i)+'.svg') def eq_values(pre_path,parm_name,parm_array,parm_format='{:.2E}',post_path='',save=True,parm_name_array=None,ttp=False,limit=None): lis=[] eq_col=['o2_eq','test_eq','Tpos_eq','Tpro_eq','Tneg_eq'] ttp_col=['TTE_Tpro','TTP_{}'.format(limit)] if isinstance(parm_array[0],(list,pd.core.series.Series,np.ndarray)): #If the parameter explored is multidimensional for parm in parm_array: string=parm_format.format(parm[0]) for pp in parm[1:]: string+='-'+parm_format.format(pp) #print('../raw_output/'+pre_path+parm_name+'/'+post_path+string+'.csv') df=pd.read_csv('../raw_output/'+pre_path+parm_name+'/'+post_path+string+'.csv') ttp_arr=[] if ttp: ttp_arr=[TTE_Tpro(df),TTP(df,limit)] lis.append(np.concatenate((parm,df.o2.iloc[-1],df.test.iloc[-1],df.Tpos.iloc[-1],df.Tpro.iloc[-1],df.Tneg.iloc[-1],ttp*ttp_arr),axis=None)) columns=np.concatenate((parm_name_array,eq_col,ttp*ttp_col),axis=None) df=pd.DataFrame(lis,columns=columns) else: for parm in parm_array: string=parm_format.format(parm) 
#print('../raw_output/'+pre_path+parm_name+'/'+post_path+string+'.csv') df=
pd.read_csv('../raw_output/'+pre_path+parm_name+'/'+post_path+string+'.csv')
pandas.read_csv
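The second row's target, `pandas.read_csv`, is how `timeseries` and `eq_values` pull each parameter sweep's results back in from `../raw_output/...`. The following self-contained sketch reproduces that load-and-summarise step while assuming nothing about the real files beyond the column names used in the plotting code; the in-memory CSV and its numbers are invented:

import io
import pandas as pd

# Stand-in for one '../raw_output/.../<parm>.csv' file; only the column
# names (t, o2, test, Tpos, Tpro, Tneg) follow the script above.
csv_text = io.StringIO(
    "t,o2,test,Tpos,Tpro,Tneg\n"
    "0,1.0,1.0,100,50,10\n"
    "1440,0.8,0.9,120,60,12\n"
    "2880,0.7,0.85,140,70,15\n"
)

df = pd.read_csv(csv_text)

# eq_values-style summary: treat the final row as the equilibrium state.
eq = df.iloc[-1][["o2", "test", "Tpos", "Tpro", "Tneg"]]
print(eq)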
# -*- coding: utf-8 -*- """ Created on Thu Nov 12 20:21:46 2015 @author: derrick """ # python 2 and 3 compatibility imports from __future__ import print_function, absolute_import, unicode_literals from __future__ import with_statement, nested_scopes, generators, division import collections import numpy as np import obspy import pandas as pd import scipy import detex from detex.construct import fast_normcorr, multiplex, _applyFilter class _SSDetex(object): """ Private class to run subspace detections or event classifications """ def __init__(self, TRDF, utcStart, utcEnd, cfetcher, clusters, subspaceDB, trigCon, triggerLTATime, triggerSTATime, multiprocess, calcHist, dtype, estimateMags, classifyEvents, eventCorFile, utcSaves, fillZeros, issubspace=True): # Instantiate input varaibles that are needed by many functions self.utcStart = utcStart self.utcEnd = utcEnd self.filt = clusters.filt self.decimate = clusters.decimate self.triggerLTATime = triggerLTATime self.triggerSTATime = triggerSTATime self.calcHist = calcHist self.dtype = dtype self.estimateMags = estimateMags self.eventCorFile = eventCorFile self.utcSaves = utcSaves self.fillZeros = fillZeros self.issubspace = issubspace self.stakey = clusters.stakey self.classifyEvents = classifyEvents self.trigCon = trigCon self.subspaceDB = subspaceDB # set DataFetcher and read classifyEvents key, get data length if classifyEvents is not None: self.eveKey = detex.util.readKey(classifyEvents) fetcher = clusters.fetcher dur = fetcher.timeBeforeOrigin + fetcher.timeAfterOrigin else: fetcher = cfetcher dur = fetcher.conDatDuration + fetcher.conBuff self.fetcher = fetcher self.dataLength = dur # if using utcSavs init list and make sure all inputs are UTCs if utcSaves is not None: if isinstance(utcSaves, collections.Iterable): self.UTCSaveList = [] try: ts = [obspy.UTCDateTime(x).timestamp for x in utcSaves] except ValueError: msg = ('Not all elements in utcSaves are readable by obspy' ' UTCDateTime class') detex.log(__name__, msg, level='error') self.utcSaves = np.array(ts) else: msg = 'UTCSaves must be a list or tupple' detex.log(__name__, msg, level='error') # init histogram stuff if used if calcHist: self.hist = {} self.hist['Bins'] = np.linspace(0, 1, num=401) for sta in TRDF.keys(): # loop through each station DFsta = TRDF[sta] # current row (all ss or singletons on this sta) DFsta.reset_index(inplace=True, drop=True) if len(DFsta) > 0: self.hist[sta] = self._corStations(DFsta, sta) # if classifyEvents was used try to write results to DataFrame if classifyEvents is not None: try: DFeve = pd.concat(self.eventCorList, ignore_index=True) DFeve.to_pickle(self.eventCorFile + '_%s,pkl' % sta) except ValueError: msg = 'classify events failed for %s, skipping' % sta detex.log(__name__, msg, level='warn', pri=True) # If utcSaves was used write results to DataFrame if isinstance(utcSaves, collections.Iterable): try: DFutc = pd.concat(self.UTCSaveList, ignore_index=True) try: # try and read, pass DFutc_current = pd.read_pickle('UTCsaves.pkl') DFutc = DFutc.append(DFutc_current, ignore_index=True) except Exception: pass DFutc.to_pickle('UTCsaves.pkl') except ValueError: msg = 'Failed to save data in utcSaves' detex.log(__name__, msg, level='warning', pri=True) def _corStations(self, DFsta, sta): """ Function to perform subspace detection on a specific station """ # get station key for current station skey = self.stakey stakey = skey[skey.STATION == sta.split('.')[1]] # get chans, sampling rates, and trims channels = _getChannels(DFsta) samplingRates = 
_getSampleRates(DFsta) threshold = {x.Name: x.Threshold for num, x in DFsta.iterrows()} names = DFsta.Name.values names.sort() # make sure samp rate and chans is kosher, get trims if channels is None or samplingRates is None: return None samplingRate = samplingRates[0] contrim = self._getConTrims(DFsta, channels, samplingRate) # Proceed to subspace operations histdict = self._corDat(threshold, sta, channels, contrim, names, DFsta, samplingRate, stakey) return histdict def _corDat(self, threshold, sta, channels, contrim, names, DFsta, samplingRate, stakey): """ Function to perform subspace detection (sub function of _corStations) """ # init various parameters numdets = 0 # counter for number of detections tableName = 'ss_df' if self.issubspace else 'sg_df' DF = pd.DataFrame() # DF for results, dumped to SQL database histdic = {na: [0.0] * (len(self.hist['Bins']) - 1) for na in names} nc = len(channels) lso = self._loadMPSubSpace(DFsta, sta, channels, samplingRate, True) ssTD, ssFD, reqlen, offsets, mags, ewf, events, WFU, UtU = lso if self.classifyEvents is not None: datGen = self.fetcher.getTemData(self.evekey, stakey) else: datGen = self.fetcher.getConData(stakey, utcstart=self.utcStart, utcend=self.utcEnd, returnTimes=True) for st, utc1, utc2 in datGen: # loop each data chunk msg = 'starting on sta %s from %s to %s' % (sta, utc1, utc2) detex.log(__name__, msg, level='info') if st is None or len(st) < 1: msg = 'could not get data on %s from %s to %s' % ( stakey.STATION.iloc[0], utc1, utc2) detex.log(__name__, msg, level='warning', pri=True) continue # make dataframe with info for each hour (including det. stats.) CorDF, MPcon, ConDat = self._getRA(ssTD, ssFD, st, nc, reqlen, contrim, names, sta) # if something is broken skip hours if CorDF is None or MPcon is None: msg = (('failing to run detector on %s from %s to %s ') % (sta, utc1, utc2)) detex.log(__name__, msg, level='warning', pri=True) continue # iterate through each subspace/single for name, row in CorDF.iterrows(): if self.calcHist and len(CorDF) > 0: try: hg = np.histogram(row.SSdetect, bins=self.hist['Bins']) histdic[name] = histdic[name] + hg[0] except Exception: msg = (('binning failed on %s for %s from %s to %s') % (sta, name, utc1, utc2)) detex.log(__name__, msg, level='warning') if isinstance(self.utcSaves, collections.Iterable): self._makeUTCSaveDF(row, name, threshold, sta, offsets, mags, ewf, MPcon, events, ssTD) if self._evalTrigCon(row, name, threshold): Sar = self._CreateCoeffArray(row, name, threshold, sta, offsets, mags, ewf, MPcon, events, ssTD, WFU, UtU) # if lots of detections are being made raise warning if len(Sar) > 300: msg = (('over 300 events found in singledata block, on' ' %s form %s to %s perphaps minCoef is too ' 'low?') % (sta, utc1, utc2)) detex.log(__name__, msg, level='warning', pri=True) if any(Sar.DS > 1.05): msg = (('DS values above 1 found in sar, at %s on %s ' 'this can happen when fillZeros==True, removing' ' values above 1') % (utc1, st[0].stats.station)) detex.log(__name__, msg, level='warn', pri=True) Sar = Sar[Sar.DS <= 1.05] if len(Sar) > 0: DF = DF.append(Sar, ignore_index=True) if len(DF) > 500: detex.util.saveSQLite(DF, self.subspaceDB, tableName) DF = pd.DataFrame() numdets += 500 if len(DF) > 0: detex.util.saveSQLite(DF, self.subspaceDB, tableName) detType = 'Subspaces' if self.issubspace else 'Singletons' msg = (('%s on %s completed, %d potential detection(s) recorded') % (detType, sta, len(DF) + numdets)) detex.log(__name__, msg, pri=1) if self.calcHist: return histdic def 
_getRA(self, ssTD, ssFD, st, Nc, reqlen, contrim, names, sta): """ Function to make DataFrame of this datachunk with all subspaces and singles that act on it """ cols = ['SSdetect', 'STALTA', 'TimeStamp', 'SampRate', 'MaxDS', 'MaxSTALTA', 'Nc', 'File'] CorDF = pd.DataFrame(index=names, columns=cols) utc1 = st[0].stats.starttime utc2 = st[0].stats.endtime try: conSt = _applyFilter(st, self.filt, self.decimate, self.dtype, fillZeros=self.fillZeros) except Exception: msg = 'failed to filter %s, skipping' % st detex.log(__name__, msg, level='warning', pri=True) return None, None, None if len(conSt) < 1: return None, None, None sr = conSt[0].stats.sampling_rate CorDF.SampRate = sr MPcon, ConDat, TR = multiplex(conSt, Nc, returnlist=True, retst=True) CorDF.TimeStamp = TR[0].stats.starttime.timestamp if isinstance(contrim, dict): ctrim = np.median(contrim.values()) else: ctrim = contrim # Trim continuous data to avoid overlap if ctrim < 0: MPconcur = MPcon[:len(MPcon) - int(ctrim * sr * Nc)] else: MPconcur = MPcon # get freq. domain rep of data rele = 2 ** np.max(reqlen.values()).bit_length() MPconFD = scipy.fftpack.fft(MPcon, n=rele) # loop through each subpsace/single and calc sd for ind, row in CorDF.iterrows(): # make sure the template is shorter than continuous data else skip if len(MPcon) <= np.max(np.shape(ssTD[ind])): msg = ('current data block on %s ranging from %s to %s is ' 'shorter than %s, skipping') % (sta, utc1, utc2, ind) detex.log(__name__, msg, level='warning') return None, None, None ssd = self._MPXDS(MPconcur, reqlen[ind], ssTD[ind], ssFD[ind], Nc, MPconFD) CorDF.SSdetect[ind] = ssd # set detection statistic if len(ssd) < 10: msg = ('current data block on %s ranging from %s to %s is too ' 'short, skipping') % (sta, utc1, utc2, ind) detex.log(__name__, msg, level='warning') return None, None, None CorDF.MaxDS[ind] = ssd.max() CorDF.Nc[ind] = Nc # If an infinity value occurs, zero it. if CorDF.MaxDS[ind] > 1.1: ssd[np.isinf(ssd)] = 0 CorDF.SSdetect[ind] = ssd CorDF.MaxDS[ind] = ssd.max() if not self.fillZeros: # dont calculate sta/lta if zerofill used try: CorDF.STALTA[ind] = self._getStaLtaArray( CorDF.SSdetect[ind], self.triggerLTATime * CorDF.SampRate[0], self.triggerSTATime * CorDF.SampRate[0]) CorDF.MaxSTALTA[ind] = CorDF.STALTA[ind].max() except Exception: msg = ('failing to calculate sta/lta of det. statistic' ' on %s for %s start at %s') % (sta, ind, utc1) detex.log(__name__, msg, level='warn') # else: # return None, None, None return CorDF, MPcon, ConDat def _makeUTCSaveDF(self, row, name, threshold, sta, offsets, mags, ewf, MPcon, events, ssTD): """ Function to make utc saves dataframe, which allows times of interest to be saved and examined. Results are appended to UTCSaveList """ TS1 = row.TimeStamp TS2 = row.TimeStamp + len(MPcon) / (row.SampRate * float(row.Nc)) inUTCs = (self.utcSaves > TS1) & (self.utcSaves < TS2) if any(inUTCs): Th = threshold[name] of = offsets[name] dat = [sta, name, Th, of, TS1, TS2, self.utcSaves[inUTCs], MPcon] inds = ['Station', 'Name', 'Threshold', 'offset', 'TS1', 'TS2', 'utcSaves', 'MPcon'] ser = pd.Series(dat, index=inds) df = pd.DataFrame(pd.concat([ser, row])).T self.UTCSaveList.append(df) return # function to load subspace representations def _loadMPSubSpace(self, DFsta, sta, channels, samplingRate, returnFull=False): """ Function to parse out important information from main DataFrame for performing subspace operations. Also recalcs the freq. 
domain rep of each event to the correct length to multiply by the feq domain rep of the continuous data """ # init dicts that can be returned (Keys are subspace/single name) ssTD = {} ssFD = {} rele = {} offsets = {} mags = {} ewf = {} eves = {} WFU = {} UtUdict = {} # variables needed for analysis Nc = len(channels) # num of channels dataLength = self.dataLength # get values and preform calcs for ind, row in DFsta.iterrows(): events = row.Events if self.issubspace: U = np.array([row.SVD[x] for x in row.UsedSVDKeys]) dlen = np.shape(U)[1] if 'Starttime' in row.SampleTrims.keys(): start = row.SampleTrims['Starttime'] end = row.SampleTrims['Endtime'] WFl = [row.AlignedTD[x][start:end] for x in events] WFs = np.array(WFl) else: WFl = [row.AlignedTD[x] for x in events] WFs = np.array(WFl) else: # if single trim and normalize (already done for subspaces) mptd = row.MPtd.values()[0] if row.SampleTrims: # if this is a non empty dict start = row.SampleTrims['Starttime'] end = row.SampleTrims['Endtime'] upr = mptd[start:end] else: upr = mptd U = np.array([x / np.linalg.norm(x) for x in [upr]]) dlen = len(upr) WFs = [upr] UtU = np.dot(np.transpose(U), U) r2d2 = dataLength * samplingRate * Nc # beep beep reqlen = int(r2d2 + dlen) rbi = 2 ** reqlen.bit_length() mpfd = np.array([scipy.fftpack.fft(x[::-1], n=rbi) for x in U]) mag = np.array([row.Stats[x]['magnitude'] for x in events]) # Populate dicts ssFD[row.Name] = mpfd # freq domain of required length ssTD[row.Name] = U # basis vects mags[row.Name] = mag # mag of events eves[row.Name] = events # event names ewf[row.Name] = WFs # event waveforms offsets[row.Name] = row.Offsets # offsets (from eve origin) WFU[row.Name] = np.dot(WFs, UtU) # events projected into subspace UtUdict[row.Name] = UtU # UtU rele[row.Name] = reqlen # required lengths if returnFull: return ssTD, ssFD, rele, offsets, mags, ewf, eves, WFU, UtUdict else: return ssTD, ssFD, rele def _CreateCoeffArray(self, corSeries, name, threshold, sta, offsets, mags, ewf, MPcon, events, ssTD, WFU, UtU): """ function to create an array of results for each detection, including time of detection, estimated magnitude, etc. """ dpv = 0 cols = ['DS', 'DS_STALTA', 'STMP', 'Name', 'Sta', 'MSTAMPmin', 'MSTAMPmax', 'Mag', 'SNR', 'ProEnMag'] sr = corSeries.SampRate # sample rate start = corSeries.TimeStamp # start time of data block # set array to evaluate for successful triggers if self.trigCon == 0: Ceval = corSeries.SSdetect.copy() elif self.trigCon == 1: Ceval = corSeries.STALTA.copy() Sar = pd.DataFrame(columns=cols) count = 0 # while there are any values in the det stat. vect that exceed thresh. 
while Ceval.max() >= threshold[name]: trigIndex = Ceval.argmax() coef = corSeries.SSdetect[trigIndex] times = float(trigIndex) / sr + start if self.fillZeros: # if zeros are being filled dont try STA/LTA SLValue = 0.0 else: try: SLValue = corSeries.STALTA[trigIndex] except TypeError: SLValue = 0.0 Ceval = self._downPlayArrayAroundMax(Ceval, sr, dpv) # estimate mags else return NaNs as mag estimates if self.estimateMags: # estimate magnitudes M1, M2, SNR = self._estMag(trigIndex, corSeries, MPcon, mags[name], events[name], WFU[name], UtU[name], ewf[name], coef, times, name, sta) peMag, stMag = M1, M2 else: peMag, stMag, SNR = np.NaN, np.NaN, np.NaN # kill switch to prevent infinite loop (just in case) if count > 4000: msg = (('over 4000 events found in single data block on %s for' '%s around %s') % (sta, name, times)) detex.log(__name__, msg, level='error') # get predicted origin time ranges minof = np.min(offsets[name]) maxof = np.max(offsets[name]) MSTAMPmax, MSTAMPmin = times - minof, times - maxof Sar.loc[count] = [coef, SLValue, times, name, sta, MSTAMPmin, MSTAMPmax, stMag, SNR, peMag] count += 1 return Sar def _estMag(self, trigIndex, corSeries, MPcon, mags, events, WFU, UtU, ewf, coef, times, name, sta): """ Estimate magnitudes by applying projected subspace mag estimates and standard deviation mag estimates as outlined in Chambers et al. 2015. """ WFlen = np.shape(WFU)[1] # event waveform length nc = corSeries.Nc # number of chans # continuous data chunk that triggered subspace ConDat = MPcon[trigIndex * nc:trigIndex * nc + WFlen] if self.issubspace: # continuous data chunk projected into subspace ssCon = np.dot(UtU, ConDat) # projected energy proEn = np.var(ssCon) / np.var(WFU, axis=1) # Try and estimate pre-event noise level (for estimating SNR) if trigIndex * nc > 5 * WFlen: # take 5x waveform length before event pe = MPcon[trigIndex * nc - 5 * WFlen: trigIndex * nc] rollingstd = pd.rolling_std(pe, WFlen)[WFlen - 1:] else: # if not enough data take 6 times after event pe = MPcon[trigIndex * nc: trigIndex * nc + WFlen + 6 * WFlen] rollingstd = pd.rolling_std(pe, WFlen)[WFlen - 1:] baseNoise = np.median(rollingstd) # take median of std for noise level SNR = np.std(ConDat) / baseNoise # estiamte SNR # ensure mags are greater than -15, else assume no mag value for event touse = mags > -15 if self.issubspace: # if subspace if not any(touse): # if no defined magnitudes avaliable msg = (('No magnitudes above -15 usable for detection at %s on' ' station %s and %s') % (times, sta, name)) detex.log(__name__, msg, level='warn') return np.NaN, np.Nan, SNR else: # correlation coefs between each event and data block ecor = [fast_normcorr(x, ConDat)[0] for x in ewf] eventCors = np.array(ecor) projectedEnergyMags = _estPEMag(mags, proEn, eventCors, touse) stdMags = _estSTDMag(mags, ConDat, ewf, eventCors, touse) else: # if singleton assert len(mags) == 1 if np.isnan(mags[0]) or mags[0] < -15: projectedEnergyMags = np.NaN stdMags = np.NaN else: # use simple waveform scaling if single d1 = np.dot(ConDat, WFU[0]) d2 = np.dot(WFU[0], WFU[0]) projectedEnergyMags = mags[0] + d1 / d2 stdMags = mags[0] + np.log10(np.std(ConDat) / np.std(WFU[0])) return projectedEnergyMags, stdMags, SNR def _getStaLtaArray(self, C, LTA, STA): """ Function to calculate the sta/lta of the detection statistic """ if STA == 0: STA = 1 STArray = np.abs(C) else: STArray = pd.rolling_mean(np.abs(C), STA, center=True) STArray = self._replaceNanWithMean(STArray) LTArray = pd.rolling_mean(np.abs(C), LTA, center=True) LTArray = 
self._replaceNanWithMean(LTArray) out = np.divide(STArray, LTArray) return out def _replaceNanWithMean(self, arg): """ Function to replace any NaN values in sta/lta array with mean """ ind = np.where(~np.isnan(arg))[0] first, last = ind[0], ind[-1] arg[:first] = arg[first + 1] arg[last + 1:] = arg[last] return arg def _evalTrigCon(self, Corrow, name, threshold, returnValue=False): """ Evaluate if Trigger condition is met and return True or False. Also return detection statistic value if returnValue==True """ Out = False if self.trigCon == 0: trig = Corrow.MaxDS if trig > threshold[name]: Out = True elif self.trigCon == 1: trig = Corrow.maxSTALTA if trig > threshold[name]: Out = True if returnValue: return trig if not returnValue: return Out def _downPlayArrayAroundMax(self, C, sr, dpv, buff=20): """ function to zero out det. stat. array around where max occurs, important to avoid counting side lobs as detections. """ index = C.argmax() if index < buff * sr + 1: C[0:int(index + buff * sr)] = dpv elif index > len(C) - buff * sr: C[int(index - sr * buff):] = dpv else: C[int(index - sr * buff):int(sr * buff + index)] = dpv return C def _MPXDS(self, MPcon, reqlen, ssTD, ssFD, Nc, MPconFD): """ Function to preform subspace detection on multiplexed data MPcon is time domain rep of data block, MPconFD is freq. domain, ssTD is time domain rep of subspace, ssFD id freq domain rep, Nc is the number of channels in the multiplexed stream """ n = np.int32(np.shape(ssTD)[1]) # length of each basis vector a =
pd.rolling_mean(MPcon, n)
pandas.rolling_mean
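The third row's completion, `pd.rolling_mean(MPcon, n)`, uses the module-level rolling helpers that detex was written against; these were deprecated in the pandas 0.18 series and removed in later releases, so the snippet will not run on a current install. A small sketch, on random stand-in data, of the equivalent call via the `.rolling()` accessor:

import numpy as np
import pandas as pd

MPcon = np.random.default_rng(0).standard_normal(1000)  # stand-in data block
n = 50                                                  # basis-vector length

# Old module-level API used in the detex source (removed from modern pandas):
#   a = pd.rolling_mean(MPcon, n)
# Equivalent computation today: wrap the array in a Series and use .rolling().
a = pd.Series(MPcon).rolling(n).mean().to_numpy()

# As with pd.rolling_mean, the first n-1 entries are NaN.
print(a[n - 2], a[n - 1])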
import itertools from typing import List, Optional, Union import numpy as np import pandas._libs.algos as libalgos import pandas._libs.reshape as libreshape from pandas._libs.sparse import IntIndex from pandas.util._decorators import cache_readonly from pandas.core.dtypes.cast import maybe_promote from pandas.core.dtypes.common import ( ensure_platform_int, is_bool_dtype, is_extension_array_dtype, is_integer, is_integer_dtype, is_list_like, is_object_dtype, needs_i8_conversion, ) from pandas.core.dtypes.missing import notna import pandas.core.algorithms as algos from pandas.core.arrays import SparseArray from pandas.core.arrays.categorical import factorize_from_iterable from pandas.core.construction import extract_array from pandas.core.frame import DataFrame from pandas.core.indexes.api import Index, MultiIndex from pandas.core.series import Series from pandas.core.sorting import ( compress_group_index, decons_obs_group_ids, get_compressed_ids, get_group_index, ) class _Unstacker: """ Helper class to unstack data / pivot with multi-level index Parameters ---------- index : object Pandas ``Index`` level : int or str, default last level Level to "unstack". Accepts a name for the level. fill_value : scalar, optional Default value to fill in missing values if subgroups do not have the same set of labels. By default, missing values will be replaced with the default fill value for that data type, NaN for float, NaT for datetimelike, etc. For integer types, by default data will converted to float and missing values will be set to NaN. constructor : object Pandas ``DataFrame`` or subclass used to create unstacked response. If None, DataFrame will be used. Examples -------- >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), ... ('two', 'a'), ('two', 'b')]) >>> s = pd.Series(np.arange(1, 5, dtype=np.int64), index=index) >>> s one a 1 b 2 two a 3 b 4 dtype: int64 >>> s.unstack(level=-1) a b one 1 2 two 3 4 >>> s.unstack(level=0) one two a 1 3 b 2 4 Returns ------- unstacked : DataFrame """ def __init__( self, index, level=-1, constructor=None, ): if constructor is None: constructor = DataFrame self.constructor = constructor self.index = index.remove_unused_levels() self.level = self.index._get_level_number(level) # when index includes `nan`, need to lift levels/strides by 1 self.lift = 1 if -1 in self.index.codes[self.level] else 0 # Note: the "pop" below alters these in-place. self.new_index_levels = list(self.index.levels) self.new_index_names = list(self.index.names) self.removed_name = self.new_index_names.pop(self.level) self.removed_level = self.new_index_levels.pop(self.level) self.removed_level_full = index.levels[self.level] # Bug fix GH 20601 # If the data frame is too big, the number of unique index combination # will cause int32 overflow on windows environments. # We want to check and raise an error before this happens num_rows = np.max([index_level.size for index_level in self.new_index_levels]) num_columns = self.removed_level.size # GH20601: This forces an overflow if the number of cells is too high. 
num_cells = np.multiply(num_rows, num_columns, dtype=np.int32) if num_rows > 0 and num_columns > 0 and num_cells <= 0: raise ValueError("Unstacked DataFrame is too big, causing int32 overflow") self._make_selectors() @cache_readonly def _indexer_and_to_sort(self): v = self.level codes = list(self.index.codes) levs = list(self.index.levels) to_sort = codes[:v] + codes[v + 1 :] + [codes[v]] sizes = [len(x) for x in levs[:v] + levs[v + 1 :] + [levs[v]]] comp_index, obs_ids = get_compressed_ids(to_sort, sizes) ngroups = len(obs_ids) indexer = libalgos.groupsort_indexer(comp_index, ngroups)[0] indexer = ensure_platform_int(indexer) return indexer, to_sort @cache_readonly def sorted_labels(self): indexer, to_sort = self._indexer_and_to_sort return [l.take(indexer) for l in to_sort] def _make_sorted_values(self, values): indexer, _ = self._indexer_and_to_sort sorted_values = algos.take_nd(values, indexer, axis=0) return sorted_values def _make_selectors(self): new_levels = self.new_index_levels # make the mask remaining_labels = self.sorted_labels[:-1] level_sizes = [len(x) for x in new_levels] comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes) ngroups = len(obs_ids) comp_index = ensure_platform_int(comp_index) stride = self.index.levshape[self.level] + self.lift self.full_shape = ngroups, stride selector = self.sorted_labels[-1] + stride * comp_index + self.lift mask = np.zeros(np.prod(self.full_shape), dtype=bool) mask.put(selector, True) if mask.sum() < len(self.index): raise ValueError("Index contains duplicate entries, cannot reshape") self.group_index = comp_index self.mask = mask self.unique_groups = obs_ids self.compressor = comp_index.searchsorted(np.arange(ngroups)) def get_result(self, values, value_columns, fill_value): if values.ndim == 1: values = values[:, np.newaxis] if value_columns is None and values.shape[1] != 1: # pragma: no cover raise ValueError("must pass column labels for multi-column data") values, _ = self.get_new_values(values, fill_value) columns = self.get_new_columns(value_columns) index = self.new_index return self.constructor(values, index=index, columns=columns) def get_new_values(self, values, fill_value=None): if values.ndim == 1: values = values[:, np.newaxis] sorted_values = self._make_sorted_values(values) # place the values length, width = self.full_shape stride = values.shape[1] result_width = width * stride result_shape = (length, result_width) mask = self.mask mask_all = mask.all() # we can simply reshape if we don't have a mask if mask_all and len(values): new_values = ( sorted_values.reshape(length, width, stride) .swapaxes(1, 2) .reshape(result_shape) ) new_mask = np.ones(result_shape, dtype=bool) return new_values, new_mask # if our mask is all True, then we can use our existing dtype if mask_all: dtype = values.dtype new_values = np.empty(result_shape, dtype=dtype) else: dtype, fill_value = maybe_promote(values.dtype, fill_value) new_values = np.empty(result_shape, dtype=dtype) new_values.fill(fill_value) new_mask = np.zeros(result_shape, dtype=bool) name = np.dtype(dtype).name # we need to convert to a basic dtype # and possibly coerce an input to our output dtype # e.g. 
ints -> floats if needs_i8_conversion(values): sorted_values = sorted_values.view("i8") new_values = new_values.view("i8") elif is_bool_dtype(values): sorted_values = sorted_values.astype("object") new_values = new_values.astype("object") else: sorted_values = sorted_values.astype(name, copy=False) # fill in our values & mask libreshape.unstack( sorted_values, mask.view("u1"), stride, length, width, new_values, new_mask.view("u1"), ) # reconstruct dtype if needed if needs_i8_conversion(values): new_values = new_values.view(values.dtype) return new_values, new_mask def get_new_columns(self, value_columns): if value_columns is None: if self.lift == 0: return self.removed_level._shallow_copy(name=self.removed_name) lev = self.removed_level.insert(0, item=self.removed_level._na_value) return lev.rename(self.removed_name) stride = len(self.removed_level) + self.lift width = len(value_columns) propagator = np.repeat(np.arange(width), stride) if isinstance(value_columns, MultiIndex): new_levels = value_columns.levels + (self.removed_level_full,) new_names = value_columns.names + (self.removed_name,) new_codes = [lab.take(propagator) for lab in value_columns.codes] else: new_levels = [value_columns, self.removed_level_full] new_names = [value_columns.name, self.removed_name] new_codes = [propagator] # The two indices differ only if the unstacked level had unused items: if len(self.removed_level_full) != len(self.removed_level): # In this case, we remap the new codes to the original level: repeater = self.removed_level_full.get_indexer(self.removed_level) if self.lift: repeater = np.insert(repeater, 0, -1) else: # Otherwise, we just use each level item exactly once: repeater = np.arange(stride) - self.lift # The entire level is then just a repetition of the single chunk: new_codes.append(np.tile(repeater, width)) return MultiIndex( levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False ) @cache_readonly def new_index(self): # Does not depend on values or value_columns result_codes = [lab.take(self.compressor) for lab in self.sorted_labels[:-1]] # construct the new index if len(self.new_index_levels) == 1: level, level_codes = self.new_index_levels[0], result_codes[0] if (level_codes == -1).any(): level = level.insert(len(level), level._na_value) return level.take(level_codes).rename(self.new_index_names[0]) return MultiIndex( levels=self.new_index_levels, codes=result_codes, names=self.new_index_names, verify_integrity=False, ) def _unstack_multiple(data, clocs, fill_value=None): if len(clocs) == 0: return data # NOTE: This doesn't deal with hierarchical columns yet index = data.index # GH 19966 Make sure if MultiIndexed index has tuple name, they will be # recognised as a whole if clocs in index.names: clocs = [clocs] clocs = [index._get_level_number(i) for i in clocs] rlocs = [i for i in range(index.nlevels) if i not in clocs] clevels = [index.levels[i] for i in clocs] ccodes = [index.codes[i] for i in clocs] cnames = [index.names[i] for i in clocs] rlevels = [index.levels[i] for i in rlocs] rcodes = [index.codes[i] for i in rlocs] rnames = [index.names[i] for i in rlocs] shape = [len(x) for x in clevels] group_index = get_group_index(ccodes, shape, sort=False, xnull=False) comp_ids, obs_ids = compress_group_index(group_index, sort=False) recons_codes = decons_obs_group_ids(comp_ids, obs_ids, shape, ccodes, xnull=False) if not rlocs: # Everything is in clocs, so the dummy df has a regular index dummy_index = Index(obs_ids, name="__placeholder__") else: dummy_index = 
MultiIndex( levels=rlevels + [obs_ids], codes=rcodes + [comp_ids], names=rnames + ["__placeholder__"], verify_integrity=False, ) if isinstance(data, Series): dummy = data.copy() dummy.index = dummy_index unstacked = dummy.unstack("__placeholder__", fill_value=fill_value) new_levels = clevels new_names = cnames new_codes = recons_codes else: if isinstance(data.columns, MultiIndex): result = data for i in range(len(clocs)): val = clocs[i] result = result.unstack(val, fill_value=fill_value) clocs = [v if v < val else v - 1 for v in clocs] return result dummy = data.copy() dummy.index = dummy_index unstacked = dummy.unstack("__placeholder__", fill_value=fill_value) if isinstance(unstacked, Series): unstcols = unstacked.index else: unstcols = unstacked.columns assert isinstance(unstcols, MultiIndex) # for mypy new_levels = [unstcols.levels[0]] + clevels new_names = [data.columns.name] + cnames new_codes = [unstcols.codes[0]] for rec in recons_codes: new_codes.append(rec.take(unstcols.codes[-1])) new_columns = MultiIndex( levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False ) if isinstance(unstacked, Series): unstacked.index = new_columns else: unstacked.columns = new_columns return unstacked def unstack(obj, level, fill_value=None): if isinstance(level, (tuple, list)): if len(level) != 1: # _unstack_multiple only handles MultiIndexes, # and isn't needed for a single level return _unstack_multiple(obj, level, fill_value=fill_value) else: level = level[0] # Prioritize integer interpretation (GH #21677): if not is_integer(level) and not level == "__placeholder__": level = obj.index._get_level_number(level) if isinstance(obj, DataFrame): if isinstance(obj.index, MultiIndex): return _unstack_frame(obj, level, fill_value=fill_value) else: return obj.T.stack(dropna=False) else: if is_extension_array_dtype(obj.dtype): return _unstack_extension_series(obj, level, fill_value) unstacker = _Unstacker( obj.index, level=level, constructor=obj._constructor_expanddim, ) return unstacker.get_result( obj.values, value_columns=None, fill_value=fill_value ) def _unstack_frame(obj, level, fill_value=None): if obj._is_mixed_type: unstacker = _Unstacker(obj.index, level=level) blocks = obj._mgr.unstack(unstacker, fill_value=fill_value) return obj._constructor(blocks) else: return _Unstacker( obj.index, level=level, constructor=obj._constructor, ).get_result(obj.values, value_columns=obj.columns, fill_value=fill_value) def _unstack_extension_series(series, level, fill_value): """ Unstack an ExtensionArray-backed Series. The ExtensionDtype is preserved. Parameters ---------- series : Series A Series with an ExtensionArray for values level : Any The level name or number. fill_value : Any The user-level (not physical storage) fill value to use for missing values introduced by the reshape. Passed to ``series.values.take``. Returns ------- DataFrame Each column of the DataFrame will have the same dtype as the input Series. """ # Implementation note: the basic idea is to # 1. Do a regular unstack on a dummy array of integers # 2. Followup with a columnwise take. # We use the dummy take to discover newly-created missing values # introduced by the reshape. 
from pandas.core.reshape.concat import concat dummy_arr = np.arange(len(series)) # fill_value=-1, since we will do a series.values.take later result = _Unstacker(series.index, level=level).get_result( dummy_arr, value_columns=None, fill_value=-1 ) out = [] values = extract_array(series, extract_numpy=False) for col, indices in result.items(): out.append( Series( values.take(indices.values, allow_fill=True, fill_value=fill_value), name=col, index=result.index, ) ) return concat(out, axis="columns", copy=False, keys=result.columns) def stack(frame, level=-1, dropna=True): """ Convert DataFrame to Series with multi-level Index. Columns become the second level of the resulting hierarchical index Returns ------- stacked : Series """ def factorize(index): if index.is_unique: return index, np.arange(len(index)) codes, categories = factorize_from_iterable(index) return categories, codes N, K = frame.shape # Will also convert negative level numbers and check if out of bounds. level_num = frame.columns._get_level_number(level) if isinstance(frame.columns, MultiIndex): return _stack_multi_columns(frame, level_num=level_num, dropna=dropna) elif isinstance(frame.index, MultiIndex): new_levels = list(frame.index.levels) new_codes = [lab.repeat(K) for lab in frame.index.codes] clev, clab = factorize(frame.columns) new_levels.append(clev) new_codes.append(np.tile(clab, N).ravel()) new_names = list(frame.index.names) new_names.append(frame.columns.name) new_index = MultiIndex( levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False ) else: levels, (ilab, clab) = zip(*map(factorize, (frame.index, frame.columns))) codes = ilab.repeat(K), np.tile(clab, N).ravel() new_index = MultiIndex( levels=levels, codes=codes, names=[frame.index.name, frame.columns.name], verify_integrity=False, ) if frame._is_homogeneous_type: # For homogeneous EAs, frame._values will coerce to object. So # we concatenate instead. 
dtypes = list(frame.dtypes._values) dtype = dtypes[0] if is_extension_array_dtype(dtype): arr = dtype.construct_array_type() new_values = arr._concat_same_type( [col._values for _, col in frame.items()] ) new_values = _reorder_for_extension_array_stack(new_values, N, K) else: # homogeneous, non-EA new_values = frame._values.ravel() else: # non-homogeneous new_values = frame._values.ravel() if dropna: mask = notna(new_values) new_values = new_values[mask] new_index = new_index[mask] return frame._constructor_sliced(new_values, index=new_index) def stack_multiple(frame, level, dropna=True): # If all passed levels match up to column names, no # ambiguity about what to do if all(lev in frame.columns.names for lev in level): result = frame for lev in level: result = stack(result, lev, dropna=dropna) # Otherwise, level numbers may change as each successive level is stacked elif all(isinstance(lev, int) for lev in level): # As each stack is done, the level numbers decrease, so we need # to account for that when level is a sequence of ints result = frame # _get_level_number() checks level numbers are in range and converts # negative numbers to positive level = [frame.columns._get_level_number(lev) for lev in level] # Can't iterate directly through level as we might need to change # values as we go for index in range(len(level)): lev = level[index] result = stack(result, lev, dropna=dropna) # Decrement all level numbers greater than current, as these # have now shifted down by one updated_level = [] for other in level: if other > lev: updated_level.append(other - 1) else: updated_level.append(other) level = updated_level else: raise ValueError( "level should contain all level names or all level " "numbers, not a mixture of the two." ) return result def _stack_multi_columns(frame, level_num=-1, dropna=True): def _convert_level_number(level_num, columns): """ Logic for converting the level number to something we can safely pass to swaplevel: We generally want to convert the level number into a level name, except when columns do not have names, in which case we must leave as a level number """ if level_num in columns.names: return columns.names[level_num] else: if columns.names[level_num] is None: return level_num else: return columns.names[level_num] this = frame.copy() # this makes life much simpler if level_num != frame.columns.nlevels - 1: # roll levels to put selected level at end roll_columns = this.columns for i in range(level_num, frame.columns.nlevels - 1): # Need to check if the ints conflict with level names lev1 = _convert_level_number(i, roll_columns) lev2 = _convert_level_number(i + 1, roll_columns) roll_columns = roll_columns.swaplevel(lev1, lev2) this.columns = roll_columns if not this.columns.is_lexsorted(): # Workaround the edge case where 0 is one of the column names, # which interferes with trying to sort based on the first # level level_to_sort = _convert_level_number(0, this.columns) this = this.sort_index(level=level_to_sort, axis=1) # tuple list excluding level for grouping columns if len(frame.columns.levels) > 2: tuples = list( zip( *[ lev.take(level_codes) for lev, level_codes in zip( this.columns.levels[:-1], this.columns.codes[:-1] ) ] ) ) unique_groups = [key for key, _ in itertools.groupby(tuples)] new_names = this.columns.names[:-1] new_columns = MultiIndex.from_tuples(unique_groups, names=new_names) else: new_columns = this.columns.levels[0]._shallow_copy(name=this.columns.names[0]) unique_groups = new_columns # time to ravel the values new_data = {} level_vals = 
this.columns.levels[-1] level_codes = sorted(set(this.columns.codes[-1])) level_vals_used = level_vals[level_codes] levsize = len(level_codes) drop_cols = [] for key in unique_groups: try: loc = this.columns.get_loc(key) except KeyError: drop_cols.append(key) continue # can make more efficient? # we almost always return a slice # but if unsorted can get a boolean # indexer if not isinstance(loc, slice): slice_len = len(loc) else: slice_len = loc.stop - loc.start if slice_len != levsize: chunk = this.loc[:, this.columns[loc]] chunk.columns = level_vals.take(chunk.columns.codes[-1]) value_slice = chunk.reindex(columns=level_vals_used).values else: if frame._is_homogeneous_type and is_extension_array_dtype( frame.dtypes.iloc[0] ): dtype = this[this.columns[loc]].dtypes.iloc[0] subset = this[this.columns[loc]] value_slice = dtype.construct_array_type()._concat_same_type( [x._values for _, x in subset.items()] ) N, K = this.shape idx = np.arange(N * K).reshape(K, N).T.ravel() value_slice = value_slice.take(idx) elif frame._is_mixed_type: value_slice = this[this.columns[loc]].values else: value_slice = this.values[:, loc] if value_slice.ndim > 1: # i.e. not extension value_slice = value_slice.ravel() new_data[key] = value_slice if len(drop_cols) > 0: new_columns = new_columns.difference(drop_cols) N = len(this) if isinstance(this.index, MultiIndex): new_levels = list(this.index.levels) new_names = list(this.index.names) new_codes = [lab.repeat(levsize) for lab in this.index.codes] else: old_codes, old_levels = factorize_from_iterable(this.index) new_levels = [old_levels] new_codes = [old_codes.repeat(levsize)] new_names = [this.index.name] # something better? new_levels.append(level_vals) new_codes.append(np.tile(level_codes, N)) new_names.append(frame.columns.names[level_num]) new_index = MultiIndex( levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False ) result = frame._constructor(new_data, index=new_index, columns=new_columns) # more efficient way to go about this? can do the whole masking biz but # will only save a small amount of time... if dropna: result = result.dropna(axis=0, how="all") return result def get_dummies( data, prefix=None, prefix_sep="_", dummy_na=False, columns=None, sparse=False, drop_first=False, dtype=None, ) -> "DataFrame": """ Convert categorical variable into dummy/indicator variables. Parameters ---------- data : array-like, Series, or DataFrame Data of which to get dummy indicators. prefix : str, list of str, or dict of str, default None String to append DataFrame column names. Pass a list with length equal to the number of columns when calling get_dummies on a DataFrame. Alternatively, `prefix` can be a dictionary mapping column names to prefixes. prefix_sep : str, default '_' If appending prefix, separator/delimiter to use. Or pass a list or dictionary as with `prefix`. dummy_na : bool, default False Add a column to indicate NaNs, if False NaNs are ignored. columns : list-like, default None Column names in the DataFrame to be encoded. If `columns` is None then all the columns with `object` or `category` dtype will be converted. sparse : bool, default False Whether the dummy-encoded columns should be backed by a :class:`SparseArray` (True) or a regular NumPy array (False). drop_first : bool, default False Whether to get k-1 dummies out of k categorical levels by removing the first level. dtype : dtype, default np.uint8 Data type for new columns. Only a single dtype is allowed. .. 
versionadded:: 0.23.0 Returns ------- DataFrame Dummy-coded data. See Also -------- Series.str.get_dummies : Convert Series to dummy codes. Examples -------- >>> s = pd.Series(list('abca')) >>> pd.get_dummies(s) a b c 0 1 0 0 1 0 1 0 2 0 0 1 3 1 0 0 >>> s1 = ['a', 'b', np.nan] >>> pd.get_dummies(s1) a b 0 1 0 1 0 1 2 0 0 >>> pd.get_dummies(s1, dummy_na=True) a b NaN 0 1 0 0 1 0 1 0 2 0 0 1 >>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'], ... 'C': [1, 2, 3]}) >>> pd.get_dummies(df, prefix=['col1', 'col2']) C col1_a col1_b col2_a col2_b col2_c 0 1 1 0 0 1 0 1 2 0 1 1 0 0 2 3 1 0 0 0 1 >>> pd.get_dummies(pd.Series(list('abcaa'))) a b c 0 1 0 0 1 0 1 0 2 0 0 1 3 1 0 0 4 1 0 0 >>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True) b c 0 0 0 1 1 0 2 0 1 3 0 0 4 0 0 >>> pd.get_dummies(pd.Series(list('abc')), dtype=float) a b c 0 1.0 0.0 0.0 1 0.0 1.0 0.0 2 0.0 0.0 1.0 """ from pandas.core.reshape.concat import concat dtypes_to_encode = ["object", "category"] if isinstance(data, DataFrame): # determine columns being encoded if columns is None: data_to_encode = data.select_dtypes(include=dtypes_to_encode) elif not is_list_like(columns): raise TypeError("Input must be a list-like for parameter `columns`") else: data_to_encode = data[columns] # validate prefixes and separator to avoid silently dropping cols def check_len(item, name): if is_list_like(item): if not len(item) == data_to_encode.shape[1]: len_msg = ( f"Length of '{name}' ({len(item)}) did not match the " "length of the columns being encoded " f"({data_to_encode.shape[1]})." ) raise ValueError(len_msg) check_len(prefix, "prefix") check_len(prefix_sep, "prefix_sep") if isinstance(prefix, str): prefix = itertools.cycle([prefix]) if isinstance(prefix, dict): prefix = [prefix[col] for col in data_to_encode.columns] if prefix is None: prefix = data_to_encode.columns # validate separators if isinstance(prefix_sep, str): prefix_sep = itertools.cycle([prefix_sep]) elif isinstance(prefix_sep, dict): prefix_sep = [prefix_sep[col] for col in data_to_encode.columns] with_dummies: List[DataFrame] if data_to_encode.shape == data.shape: # Encoding the entire df, do not prepend any dropped columns with_dummies = [] elif columns is not None: # Encoding only cols specified in columns. Get all cols not in # columns to prepend to result. with_dummies = [data.drop(columns, axis=1)] else: # Encoding only object and category dtype columns. Get remaining # columns to prepend to result. 
with_dummies = [data.select_dtypes(exclude=dtypes_to_encode)] for (col, pre, sep) in zip(data_to_encode.items(), prefix, prefix_sep): # col is (column_name, column), use just column data here dummy = _get_dummies_1d( col[1], prefix=pre, prefix_sep=sep, dummy_na=dummy_na, sparse=sparse, drop_first=drop_first, dtype=dtype, ) with_dummies.append(dummy) result = concat(with_dummies, axis=1) else: result = _get_dummies_1d( data, prefix, prefix_sep, dummy_na, sparse=sparse, drop_first=drop_first, dtype=dtype, ) return result def _get_dummies_1d( data, prefix, prefix_sep="_", dummy_na=False, sparse=False, drop_first=False, dtype=None, ): from pandas.core.reshape.concat import concat # Series avoids inconsistent NaN handling codes, levels = factorize_from_iterable(Series(data)) if dtype is None: dtype = np.uint8 dtype = np.dtype(dtype) if is_object_dtype(dtype): raise ValueError("dtype=object is not a valid dtype for get_dummies") def get_empty_frame(data) -> DataFrame: if isinstance(data, Series): index = data.index else: index = np.arange(len(data)) return DataFrame(index=index) # if all NaN if not dummy_na and len(levels) == 0: return get_empty_frame(data) codes = codes.copy() if dummy_na: codes[codes == -1] = len(levels) levels = np.append(levels, np.nan) # if dummy_na, we just fake a nan level. drop_first will drop it again if drop_first and len(levels) == 1: return get_empty_frame(data) number_of_cols = len(levels) if prefix is None: dummy_cols = levels else: dummy_cols = [f"{prefix}{prefix_sep}{level}" for level in levels] index: Optional[Index] if isinstance(data, Series): index = data.index else: index = None if sparse: fill_value: Union[bool, float, int] if is_integer_dtype(dtype): fill_value = 0 elif dtype == bool: fill_value = False else: fill_value = 0.0 sparse_series = [] N = len(data) sp_indices: List[List] = [[] for _ in range(len(dummy_cols))] mask = codes != -1 codes = codes[mask] n_idx = np.arange(N)[mask] for ndx, code in zip(n_idx, codes): sp_indices[code].append(ndx) if drop_first: # remove first categorical level to avoid perfect collinearity # GH12042 sp_indices = sp_indices[1:] dummy_cols = dummy_cols[1:] for col, ixs in zip(dummy_cols, sp_indices): sarr = SparseArray( np.ones(len(ixs), dtype=dtype), sparse_index=IntIndex(N, ixs), fill_value=fill_value, dtype=dtype, ) sparse_series.append(
Series(data=sarr, index=index, name=col)
pandas.core.series.Series
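For reference, a loose, self-contained sketch of what the completed call builds: a Series whose values are a SparseArray, as also produced by the public pd.get_dummies(..., sparse=True) path. The toy series and the hand-built 'a' column below are illustrative only, and the printed dtype string varies between pandas versions.

import numpy as np
import pandas as pd
from pandas.arrays import SparseArray

# Public entry point: each dummy column is backed by a SparseArray.
s = pd.Series(list("abca"))
dummies = pd.get_dummies(s, sparse=True)
print(dummies.dtypes)  # e.g. Sparse[uint8, 0] per level (exact repr is version dependent)

# Hand-built analogue of one appended column ('a' occurs at rows 0 and 3);
# the source above builds the same length-4 sparse column via IntIndex instead.
sarr = SparseArray(np.array([1, 0, 0, 1], dtype=np.uint8), fill_value=0)
col_a = pd.Series(data=sarr, index=s.index, name="a")
print(col_a)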
import boto3 import json import os import requests import pandas as pd import warnings from pandas import json_normalize from github import Github warnings.filterwarnings('ignore') bucket = 'wmwaredata' fileName = 'gw_releases.json' s3 = boto3.client('s3') git_token = os.getenv('GIT_TOKEN') git_headers = {'Authorization': f'token {git_token}'} g = Github(os.getenv('GIT_TOKEN')) class GetRelease(): def releases(self): # Listing repos org = g.get_organization("k8-proxy") all_repos = org.get_repos() repos = [] for repo in all_repos: myrepo = repo.id, repo.name, repo.html_url repos.append(myrepo) df1 = pd.DataFrame(repos) df1.columns = ['id', 'name', 'repo_url'] # Getting release data url = f'https://api.github.com/repos/k8-proxy/GW-Releases/contents' res = requests.get(url, headers=git_headers).json() data1 = json_normalize(res, max_level=1) dft =
pd.DataFrame(data1)
pandas.DataFrame
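The release-listing script above ends by wrapping a json_normalize result in a DataFrame. The offline sketch below reproduces that step on a made-up payload whose field names only roughly mimic the GitHub contents endpoint; no token or network call is involved.

import pandas as pd
from pandas import json_normalize

# Illustrative payload only; the real script normalizes the live API response.
res = [
    {"name": "README.md", "sha": "abc123",
     "_links": {"self": "https://api.github.com/...", "html": "https://github.com/..."}},
    {"name": "manifest.yaml", "sha": "def456",
     "_links": {"self": "https://api.github.com/...", "html": "https://github.com/..."}},
]

data1 = json_normalize(res, max_level=1)   # nested "_links" dict flattened one level
dft = pd.DataFrame(data1)                  # the completed call
print(dft.columns.tolist())                # e.g. ['name', 'sha', '_links.self', '_links.html']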
from movierecommender.datahandler.DbHandler import DbHandler import pathlib import os import pandas as pd from sklearn.feature_extraction.text import CountVectorizer from sklearn.metrics.pairwise import cosine_similarity import errno import time from sqlalchemy import text as sql_text root_dir = pathlib.Path(__file__).parent.parent.parent.resolve() DATA_PATH = os.path.join(root_dir, "data/") class Recommender: def __init__(self): self.dbhandler = DbHandler() self.dbhandler.connect() data = self.dbhandler.exec_select_sql_from_file(os.path.join(DATA_PATH, "sql/select_soup.sql")) self.df = pd.DataFrame(data=data, columns=['tconst', 'soup']) self.cosine_sim = None self.cosine_sim_csv_path = os.path.join(DATA_PATH, 'cosine_sim/cosine_sim.csv') return def create_cosine_sim(self) -> None: """ Creates the cosine_sim matrix from the self.df dataframe This is the method that does all the raw calculation, and this only needs to be done when the dataset changes or the cosine_csv.csv file is missing for some reason Otherwise it is better to just import the cosine_csv.csv file that is generated by an earlier use of this method :return: """ cv = CountVectorizer() count_matrix = cv.fit_transform(self.df['soup']) cosine_sim = cosine_similarity(count_matrix) tconst_array = self.df['tconst'].values cosine_sim_df = pd.DataFrame(data=cosine_sim) cosine_sim_df.insert(0, column='tconst', value=tconst_array) self.cosine_sim = cosine_sim_df return def export_cosine_sim_to_pkl(self, pkl_path=os.path.join(DATA_PATH, 'cosine_sim/cosine_sim.pkl')): """ Exports the self.cosine_sim dataframe to the specified path for later imports :param pkl_path: the path of the pkl file to export the dataframe :return: """ # Initialize the self.cosine_sim df self.create_cosine_sim() print(self.cosine_sim.head()) print("Starting to write...") start_time = time.time() self.cosine_sim.to_pickle(pkl_path) print("Finished writing...") end_time = time.time() print(f"Elapsed {end_time - start_time} s") return def import_cosine_sim_from_pkl(self, pkl_path=os.path.join(DATA_PATH, 'cosine_sim/cosine_sim.pkl'), auto_create=False): if not os.path.exists(pkl_path): if not auto_create: # Throw exception, we shouldn't create the file that doesn't exist print(f"Can't find file with path {pkl_path}, exiting") raise FileNotFoundError( errno.ENOENT, os.strerror(errno.ENOENT), pkl_path) else: # we should call export_cosine_sim_to_pkl to create the file self.create_cosine_sim() self.export_cosine_sim_to_pkl(pkl_path=pkl_path) self.cosine_sim = pd.read_pickle(pkl_path) return def get_tconst_from_idx(self, idx: int) -> str: return self.df[self.df.index == idx]["tconst"].values[0] def get_index_from_tconst(self, tconst) -> int: """ Returns the index of the row the tconst param corresponds to in the self.df dataframe Throws an Exception if the tconst value is not found :param tconst: str the tconst to look for in the df :return: int the index of the row found """ if not (tconst in self.cosine_sim['tconst'].values): raise Exception(f"tconst can't be found in tconst values of self.df, {tconst}") return self.cosine_sim[self.cosine_sim['tconst'] == tconst].index.values[0] def get_recommendation_from_tconst(self, tconst: str, limit=10) -> list: """ Assumes the self.cosine_sim is set (imported via import_cosine_sim_from_csv or generated via create_cosine_sim) :param tconst: str the tconst of the value we are looking for :param limit: int optional the number of results to be included, default is 10 :return: sorted_tconst: list of tconst id recommendations in a 
sorted order, starting from most similar """ tconst_idx = self.get_index_from_tconst(tconst) # get the index of the movie # print(tconst_idx) movie_recommendations = list(enumerate(self.cosine_sim[tconst_idx])) # sort the values by the similarity in desc order sorted_movie_recommendations = sorted(movie_recommendations, key=lambda x: x[1], reverse=True) sorted_tconst = [self.get_tconst_from_idx(movie[0]) for movie in sorted_movie_recommendations[:limit]] # print(sorted_tconst) return sorted_tconst def get_recommendation_titles_from_tconst(self, tconst: str, limit=10): return get_titles_from_tconst_list(self.get_recommendation_from_tconst(tconst, limit)) def get_titles_from_tconst_list(tconst_list: list) -> list: """ :param tconst_list: list of tconst ids, normally generated via get_recommendation_from_tconst :return: list of tuples, where first field is the tconst and second field is the title from the title_basics table """ dbhandler = DbHandler() dbhandler.connect() all_titles = [row for row in dbhandler.conn.execute(sql_text(f"SELECT tconst, primaryTitle FROM " f"title_basics NATURAL JOIN title_soup"))] all_titles_df =
pd.DataFrame(data=all_titles, columns=['tconst', 'primaryTitle'])
pandas.DataFrame
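The Recommender above boils down to one pattern: vectorize the 'soup' text, take pairwise cosine similarity, and re-attach the tconst ids as the first column of the resulting DataFrame. A toy, database-free sketch of that flow (the three rows are invented stand-ins for the select_soup.sql result set):

import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity

df = pd.DataFrame({
    "tconst": ["tt0000001", "tt0000002", "tt0000003"],
    "soup":   ["action space hero", "space opera hero", "romance drama"],
})

count_matrix = CountVectorizer().fit_transform(df["soup"])   # bag-of-words counts
cosine_sim = cosine_similarity(count_matrix)                 # 3x3 similarity matrix

cosine_sim_df = pd.DataFrame(data=cosine_sim)
cosine_sim_df.insert(0, column="tconst", value=df["tconst"].values)
print(cosine_sim_df)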
""" Title: Margin Loan LVR Analysis Helper Functions Desc: A collection of helper functions used throughout the analysis """ import numpy as np import pandas as pd from string import digits import math from datetime import datetime from yahoo_finance import get_yahoo_history from alpha_vantage.timeseries import TimeSeries def remove_numbers(string: str) -> str: """Strips numbers from a string Eg, a1b2c3 -> abc Args: string (str): String to clean Returns: str: Provided string without numbers """ from string import digits return string.translate(str.maketrans("", "", digits)) def get_historical_data(key, time_slice, symbol): """Wrapper function to source historical EOD stock data Args: key (str): alphavantage api key time_slice (str): Aggregate level to fetch. Options are: - daily - weekly - monthly symbol (str): Symbol used, including the exchange Returns: DataFrame: EOD dataframe """ # Instantiate Session ts = TimeSeries(key=key, output_format="pandas", indexing_type="integer") # Retrieve Data if time_slice == "daily": df, metadata = ts.get_daily(symbol, outputsize="full") elif time_slice == "weekly": df, metadata = ts.get_weekly(symbol) elif time_slice == "monthly": df, metadata = ts.get_monthly(symbol) # Replace 0's with NA's because they're almost certainly false df.replace(0, np.nan, inplace=True) # Fix crappy column header that contain numbers & periods df.columns = [remove_numbers(x).replace(". ", "") for x in df.columns.tolist()] # Fix Date type df["date"] =
pd.to_datetime(df["date"])
pandas.to_datetime
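The completed pd.to_datetime call is the last step of a short clean-up chain: replace zeros with NaN, strip digits and '. ' from the alpha_vantage column headers, then parse the date column. The sketch below runs that chain on a hand-written frame shaped like the get_daily output with indexing_type="integer", so no API key is needed; the prices are made up.

import numpy as np
import pandas as pd
from string import digits

def strip_digits(s: str) -> str:
    # Same idea as remove_numbers() above: drop every digit from a string.
    return s.translate(str.maketrans("", "", digits))

# Hand-written stand-in for ts.get_daily(...) output.
df = pd.DataFrame({
    "date": ["2020-01-02", "2020-01-03"],
    "1. open": [100.0, 0.0],    # the 0.0 mimics a spurious tick
    "2. high": [101.5, 102.0],
})

df.replace(0, np.nan, inplace=True)                                   # zeros treated as missing
df.columns = [strip_digits(c).replace(". ", "") for c in df.columns]  # "1. open" -> "open"
df["date"] = pd.to_datetime(df["date"])                               # the completed call
print(df.dtypes)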
# Copyright (C) 2020 University of Oxford # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import pandas as pd import numpy as np from datetime import datetime __all__ = ('MYS_MHYS',) from utils.fetcher.base_epidemiology import BaseEpidemiologyFetcher """ site-location: https://github.com/ynshung/covid-19-malaysia COVID19-Malaysia Data for Malaysia created, maintained and hosted by ynshung Data are retrieved from multiple offical sources such as: MOH Facebook Page CPRC KKM Telegram Channel Desk of DG KKM Portal MyHealth Twitter Twitter of DG Info Sihat. """ logger = logging.getLogger(__name__) class MYS_MHYS(BaseEpidemiologyFetcher): LOAD_PLUGIN = True SOURCE = 'MYS_MHYS' def province_confirmed_fetch(self): """ This url mainly provide cumulative data of confirmed cases on the province-level. """ url = 'https://raw.githubusercontent.com/ynshung/covid-19-malaysia/master/covid-19-my-states-cases.csv' logger.debug('Fetching Malaysia province-level confirmed cases from MYS_MHYS') return
pd.read_csv(url, na_values='-')
pandas.read_csv
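The fetcher above relies on read_csv's na_values argument to turn the '-' placeholders in the upstream CSV into proper NaNs. A self-contained sketch of that behaviour on an in-memory CSV (the rows below are invented, not real case counts):

import io
import pandas as pd

csv_text = (
    "date,perlis,kedah,penang\n"
    "25/01/2020,-,-,-\n"
    "26/01/2020,-,1,0\n"
)

df = pd.read_csv(io.StringIO(csv_text), na_values="-")
print(df.dtypes)   # '-' cells become NaN, so the state columns parse as float
print(df)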
""" This module can filter, normalize, intersect and visualize tabular data such as read counts and differential expression data. Any tabular data saved in a csv format can be imported. \ Use this module to perform various filtering operations on your data, normalize your data, \ perform set operations (union, intersection, etc), run basic exploratory analyses and plots \ (such as PCA, clustergram, violin plots, scatter, etc), \ save the filtered data to your computer, and more. When you save filtered/modified data, its new file name will include by default \ all of the operations performed on it, in the order they were performed, to allow easy traceback of your analyses. """ import numpy as np import pandas as pd from pathlib import Path import warnings import os from rnalysis import general from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler import seaborn as sns import matplotlib.pyplot as plt from grid_strategy import strategies from typing import Union, List, Set, Dict, Tuple class Filter: """ An all-purpose Filter. **Attributes** df: pandas DataFrame A DataFrame that contains the DESeq output file contents. \ The DataFrame is modified upon usage of filter operations. . shape: tuple (rows, columns) The dimensions of df. columns: list The columns of df. fname: pathlib.Path The path and filename for the purpose of saving df as a csv file. \ Updates automatically when filter operations are applied. index_set: set All of the indices in the current DataFrame (which were not removed by previously used filter methods) \ as a set. index_string: string A string of all feature indices in the current DataFrame separated by newline. """ __slots__ = {'fname': 'filename with full path', 'df': 'pandas.DataFrame with the data', 'shape': '(rows, columns)', 'columns': 'list of column names'} def __init__(self, fname: Union[str, Path], drop_columns: Union[str, List[str]] = False): """ :param fname: full path/filename of the .csv file to be loaded into the Filter object :type fname: Union[str, Path] :param drop_columns: if a string or list of strings are specified, \ the columns of the same name/s will be dropped from the loaded DataFrame. :type drop_columns: str, list of str, or False (default False) :Examples: >>> from rnalysis import filtering >>> d = filtering.Filter("tests/counted.csv") """ if isinstance(fname, tuple): assert isinstance(fname[1], (pd.DataFrame, pd.Series)) and isinstance(fname[0], (str, Path)) self.fname = fname[0] self.df = fname[1] else: assert isinstance(fname, (str, Path)) self.fname = Path(fname) self.df = general.load_csv(fname, 0, squeeze=True, drop_columns=drop_columns) if self.df.index.has_duplicates: warnings.warn("This Filter object contains multiple rows with the same WBGene index.") self.shape = self.df.shape if isinstance(self.df, pd.DataFrame): self.columns = list(self.df.columns) def __repr__(self): return f"{type(self).__name__} of file {self.fname}" def __copy__(self): """ :rtype: Filter """ return type(self)((self.fname, self.df.copy(deep=True))) def _inplace(self, new_df: pd.DataFrame, opposite: bool, inplace: bool, suffix: str, printout_operation: str = 'filter'): """ Executes the user's choice whether to filter in-place or create a new instance of the Filter object. :param new_df: the post-filtering DataFrame :param opposite: boolean. Determines whether to return the filtration ,or its opposite. :param inplace: boolean. Determines whether to filter in-place or not. 
:param suffix: The suffix to be added to the filename :return: If inplace is False, returns a new instance of the Filter object. """ assert isinstance(inplace, bool), "'inplace' must be True or False!" assert isinstance(opposite, bool), "'opposite' must be True or False!" assert printout_operation.lower() in ['filter', 'normalize'], \ f"Invalid input for variable 'printout_operation': {printout_operation}" if opposite: new_df = self.df.loc[self.df.index.difference(new_df.index)] suffix += 'opposite' new_fname = Path(f"{str(self.fname.parent)}\\{self.fname.stem}{suffix}{self.fname.suffix}") printout = '' if printout_operation.lower() == 'filter': printout += f"Filtered {self.df.shape[0] - new_df.shape[0]} features, leaving {new_df.shape[0]} " \ f"of the original {self.df.shape[0]} features. " elif printout_operation.lower() == 'normalize': printout += f"Normalized the values of {new_df.shape[0]} features. " if inplace: if printout_operation.lower() == 'filter': printout += 'Filtered inplace.' elif printout_operation.lower() == 'normalize': printout += 'Normalized inplace.' print(printout) self.df, self.fname = new_df, new_fname self.shape = self.df.shape else: if printout_operation.lower() == 'filter': printout += 'Filtering result saved to new object.' elif printout_operation.lower() == 'normalize': printout += 'Normalization result saved to a new object.' print(printout) tmp_df, tmp_fname = self.df, self.fname self.df, self.fname = new_df, new_fname new_obj = self.__copy__() self.df, self.fname = tmp_df, tmp_fname return new_obj def save_csv(self, alt_filename: Union[None, str, Path] = None): """ Saves the current filtered data to a .csv file. :param alt_filename: If None, file name will be generated automatically \ according to the filtering methods used. \ If it's a string, it will be used as the name of the saved file. Example input: 'myfilename' :type alt_filename: str, pathlib.Path, or None (default) """ if alt_filename is None: alt_filename = self.fname else: alt_filename = f"{str(self.fname.parent)}\\{alt_filename}{self.fname.suffix}" general.save_to_csv(self.df, alt_filename) @staticmethod def _color_gen(): """ A generator that yields distinct colors up to a certain limit, and then yields randomized RGB values. :return: a color name string (like 'black', \ or a numpy.ndarray of size (3,) containing three random values each between 0 and 1. """ preset_colors = ['tab:blue', 'tab:red', 'tab:green', 'tab:orange', 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan', 'gold', 'maroon', 'mediumslateblue', 'fuchsia', 'mediumblue', 'black', 'lawngreen'] for color in preset_colors: yield color while True: yield np.random.random(3) @staticmethod def _from_string(msg: str = '', delimiter: str = '\n'): """ Takes a manual string input from the user, and then splits it using a delimiter into a list of values. \ :param msg: a promprt to be printed to the user :param delimiter: the delimiter used to separate the values. Default is '\n' :return: A list of the comma-seperated values the user inserted. """ string = input(msg) split = string.split(sep=delimiter) if split[-1] == '': split = split[:-1] return split def head(self, n: int = 5): """ Return the first n rows of the Filter object. See pandas.DataFrame.head documentation. :type n: positive int, default 5 :param n: Number of rows to show. :return: returns the first n rows of the Filter object. 
:Examples: >>> from rnalysis import filtering >>> d = filtering.Filter("tests/test_deseq.csv") >>> d.head() baseMean log2FoldChange ... pvalue padj WBGene00000002 6820.755327 7.567762 ... 0.000000e+00 0.000000e+00 WBGene00000003 3049.625670 9.138071 ... 4.660000e-302 4.280000e-298 WBGene00000004 1432.911791 8.111737 ... 6.400000e-237 3.920000e-233 WBGene00000005 4028.154186 6.534112 ... 1.700000e-228 7.800000e-225 WBGene00000006 1230.585240 7.157428 ... 2.070000e-216 7.590000e-213 <BLANKLINE> [5 rows x 6 columns] >>> d.head(3) # return only the first 3 rows baseMean log2FoldChange ... pvalue padj WBGene00000002 6820.755327 7.567762 ... 0.000000e+00 0.000000e+00 WBGene00000003 3049.625670 9.138071 ... 4.660000e-302 4.280000e-298 WBGene00000004 1432.911791 8.111737 ... 6.400000e-237 3.920000e-233 <BLANKLINE> [3 rows x 6 columns] """ return self.df.head(n) def tail(self, n: int = 5) -> pd.DataFrame: """ Return the last n rows of the Filter object. See pandas.DataFrame.tail documentation. :type n: positive int, default 5 :param n: Number of rows to show. :rtype: pandas.DataFrame :return: returns the last n rows of the Filter object. :Examples: >>> from rnalysis import filtering >>> d = filtering.Filter("tests/test_deseq.csv") >>> d.tail() baseMean log2FoldChange ... pvalue padj WBGene00000025 2236.185837 2.477374 ... 1.910000e-81 1.460000e-78 WBGene00000026 343.648987 -4.037191 ... 2.320000e-75 1.700000e-72 WBGene00000027 175.142856 6.352044 ... 1.580000e-74 1.120000e-71 WBGene00000028 219.163200 3.913657 ... 3.420000e-72 2.320000e-69 WBGene00000029 1066.242402 -2.811281 ... 1.420000e-70 9.290000e-68 <BLANKLINE> [5 rows x 6 columns] >>> d.tail(8) # returns the last 8 rows baseMean log2FoldChange ... pvalue padj WBGene00000022 365.813048 6.101303 ... 2.740000e-97 2.400000e-94 WBGene00000023 3168.566714 3.906719 ... 1.600000e-93 1.340000e-90 WBGene00000024 221.925724 4.801676 ... 1.230000e-84 9.820000e-82 WBGene00000025 2236.185837 2.477374 ... 1.910000e-81 1.460000e-78 WBGene00000026 343.648987 -4.037191 ... 2.320000e-75 1.700000e-72 WBGene00000027 175.142856 6.352044 ... 1.580000e-74 1.120000e-71 WBGene00000028 219.163200 3.913657 ... 3.420000e-72 2.320000e-69 WBGene00000029 1066.242402 -2.811281 ... 1.420000e-70 9.290000e-68 <BLANKLINE> [8 rows x 6 columns] """ return self.df.tail(n) def filter_percentile(self, percentile: float, column: str, opposite: bool = False, inplace: bool = True): """ Removes all entries above the specified percentile in the specified column. \ For example, if the column were 'pvalue' and the percentile was 0.5, then all features whose pvalue is above \ the median pvalue will be filtered out. :type percentile: float between 0 and 1 :param percentile: The percentile that all features above it will be filtered out. :type column: str :param column: Name of the DataFrame column according to which the filtering will be performed. :type opposite: bool :param opposite: If True, the output of the filtering will be the OPPOSITE of the specified \ (instead of filtering out X, the function will filter out anything BUT X). \ If False (default), the function will filter as expected. :type inplace: bool :param inplace: If True (default), filtering will be applied to the current Filter object. If False, \ the function will return a new Filter instance and the current instance will not be affected. :return: If inplace is False, returns a new and filtered instance of the Filter object. 
:Examples: >>> from rnalysis import filtering >>> d = filtering.Filter("tests/test_deseq.csv") >>> # keep only the rows whose value in the column 'log2FoldChange' is below the 75th percentile >>> d.filter_percentile(0.75,'log2FoldChange') Filtered 7 features, leaving 21 of the original 28 features. Filtered inplace. >>> d = filtering.Filter("tests/test_deseq.csv") >>> # keep only the rows vulse value in the column 'log2FoldChange' is above the 25th percentile >>> d.filter_percentile(0.25,'log2FoldChange',opposite=True) Filtered 7 features, leaving 21 of the original 28 features. Filtered inplace. """ assert isinstance(percentile, float), "percentile must be a float between 0 and 1!" assert isinstance(column, str) and column in self.df, "Invalid column name!" suffix = f'_below{percentile}percentile' new_df = self.df[self.df[column] < self.df[column].quantile(percentile)] return self._inplace(new_df, opposite, inplace, suffix) def split_by_percentile(self, percentile: float, column: str) -> tuple: """ Splits the Filter object into two Filter objects: \ below and above the specified percentile in the spcfieid column. :type percentile: float between 0 and 1 :param percentile: The percentile that all features above it will be filtered out. :type column: str :param column: Name of the DataFrame column according to which the filtering will be performed. :rtype: Tuple[filtering.Filter, filtering.Filter] :return: a tuple of two Filter objects: the first contains all of the features below the specified percentile, \ and the second contains all of the features above and equal to the specified percentile. :Examples: >>> from rnalysis import filtering >>> d = filtering.Filter("tests/test_deseq.csv") >>> below, above = d.split_by_percentile(0.75,'log2FoldChange') Filtered 7 features, leaving 21 of the original 28 features. Filtering result saved to new object. Filtered 21 features, leaving 7 of the original 28 features. Filtering result saved to new object. """ return self.filter_percentile(percentile=percentile, column=column, opposite=False, inplace=False), self.filter_percentile(percentile=percentile, column=column, opposite=True, inplace=False) def filter_biotype(self, biotype: Union[str, List[str]] = 'protein_coding', ref: str = 'predefined', opposite: bool = False, inplace: bool = True): """ Filters out all features that do not match the indicated biotype/biotypes. \ Legal inputs: 'protein_coding','pseudogene','piRNA','miRNA','ncRNA','lincRNA','rRNA','snRNA','snoRNA'. :type biotype: string or list of strings :param biotype: the biotypes which will not be filtered out. :param ref: Name of the biotype reference file used to determine biotypes. \ Default is the path defined by the user in the settings.yaml file. :type opposite: bool :param opposite: If True, the output of the filtering will be the OPPOSITE of the specified \ (instead of filtering out X, the function will filter out anything BUT X). \ If False (default), the function will filter as expected. :type inplace: bool :param inplace: If True (default), filtering will be applied to the current Filter object. If False, \ the function will return a new Filter instance and the current instance will not be affected. :return: If 'inplace' is False, returns a new instance of Filter object. 
:Examples: >>> from rnalysis import filtering >>> counts = filtering.Filter('tests/counted.csv') >>> # keep only rows whose biotype is 'protein_coding' >>> counts.filter_biotype('protein_coding',ref='tests/biotype_ref_table_for_tests.csv') Filtered 9 features, leaving 13 of the original 22 features. Filtered inplace. >>> counts = filtering.Filter('tests/counted.csv') >>> # keep only rows whose biotype is 'protein_coding' or 'pseudogene' >>> counts.filter_biotype(['protein_coding','pseudogene'],ref='tests/biotype_ref_table_for_tests.csv') Filtered 0 features, leaving 22 of the original 22 features. Filtered inplace. """ assert isinstance(biotype, (str, list)), "biotype must be a string or a list!" if isinstance(biotype, str): biotype = [biotype] ref = general._get_biotype_ref_path(ref) ref_df = general.load_csv(ref) general._biotype_table_assertions(ref_df) ref_df.set_index('gene', inplace=True) ref_df.columns = ref_df.columns.str.lower() legal_inputs = set(ref_df['biotype'].unique()) for bio in biotype: assert bio in legal_inputs, f"biotype {bio} is not a legal string!" suffix = f"_{'_'.join(biotype)}" mask = pd.Series(np.zeros_like(ref_df['biotype'], dtype=bool), index=ref_df['biotype'].index, name='biotype') for bio in biotype: mask = mask | (ref_df['biotype'] == bio) gene_names = ref_df[mask].index.intersection(self.df.index) new_df = self.df.loc[gene_names] return self._inplace(new_df, opposite, inplace, suffix) # TODO: add 'remove unindexed rows' to here! def filter_by_attribute(self, attributes: Union[str, List[str]] = None, mode='union', ref: str = 'predefined', opposite: bool = False, inplace: bool = True): """ Filters features according to user-defined attributes from an Attribute Reference Table. \ When multiple attributes are given, filtering can be done in 'union' mode \ (where features that belong to at least one attribute are not filtered out), or in 'intersection' mode \ (where only features that belong to ALL attributes are not filtered out). \ To learn more about user-defined attributes and Attribute Reference Tables, read the user guide. :type attributes: string or list of strings, \ which are column titles in the user-defined Attribute Reference Table. :param attributes: attributes to filter by. :type mode: 'union' or 'intersection'. :param mode: If 'union', filters out every genomic feature that does not belong to one or more \ of the indicated attributes. If 'intersection', \ filters out every genomic feature that does not belong to ALL of the indicated attributes. :type ref: str or pathlib.Path (default 'predefined') :param ref: filename/path of the attribute reference table to be used as reference. :type opposite: bool (default False) :param opposite: If True, the output of the filtering will be the OPPOSITE of the specified \ (instead of filtering out X, the function will filter out anything BUT X). \ If False (default), the function will filter as expected. :type inplace: bool (default True) :param inplace: If True (default), filtering will be applied to the current Filter object. If False, \ the function will return a new Filter instance and the current instance will not be affected. :return: If 'inplace' is False, returns a new and filtered instance of the Filter object. 
:Examples: >>> from rnalysis import filtering >>> counts = filtering.Filter('tests/counted.csv') >>> # keep only rows that belong to the attribute 'attribute1' >>> counts.filter_by_attribute('attribute1',ref='tests/attr_ref_table_for_examples.csv') Filtered 15 features, leaving 7 of the original 22 features. Filtered inplace. >>> counts = filtering.Filter('tests/counted.csv') >>> # keep only rows that belong to the attributes 'attribute1' OR 'attribute3' (union) >>> counts.filter_by_attribute(['attribute1','attribute3'],ref='tests/attr_ref_table_for_examples.csv') Filtered 14 features, leaving 8 of the original 22 features. Filtered inplace. >>> counts = filtering.Filter('tests/counted.csv') >>> # keep only rows that belong to both attributes 'attribute1' AND 'attribute3' (intersection) >>> counts.filter_by_attribute(['attribute1','attribute3'],mode='intersection', ... ref='tests/attr_ref_table_for_examples.csv') Filtered 19 features, leaving 3 of the original 22 features. Filtered inplace. >>> counts = filtering.Filter('tests/counted.csv') >>> # keep only rows that DON'T belong to either 'attribute1','attribute3' or both >>> counts.filter_by_attribute(['attribute1','attribute3'],ref='tests/attr_ref_table_for_examples.csv', ... opposite=True) Filtered 8 features, leaving 14 of the original 22 features. Filtered inplace. >>> counts = filtering.Filter('tests/counted.csv') >>> # keep only rows that DON'T belong to both 'attribute1' AND 'attribute3' >>> counts.filter_by_attribute(['attribute1','attribute3'],mode='intersection', ... ref='tests/attr_ref_table_for_examples.csv',opposite=True) Filtered 3 features, leaving 19 of the original 22 features. Filtered inplace. """ if attributes is None: attributes = self._from_string( "Please insert attributes separated by newline " "(for example: \n'epigenetic_related_genes\nnrde-3 targets\nALG-3/4 class small RNAs')") elif isinstance(attributes, str): attributes = [attributes] else: assert isinstance(attributes, (list, tuple, set)) assert isinstance(mode, str), "'mode' must be a string!" ref = general._get_attr_ref_path(ref) attr_ref_table = general.load_csv(ref) general._attr_table_assertions(attr_ref_table) attr_ref_table.set_index('gene', inplace=True) sep_idx = [attr_ref_table[attr_ref_table[attr].notnull()].index for attr in attributes] if mode == 'intersection': suffix = '_reftableintersection' indices = self.df.index for idx in sep_idx: indices = indices.intersection(idx) elif mode == 'union': suffix = '_reftableUnion' indices = pd.Index([]) for idx in sep_idx: indices = indices.union(idx) indices = indices.intersection(self.df.index) else: raise ValueError(f"Illegal input {mode}: mode must be either 'union' or 'intersection'") new_df = self.df.loc[set(indices)] return self._inplace(new_df, opposite, inplace, suffix) def split_by_attribute(self, attributes: List[str], ref: str = 'predefined') -> tuple: """ Splits the Filter object into multiple Filter objects, \ each corresponding to one of the specified Attribute Reference Table attributes. \ Each object contains only features that match its Attribute Reference Table attribute. :param attributes: list of attribute names from the Attribute Reference Table to filter by. :type attributes: list of strings :param ref: filename/path of the reference table to be used as reference. 
:rtype: Tuple[filtering.Filter] :return: A tuple of Filter objects, each containing only features that match one Attribute Reference Table attribute; \ the Filter objects are returned in the same order the attributes were given in. :Examples: >>> from rnalysis import filtering >>> counts = filtering.Filter('tests/counted.csv') >>> attribute1,attribute2 = counts.split_by_attribute(['attribute1','attribute2'], ... ref='tests/attr_ref_table_for_examples.csv') Filtered 15 features, leaving 7 of the original 22 features. Filtering result saved to new object. Filtered 20 features, leaving 2 of the original 22 features. Filtering result saved to new object. """ assert isinstance(attributes, list) return tuple([self.filter_by_attribute(attributes=att, mode='union', ref=ref, inplace=False) for att in attributes]) def describe(self, percentiles: list = (0.01, 0.25, 0.5, 0.75, 0.99)): """ Generate descriptive statistics that summarize the central tendency, dispersion and shape \ of the dataset’s distribution, excluding NaN values. \ For more information see the documentation of pandas.DataFrame.describe. :type percentiles: list-like of numbers, optional :param percentiles: The percentiles to include in the output. \ All should fall between 0 and 1. \ The default is [.25, .5, .75], which returns the 25th, 50th, and 75th percentiles. :return: Summary statistics of the dataset. :rtype: Series or DataFrame :Examples: >>> from rnalysis import filtering >>> counts = filtering.Filter('tests/counted.csv') >>> counts.describe() cond1 cond2 cond3 cond4 count 22.000000 22.000000 22.000000 22.000000 mean 2515.590909 2209.227273 4230.227273 3099.818182 std 4820.512674 4134.948493 7635.832664 5520.394522 min 0.000000 0.000000 0.000000 0.000000 1% 0.000000 0.000000 0.000000 0.000000 25% 6.000000 6.250000 1.250000 0.250000 50% 57.500000 52.500000 23.500000 21.000000 75% 2637.000000 2479.000000 6030.500000 4669.750000 99% 15054.950000 12714.290000 21955.390000 15603.510000 max 15056.000000 12746.000000 22027.000000 15639.000000 >>> # show the deciles (10%, 20%, 30%... 90%) of the columns >>> counts.describe(percentiles=[decile/10 for decile in range(1,10)]) cond1 cond2 cond3 cond4 count 22.000000 22.000000 22.000000 22.000000 mean 2515.590909 2209.227273 4230.227273 3099.818182 std 4820.512674 4134.948493 7635.832664 5520.394522 min 0.000000 0.000000 0.000000 0.000000 10% 0.000000 0.200000 0.000000 0.000000 20% 1.400000 3.200000 1.000000 0.000000 30% 15.000000 15.700000 2.600000 1.000000 40% 28.400000 26.800000 14.000000 9.000000 50% 57.500000 52.500000 23.500000 21.000000 60% 82.000000 106.800000 44.000000 33.000000 70% 484.200000 395.500000 305.000000 302.500000 80% 3398.600000 3172.600000 7981.400000 6213.000000 90% 8722.100000 7941.800000 16449.500000 12129.900000 max 15056.000000 12746.000000 22027.000000 15639.000000 """ return self.df.describe(percentiles=percentiles) @property def index_set(self) -> set: """ Returns all of the features in the current DataFrame (which were not removed by previously used filter methods) \ as a set. \ if any duplicate features exist in the filter object (same WBGene appears more than once), \ the corresponding WBGene index will appear in the returned set ONLY ONCE. :return: A set of WBGene names. 
:Examples: >>> from rnalysis import filtering >>> counts = filtering.Filter('tests/counted.csv') >>> myset = counts.index_set >>> print(myset) {'WBGene00044022', 'WBGene00077504', 'WBGene00007079', 'WBGene00007069', 'WBGene00007063', 'WBGene00007067', 'WBGene00077503', 'WBGene00007078', 'WBGene00007064', 'WBGene00077502', 'WBGene00044951', 'WBGene00007077', 'WBGene00007066', 'WBGene00007076', 'WBGene00014997', 'WBGene00043990', 'WBGene00007074', 'WBGene00043987', 'WBGene00007071', 'WBGene00043989', 'WBGene00043988', 'WBGene00007075'} """ if self.df.index.has_duplicates: warnings.warn(" this filter object contains multiple rows with the same WBGene index. When " "returning a set or string of features from this DESeqFilter object, each WBGene index will " "appear ONLY ONCE!") return set(self.df.index) @property def index_string(self) -> str: r""" Returns a string of all feature indices in the current DataFrame, \ sorted alphabetically and separated by newline. \ This includes all of the feature indices which were not filtered out by previously-used filter methods. \ if any duplicate features exist in the filter object (same index appears more than once), \ the corresponding index will appear in the returned string ONLY ONCE. :return: A string of WBGene indices separated by newlines (\\n). \ For example, "WBGene00000001\\nWBGene00000003\\nWBGene12345678". :Examples: >>> from rnalysis import filtering >>> counts = filtering.Filter('tests/counted.csv') >>> mystring = counts.index_string >>> print(mystring) WBGene00007063 WBGene00007064 WBGene00007066 WBGene00007067 WBGene00007069 WBGene00007071 WBGene00007074 WBGene00007075 WBGene00007076 WBGene00007077 WBGene00007078 WBGene00007079 WBGene00014997 WBGene00043987 WBGene00043988 WBGene00043989 WBGene00043990 WBGene00044022 WBGene00044951 WBGene00077502 WBGene00077503 WBGene00077504 """ ordered = list(self.index_set) ordered.sort() return "\n".join(ordered) def print_features(self): """ Print the feature indices in the Filter object, sorted alphabetically and separated by newline. :Examples: >>> from rnalysis import filtering >>> counts = filtering.Filter('tests/counted.csv') >>> counts.print_features() WBGene00007063 WBGene00007064 WBGene00007066 WBGene00007067 WBGene00007069 WBGene00007071 WBGene00007074 WBGene00007075 WBGene00007076 WBGene00007077 WBGene00007078 WBGene00007079 WBGene00014997 WBGene00043987 WBGene00043988 WBGene00043989 WBGene00043990 WBGene00044022 WBGene00044951 WBGene00077502 WBGene00077503 WBGene00077504 """ print(self.index_string) def biotypes(self, return_format: str = 'short', ref: str = 'predefined'): """ Returns a DataFrame of the biotypes in the Filter object and their count. :type return_format: 'short' or 'long' (default 'short') :param return_format: 'short' returns a short-form DataFrame, which states the biotypes \ in the Filter object and their count. 'long' returns a long-form DataFrame, which also provides descriptive statistics of each column per biotype. :param ref: Name of the biotype reference table used to determine biotype. Default is ce11 (included in the package). :rtype: pandas.DataFrame :returns: a pandas DataFrame showing the number of values belonging to each biotype, \ as well as additional descriptive statistics of format=='long'. 
:Examples: >>> from rnalysis import filtering >>> d = filtering.Filter("tests/test_deseq.csv") >>> # short-form view >>> d.biotypes(ref='tests/biotype_ref_table_for_tests.csv') gene biotype protein_coding 26 pseudogene 1 unknown 1 >>> # long-form view >>> d.biotypes(return_format='long', ref='tests/biotype_ref_table_for_tests.csv') baseMean ... padj count mean ... 75% max biotype ... protein_coding 26.0 1823.089609 ... 1.005060e-90 9.290000e-68 pseudogene 1.0 2688.043701 ... 1.800000e-94 1.800000e-94 unknown 1.0 2085.995094 ... 3.070000e-152 3.070000e-152 <BLANKLINE> [3 rows x 48 columns] """ ref = general._get_biotype_ref_path(ref) ref_df = general.load_csv(ref) general._biotype_table_assertions(ref_df) ref_df.columns = ref_df.columns.str.lower() not_in_ref = self.df.index.difference(ref_df['gene']) if len(not_in_ref) > 0: warnings.warn( f'{len(not_in_ref)} of the features in the Filter object do not appear in the Biotype Reference Table. ') ref_df = ref_df.append(pd.DataFrame({'gene': not_in_ref, 'biotype': 'not_in_biotype_reference'})) if return_format == 'short': return ref_df.set_index('gene', drop=False).loc[self.df.index].groupby('biotype').count() elif return_format == 'long': self_df = self.df.__deepcopy__() self_df['biotype'] = ref_df.set_index('gene').loc[self.df.index] return self_df.groupby('biotype').describe() else: raise ValueError(f'Invalid format "{return_format}"') def number_filters(self, column: str, operator: str, value, opposite=False, inplace=True): """ Applay a number filter (greater than, equal, lesser than) on a particular column in the Filter object. :type column: str :param column: name of the column to filter by :type operator: str: 'gt' / 'greater than' / '>', 'eq' / 'equals' / '=', 'lt' / 'lesser than' / '<' :param operator: the operator to filter the column by (greater than, equal or lesser than) :type value: number (int or float) :param value: the value to filter by :type opposite: bool :param opposite: If True, the output of the filtering will be the OPPOSITE of the specified \ (instead of filtering out X, the function will filter out anything BUT X). \ If False (default), the function will filter as expected. :type inplace: bool :param inplace: If True (default), filtering will be applied to the current Filter object. If False, \ the function will return a new Filter instance and the current instance will not be affected. :return: If 'inplace' is False, returns a new instance of the Filter object. :Examples: >>> from rnalysis import filtering >>> filt = filtering.Filter('tests/test_deseq.csv') >>> #keep only rows that have a value greater than 5900 in the column 'baseMean'. >>> filt.number_filters('baseMean','gt',5900) Filtered 26 features, leaving 2 of the original 28 features. Filtered inplace. >>> filt = filtering.Filter('tests/test_deseq.csv') >>> #keep only rows that have a value greater than 5900 in the column 'baseMean'. >>> filt.number_filters('baseMean','greater than',5900) Filtered 26 features, leaving 2 of the original 28 features. Filtered inplace. >>> filt = filtering.Filter('tests/test_deseq.csv') >>> #keep only rows that have a value greater than 5900 in the column 'baseMean'. >>> filt.number_filters('baseMean','>',5900) Filtered 26 features, leaving 2 of the original 28 features. Filtered inplace. 
""" operator_dict = {'gt': 'gt', 'greater than': 'gt', '>': 'gt', 'eq': 'eq', 'equals': 'eq', '=': 'eq', 'lt': 'lt', 'lesser than': 'lt', '<': 'lt', 'equal': 'eq'} operator = operator.lower() assert operator in operator_dict, f"Invalid operator {operator}" assert isinstance(value, (int, float)), f"'value' must be a number!" assert column in self.columns, f"column {column} not in DataFrame!" op = operator_dict[operator] suffix = f"_{column}{op}{value}" if op == 'eq': new_df = self.df[self.df[column] == value] elif op == 'gt': new_df = self.df[self.df[column] > value] elif op == 'lt': new_df = self.df[self.df[column] < value] else: raise KeyError(f"Problem with operator {operator} or key {op}. Please report to the developer. ") return self._inplace(new_df, opposite, inplace, suffix) def text_filters(self, column: str, operator: str, value: str, opposite=False, inplace=True): """ Applay a text filter (equals, contains, starts with, ends with) on a particular column in the Filter object. :type column: str :param column: name of the column to filter by :type operator: str: 'eq' / 'equals' / '=', 'ct' / 'contains' / 'in', 'bw' / 'begins with', 'ew' / 'ends with' :param operator: the operator to filter the column by (equals, contains, starts with, ends with) :type value: number (int or float) :param value: the value to filter by :type opposite: bool :param opposite: If True, the output of the filtering will be the OPPOSITE of the specified \ (instead of filtering out X, the function will filter out anything BUT X). \ If False (default), the function will filter as expected. :type inplace: bool :param inplace: If True (default), filtering will be applied to the current Filter object. If False, \ the function will return a new Filter instance and the current instance will not be affected. :return: If 'inplace' is False, returns a new instance of the Filter object. :Examples: >>> from rnalysis import filtering >>> filt = filtering.Filter('tests/text_filters.csv') >>> # keep only rows that have a value that starts with 'AC3' in the column 'name'. >>> filt.text_filters('name','sw','AC3') Filtered 17 features, leaving 5 of the original 22 features. Filtered inplace. """ operator_dict = {'eq': 'eq', 'equals': 'eq', '=': 'eq', 'ct': 'ct', 'in': 'ct', 'contains': 'ct', 'sw': 'sw', 'starts with': 'sw', 'ew': 'ew', 'ends with': 'ew', 'equal': 'eq', 'begins with': 'sw'} operator = operator.lower() assert operator in operator_dict, f"Invalid operator {operator}" assert isinstance(value, str), f"'value' must be a string!" assert column in self.columns, f"column {column} not in DataFrame!" op = operator_dict[operator] suffix = f"_{column}{op}{value}" if op == 'eq': new_df = self.df[self.df[column] == value] elif op == 'ct': new_df = self.df[self.df[column].str.contains(value)] elif op == 'ew': new_df = self.df[self.df[column].str.endswith(value)] elif op == 'sw': new_df = self.df[self.df[column].str.startswith(value)] else: raise KeyError(f"Problem with operator {operator} or key {op}. Please report to the developer. ") return self._inplace(new_df, opposite, inplace, suffix) def sort(self, by: Union[str, List[str]], ascending: Union[bool, List[bool]] = True, na_position: str = 'last', inplace: bool = True): """ Sort the rows by the values of specified column or columns. :type by: str or list of str :param by: Names of the column or columns to sort by. :type ascending: bool or list of bool, default True :param ascending: Sort ascending vs. descending. Specify list for multiple sort orders. 
\ If this is a list of bools, it must have the same length as 'by'. :type na_position: 'first' or 'last', default 'last' :param na_position: If 'first', puts NaNs at the beginning; if 'last', puts NaNs at the end. :type inplace: bool, default True :param inplace: If True, perform operation in-place. \ Otherwise, returns a sorted copy of the Filter object without modifying the original. :return: None if inplace=True, a sorted Filter object otherwise. :Examples: >>> from rnalysis import filtering >>> counts = filtering.Filter('tests/counted.csv') >>> counts.head() cond1 cond2 cond3 cond4 WBGene00007063 633 451 365 388 WBGene00007064 60 57 20 23 WBGene00044951 0 0 0 1 WBGene00007066 55 266 46 39 WBGene00007067 15 13 1 0 >>> counts.sort(by='cond1',ascending=True) >>> counts.head() cond1 cond2 cond3 cond4 WBGene00044951 0 0 0 1 WBGene00077504 0 0 0 0 WBGene00007069 0 2 1 0 WBGene00077502 0 0 0 0 WBGene00077503 1 4 2 0 """ if inplace: self.df.sort_values(by=by, axis=0, ascending=ascending, inplace=True, na_position=na_position) else: new_df = self.df.sort_values(by=by, axis=0, ascending=ascending, inplace=False, na_position=na_position) return self._inplace(new_df, False, inplace, '') def filter_top_n(self, by: Union[str, List[str]], n: int = 100, ascending: Union[bool, List[bool]] = True, na_position: str = 'last', opposite: bool = False, inplace: bool = True, ): """ Sort the rows by the values of specified column or columns, then keep only the top 'n' rows. :type by: string or list of strings :param by: Names of the column or columns to sort and then filter by. :type n: int :param n: How many features to keep in the Filter object. :type ascending: bool or list of bools (default True) :param ascending: Sort ascending vs. descending. Specify list for multiple sort orders. \ If this is a list of bools, it must have the same length as 'by'. :type na_position: 'first' or 'last', default 'last' :param na_position: If 'first', puts NaNs at the beginning; if 'last', puts NaNs at the end. :type opposite: bool :param opposite: If True, the output of the filtering will be the OPPOSITE of the specified \ (instead of filtering out X, the function will filter out anything BUT X). \ If False (default), the function will filter as expected. :type inplace: bool :param inplace: If True (default), filtering will be applied to the current Filter object. If False, \ the function will return a new Filter instance and the current instance will not be affected. :return: If 'inplace' is False, returns a new instance of Filter. :Examples: >>> from rnalysis import filtering >>> counts = filtering.Filter('tests/counted.csv') >>> # keep only the 10 rows with the highest values in the columns 'cond1' >>> counts.filter_top_n(by='cond1',n=10, ascending=False) Filtered 12 features, leaving 10 of the original 22 features. Filtered inplace. >>> counts = filtering.Filter('tests/counted.csv') >>> # keep only the 10 rows which have the lowest values in the columns 'cond1' >>> # and then the highest values in the column 'cond2' >>> counts.filter_top_n(by=['cond1','cond2'],n=10, ascending=[True,False]) Filtered 12 features, leaving 10 of the original 22 features. Filtered inplace. """ assert isinstance(n, int), "n must be an integer!" assert n > 0, "n must be a positive integer!" if isinstance(by, list): for col in by: assert col in self.columns, f"{col} is not a column in the Filter object!" else: assert by in self.columns, f"{by} is not a column in the Filter object!" 
self.sort(by=by, ascending=ascending, na_position=na_position, inplace=True) if n > self.df.shape[0]: warnings.warn(f'Current number of rows {self.df.shape[0]} is smaller than the specified n={n}. ' f'Therefore output Filter object will only have {self.df.shape[0]} rows. ') new_df = self.df.iloc[0:min(n, self.df.shape[0])] order = 'asc' if ascending else 'desc' suffix = f"_top{n}{by}{order}" return self._inplace(new_df, opposite, inplace, suffix) @staticmethod def __return_type(index_set: set, return_type: str): assert isinstance(return_type, str), "'return_type' must be a string!!" if return_type == 'set': return index_set elif return_type == 'str': return "\n".join(index_set) else: raise ValueError(f"'return type' must be either 'set' or 'str', is instead '{return_type}'!") def _set_ops(self, others, return_type, op): others = list(others) for i, other in enumerate(others): if isinstance(other, Filter): others[i] = other.index_set elif isinstance(other, set): pass else: raise TypeError("'other' must be a Filter object or a set!") try: op_indices = op(set(self.df.index), *others) except TypeError as e: if op == set.symmetric_difference: raise TypeError( f"Symmetric difference can only be calculated for two objects, {len(others) + 1} were given!") else: raise e return Filter.__return_type(op_indices, return_type) def intersection(self, *others, return_type: str = 'set', inplace: bool = False): """ Keep only the features that exist in ALL of the given Filter objects/sets. \ Can be done inplace on the first Filter object, or return a set/string of features. :type others: Filter or set objects. :param others: Objects to calculate intersection with. :type return_type: 'set' or 'str. :param return_type: If 'set', returns a set of the intersecting WBGene indices. If 'str', returns a string of \ the intersecting indices, delimited by a comma. :type inplace: bool, default False :param inplace: If True, filtering will be applied to the current Filter object. If False (default), \ the function will return a set/str that contains the intersecting indices. :rtype: set or str :return: If inplace=False, returns a set/string of the WBGene indices that intersect between two Filter objects. :Examples: >>> from rnalysis import filtering >>> d = filtering.Filter("tests/test_deseq.csv") >>> a_set = {'WBGene00000001','WBGene00000002','WBGene00000003'} >>> # calculate intersection and return a set >>> d.intersection(a_set) {'WBGene00000002', 'WBGene00000003'} # calculate intersection and filter in-place >>> d.intersection(a_set, inplace=True) Filtered 26 features, leaving 2 of the original 28 features. Filtered inplace. """ if inplace: suffix = f"_intersection" new_set = self._set_ops(others, 'set', set.intersection) return self._inplace(self.df.loc[new_set], opposite=False, inplace=inplace, suffix=suffix) else: new_set = self._set_ops(others, return_type, set.intersection) return new_set def union(self, *others, return_type: str = 'set'): """ Returns a set/string of the union of WBGene indices between multiple Filter objects \ (the indices that exist in at least one of the Filter objects/sets). :type others: Filter or set objects. :param others: Objects to calculate union with. :type return_type: 'set' or 'str. :param return_type: If 'set', returns a set of the union WBGene indices. If 'str', returns a string of \ the union WBGene indices, delimited by a comma. :rtype: set or str :return: a set/string of the WBGene indices that exist in at least one of the Filter objects. 
:Examples: >>> from rnalysis import filtering >>> d = filtering.Filter("tests/test_deseq.csv") >>> counts = filtering.Filter('tests/counted.csv') >>> # calculate union and return a set >>> d.union(counts) {'WBGene00000017', 'WBGene00000021', 'WBGene00044022', 'WBGene00077504', 'WBGene00000012', 'WBGene00000024', 'WBGene00007079', 'WBGene00000010', 'WBGene00000020', 'WBGene00000005', 'WBGene00007069', 'WBGene00007063', 'WBGene00007067', 'WBGene00077503', 'WBGene00007078', 'WBGene00000026', 'WBGene00000029', 'WBGene00000002', 'WBGene00000003', 'WBGene00000006', 'WBGene00007064', 'WBGene00077502', 'WBGene00044951', 'WBGene00000007', 'WBGene00000008', 'WBGene00000019', 'WBGene00007077', 'WBGene00000004', 'WBGene00007066', 'WBGene00007076', 'WBGene00000013', 'WBGene00014997', 'WBGene00000023', 'WBGene00043990', 'WBGene00007074', 'WBGene00000025', 'WBGene00000011', 'WBGene00043987', 'WBGene00007071', 'WBGene00000015', 'WBGene00000018', 'WBGene00043989', 'WBGene00043988', 'WBGene00000014', 'WBGene00000016', 'WBGene00000027', 'WBGene00000028', 'WBGene00007075', 'WBGene00000022', 'WBGene00000009'} """ return self._set_ops(others, return_type, set.union) def difference(self, *others, return_type: str = 'set', inplace: bool = False): """ Keep only the features that exist in the first Filter object/set but NOT in the others. \ Can be done inplace on the first Filter object, or return a set/string of features. :type others: Filter or set objects. :param others: Objects to calculate difference with. :type return_type: 'set' or 'str. :param return_type: If 'set', returns a set of the WBGene indices that exist only in the first Filter object. \ If 'str', returns a string of the WBGene indices that exist only in the first Filter object, \ delimited by a comma. :type inplace: bool, default False :param inplace: If True, filtering will be applied to the current Filter object. If False (default), \ the function will return a set/str that contains the intersecting indices. :rtype: set or str :return: If inplace=False, returns a set/string of the WBGene indices\ that exist only in the first Filter object/set (set difference). :Examples: >>> from rnalysis import filtering >>> d = filtering.DESeqFilter("tests/test_deseq.csv") >>> counts = filtering.CountFilter('tests/counted.csv') >>> a_set = {'WBGene00000001','WBGene00000002','WBGene00000003'} >>> # calculate difference and return a set >>> d.difference(counts, a_set) {'WBGene00007063', 'WBGene00007064', 'WBGene00007066', 'WBGene00007067', 'WBGene00007069', 'WBGene00007071', 'WBGene00007074', 'WBGene00007075', 'WBGene00007076', 'WBGene00007077', 'WBGene00007078', 'WBGene00007079', 'WBGene00014997', 'WBGene00043987', 'WBGene00043988', 'WBGene00043989', 'WBGene00043990', 'WBGene00044022', 'WBGene00044951', 'WBGene00077502', 'WBGene00077503', 'WBGene00077504'} # calculate difference and filter in-place >>> d.difference(counts, a_set, inplace=True) Filtered 2 features, leaving 26 of the original 28 features. Filtered inplace. """ if inplace: suffix = f"_difference" new_set = self._set_ops(others, 'set', set.difference) return self._inplace(self.df.loc[new_set], opposite=False, inplace=inplace, suffix=suffix) else: new_set = self._set_ops(others, return_type, set.difference) return new_set def symmetric_difference(self, other, return_type: str = 'set'): """ Returns a set/string of the WBGene indices that exist either in the first Filter object/set OR the second, \ but NOT in both (set symmetric difference). :type other: Filter or set. 
:param other: a second Filter object/set to calculate symmetric difference with. :type return_type: 'set' or 'str. :param return_type: If 'set', returns a set of the WBGene indices that exist in exactly one Filter object. \ If 'str', returns a string of the WBGene indices that exist in exactly one Filter object, delimited by a comma. :rtype: set or str :return: a set/string of the WBGene indices that that exist t in exactly one Filter. (set symmetric difference). :Examples: >>> from rnalysis import filtering >>> d = filtering.DESeqFilter("tests/test_deseq.csv") >>> counts = filtering.CountFilter('tests/counted.csv') >>> # calculate difference and return a set >>> d.symmetric_difference(counts) {'WBGene00000017', 'WBGene00077504', 'WBGene00000024', 'WBGene00000010', 'WBGene00000020', 'WBGene00007069', 'WBGene00007063', 'WBGene00007067', 'WBGene00007078', 'WBGene00000029', 'WBGene00000006', 'WBGene00007064', 'WBGene00000019', 'WBGene00000004', 'WBGene00007066', 'WBGene00014997', 'WBGene00000023', 'WBGene00007074', 'WBGene00000025', 'WBGene00043989', 'WBGene00043988', 'WBGene00000014', 'WBGene00000027', 'WBGene00000021', 'WBGene00044022', 'WBGene00007079', 'WBGene00000012', 'WBGene00000005', 'WBGene00077503', 'WBGene00000026', 'WBGene00000003', 'WBGene00000002', 'WBGene00077502', 'WBGene00044951', 'WBGene00007077', 'WBGene00000007', 'WBGene00000008', 'WBGene00007076', 'WBGene00000013', 'WBGene00043990', 'WBGene00043987', 'WBGene00007071', 'WBGene00000011', 'WBGene00000015', 'WBGene00000018', 'WBGene00000016', 'WBGene00000028', 'WBGene00007075', 'WBGene00000022', 'WBGene00000009'} """ return self._set_ops([other], return_type, set.symmetric_difference) class FoldChangeFilter(Filter): """ A class that contains a single column, representing the gene-specific fold change between two conditions. \ this class does not support 'inf' and '0' values, and importing a file with such values could lead \ to incorrect filtering and statistical analyses. **Attributes** df: pandas Series A Series that contains the fold change values. \ The Series is modified upon usage of filter operations. . shape: tuple (rows, columns) The dimensions of df. columns: list The columns of df. fname: pathlib.Path The path and filename for the purpose of saving df as a csv file. \ Updates automatically when filter operations are applied. index_set: set All of the indices in the current DataFrame (which were not removed by previously used filter methods) \ as a set. index_string: string A string of all feature indices in the current DataFrame separated by newline. numerator: str Name of the numerator used to calculate the fold change. denominator: str Name of the denominator used to calculate the fold change. """ __slots__ = {'numerator': 'name of the numerator', 'denominator': 'name of the denominator'} def __init__(self, fname: Union[str, Path], numerator_name: str, denominator_name: str): super().__init__(fname) self.numerator = numerator_name self.denominator = denominator_name self.df.name = 'Fold Change' if np.inf in self.df or 0 in self.df: warnings.warn( " FoldChangeFilter does not support 'inf' or '0' values! " "Unexpected results may occur during filtering or statistical analyses. 
") def __copy__(self): return type(self)((self.fname, self.df.copy(deep=True)), numerator_name=self.numerator, denominator_name=self.denominator) def randomization_test(self, ref, alpha: float = 0.05, reps=10000, save_csv: bool = False, fname=None): """ Perform a randomization test to examine whether the fold change of a group of specific genomic features \ is significantly different than the fold change of a background set of genomic features. :type ref: FoldChangeFilter :param ref: A reference FoldChangeFilter object which contains the fold change for every reference gene. \ Will be used to calculate the expected score and to perform randomizations. :type alpha: float between 0 and 1 :param alpha: Indicates the threshold for significance (alpha). :type reps: int larger than 0 :param reps: How many repetitions to run the randomization for. \ 10,000 is the default. Recommended 10,000 or higher. :type save_csv: bool, default False :param save_csv: If True, will save the results to a .csv file, under the name specified in 'fname'. :type fname: str or pathlib.Path :param fname: The full path and name of the file to which to save the results. For example: \ r'C:\dir\file'. No '.csv' suffix is required. If None (default), fname will be requested in a manual prompt. :rtype: pandas DataFrame :return: A Dataframe with the number of given genes, the observed fold change for the given group of genes, \ the expected fold change for a group of genes of that size and the p value for the comparison. :Examples: >>> from rnalysis import filtering >>> f = filtering.FoldChangeFilter('tests/fc_1.csv' , 'numerator' , 'denominator') >>> f_background = f.filter_biotype('protein_coding', ref='tests/biotype_ref_table_for_tests.csv', inplace=False) #keep only protein-coding genes as reference Filtered 9 features, leaving 13 of the original 22 features. Filtering result saved to new object. >>> f_test = f_background.filter_by_attribute('attribute1', ref='tests/attr_ref_table_for_examples.csv', inplace=False) Filtered 6 features, leaving 7 of the original 13 features. Filtering result saved to new object. >>> rand_test_res = f_test.randomization_test(f_background) Calculating... group size observed fold change ... pval significant 0 7 2.806873 ... 
0.360264 False [1 rows x 5 columns] """ obs_fc = self.df.mean(axis=0) ref_df = pd.DataFrame(ref.df.__copy__(deep=True)) # exp_fc = ref_df['Fold Change'].mean(axis=0) n = self.df.shape[0] ref_df['int_index'] = [int(i[6:14]) for i in ref_df.index] srs_int = (ref_df.set_index('int_index', inplace=False))['Fold Change'] print('Calculating...') rand = [srs_int[np.random.choice(srs_int.index, n, replace=False)].mean(axis=0) for _ in range(reps)] exp_fc = np.mean(rand) if obs_fc > exp_fc: success = sum(r >= obs_fc for r in rand) else: success = sum(r <= obs_fc for r in rand) # success = sum((srs_int[np.random.choice(srs_int.index, n, replace=False)].mean( # axis=0) >= obs_fc if obs_fc > exp_fc else srs_int[np.random.choice(srs_int.index, n, replace=False)].mean( # axis=0) <= obs_fc for _ in range(reps))) pval = (success + 1) / (reps + 1) res = [[n, obs_fc, exp_fc, pval]] res_df = pd.DataFrame(res, columns=['group size', 'observed fold change', 'expected fold change', 'pval'], index=[0]) res_df['significant'] = pval <= alpha if save_csv: general.save_to_csv(res_df, fname) print(res_df) return res_df def filter_abs_log2_fold_change(self, abslog2fc: float = 1, opposite: bool = False, inplace: bool = True): """ Filters out all features whose absolute log2 fold change is below the indicated threshold. \ For example: if log2fc is 1.0, all features whose log2 fold change is between 1 and -1 (went up less than \ two-fold or went down less than two-fold) will be filtered out. :param abslog2fc: The threshold absolute log2 fold change for filtering out a feature. Float or int. \ All features whose absolute log2 fold change is lower than log2fc will be filtered out. :type opposite: bool :param opposite: If True, the output of the filtering will be the OPPOSITE of the specified \ (instead of filtering out X, the function will filter out anything BUT X). \ If False (default), the function will filter as expected. :type inplace: bool :param inplace: If True (default), filtering will be applied to the current FoldChangeFilter object. If False, \ the function will return a new FoldChangeFilter instance and the current instance will not be affected. :return: If 'inplace' is False, returns a new instance of FoldChangeFilter. :Examples: >>> from rnalysis import filtering >>> f = filtering.FoldChangeFilter('tests/fc_1.csv','numerator name','denominator name') >>> f.filter_abs_log2_fold_change(2) # keep only rows whose log2(fold change) is >=2 or <=-2 Filtered 18 features, leaving 4 of the original 22 features. Filtered inplace. """ assert isinstance(abslog2fc, (float, int)), "abslog2fc must be a number!" assert abslog2fc >= 0, "abslog2fc must be non-negative!" suffix = f"_{abslog2fc}abslog2foldchange" new_df = self.df[np.abs(np.log2(self.df)) >= abslog2fc].dropna() return self._inplace(new_df, opposite, inplace, suffix) def filter_fold_change_direction(self, direction: str = 'pos', opposite: bool = False, inplace: bool = True): """ Filters out features according to the direction in which they changed between the two conditions. :param direction: 'pos' or 'neg'. If 'pos', will keep only features that have positive log2foldchange. \ If 'neg', will keep only features that have negative log2foldchange. :type opposite: bool :param opposite: If True, the output of the filtering will be the OPPOSITE of the specified \ (instead of filtering out X, the function will filter out anything BUT X). \ If False (default), the function will filter as expected. 
:type inplace: bool :param inplace: If True (default), filtering will be applied to the current FoldChangeFilter object. If False, \ the function will return a new FoldChangeFilter instance and the current instance will not be affected. :return: If 'inplace' is False, returns a new instance of FoldChangeFilter. :Examples: >>> from rnalysis import filtering >>> f = filtering.FoldChangeFilter('tests/fc_1.csv','numerator name','denominator name') >>> # keep only rows with a positive log2(fold change) value >>> f.filter_fold_change_direction('pos') Filtered 10 features, leaving 12 of the original 22 features. Filtered inplace. >>> f = filtering.FoldChangeFilter('tests/fc_1.csv','numerator name','denominator name') >>> # keep only rows with a negative log2(fold change) value >>> f.filter_fold_change_direction('neg') Filtered 14 features, leaving 8 of the original 22 features. Filtered inplace. >>> f = filtering.FoldChangeFilter('tests/fc_1.csv','numerator name','denominator name') >>> # keep only rows with a non-positive log2(fold change) value >>> f.filter_fold_change_direction('pos', opposite=True) Filtered 12 features, leaving 10 of the original 22 features. Filtered inplace. """ assert isinstance(direction, str), \ "'direction' must be either 'pos' for positive fold-change, or 'neg' for negative fold-change. " if direction == 'pos': new_df = self.df[self.df > 1] suffix = '_PositiveLog2FC' elif direction == 'neg': new_df = self.df[self.df < 1] suffix = '_NegativeLog2FC' else: raise ValueError( "'direction' must be either 'pos' for positive fold-change, or 'neg' for negative fold-change. ") return self._inplace(new_df, opposite, inplace, suffix) def split_fold_change_direction(self) -> tuple: """ Splits the features in the current FoldChangeFilter object into two complementary, non-overlapping \ FoldChangeFilter objects, based on the direction of their log2(fold change). \ The first object will contain only features with a positive log2(fold change), \ the second object will contain only features with a negative log2(fold change). \ Features with log2(fold change) = 0 will be ignored. :rtype: Tuple[filtering.FoldChangeFilter, filtering.FoldChangeFilter] :return: a tuple containing two FoldChangeFilter objects: the first has only features with positive log2 fold change, \ and the other has only features with negative log2 fold change. :Examples: >>> from rnalysis import filtering >>> f = filtering.FoldChangeFilter('tests/fc_1.csv','numerator name','denominator name') >>> pos_log2fc, neg_log2fc = f.split_fold_change_direction() Filtered 10 features, leaving 12 of the original 22 features. Filtering result saved to new object. Filtered 14 features, leaving 8 of the original 22 features. Filtering result saved to new object. """ return self.filter_fold_change_direction(direction='pos', inplace=False), self.filter_fold_change_direction( direction='neg', inplace=False) class DESeqFilter(Filter): """ A class that receives a DESeq output file and can filter it according to various characteristics. **Attributes** df: pandas DataFrame A DataFrame that contains the DESeq output file contents. \ The DataFrame is modified upon usage of filter operations. . shape: tuple (rows, columns) The dimensions of df. columns: list The columns of df. fname: pathlib.Path The path and filename for the purpose of saving df as a csv file. \ Updates automatically when filter operations are applied. 
index_set: set All of the indices in the current DataFrame (which were not removed by previously used filter methods) \ as a set. index_string: string A string of all feature indices in the current DataFrame separated by newline. """ def filter_significant(self, alpha: float = 0.1, opposite: bool = False, inplace: bool = True, ): """ Removes all features which did not change significantly, according to the provided alpha. :param alpha: the significance threshold to determine which genes will be filtered. between 0 and 1. :type opposite: bool :param opposite: If True, the output of the filtering will be the OPPOSITE of the specified \ (instead of filtering out X, the function will filter out anything BUT X). \ If False (default), the function will filter as expected. :type inplace: bool :param inplace: If True (default), filtering will be applied to the current DESeqFilter object. If False, \ the function will return a new DESeqFilter instance and the current instance will not be affected. :return: If 'inplace' is False, returns a new instance of DESeqFilter. :Examples: >>> from rnalysis import filtering >>> d = filtering.DESeqFilter('tests/sample_deseq.csv') >>> d.filter_significant(0.1) # keep only rows whose adjusted p-value is <=0.1 Filtered 4 features, leaving 25 of the original 29 features. Filtered inplace. >>> d = filtering.DESeqFilter('tests/sample_deseq.csv') >>> d.filter_significant(0.1, opposite=True) # keep only rows whose adjusted p-value is >0.1 Filtered 25 features, leaving 4 of the original 29 features. Filtered inplace. """ assert isinstance(alpha, float), "alpha must be a float!" new_df = self.df[self.df['padj'] <= alpha] suffix = f"_sig{alpha}" return self._inplace(new_df, opposite, inplace, suffix) def filter_abs_log2_fold_change(self, abslog2fc: float = 1, opposite: bool = False, inplace: bool = True): """ Filters out all features whose absolute log2 fold change is below the indicated threshold. \ For example: if log2fc is 2.0, all features whose log2 fold change is between 1 and -1 (went up less than \ two-fold or went down less than two-fold) will be filtered out. :param abslog2fc: The threshold absolute log2 fold change for filtering out a feature. Float or int. \ All features whose absolute log2 fold change is lower than log2fc will be filtered out. :type opposite: bool :param opposite: If True, the output of the filtering will be the OPPOSITE of the specified \ (instead of filtering out X, the function will filter out anything BUT X). \ If False (default), the function will filter as expected. :type inplace: bool :param inplace: If True (default), filtering will be applied to the current DESeqFilter object. If False, \ the function will return a new DESeqFilter instance and the current instance will not be affected. :return: If 'inplace' is False, returns a new instance of DESeqFilter. :Examples: >>> from rnalysis import filtering >>> d = filtering.DESeqFilter('tests/sample_deseq.csv') >>> d.filter_abs_log2_fold_change(2) # keep only rows whose log2(fold change) is >=2 or <=-2 Filtered 1 features, leaving 28 of the original 29 features. Filtered inplace. """ assert isinstance(abslog2fc, (float, int)), "abslog2fc must be a number!" assert abslog2fc >= 0, "abslog2fc must be non-negative!" 
suffix = f"_{abslog2fc}abslog2foldchange" new_df = self.df[np.abs(self.df['log2FoldChange']) >= abslog2fc] return self._inplace(new_df, opposite, inplace, suffix) def filter_fold_change_direction(self, direction: str = 'pos', opposite: bool = False, inplace: bool = True): """ Filters out features according to the direction in which they changed between the two conditions. :param direction: 'pos' or 'neg'. If 'pos', will keep only features that have positive log2foldchange. \ If 'neg', will keep only features that have negative log2foldchange. :type opposite: bool :param opposite: If True, the output of the filtering will be the OPPOSITE of the specified \ (instead of filtering out X, the function will filter out anything BUT X). \ If False (default), the function will filter as expected. :type inplace: bool :param inplace: If True (default), filtering will be applied to the current DESeqFilter object. If False, \ the function will return a new DESeqFilter instance and the current instance will not be affected. :return: If 'inplace' is False, returns a new instance of DESeqFilter. :Examples: >>> from rnalysis import filtering >>> d = filtering.DESeqFilter('tests/sample_deseq.csv') >>> d.filter_fold_change_direction('pos') # keep only rows with a positive log2(fold change) value Filtered 3 features, leaving 26 of the original 29 features. Filtered inplace. >>> d = filtering.DESeqFilter('tests/sample_deseq.csv') >>> d.filter_fold_change_direction('neg') # keep only rows with a negative log2(fold change) value Filtered 27 features, leaving 2 of the original 29 features. Filtered inplace. >>> d = filtering.DESeqFilter('tests/sample_deseq.csv') >>> d.filter_fold_change_direction('pos', opposite=True) # keep only rows with a non-positive log2(fold change) value Filtered 26 features, leaving 3 of the original 29 features. Filtered inplace. """ assert isinstance(direction, str), \ "'direction' must be either 'pos' for positive fold-change, or 'neg' for negative fold-change. " if direction == 'pos': new_df = self.df[self.df['log2FoldChange'] > 0] suffix = '_PositiveLog2FC' elif direction == 'neg': new_df = self.df[self.df['log2FoldChange'] < 0] suffix = '_NegativeLog2FC' else: raise ValueError( "'direction' must be either 'pos' for positive fold-change, or 'neg' for negative fold-change. ") return self._inplace(new_df, opposite, inplace, suffix) def split_fold_change_direction(self) -> tuple: """ Splits the features in the current DESeqFilter object into two complementary, non-overlapping DESeqFilter \ objects, based on the direction of their log2foldchange. The first object will contain only features with a \ positive log2foldchange, the second object will contain only features with a negative log2foldchange. :rtype: Tuple[filtering.DESeqFilter, filteirng.DESeqFilter] :return: a tuple containing two DESeqFilter objects: the first has only features with positive log2 fold change, \ and the other has only features with negative log2 fold change. :Examples: >>> from rnalysis import filtering >>> d = filtering.DESeqFilter('tests/test_deseq.csv') >>> pos, neg = d.split_fold_change_direction() Filtered 2 features, leaving 26 of the original 28 features. Filtering result saved to new object. Filtered 26 features, leaving 2 of the original 28 features. Filtering result saved to new object. 
""" return self.filter_fold_change_direction(direction='pos', inplace=False), self.filter_fold_change_direction( direction='neg', inplace=False) def volcano_plot(self, alpha: float = 0.1): """ Plots a volcano plot (log2(fold change) vs -log10(adj. p-value)) of the DESeqFilter object. \ Significantly upregulated features are colored in red, \ and significantly downregulated features are colored in blue. :type alpha: float between 0 and 1 :param alpha: the significance threshold to color data points as significantly up/down-regulated. .. figure:: volcano.png :align: center :scale: 70 % Example plot of volcano_plot() """ plt.figure() plt.style.use('seaborn-white') colors = pd.Series(index=self.df.index) colors.loc[(self.df['padj'] <= alpha) & (self.df['log2FoldChange'] > 0)] = 'tab:red' colors.loc[(self.df['padj'] <= alpha) & (self.df['log2FoldChange'] < 0)] = 'tab:blue' colors.fillna('grey', inplace=True) plt.scatter(self.df['log2FoldChange'], -np.log10(self.df['padj']), c=colors, s=1) plt.title(f"Volcano plot of {self.fname.stem}", fontsize=18) plt.xlabel('Log2(fold change)', fontsize=15) plt.ylabel('-Log10(adj. p-value)', fontsize=15) plt.show() class CountFilter(Filter): """ A class that receives a count matrix and can filter it according to various characteristics. **Attributes** df: pandas DataFrame A DataFrame that contains the count matrix contents. \ The DataFrame is modified upon usage of filter operations. shape: tuple (rows, columns) The dimensions of df. columns: list The columns of df. fname: pathlib.Path The path and filename for the purpose of saving df as a csv file. \ Updates automatically when filter operations are applied. index_set: set All of the indices in the current DataFrame (which were not removed by previously used filter methods) \ as a set. index_string: string A string of all feature indices in the current DataFrame separated by newline. triplicates: list Returns a nested list of the column names in the CountFilter, grouped by alphabetical order into triplicates. \ For example, if counts.columns is ['A_rep1','A_rep2','A_rep3','B_rep1','B_rep2',_B_rep3'], then \ counts.triplicates will be [['A_rep1','A_rep2','A_rep3'],['B_rep1','B_rep2',_B_rep3']] """ @property def triplicates(self): """ Returns a nested list of the column names in the CountFilter, grouped by alphabetical order into triplicates. \ For example, if counts.columns is ['A_rep1','A_rep2','A_rep3','B_rep1','B_rep2',_B_rep3'], then \ counts.triplicates will be [['A_rep1','A_rep2','A_rep3'],['B_rep1','B_rep2',_B_rep3']] """ mltplr = 3 triplicate = [self.columns[(i) * mltplr:(1 + i) * mltplr] for i in range(self.shape[1] // mltplr)] if len(self.columns[(self.shape[1] // mltplr) * mltplr::]) > 0: triplicate.append([self.columns[(self.shape[1] // mltplr) * mltplr::]]) warnings.warn( f'Number of samples {self.shape[1]} is not divisible by 3. Appending the remaining {self.shape[1] % mltplr} as an inncomplete triplicate') return triplicate def fold_change(self, numerator, denominator, numer_name: str = 'default', denom_name: str = 'default'): """ Calculate the fold change between the numerator condition and the denominator condition, \ and return it as a FoldChangeFilter object. :type numerator: str, or list of strs :param numerator: the CountFilter columns to be used as the numerator. If multiple arguments are given \ in a list, they will be averaged. :type denominator: str, or list of strs :param denominator: the CountFilter columns to be used as the denominator. 
If multiple arguments are given \ in a list, they will be averaged. :type numer_name: str or 'default' :param numer_name: name to give the numerator condition. If 'default', the name will be generated \ automatically from the names of numerator columns. :type denom_name: str or 'default' :param denom_name: name to give the denominator condition. If 'default', the name will be generated \ automatically from the names of denominator columns. :rtype: FoldChangeFilter :return: A new instance of FoldChangeFilter :Examples: >>> from rnalysis import filtering >>> c = filtering.CountFilter('tests/counted_fold_change.csv') >>> # calculate the fold change of mean(cond1_rep1,cond1_rep2)/mean(cond2_rep1,cond2_rep2) >>> f = c.fold_change(['cond1_rep1','cond1_rep2'],['cond2_rep1','cond2_rep2']) >>> f.numerator "Mean of ['cond1_rep1', 'cond1_rep2']" >>> f.denominator "Mean of ['cond2_rep1', 'cond2_rep2']" >>> type(f) rnalysis.filtering.FoldChangeFilter """ assert isinstance(numerator, (str, list, tuple)), "numerator must be a string or a list!" assert isinstance(denominator, (str, list, tuple)), "denominator must be a string or a list!" assert isinstance(numer_name, str), "numerator name must be a string or 'default'!" assert isinstance(denom_name, str), "denominator name must be a string or 'default'!" if isinstance(numerator, str): numerator = [numerator] elif isinstance(numerator, tuple): numerator = list(numerator) if isinstance(denominator, str): denominator = [denominator] elif isinstance(denominator, tuple): denominator = list(denominator) for num in numerator: assert num in self.df, f"all numerator arguments must be columns in the CountFilter object! ({num})" for den in denominator: assert den in self.df, f"all denominator arguments must be columns in the CountFilter object! ({den})" srs = (self.df[numerator].mean(axis=1) + 1) / (self.df[denominator].mean(axis=1) + 1) numer_name = f"Mean of {numerator}" if numer_name == 'default' else numer_name denom_name = f"Mean of {denominator}" if denom_name == 'default' else denom_name new_fname = Path(f"{str(self.fname.parent)}\\{self.fname.stem}_fold_change_" f"{numer_name}_over_{denom_name}{self.fname.suffix}") fcfilt = FoldChangeFilter((new_fname, srs), numerator_name=numer_name, denominator_name=denom_name) return fcfilt def pairplot(self, sample_list: list = 'all', log2: bool = False): """ Plot pairwise relationships in the dataset. \ Can plot both single samples and average multiple replicates. \ For more information see the documentation of seaborn.pairplot. :type sample_list: 'all', list, or nested list. :param sample_list: A list of the sample names and/or grouped sample names to be included in the pairplot. \ All specified samples must be present in the CountFilter object. \ To average multiple replicates of the same condition, they can be grouped in an inner list. \ Example input: \ [['SAMPLE1A', 'SAMPLE1B', 'SAMPLE1C'], ['SAMPLE2A', 'SAMPLE2B', 'SAMPLE2C'],'SAMPLE3', 'SAMPLE6'] :type log2: bool :param log2: if True, the pairplot will be calculated with log2 of the dataframe, and not with the raw data. \ If False (default), the pairplot will be calculated with the raw data. :return: An instance of seaborn.PairGrid. ..
figure:: pairplot.png :align: center :scale: 40 % Example plot of pairplot() """ if sample_list == 'all': sample_df = self.df else: sample_df = self._avg_subsamples(sample_list) if log2: pairplt = sns.pairplot(np.log2(sample_df)) else: pairplt = sns.pairplot(sample_df) plt.show() return pairplt def _rpm_assertions(self, threshold: float = 1): """ Various assertions for functions that normalize to RPM, or are meant to be used on pre-normalized values. :param threshold: optional. A threshold value for filter_low_rpm to be asserted. """ assert isinstance(threshold, (float, int)), "Threshold must be a number!" assert threshold >= 0, "Threshold must be zero or larger!" if 'rpm' not in str(self.fname) and 'sizefactor' not in str(self.fname): warnings.warn("This function is meant for normalized values, and your values may not be normalized. ") def _avg_subsamples(self, sample_list: list): """ Avarages subsamples/replicates according to the specified sample list. \ Every member in the sample list should be either a name of a single sample (str), \ or a list of multiple sample names to be averaged (list). :param sample_list: A list of the sample names and/or grouped sample names passed by the user. \ All specified samples must be present in the CountFilter object. \ To average multiple replicates of the same condition, they can be grouped in an inner list. \ Example input: \ [['SAMPLE1A', 'SAMPLE1B', 'SAMPLE1C'], ['SAMPLE2A', 'SAMPLE2B', 'SAMPLE2C'],'SAMPLE3' , 'SAMPLE6'] \ and the resulting output will be a DataFrame containing the following columns: \ ['SAMPLE1', 'SAMPLE2', 'SAMPLE3', 'SAMPLE6'] :return: a pandas DataFrame containing samples/averaged subsamples according to the specified sample_list. """ samples_df = pd.DataFrame() for sample in sample_list: if isinstance(sample, str): samples_df[sample] = self.df[sample].values elif isinstance(sample, (list, str, tuple)): samples_df[",".join(sample)] = self.df[sample].mean(axis=1).values return samples_df def normalize_to_rpm(self, special_counter_fname: str, inplace: bool = True): """ Normalizes the reads in the CountFilter to reads per million (RPM). \ Uses a table of feature counts (ambiguous, no feature, not aligned, etc) from HTSeq's output. \ Divides each column in the CountFilter object by (total reads + ambiguous + no feature)*10^-6 . :param special_counter_fname: the .csv file which contains feature information about the RNA library \ (ambiguous, no feature, not aligned, etc). :param inplace: If True (default), filtering will be applied to the current CountFilter object. If False, \ the function will return a new CountFilter instance and the current instance will not be affected. :return: If inplace is False, returns a new instance of the Filter object. :Examples: >>> from rnalysis import filtering >>> c = filtering.CountFilter("tests/counted.csv") >>> c.normalize_to_rpm("tests/uncounted.csv") Normalized the values of 22 features. Normalized inplace. 
""" suffix = '_rpm' new_df = self.df.copy() if isinstance(special_counter_fname, (str, Path)): features = general.load_csv(special_counter_fname, 0) elif isinstance(special_counter_fname, pd.DataFrame): features = special_counter_fname else: raise TypeError("Invalid type for 'special_counter_fname'!") for column in new_df.columns: norm_factor = (new_df[column].sum() + features.loc[r'__ambiguous', column] + features.loc[ r'__no_feature', column] + features.loc[r'__alignment_not_unique', column]) / (10 ** 6) new_df[column] /= norm_factor return self._inplace(new_df, opposite=False, inplace=inplace, suffix=suffix, printout_operation='normalize') def normalize_with_scaling_factors(self, scaling_factor_fname: Union[str, Path], inplace: bool = True): """ Normalizes the reads in the CountFilter using pre-calculated scaling factors. \ Receives a table of sample names and their corresponding size factors, \ and divides each column in the CountFilter by the corresponding scaling factor. :type scaling_factor_fname: str or pathlib.Path :param scaling_factor_fname: the .csv file which contains size factors for the different libraries. :param inplace: If True (default), filtering will be applied to the current CountFilter object. If False, \ the function will return a new CountFilter instance and the current instance will not be affected. :return: If inplace is False, returns a new instance of the Filter object. :Examples: >>> from rnalysis import filtering >>> c = filtering.CountFilter("tests/counted.csv") >>> c.normalize_with_scaling_factors("tests/scaling_factors.csv") Normalized the values of 22 features. Normalized inplace. """ suffix = '_sizefactor' new_df = self.df.copy() if isinstance(scaling_factor_fname, (str, Path)): size_factors = general.load_csv(scaling_factor_fname) elif isinstance(scaling_factor_fname, pd.DataFrame): size_factors = scaling_factor_fname else: raise TypeError("Invalid type for 'scaling_factor_fname'!") for column in new_df.columns: norm_factor = size_factors[column].values new_df[column] /= norm_factor return self._inplace(new_df, opposite=False, inplace=inplace, suffix=suffix, printout_operation='normalize') def filter_low_reads(self, threshold: float = 5, opposite: bool = False, inplace: bool = True): """ remove features which have less then 'threshold' reads all columns. :type threshold: float :param threshold: The minimal number of reads (counts, rpm, rpkm, tpm, etc) a feature should have \ in at least one sample in order not to be filtered out. :type opposite: bool :param opposite: If True, the output of the filtering will be the OPPOSITE of the specified \ (instead of filtering out X, the function will filter out anything BUT X). \ If False (default), the function will filter as expected. :type inplace: bool :param inplace: If True (default), filtering will be applied to the current CountFilter object. If False, \ the function will return a new CountFilter instance and the current instance will not be affected. :return: If 'inplace' is False, returns a new instance of CountFilter. :Examples: >>> from rnalysis import filtering >>> c = filtering.CountFilter('tests/counted.csv') >>> c.filter_low_reads(5) # remove all rows whose values in all columns are all <5 Filtered 6 features, leaving 16 of the original 22 features. Filtered inplace. 
""" self._rpm_assertions(threshold=threshold) new_df = self.df.loc[[True if max(vals) > threshold else False for gene, vals in self.df.iterrows()]] suffix = f"_filt{threshold}reads" return self._inplace(new_df, opposite, inplace, suffix) def split_by_reads(self, threshold: float = 5) -> tuple: """ Splits the features in the current CountFilter object into two complementary, non-overlapping CountFilter \ objects, based on the their maximum expression level. The first object will contain only highly-expressed \ features (which have reads over the specified threshold in at least one sample). The second object will \ contain only lowly-expressed features (which have reads below the specified threshold in all samples). :param threshold: The minimal number of reads (counts, RPM, RPKM, TPM etc) a feature needs to have \ in at least one sample in order to be \ included in the "highly expressed" object and no the "lowly expressed" object. :type threshold: float (default 5) :rtype: Tuple[filtering.CountFilter, filtering.CountFilter] :return: A tuple containing two CountFilter objects: the first has only highly-expressed features, \ and the second has only lowly-expressed features. :Examples: >>> from rnalysis import filtering >>> c = filtering.CountFilter('tests/counted.csv') >>> low_expr,high_expr = c.split_by_reads(5) Filtered 6 features, leaving 16 of the original 22 features. Filtering result saved to new object. Filtered 16 features, leaving 6 of the original 22 features. Filtering result saved to new object. """ self._rpm_assertions(threshold=threshold) high_expr = self.df.loc[[True if max(vals) > threshold else False for gene, vals in self.df.iterrows()]] low_expr = self.df.loc[[False if max(vals) > threshold else True for gene, vals in self.df.iterrows()]] return self._inplace(high_expr, opposite=False, inplace=False, suffix=f'_below{threshold}reads'), \ self._inplace(low_expr, opposite=False, inplace=False, suffix=f'_above{threshold}reads') def filter_by_row_sum(self, threshold: float = 5, opposite: bool = False, inplace: bool = True): """ Removes features/rows whose sum is belove 'threshold'. :type threshold: float :param threshold: The minimal sum a row should have in order not to be filtered out. :type opposite: bool :param opposite: If True, the output of the filtering will be the OPPOSITE of the specified \ (instead of filtering out X, the function will filter out anything BUT X). \ If False (default), the function will filter as expected. :type inplace: bool :param inplace: If True (default), filtering will be applied to the current CountFilter object. If False, \ the function will return a new CountFilter instance and the current instance will not be affected. :return: If 'inplace' is False, returns a new instance of CountFilter. :Examples: >>> from rnalysis import filtering >>> c = filtering.CountFilter('tests/counted.csv') >>> c.filter_by_row_sum(5) # remove all rows whose sum is <5 Filtered 4 features, leaving 18 of the original 22 features. Filtered inplace. """ self._rpm_assertions(threshold=threshold) new_df = self.df.loc[self.df.sum(axis=1) >= threshold] suffix = f"_filt{threshold}sum" return self._inplace(new_df, opposite, inplace, suffix) def clustergram(self, sample_names: list = 'all', metric: str = 'euclidean', linkage: str = 'average'): """ Performs hierarchical clustering and plots a clustergram on the base-2 log of a given set of samples. :type sample_names: 'all' or list. :param sample_names: the names of the relevant samples in a list. 
\ Example input: ["condition1_rep1", "condition1_rep2", "condition1_rep3", \ "condition2_rep1", "condition3_rep1", "condition3_rep2"] :type metric: 'euclidean', 'hamming', 'correlation', or any other \ distance metric available in scipy.spatial.distance.pdist :param metric: the distance metric to use in the clustergram. \ For all possible inputs and their meaning see scipy.spatial.distance.pdist documentation online. :type linkage: 'single', 'average', 'complete', 'weighted', 'centroid', 'median' or 'ward'. :param linkage: the linkage method to use in the clustergram. \ For all possible inputs and their meaning see scipy.cluster.hierarchy.linkage documentation online. :return: A seaborn clustermap object. .. figure:: clustergram.png :align: center :scale: 40 % Example plot of clustergram() """ assert isinstance(metric, str) and isinstance(linkage, str), "Linkage and Metric must be strings!" metrics = ['braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'jensenshannon', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'] linkages = ['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward'] assert metric in metrics and linkage in linkages if sample_names == 'all': sample_names = list(self.df.columns) print('Calculating clustergram...') plt.style.use('seaborn-whitegrid') clustering = sns.clustermap(np.log2(self.df[sample_names] + 1), method=linkage, metric=metric, cmap=sns.color_palette("RdBu_r", 10), yticklabels=False) plt.show() return clustering def plot_expression(self, features: list, sample_grouping: dict, count_unit: str = 'Reads per million'): """ Plot the average expression and standard error of the specified features under the specified conditions. :type features: str or list of strings :param features: the feature/features to plot expression for. :type sample_grouping: dict, with condition names as keys \ and list of the sample numbers or names for each condition as a list :param sample_grouping: a dictionary of the conditions to plot expression for. \ Each key should be a name of a conditions, and the value for each key is \ a list of the numbers of columns to be used as samples of that condition. \ For example, if the first 3 columns are replicates of the condition 'condition 1' and \ the last 3 column are replicates of the condition 'condition 2', then sample_grouping should be: \ {'condition 1':[0, 1, 2], 'condition 2':[3, 4, 5]} :type count_unit: str, default 'Reads per million' :param count_unit: The unit of the count data. Will be displayed in the y axis. .. figure:: plot_expression.png :align: center :scale: 40 % Example plot of plot_expression() """ plt.style.use('seaborn-white') if isinstance(features, str): features = [features] assert isinstance(features, list), "'features' must be a string or list of strings!" 
g = strategies.SquareStrategy() subplots = g.get_grid(len(features)) plt.close() f = plt.figure() axes = [] ylims = [] for subplot, feature in zip(subplots, features): axes.append(f.add_subplot(subplot)) mean = [self.df.loc[feature].iloc[ind].mean() if isinstance(ind, int) else self.df.loc[feature][ind].mean() for ind in sample_grouping.values()] sem = [self.df.loc[feature].iloc[ind].sem() if isinstance(ind, int) else self.df.loc[feature][ind].sem() for ind in sample_grouping.values()] axes[-1].bar(np.arange(len(sample_grouping)), mean, yerr=sem) axes[-1].set_xticks(np.arange(len(sample_grouping))) axes[-1].set_xticklabels(list(sample_grouping.keys())) axes[-1].set_title(feature) plt.ylabel(count_unit) sns.despine() ylims.append(axes[-1].get_ylim()[1]) for ax in axes: ax.set_ylim((0.0, max(ylims))) f.tight_layout() plt.show() def pca(self, sample_names: list = 'all', n_components=3, sample_grouping: list = None, labels: bool = True): """ runs and plots a PCA for a given set of samples. :type sample_names: 'all' or list. :param sample_names: the names of the relevant samples in a list. \ Example input: ["1_REP_A", "1_REP_B", "1_REP_C", "2_REP_A", "2_REP_B", "2_REP_C", "2_REP_D", "3_REP_A"] :type n_components: positive int (default 3) :param n_components: number of PCA components to return. :type sample_grouping: list of positive integers, 'triplicates' or None (default) :param sample_grouping: Optional. Indicates which samples are grouped together as replicates, \ so they will be colored similarly in the PCA plot. A list of indices from 1 and up, that indicates the sample \ grouping. \ For example, if sample_names is: \ ["1_REP_A", "1_REP_B", "1_REP_C", "2_REP_A", "2_REP_B", "2_REP_C", "2_REP_D", "3_REP_A"], \ then the sample_grouping will be: \ [1, 1, 1, 2, 2, 2, 2, 3]. \ If 'triplicate', then sample_groupins will automatically group samples into triplicates. For example: \ [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4]. :return: A tuple whose first element is an sklearn.decomposition.pca object, \ and second element is a list of matplotlib.axis objects. .. figure:: pca.png :align: center :scale: 40 % Example plot of pca() """ if sample_names == 'all': sample_names = list(self.df.columns) srna_data = self.df.transpose() else: srna_data = self.df[sample_names].transpose() srna_data_norm = StandardScaler().fit_transform(srna_data) pca_obj = PCA(n_components=n_components) pcomps = pca_obj.fit_transform(srna_data_norm) columns = [f'Principal component {i + 1}' for i in range(n_components)] principal_df =
pd.DataFrame(data=pcomps, columns=columns)
pandas.DataFrame
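# A minimal, self-contained sketch of the pattern completed in the row above: standardize a
# count matrix, run sklearn PCA, and wrap the components in a labeled pandas DataFrame.
# The sample counts and column names below are invented for illustration and are not part
# of the rnalysis test data.
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

counts = pd.DataFrame(
    np.random.default_rng(0).poisson(50, size=(20, 6)),
    index=[f"gene{i}" for i in range(20)],
    columns=["cond1_rep1", "cond1_rep2", "cond1_rep3", "cond2_rep1", "cond2_rep2", "cond2_rep3"],
)
# samples become rows and features become columns, mirroring CountFilter.pca() above
data = StandardScaler().fit_transform(counts.transpose())
n_components = 3
pca_obj = PCA(n_components=n_components)
pcomps = pca_obj.fit_transform(data)
columns = [f"Principal component {i + 1}" for i in range(n_components)]
principal_df = pd.DataFrame(data=pcomps, columns=columns)
print(principal_df.shape)  # (6, 3): one row per sample, one column per component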
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import json import logging from datetime import datetime from typing import Any, Optional, Tuple import matplotlib.pyplot as plt import numpy as np import pandas as pd from kats.consts import TimeSeriesData from kats.detectors.detector import DetectorModel from kats.detectors.detector_consts import ( AnomalyResponse, ChangePointInterval, ConfidenceBand, PercentageChange, ) from kats.utils.decomposition import TimeSeriesDecomposition """Statistical Significance Detector Module This module contains simple detectors that apply a t-test over a rolling window to check whether there is a statistically significant increase or decrease between the control and test time periods. In addition to the univariate version of this test, this module includes a multivariate version that uses a false discovery rate (FDR) controlling procedure to reduce noise. """ class StatSigDetectorModel(DetectorModel): """ StatSigDetectorModel is a simple detector, which compares a control and test period. The detector assumes that the time series data comes from an iid normal distribution, and applies a t-test to check if the means between the control and test period are significantly different. We start with the history data; then, for the current data, we apply a rolling window, adding one data point at a time and detecting significant change. We return the t-statistic as a score, which reflects the severity of the change. We suggest using n_control >= 30 to get good estimates. Attributes: n_control: number of data points (or time units) of history to compare with n_test: number of data points (or time units) to compare the history with serialized_model: serialized json containing the parameters time_unit: unit of time used to measure the intervals. If not provided, we infer it from the provided data. rem_season: bool, default value is False. Whether to remove seasonality from the historical data and the current data. seasonal_period: str, default value is 'weekly'. Other possible values: 'daily', 'biweekly', 'monthly', 'yearly' use_corrected_scores: bool, default value is True. Whether to use corrected t-scores instead of the original t-scores. max_split_ts_length: int, default value is 500. 
If the given TS (except historical part) is longer than max_split_ts_length, we will transform a long univariate TS into a multi-variate TS and then use multistatsig detector, which is faster, >>> # Example usage: >>> # history and ts_pt are TimeSeriesData objects and history is larger >>> # than (n_control + n_test) so that we have sufficient history to >>> # run the detector >>> n_control = 28 >>> n_test = 7 >>> import random >>> control_time = pd.date_range(start='2018-01-01', freq='D', periods=(n_control + n_test)) >>> test_time = pd.date_range(start='2018-02-05', freq='D', periods=n_test) >>> control_val = [random.normalvariate(100,10) for _ in range(n_control + n_test)] >>> test_val = [random.normalvariate(120,10) for _ in range(n_test)] >>> hist_ts = TimeSeriesData(time=control_time, value=pd.Series(control_val)) >>> data_ts = TimeSeriesData(time=test_time, value=pd.Series(test_val)) >>> ss_detect = StatSigDetectorModel(n_control=n_control, n_test=n_test) >>> anom = ss_detect.fit_predict(data=data_ts, historical_data=hist_ts) """ data: Optional[TimeSeriesData] = None def __init__( self, n_control: Optional[int] = None, n_test: Optional[int] = None, serialized_model: Optional[bytes] = None, time_unit: Optional[str] = None, rem_season: bool = False, seasonal_period: str = "weekly", use_corrected_scores: bool = True, max_split_ts_length: int = 500, ) -> None: if serialized_model: model_dict = json.loads(serialized_model) self.n_test: int = model_dict["n_test"] self.n_control: int = model_dict["n_control"] self.time_unit: str = model_dict["time_unit"] # for seasonality self.rem_season: bool = model_dict.get("rem_season", rem_season) self.seasonal_period: str = model_dict.get( "seasonal_period", seasonal_period ) # for big data and correct t-scores self.use_corrected_scores: bool = model_dict.get( "use_corrected_scores", use_corrected_scores ) # threshold for splitting long TS self.max_split_ts_length: int = model_dict.get( "max_split_ts_length", max_split_ts_length ) else: self.n_test: Optional[int] = n_test self.n_control: Optional[int] = n_control self.time_unit: Optional[str] = time_unit # for seasonality self.rem_season: bool = rem_season self.seasonal_period: str = seasonal_period # big data and t-scores self.use_corrected_scores: bool = use_corrected_scores # threshold for splitting long TS self.max_split_ts_length: int = max_split_ts_length if (self.n_control is None) or (self.n_test is None): raise ValueError( "You must either provide serialized model or values for control " "and test intervals." 
) self.control_interval: Optional[ChangePointInterval] = None self.test_interval: Optional[ChangePointInterval] = None self.response: Optional[AnomalyResponse] = None self.is_initialized = False # flag on whether initialized or not self.last_N = 0 # this is the size of the last chunk of data we saw self.data_history: Optional[TimeSeriesData] = None # for seasonality self.data_season: Optional[TimeSeriesData] = None # big data strategy self.bigdata_trans_flag: Optional[bool] = None self.remaining: Optional[int] = None def serialize(self) -> bytes: """ Serializes by putting model parameters in a json """ model_dict = { "n_control": self.n_control, "n_test": self.n_test, "time_unit": self.time_unit, "rem_season": self.rem_season, "seasonal_period": self.seasonal_period, "use_corrected_scores": self.use_corrected_scores, "max_split_ts_length": self.max_split_ts_length, } return json.dumps(model_dict).encode("utf-8") def fit_predict( self, data: TimeSeriesData, historical_data: Optional[TimeSeriesData] = None, **kwargs: Any, ) -> AnomalyResponse: """ This is the main working function. The function returns an AnomalyResponse object of length equal to the length of the data. We require len(historical_data) > (n_control + n_test). Args: data: TimeSeriesData, A univariate TimeSeriesData for which we are running the StatSigDetectorModel historical_data: Optional[TimeSeriesData] Historical data used to do detection for initial points in data """ if not data.is_univariate(): msg = "Input is multivariate but StatSigDetector expected univariate input." logging.error(msg) raise ValueError(msg) self._set_time_unit(data=data, historical_data=historical_data) self.last_N = len(data) # this ensures we start with a default response of # the size of the data self._init_response(data) response = self.response assert response is not None # when there is no need to update # just return the initial response of zeros if not self._should_update(data=data, historical_data=historical_data): return response # handle cases where there is either no historical data, or # not enough historical data data, historical_data = self._handle_not_enough_history( data=data, historical_data=historical_data, ) # remove seasonality if self.rem_season: sh_data = SeasonalityHandler( data=data, seasonal_period=self.seasonal_period ) self.data_season = sh_data.get_seasonality() data = sh_data.remove_seasonality() if historical_data: sh_hist_data = SeasonalityHandler( data=historical_data, seasonal_period=self.seasonal_period, ) historical_data = sh_hist_data.remove_seasonality() self.data = data # first initialize this with the historical data self._init_data(historical_data) # if using new t-scores if self.use_corrected_scores: if ( len(data) > self.max_split_ts_length # pyre-ignore[16]: `Optional` has no attribute `time`. 
and pd.infer_freq(historical_data.time) == pd.infer_freq(data.time) ): self.bigdata_trans_flag = True else: self.bigdata_trans_flag = False else: self.bigdata_trans_flag = False # if need trans to multi-TS if self.bigdata_trans_flag: new_data_ts = self._reorganize_big_data(self.max_split_ts_length) ss_detect = MultiStatSigDetectorModel( n_control=self.n_control, n_test=self.n_test, time_unit=self.time_unit, rem_season=False, seasonal_period=self.seasonal_period, skip_rescaling=True, use_corrected_scores=self.use_corrected_scores, ) anom = ss_detect.fit_predict(data=new_data_ts) self._reorganize_back(anom) else: self._init_control_test( data if historical_data is None else historical_data ) # set the flag to true self.is_initialized = True # now run through the data to get the prediction for i in range(len(data)): current_time = data.time.iloc[i] ts_pt = TimeSeriesData( time=
pd.Series(current_time, copy=False)
pandas.Series
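# A small sketch of the single-point construction completed in the row above, using plain
# pandas only (kats' TimeSeriesData is left out so the snippet stays self-contained).
# The timestamp and value are invented for illustration.
import pandas as pd

current_time = pd.Timestamp("2021-01-08")
current_value = 123.4

# one-element Series holding the current timestamp, as built in the rolling loop above
time_pt = pd.Series(current_time, copy=False)
value_pt = pd.Series(current_value, copy=False)

# this pair is what would be handed to TimeSeriesData(time=time_pt, value=value_pt)
one_point = pd.DataFrame({"time": time_pt, "value": value_pt})
print(one_point)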
from __future__ import absolute_import import pytest pytest.importorskip('pandas') # noqa import numpy as np import pandas as pd import pandas.util.testing as tm import os from partd.pandas import PandasColumns, PandasBlocks, serialize, deserialize df1 = pd.DataFrame({'a': [1, 2, 3], 'b': [1., 2., 3.], 'c': ['x', 'y', 'x']}, columns=['a', 'b', 'c'], index=pd.Index([1, 2, 3], name='myindex')) df2 = pd.DataFrame({'a': [10, 20, 30], 'b': [10., 20., 30.], 'c': ['X', 'Y', 'X']}, columns=['a', 'b', 'c'], index=
pd.Index([10, 20, 30], name='myindex')
pandas.Index
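# A short illustration of the named-Index fixture completed in the row above, using plain
# pandas (the partd serialize/deserialize round trip itself is not exercised here, to keep
# the snippet dependency-free).
import pandas as pd

idx = pd.Index([10, 20, 30], name='myindex')
df = pd.DataFrame({'a': [10, 20, 30], 'b': [10., 20., 30.], 'c': ['X', 'Y', 'X']},
                  columns=['a', 'b', 'c'], index=idx)

# the index name is part of the frame's metadata, which is what such round-trip tests
# typically check for preservation
assert df.index.name == 'myindex'
print(df)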
from __future__ import division import json import re import time from pandas import DataFrame, isnull, notnull, to_datetime from pandas_datareader._utils import RemoteDataError from pandas_datareader.base import _DailyBaseReader class YahooDailyReader(_DailyBaseReader): """ Returns DataFrame of with historical over date range, start to end. To avoid being penalized by Yahoo! Finance servers, pauses between downloading 'chunks' of symbols can be specified. Parameters ---------- symbols : string, array-like object (list, tuple, Series), or DataFrame Single stock symbol (ticker), array-like object of symbols or DataFrame with index containing stock symbols. start : string, int, date, datetime, Timestamp Starting date. Parses many different kind of date representations (e.g., 'JAN-01-2010', '1/1/10', 'Jan, 1, 1980'). Defaults to 5 years before current date. end : string, int, date, datetime, Timestamp Ending date retry_count : int, default 3 Number of times to retry query request. pause : int, default 0.1 Time, in seconds, to pause between consecutive queries of chunks. If single value given for symbol, represents the pause between retries. session : Session, default None requests.sessions.Session instance to be used adjust_price : bool, default False If True, adjusts all prices in hist_data ('Open', 'High', 'Low', 'Close') based on 'Adj Close' price. Adds 'Adj_Ratio' column and drops 'Adj Close'. ret_index : bool, default False If True, includes a simple return index 'Ret_Index' in hist_data. chunksize : int, default 25 Number of symbols to download consecutively before intiating pause. interval : string, default 'd' Time interval code, valid values are 'd' for daily, 'w' for weekly, 'm' for monthly. get_actions : bool, default False If True, adds Dividend and Split columns to dataframe. adjust_dividends: bool, default true If True, adjusts dividends for splits. """ def __init__( self, symbols=None, start=None, end=None, retry_count=3, pause=0.1, session=None, adjust_price=False, ret_index=False, chunksize=1, interval="d", get_actions=False, adjust_dividends=True, ): super(YahooDailyReader, self).__init__( symbols=symbols, start=start, end=end, retry_count=retry_count, pause=pause, session=session, chunksize=chunksize, ) # Ladder up the wait time between subsequent requests to improve # probability of a successful retry self.pause_multiplier = 2.5 self.headers = { "Connection": "keep-alive", "Expires": str(-1), "Upgrade-Insecure-Requests": str(1), # Google Chrome: "User-Agent": ( "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 " "(KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36" ), } self.adjust_price = adjust_price self.ret_index = ret_index self.interval = interval self._get_actions = get_actions if self.interval not in ["d", "wk", "mo", "m", "w"]: raise ValueError( "Invalid interval: valid values are 'd', 'wk' and 'mo'. 'm' and 'w' " "have been implemented for backward compatibility. 'v' has been moved " "to the yahoo-actions or yahoo-dividends APIs." 
) elif self.interval in ["m", "mo"]: self.pdinterval = "m" self.interval = "mo" elif self.interval in ["w", "wk"]: self.pdinterval = "w" self.interval = "wk" self.interval = "1" + self.interval self.adjust_dividends = adjust_dividends @property def get_actions(self): return self._get_actions @property def url(self): return "https://finance.yahoo.com/quote/{}/history" # Test test_get_data_interval() crashed because of this issue, probably # whole yahoo part of package wasn't # working properly def _get_params(self, symbol): # This needed because yahoo returns data shifted by 4 hours ago. four_hours_in_seconds = 14400 unix_start = int(time.mktime(self.start.timetuple())) unix_start += four_hours_in_seconds day_end = self.end.replace(hour=23, minute=59, second=59) unix_end = int(time.mktime(day_end.timetuple())) unix_end += four_hours_in_seconds params = { "period1": unix_start, "period2": unix_end, "interval": self.interval, "frequency": self.interval, "filter": "history", "symbol": symbol, } return params def _read_one_data(self, url, params): """ read one data from specified symbol """ symbol = params["symbol"] del params["symbol"] url = url.format(symbol) resp = self._get_response(url, params=params) ptrn = r"root\.App\.main = (.*?);\n}\(this\)\);" try: j = json.loads(re.search(ptrn, resp.text, re.DOTALL).group(1)) data = j["context"]["dispatcher"]["stores"]["HistoricalPriceStore"] except KeyError: msg = "No data fetched for symbol {} using {}" raise RemoteDataError(msg.format(symbol, self.__class__.__name__)) # price data prices = DataFrame(data["prices"]) prices.columns = [col.capitalize() for col in prices.columns] prices["Date"] = to_datetime(to_datetime(prices["Date"], unit="s").dt.date) if "Data" in prices.columns: prices = prices[prices["Data"].isnull()] prices = prices[["Date", "High", "Low", "Open", "Close", "Volume", "Adjclose"]] prices = prices.rename(columns={"Adjclose": "Adj Close"}) prices = prices.set_index("Date") prices = prices.sort_index().dropna(how="all") if self.ret_index: prices["Ret_Index"] = _calc_return_index(prices["Adj Close"]) if self.adjust_price: prices = _adjust_prices(prices) # dividends & splits data if self.get_actions and data["eventsData"]: actions =
DataFrame(data["eventsData"])
pandas.DataFrame
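# A hedged sketch of the DataFrame construction completed in the row above. The record
# layout below ('date' in unix seconds, 'type', 'amount', split numerator/denominator) is a
# hypothetical stand-in -- Yahoo's real eventsData schema is not reproduced here.
from pandas import DataFrame, to_datetime

events_data = [
    {"date": 1546300800, "type": "DIVIDEND", "amount": 0.77},
    {"date": 1554076800, "type": "SPLIT", "numerator": 4, "denominator": 1},
]

actions = DataFrame(events_data)
# mirror the price handling above: unix seconds -> datetime, then set as the index
actions["date"] = to_datetime(to_datetime(actions["date"], unit="s").dt.date)
actions = actions.set_index("date").sort_index(ascending=False)
print(actions)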