Column summary for the rows below (name: type, min to max string length):

- prompt: string, 19 to 1.03M characters
- completion: string, 4 to 2.12k characters
- api: string, 8 to 90 characters
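A quick way to sanity-check rows shaped like this is to load them with pandas; the file name below is a hypothetical placeholder for wherever the prompt/completion/api triples are stored.

import pandas as pd

# Hypothetical export of the rows shown below, with the three columns
# summarized above.
rows = pd.read_csv("api_completion_rows.csv")
print(rows.columns.tolist())                 # ['prompt', 'completion', 'api']
print(rows["prompt"].str.len().describe())   # spread of prompt lengths
print(rows["api"].value_counts().head())     # most frequent target APIs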
# -*- coding: utf-8 -*- """Decision Tree for players.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1VrKHv1_DqnRr0XUwcIe_zrSfL1mSOZKG """ import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import StandardScaler from sklearn.neural_network import MLPClassifier from sklearn.metrics import confusion_matrix from sklearn.metrics import roc_auc_score from sklearn.ensemble import GradientBoostingClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.decomposition import PCA import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from scipy import stats from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import MinMaxScaler from sklearn.metrics import confusion_matrix from sklearn.metrics import roc_auc_score from sklearn.ensemble import RandomForestClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import f1_score from sklearn.metrics import roc_curve from matplotlib import pyplot import seaborn as sn import pandas as pd import matplotlib.pyplot as plt from sklearn.metrics import plot_confusion_matrix p_2011 =
pd.read_csv("../data/Players_final_2011.csv")
pandas.read_csv
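The target API for this row is pandas.read_csv; a minimal sketch of the completed call, assuming the relative path from the completion exists next to the notebook.

import pandas as pd

# Read the players table into a DataFrame; the first row is used as the header.
p_2011 = pd.read_csv("../data/Players_final_2011.csv")
print(p_2011.shape)    # (rows, columns)
print(p_2011.head())   # first five rows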
import os import pandas as pd import numpy as np import matplotlib.pyplot as plt from PIL import Image from collections import OrderedDict import gc from current_clamp import * from current_clamp_features import extract_istep_features from visualization.feature_annotations import feature_name_dict from read_metadata import * from file_io import load_current_step # from pymysql import IntegrityError import datajoint as dj schema = dj.schema('yueqi_ephys', locals()) FIG_DIR = 'analysis_current_clamp/figures_plot_recording' ''' class DjImportedFromDirectory(dj.Imported): # Subclass of Imported. Initialize with data directory. def __init__(self, directory=''): self.directory = directory super().__init__() ''' @schema class EphysExperimentsForAnalysis(dj.Manual): definition = """ # Ephys experiments (excel files) for analysis experiment: varchar(128) # excel files to use for analysis --- project: varchar(128) # which project the data belongs to use: enum('Yes', 'No') # whether to use this experiment directory: varchar(256) # the parent project directory """ def insert_experiment(self, excel_file): ''' Insert new sample ephys metadata from excel to datajoint tables ''' entry_list = pd.read_excel(excel_file)[['experiment', 'project', 'use', 'directory']].dropna(how='any') entry_list = entry_list.to_dict('records') no_insert = True for entry in entry_list: if entry['use'] == 'No': continue self.insert1(row=entry, skip_duplicates=True) no_insert = False #print("Inserted: " + str(entry)) if no_insert: print("No new entry inserted.") return @schema class Animals(dj.Imported): definition = """ # Sample metadata -> EphysExperimentsForAnalysis --- id: varchar(128) # organod ID (use date, but need better naming) strain : varchar(128) # genetic strain dob = null: date # date of birth date = null: date # recording date age = null: smallint # nunmber of days (date - dob) slicetype: varchar(128) # what kind of slice prep external: varchar(128) # external solution internal: varchar(128) # internal solution animal_comment = '': varchar(256) # general comments """ def _make_tuples(self, key): ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1() directory = os.path.expanduser(ephys_exp.pop('directory', None)) print('Populating for: ', key) animal_info, _ = read_ephys_info_from_excel_2017( os.path.join(directory, key['experiment'] + '.xlsx')) key['id'] = animal_info['id'] key['strain'] = animal_info['strain'] if not pd.isnull(animal_info['DOB']): key['dob'] = animal_info['DOB'] if not pd.isnull(animal_info['age']): key['age'] = animal_info['age'] key['date'] = animal_info['date'] key['slicetype'] = animal_info['type'] key['external'] = animal_info['external'] key['internal'] = animal_info['internal'] if not pd.isnull(animal_info['comment']): key['animal_comment'] = animal_info['comment'] self.insert1(row=key) return @schema class PatchCells(dj.Imported): definition = """ # Patch clamp metadata for each cell -> EphysExperimentsForAnalysis cell: varchar(128) # cell id --- rp = null: float # pipette resistance cm_est = null: float # estimated Cm ra_est = null: float # estimated Ra right after whole-cell mode rm_est = null: float # estimated Rm v_rest = null: float # resting membrane potential fluor = '': varchar(128) # fluorescent label fill = 'no': enum('yes', 'no', 'unknown', 'out') # wether the cell is biocytin filled. Out -- cell came out with pipette. 
cell_external = '': varchar(128) # external if different from sample metadata cell_internal = '': varchar(128) # internal if different from sample metadata depth = '': varchar(128) # microns beneath slice surface location = '': varchar(128) # spatial location """ def _make_tuples(self, key): ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1() directory = os.path.expanduser(ephys_exp.pop('directory', None)) print('Populating for: ', key) _, metadata = read_ephys_info_from_excel_2017( os.path.join(directory, key['experiment'] + '.xlsx')) if 'params' in metadata.columns: old_file = True cell_info = parse_cell_info_2017_vertical(metadata) else: old_file = False cell_info = parse_cell_info_2017(metadata) for i, row in cell_info.iterrows(): newkey = {} newkey['experiment'] = key['experiment'] newkey['cell'] = row['cell'] if not pd.isnull(row['Rp']): newkey['rp'] = row['Rp'] if not pd.isnull(row['Cm']): newkey['cm_est'] = row['Cm'] if not pd.isnull(row['Ra']): newkey['ra_est'] = row['Ra'] if not pd.isnull(row['Vrest']): newkey['v_rest'] = row['Vrest'] if not pd.isnull(row['depth']): newkey['depth'] = row['depth'] if not old_file: if not pd.isnull(row['fluor']): newkey['fluor'] = row['fluor'] if not pd.isnull(row['Rm']): newkey['rm_est'] = row['Rm'] if not pd.isnull(row['external']): newkey['cell_external'] = row['external'] if not pd.isnull(row['internal']): newkey['cell_internal'] = row['internal'] if not pd.isnull(row['location']): newkey['location'] = row['location'] if not pd.isnull(row['fill']): if row['fill'].lower() in ['yes', 'no', 'unknown', 'out']: newkey['fill'] = row['fill'].lower() else: print('"fill" must be yes/no/unknown/out. ') #print(newkey) self.insert1(row=newkey) return @schema class EphysRecordings(dj.Imported): definition = """ # Patch clamp metadata for each recording file -> EphysExperimentsForAnalysis cell: varchar(128) # cell id recording: varchar(128) # recording file name --- clamp = null : enum('v', 'i') # voltage or current clamp protocol = '' : varchar(128) # protocols such as gapfree, istep, etc hold = null : smallint # holding current or voltage ra_pre = null : smallint # estimated Ra before protocol ra_post = null : smallint # estimated Ra after protocol compensate = '' : varchar(128) # percentage of Ra compensation gain = null : smallint # amplifier gain filter = null : smallint # filter in kHz start = null : smallint # current step starting current step = null : smallint # step size of current injection stim_strength = '' : varchar(128) # electrical/optical stimulation strength stim_duration = null : smallint # duration of each stim pulse stim_interval = null : smallint # interval between two consecutive pulses response = '' : varchar(256) # what kind of reponse was observed comment = '' : varchar(256) # general comments """ def _make_tuples(self, key): ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1() directory = os.path.expanduser(ephys_exp.pop('directory', None)) print('Populating for: ', key) _, metadata = read_ephys_info_from_excel_2017( os.path.join(directory, key['experiment'] + '.xlsx')) patch_info = parse_patch_info_2017(metadata) for i, row in patch_info.iterrows(): newkey = {} newkey['experiment'] = key['experiment'] newkey['cell'] = row['cell'] newkey['recording'] = row['file'] if not pd.isnull(row['clamp']): newkey['clamp'] = row['clamp'].lower() if not pd.isnull(row['protocol']): newkey['protocol'] = row['protocol'] if not pd.isnull(row['hold']): newkey['hold'] = row['hold'] if not
pd.isnull(row['Ra-pre'])
pandas.isnull
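Here the target is pandas.isnull, used in the prompt to guard against missing metadata fields before inserting them; a minimal sketch with made-up values.

import numpy as np
import pandas as pd

row = pd.Series({"Ra-pre": np.nan, "Ra-post": 12.0})   # assumed example values

# pd.isnull (an alias of pd.isna) is True for NaN/None entries.
print(pd.isnull(row["Ra-pre"]))    # True
print(pd.isnull(row["Ra-post"]))   # False
print(pd.isnull(row))              # element-wise boolean Series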
from __future__ import print_function import csv import os import copy import numpy as np import os, sys from dotenv import load_dotenv, find_dotenv load_dotenv(find_dotenv()) sys.path.append(os.environ.get("PROJECT_ROOT")) sys.path.append(os.path.join(os.environ.get("PROJECT_ROOT"), 'test')) import GPy_1_0_5 import scipy.io import zipfile import tarfile import datetime import json import re import sys from .config import * ipython_available=True try: import IPython except ImportError: ipython_available=False try: #In Python 2, cPickle is faster. It does not exist in Python 3 but the underlying code is always used #if available import cPickle as pickle except ImportError: import pickle #A Python2/3 import handler - urllib2 changed its name in Py3 and was also reorganised try: from urllib2 import urlopen from urllib2 import URLError except ImportError: from urllib.request import urlopen from urllib.error import URLError def reporthook(a,b,c): # ',' at the end of the line is important! #print "% 3.1f%% of %d bytes\r" % (min(100, float(a * b) / c * 100), c), #you can also use sys.stdout.write sys.stdout.write("\r% 3.1f%% of %d bytes" % (min(100, float(a * b) / c * 100), c)) sys.stdout.flush() # Global variables data_path = os.path.expandvars(config.get('datasets', 'dir')) #data_path = os.path.join(os.path.dirname(__file__), 'datasets') default_seed = 10000 overide_manual_authorize=False neil_url = 'http://staffwww.dcs.shef.ac.uk/people/N.Lawrence/dataset_mirror/' # Read data resources from json file. # Don't do this when ReadTheDocs is scanning as it breaks things on_rtd = os.environ.get('READTHEDOCS', None) == 'True' #Checks if RTD is scanning if not (on_rtd): path = os.path.join(os.path.dirname(__file__), 'data_resources.json') json_data=open(path).read() data_resources = json.loads(json_data) if not (on_rtd): path = os.path.join(os.path.dirname(__file__), 'football_teams.json') json_data=open(path).read() football_dict = json.loads(json_data) def prompt_user(prompt): """Ask user for agreeing to data set licenses.""" # raw_input returns the empty string for "enter" yes = set(['yes', 'y']) no = set(['no','n']) try: print(prompt) choice = raw_input().lower() # would like to test for exception here, but not sure if we can do that without importing IPython except: print('Stdin is not implemented.') print('You need to set') print('overide_manual_authorize=True') print('to proceed with the download. 
Please set that variable and continue.') raise if choice in yes: return True elif choice in no: return False else: print(("Your response was a " + choice)) print("Please respond with 'yes', 'y' or 'no', 'n'") #return prompt_user() def data_available(dataset_name=None): """Check if the data set is available on the local machine already.""" try: from itertools import izip_longest except ImportError: from itertools import zip_longest as izip_longest dr = data_resources[dataset_name] zip_urls = (dr['files'], ) if 'save_names' in dr: zip_urls += (dr['save_names'], ) else: zip_urls += ([],) for file_list, save_list in izip_longest(*zip_urls, fillvalue=[]): for f, s in izip_longest(file_list, save_list, fillvalue=None): if s is not None: f=s # If there is a save_name given, use that one if not os.path.exists(os.path.join(data_path, dataset_name, f)): return False return True def download_url(url, store_directory, save_name=None, messages=True, suffix=''): """Download a file from a url and save it to disk.""" i = url.rfind('/') file = url[i+1:] print(file) dir_name = os.path.join(data_path, store_directory) if save_name is None: save_name = os.path.join(dir_name, file) else: save_name = os.path.join(dir_name, save_name) if suffix is None: suffix='' print("Downloading ", url, "->", save_name) if not os.path.exists(dir_name): os.makedirs(dir_name) try: response = urlopen(url+suffix) except URLError as e: if not hasattr(e, "code"): raise response = e if response.code > 399 and response.code<500: raise ValueError('Tried url ' + url + suffix + ' and received client error ' + str(response.code)) elif response.code > 499: raise ValueError('Tried url ' + url + suffix + ' and received server error ' + str(response.code)) with open(save_name, 'wb') as f: meta = response.info() content_length_str = meta.getheaders("Content-Length") if content_length_str: file_size = int(content_length_str[0]) else: file_size = None status = "" file_size_dl = 0 block_sz = 8192 line_length=30 while True: buff = response.read(block_sz) if not buff: break file_size_dl += len(buff) f.write(buff) sys.stdout.write(" "*(len(status)) + "\r") if file_size: status = r"[{perc: <{ll}}] {dl:7.3f}/{full:.3f}MB".format(dl=file_size_dl/(1048576.), full=file_size/(1048576.), ll=line_length, perc="="*int(line_length*float(file_size_dl)/file_size)) else: status = r"[{perc: <{ll}}] {dl:7.3f}MB".format(dl=file_size_dl/(1048576.), ll=line_length, perc="."*int(line_length*float(file_size_dl/(10*1048576.)))) sys.stdout.write(status) sys.stdout.flush() sys.stdout.write(" "*(len(status)) + "\r") print(status) # if we wanted to get more sophisticated maybe we should check the response code here again even for successes. #with open(save_name, 'wb') as f: # f.write(response.read()) #urllib.urlretrieve(url+suffix, save_name, reporthook) def authorize_download(dataset_name=None): """Check with the user that the are happy with terms and conditions for the data set.""" print(('Acquiring resource: ' + dataset_name)) # TODO, check resource is in dictionary! 
print('') dr = data_resources[dataset_name] print('Details of data: ') print((dr['details'])) print('') if dr['citation']: print('Please cite:') print((dr['citation'])) print('') if dr['size']: print(('After downloading the data will take up ' + str(dr['size']) + ' bytes of space.')) print('') print(('Data will be stored in ' + os.path.join(data_path, dataset_name) + '.')) print('') if overide_manual_authorize: if dr['license']: print('You have agreed to the following license:') print((dr['license'])) print('') return True else: if dr['license']: print('You must also agree to the following license:') print((dr['license'])) print('') return prompt_user('Do you wish to proceed with the download? [yes/no]') def download_data(dataset_name=None): """Check with the user that the are happy with terms and conditions for the data set, then download it.""" import itertools dr = data_resources[dataset_name] if not authorize_download(dataset_name): raise Exception("Permission to download data set denied.") zip_urls = (dr['urls'], dr['files']) if dr.has_key('save_names'): zip_urls += (dr['save_names'], ) else: zip_urls += ([],) if dr.has_key('suffices'): zip_urls += (dr['suffices'], ) else: zip_urls += ([],) for url, files, save_names, suffices in itertools.izip_longest(*zip_urls, fillvalue=[]): for f, save_name, suffix in itertools.izip_longest(files, save_names, suffices, fillvalue=None): download_url(os.path.join(url,f), dataset_name, save_name, suffix=suffix) return True def data_details_return(data, data_set): """Update the data component of the data dictionary with details drawn from the data_resources.""" data.update(data_resources[data_set]) return data def cmu_urls_files(subj_motions, messages = True): ''' Find which resources are missing on the local disk for the requested CMU motion capture motions. ''' dr = data_resources['cmu_mocap_full'] cmu_url = dr['urls'][0] subjects_num = subj_motions[0] motions_num = subj_motions[1] resource = {'urls' : [], 'files' : []} # Convert numbers to strings subjects = [] motions = [list() for _ in range(len(subjects_num))] for i in range(len(subjects_num)): curSubj = str(int(subjects_num[i])) if int(subjects_num[i]) < 10: curSubj = '0' + curSubj subjects.append(curSubj) for j in range(len(motions_num[i])): curMot = str(int(motions_num[i][j])) if int(motions_num[i][j]) < 10: curMot = '0' + curMot motions[i].append(curMot) all_skels = [] assert len(subjects) == len(motions) all_motions = [] for i in range(len(subjects)): skel_dir = os.path.join(data_path, 'cmu_mocap') cur_skel_file = os.path.join(skel_dir, subjects[i] + '.asf') url_required = False file_download = [] if not os.path.exists(cur_skel_file): # Current skel file doesn't exist. if not os.path.isdir(skel_dir): os.makedirs(skel_dir) # Add skel file to list. 
url_required = True file_download.append(subjects[i] + '.asf') for j in range(len(motions[i])): file_name = subjects[i] + '_' + motions[i][j] + '.amc' cur_motion_file = os.path.join(skel_dir, file_name) if not os.path.exists(cur_motion_file): url_required = True file_download.append(subjects[i] + '_' + motions[i][j] + '.amc') if url_required: resource['urls'].append(cmu_url + '/' + subjects[i] + '/') resource['files'].append(file_download) return resource try: import gpxpy import gpxpy.gpx gpxpy_available = True except ImportError: gpxpy_available = False if gpxpy_available: def epomeo_gpx(data_set='epomeo_gpx', sample_every=4): if not data_available(data_set): download_data(data_set) files = ['endomondo_1', 'endomondo_2', 'garmin_watch_via_endomondo','viewranger_phone', 'viewranger_tablet'] X = [] for file in files: gpx_file = open(os.path.join(data_path, 'epomeo_gpx', file + '.gpx'), 'r') gpx = gpxpy.parse(gpx_file) segment = gpx.tracks[0].segments[0] points = [point for track in gpx.tracks for segment in track.segments for point in segment.points] data = [[(point.time-datetime.datetime(2013,8,21)).total_seconds(), point.latitude, point.longitude, point.elevation] for point in points] X.append(np.asarray(data)[::sample_every, :]) gpx_file.close() return data_details_return({'X' : X, 'info' : 'Data is an array containing time in seconds, latitude, longitude and elevation in that order.'}, data_set) #del gpxpy_available # Some general utilities. def sample_class(f): p = 1. / (1. + np.exp(-f)) c = np.random.binomial(1, p) c = np.where(c, 1, -1) return c def boston_housing(data_set='boston_housing'): if not data_available(data_set): download_data(data_set) all_data = np.genfromtxt(os.path.join(data_path, data_set, 'housing.data')) X = all_data[:, 0:13] Y = all_data[:, 13:14] return data_details_return({'X' : X, 'Y': Y}, data_set) def brendan_faces(data_set='brendan_faces'): if not data_available(data_set): download_data(data_set) mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'frey_rawface.mat')) Y = mat_data['ff'].T return data_details_return({'Y': Y}, data_set) def della_gatta_TRP63_gene_expression(data_set='della_gatta', gene_number=None): if not data_available(data_set): download_data(data_set) mat_data = scipy.io.loadmat(os.path.join(data_path, data_set, 'DellaGattadata.mat')) X = np.double(mat_data['timepoints']) if gene_number == None: Y = mat_data['exprs_tp53_RMA'] else: Y = mat_data['exprs_tp53_RMA'][:, gene_number] if len(Y.shape) == 1: Y = Y[:, None] return data_details_return({'X': X, 'Y': Y, 'gene_number' : gene_number}, data_set) def football_data(season='1314', data_set='football_data'): """Football data from English games since 1993. This downloads data from football-data.co.uk for the given season. 
""" def league2num(string): league_dict = {'E0':0, 'E1':1, 'E2': 2, 'E3': 3, 'EC':4} return league_dict[string] def football2num(string): if football_dict.has_key(string): return football_dict[string] else: football_dict[string] = len(football_dict)+1 return len(football_dict)+1 data_set_season = data_set + '_' + season data_resources[data_set_season] = copy.deepcopy(data_resources[data_set]) data_resources[data_set_season]['urls'][0]+=season + '/' start_year = int(season[0:2]) end_year = int(season[2:4]) files = ['E0.csv', 'E1.csv', 'E2.csv', 'E3.csv'] if start_year>4 and start_year < 93: files += ['EC.csv'] data_resources[data_set_season]['files'] = [files] if not data_available(data_set_season): download_data(data_set_season) from matplotlib import pyplot as pb for file in reversed(files): filename = os.path.join(data_path, data_set_season, file) # rewrite files removing blank rows. writename = os.path.join(data_path, data_set_season, 'temp.csv') input = open(filename, 'rb') output = open(writename, 'wb') writer = csv.writer(output) for row in csv.reader(input): if any(field.strip() for field in row): writer.writerow(row) input.close() output.close() table = np.loadtxt(writename,skiprows=1, usecols=(0, 1, 2, 3, 4, 5), converters = {0: league2num, 1: pb.datestr2num, 2:football2num, 3:football2num}, delimiter=',') X = table[:, :4] Y = table[:, 4:] return data_details_return({'X': X, 'Y': Y}, data_set) def sod1_mouse(data_set='sod1_mouse'): if not data_available(data_set): download_data(data_set) from pandas import read_csv dir_path = os.path.join(data_path, data_set) filename = os.path.join(dir_path, 'sod1_C57_129_exprs.csv') Y = read_csv(filename, header=0, index_col=0) num_repeats=4 num_time=4 num_cond=4 X = 1 return data_details_return({'X': X, 'Y': Y}, data_set) def spellman_yeast(data_set='spellman_yeast'): if not data_available(data_set): download_data(data_set) from pandas import read_csv dir_path = os.path.join(data_path, data_set) filename = os.path.join(dir_path, 'combined.txt') Y = read_csv(filename, header=0, index_col=0, sep='\t') return data_details_return({'Y': Y}, data_set) def spellman_yeast_cdc15(data_set='spellman_yeast'): if not data_available(data_set): download_data(data_set) from pandas import read_csv dir_path = os.path.join(data_path, data_set) filename = os.path.join(dir_path, 'combined.txt') Y = read_csv(filename, header=0, index_col=0, sep='\t') t = np.asarray([10, 30, 50, 70, 80, 90, 100, 110, 120, 130, 140, 150, 170, 180, 190, 200, 210, 220, 230, 240, 250, 270, 290]) times = ['cdc15_'+str(time) for time in t] Y = Y[times].T t = t[:, None] return data_details_return({'Y' : Y, 't': t, 'info': 'Time series of synchronized yeast cells from the CDC-15 experiment of Spellman et al (1998).'}, data_set) def lee_yeast_ChIP(data_set='lee_yeast_ChIP'): if not data_available(data_set): download_data(data_set) from pandas import read_csv import zipfile dir_path = os.path.join(data_path, data_set) filename = os.path.join(dir_path, 'binding_by_gene.tsv') S = read_csv(filename, header=1, index_col=0, sep='\t') transcription_factors = [col for col in S.columns if col[:7] != 'Unnamed'] annotations = S[['Unnamed: 1', 'Unnamed: 2', 'Unnamed: 3']] S = S[transcription_factors] return data_details_return({'annotations' : annotations, 'Y' : S, 'transcription_factors': transcription_factors}, data_set) def fruitfly_tomancak(data_set='fruitfly_tomancak', gene_number=None): if not data_available(data_set): download_data(data_set) from pandas import read_csv dir_path = 
os.path.join(data_path, data_set) filename = os.path.join(dir_path, 'tomancak_exprs.csv') Y = read_csv(filename, header=0, index_col=0).T num_repeats = 3 num_time = 12 xt = np.linspace(0, num_time-1, num_time) xr = np.linspace(0, num_repeats-1, num_repeats) xtime, xrepeat = np.meshgrid(xt, xr) X = np.vstack((xtime.flatten(), xrepeat.flatten())).T return data_details_return({'X': X, 'Y': Y, 'gene_number' : gene_number}, data_set) def drosophila_protein(data_set='drosophila_protein'): if not data_available(data_set): download_data(data_set) from pandas import read_csv dir_path = os.path.join(data_path, data_set) filename = os.path.join(dir_path, 'becker_et_al.csv') Y =
read_csv(filename, header=0)
pandas.read_csv
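This row again targets pandas.read_csv, imported directly from pandas as in the datasets module above; a sketch of the drosophila_protein read, assuming the file has already been downloaded into a data directory (the directory layout here is hypothetical).

import os
from pandas import read_csv

dir_path = os.path.join("datasets", "drosophila_protein")   # hypothetical data_path layout
filename = os.path.join(dir_path, "becker_et_al.csv")
Y = read_csv(filename, header=0)   # first row supplies the column names
print(Y.columns[:5])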
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller as ADF
from pmdarima import auto_arima
import statsmodels.api as sm
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf

# Load dataset
path = "/Users/husiyan/Google Drive/备份-完成的课题与项目/研究-VirusPaper/review/data/raw_data.csv"
names = ['Date', 'Num_confirmed_patients', 'Num_deaths', 'Num_suspects']
dataset = pd.read_csv(path, names=names)

# Split training and prediction
train_cls = {
    'Num_confirmed_patients': dataset.iloc[:,1].values,
    'Num_confirmed_deaths': dataset.iloc[:,2].values,
    'Num_suspects': dataset.iloc[:,3].values,
}
train_dataset = pd.DataFrame(train_cls, columns = ['Num_suspects'])
train_dataset.set_index(dataset.iloc[:, 0].values, inplace=True)
train_data = pd.Series(train_dataset['Num_suspects'])
train_data.head()
train_data.plot(figsize=(20,10))
plt.show()

# Stationarity test
result = ADF(train_dataset)
print('ADF Statistic: %f' % result[0])
print('p-value: %f' % result[1])

plot_acf(train_data)
plot_pacf(train_data)
plt.show()

(p, q) = (sm.tsa.arma_order_select_ic(train_data, max_ar=3, max_ma=3, ic='aic')['aic_min_order'])
print(p, q)

data = np.array(train_dataset.values).T[0]
fittedmodel = auto_arima(train_data, start_p=1, start_q=1, max_p=3, max_q=3, max_d=3, max_order=None,
                         seasonal=False, m=1, test='adf', trace=False,
                         error_action='ignore',     # don't want to know if an order does not work
                         suppress_warnings=True,    # don't want convergence warnings
                         stepwise=True, information_criterion='bic', njob=-1)  # set to stepwise
print(fittedmodel.summary())

def plot_arima(truth, forecasts, title="ARIMA", xaxis_label='Time', yaxis_label='Value',
               c1='#A6CEE3', c2='#B2DF8A', forecast_start=None, **kwargs):
    # make truth and forecasts into pandas series
    n_truth = truth.shape[0]
    n_forecasts = forecasts.shape[0]
    # always plot truth the same
    truth = pd.Series(truth, index=np.arange(truth.shape[0]))
    # if no defined forecast start, start at the end
    if forecast_start is None:
        idx = np.arange(n_truth, n_truth + n_forecasts)
    else:
        idx = np.arange(forecast_start, n_forecasts)
    forecasts =
pd.Series(forecasts, index=idx)
pandas.Series
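The completion wraps ARIMA forecasts in pandas.Series with an explicit index so they line up after the observed series; a minimal sketch with made-up numbers.

import numpy as np
import pandas as pd

forecasts = np.array([101.2, 98.7, 103.4])   # assumed forecast values
n_truth = 10                                 # length of the observed series

# Index the forecasts so they start where the truth series ends.
idx = np.arange(n_truth, n_truth + forecasts.shape[0])
fc = pd.Series(forecasts, index=idx)
print(fc)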
import pandas as pd import os import spacy from spacy.lang.en import English from pathlib import Path from . import task1_converter class DeftCorpusLoader(object): """""" def __init__(self, deft_corpus_path): super().__init__() self.corpus_path = deft_corpus_path self._default_train_output_path = os.path.join(deft_corpus_path, "deft_files/converted_train") self._default_dev_output_path = os.path.join(deft_corpus_path, "deft_files/converted_dev") # Load English tokenizer, tagger, parser, NER and word vectors self._parser = English() def convert_to_classification_format(self, train_output_path = None, dev_output_path = None): if train_output_path == None or dev_output_path == None: train_output_path = self._default_train_output_path dev_output_path = self._default_dev_output_path if not os.path.exists(train_output_path): os.mkdir(train_output_path) if not os.path.exists(dev_output_path): os.mkdir(dev_output_path) else: if not os.path.exists(train_output_path) or not os.path.exists(dev_output_path): raise ValueError("Passed value for one or both of the output paths is not a valid existing path.") if not os.path.isdir(train_output_path) or not os.path.isdir(dev_output_path): raise NotADirectoryError("Passed value for one or both of the output paths is not a valid directory.") self.converted_train_path = train_output_path self.converted_dev_path = dev_output_path train_source_path = os.path.join(self.corpus_path, "deft_files/train") dev_source_path = os.path.join(self.corpus_path, "deft_files/dev") task1_converter.convert(Path(train_source_path), Path(train_output_path)) task1_converter.convert(Path(dev_source_path), Path(dev_output_path)) def load_classification_data(self, train_data_path = None, dev_data_path = None, preprocess= False, clean=False): if(train_data_path == None or dev_data_path == None): if os.path.exists(self._default_train_output_path) and os.path.exists(self._default_dev_output_path): train_data_path = self._default_train_output_path dev_data_path = self._default_dev_output_path else: self.convert_to_classification_format() train_data_path = self.converted_train_path dev_data_path = self.converted_dev_path train_deft_files = os.listdir(train_data_path) train_dataframe = pd.DataFrame([]) for file in train_deft_files: dataframe = pd.read_csv(os.path.join(train_data_path, file), sep="\t", header = None) dataframe.columns = ["Sentence","HasDef"] train_dataframe = train_dataframe.append(dataframe, ignore_index=True) dev_deft_files = os.listdir(dev_data_path) dev_dataframe =
pd.DataFrame([])
pandas.DataFrame
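pandas.DataFrame([]) in the completion seeds an empty frame that the loader then fills per file; a minimal sketch of that accumulation, using pd.concat in place of the DataFrame.append call the prompt relies on (the sentences below are invented).

import pandas as pd

dev_dataframe = pd.DataFrame([])   # empty frame: no rows, no columns
print(dev_dataframe.empty)         # True

chunks = [pd.DataFrame({"Sentence": ["A widget is a small control."], "HasDef": [1]}),
          pd.DataFrame({"Sentence": ["No definition in this sentence."], "HasDef": [0]})]
dev_dataframe = pd.concat(chunks, ignore_index=True)
print(dev_dataframe)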
# pylint: disable=E1101,E1103,W0232 import operator from datetime import datetime, date import numpy as np import pandas.tseries.offsets as offsets from pandas.tseries.frequencies import (get_freq_code as _gfc, _month_numbers, FreqGroup) from pandas.tseries.index import DatetimeIndex, Int64Index, Index from pandas.tseries.tools import parse_time_string import pandas.tseries.frequencies as _freq_mod import pandas.core.common as com from pandas.lib import Timestamp import pandas.lib as lib import pandas.tslib as tslib import pandas.algos as _algos #--------------- # Period logic def _period_field_accessor(name, alias): def f(self): base, mult = _gfc(self.freq) return
tslib.get_period_field(alias, self.ordinal, base)
pandas.tslib.get_period_field
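pandas.tslib.get_period_field is a private accessor from old pandas internals, so the sketch below uses the public pd.Period attributes as an analogue rather than the internal call itself.

import pandas as pd

p = pd.Period("2011-03", freq="M")

# Public attributes expose the same period fields the internal accessor reads.
print(p.year, p.month, p.quarter)   # 2011 3 1
print(p.ordinal)                    # integer ordinal backing the period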
import requests from bs4 import BeautifulSoup as bs import re import pandas as pd import numpy as np from random import randint from time import sleep import logging import os import urllib3 import socket import random import break_up_csv as buc logging.basicConfig(filename="spotcrime_scrape.log", level=logging.DEBUG, filemode='a', format='%(asctime)s %(message)s') base_url = 'https://spotcrime.com' empty_df = pd.DataFrame() try: response = requests.get(base_url) except requests.exceptions.SSLError: print("Looks like SSL libraries are not installed?") print("????????Or a mismatch in hostname??????????") exit if (response.status_code == 200): page = response.text else: logging.error(f"{base_url} reported back {response.status_code}") raise ValueError soup = bs(page, "lxml") state_tag_list = soup.find(id="states-list-menu").find_all('a') state_dict = {s.text: base_url+s.get('href') for s in state_tag_list} # Alabama while True: [(this_state,state_page_link)] = random.sample(list(state_dict.items()),1) state_page = requests.get(state_page_link) if (state_page.status_code == 200): page = state_page.text else: logging.error(f"{state_page_link} reported back {state_page.status_code}") # raise ValueError continue # Go to the next state clean_name = buc.clean_state_name(this_state) crime_file = 'spotcrime_'+clean_name+'.csv' try: spotcrime_df =
pd.read_csv(crime_file, header=0)
pandas.read_csv
#!/usr/bin/env python __author__ = "<NAME>" __version__ = "1.1" __description__ = "Optimal Stop Loss Generator" import sys, os, traceback import logging as log import pandas as pd import numpy as np import re,socket,datetime from lib.Strategy import * # generate trades from signal import tabulate import glob import warnings #------------------------------------------------------------------------------------ # Portfolio dynamically generated from configured strategies # # Optimal Stop Loss order creation with near-real time backtesting # # heavy processing and stateless alpha is calculated in Strategy object or in kdb and wrapped in Signal object # state manipulation and portfolio management is done in PNL object #------------------------------------------------------------------------------------ class PNLPortfolio: dataPath = "../data" docPath = "../doc" pnlFile = dataPath + "/PNL" pnlFileCsv = pnlFile + ".csv" def __init__(self, args): self.args = args if hasattr(args,'view') and args.view: self.file = args.view else: self.file = None self.Sym = args.Sym self._df = None self.isScrapeData = True self.isKdbData = False self.dates = args.dates self.horizon = (self.dates[-1] - self.dates[0]).days self.sDate = self.dates[0].strftime('%Y.%m.%d') self.eDate = self.dates[-1].strftime('%Y.%m.%d') if hasattr(args,'out') and args.out: if self.Sym and self.dates and self.out!=PNLPortfolio.pnlFile: args.out = '{}/sl_{}_{}_{}.csv'.format(self.signal.dataPath,self.Sym,self.sDate,self.eDate) if not os.path.isdir(PNLPortfolio.dataPath): if os.path.isdir("data"): PNLPortfolio.dataPath = "data" PNLPortfolio.docPath = "doc" else: PNLPortfolio.dataPath=PNLPortfolio.docPath="." self._dataSet = [] if hasattr(args,'stratPath') and args.stratPath: self.stratPath = args.stratPath else: args.stratPath = self.stratPath = PNLPortfolio.dataPath + '/strats' self.createPortfolio() def SignalAPIFactory(self): args = self.args if self.isKdbData: # invoke kdb return KDBData(args=args) dataFiles = glob.glob(PNLPortfolio.dataPath+'/store/'+args.Sym+'*.pkl') if dataFiles and len(dataFiles)>0: log.debug('Found {cnt} data files for {sym}'.format(cnt=len(dataFiles),sym=args.Sym)) for f1 in dataFiles: try: s1,sd1,ed1 = f1.split('/store/')[-1].split('.pkl')[0].split('_') sFile = (datetime.datetime.strptime(sd1,'%Y-%m-%d')).date() eFile = (datetime.datetime.strptime(ed1,'%Y-%m-%d')).date() #if sFile<self.args.dates[-1] or eFile>self.args.dates[0]: # start date is within file timeframe, though end date is just greater than start of file if sFile<=self.args.dates[0] and sFile<self.args.dates[-1] and self.args.dates[0]<=eFile: args.file = f1 log.info('located data {file} for {sym} and {dates}'.format(file=f1,sym=args.Sym,dates=self.args.dates)) self.isScrapeData = False return DataStore(args=args) except: if log.DEBUG>=log.getLogger().getEffectiveLevel(): traceback.print_exc() continue if self.isScrapeData: log.warn('data not found, attempting to generate file for {sym}'.format(sym=args.Sym)) # scrape data file return DataScrape(args=args) def setDF(self): args = self.args try: self.dataAPI = self.SignalAPIFactory() self._df=self.dataAPI.DF(args) except: errStr='cannot get data frame, kdb is down, restart and try again' log.error(errStr) if log.DEBUG>=log.getLogger().getEffectiveLevel(): traceback.print_exc() raise Exception(errStr) args._df = self._df self.strats = StrategyFactory.create(self.stratPath, args=args) self.stratDict = {s1.getName():s1 for s1 in self.strats} def getDF(self): # API if not isinstance(self._df, 
pd.DataFrame) or self._df.empty: self.setDF() df = self._df if 'alpha' in df: df = df.drop('alpha', axis=1) return df # find most optimal strategy and return data for it def createPortfolio(self): args = self.args fss,dfRDCache = [],None if hasattr(self.args,'nodates') and self.args.nodates: fss = glob.glob(PNLPortfolio.dataPath+'/cache/p{}DF_*.pkl'.format(self.Sym)) else: fss = glob.glob(PNLPortfolio.dataPath+'/cache/p{}DF_{}-{}.pkl'.format(self.Sym,self.sDate,self.eDate)) if fss and len(fss)>0: dfRDCache = fss[0] if self.file and not self.file=="NA": dfWRCache = dfRDCache = self.file if not (hasattr(self.args,'force') and self.args.force) and dfRDCache and os.path.isfile(dfRDCache): log.warn('Found cached result {cache} for {sym}'.format(cache=dfRDCache,sym=args.Sym)) self._portDF =
pd.read_pickle(dfRDCache)
pandas.read_pickle
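pandas.read_pickle restores the cached result frame in the prompt; a minimal round-trip sketch, with a made-up frame and a hypothetical cache file name in the style the prompt globs for.

import pandas as pd

df = pd.DataFrame({"Sym": ["AAPL", "MSFT"], "px": [150.0, 300.0]})   # placeholder data

cache_path = "pAAPLDF_2020.01.01-2020.06.30.pkl"   # hypothetical cache name
df.to_pickle(cache_path)
restored = pd.read_pickle(cache_path)
print(restored.equals(df))   # True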
import pandas as pd
import numpy as np

Info_alunos = {'nome' : ['Aluno1', 'Aluno2', 'Aluno3', 'Aluno4', 'Aluno5', 'Aluno6', 'Aluno7', 'Aluno8', 'Aluno9', 'Aluno10'],
               'nota' : [7.5, 8.2, 3.4, np.nan, 8.8, 1.2, np.nan, 4.5, 6.7, 9.4],
               'tentativas' : [2, 1, 3, 0, 2, 1, 0, 1, 3, 2],
               'aprovado' : ['Sim', 'Sim', 'Não', 'Não', 'Sim', 'Não', 'Não', 'Não', 'Sim', 'Sim']}

df_alunos =
pd.DataFrame(Info_alunos)
pandas.DataFrame
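The completion builds pandas.DataFrame from the dict of lists defined in the prompt; a trimmed sketch showing what that constructor yields, including how the NaN grades promote the column to float and are skipped by aggregations.

import numpy as np
import pandas as pd

info = {'nome': ['Aluno1', 'Aluno2', 'Aluno3'],
        'nota': [7.5, np.nan, 3.4],
        'tentativas': [2, 0, 3]}
df_alunos = pd.DataFrame(info)

print(df_alunos.dtypes)           # 'nota' is float64 because of the NaN
print(df_alunos['nota'].mean())   # NaN is skipped: (7.5 + 3.4) / 2 = 5.45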
import os import sys import json import math import pandas as pd import numpy as np from minepy import MINE import concurrent.futures from multiprocessing import cpu_count __output__ = "mic_output2" def split_data(data, label_names, test_fraction=0.2, random_state=42): """Split input data into training and testing sets data <pd.DataFrame> : Expects dataframe with genes as columns and samples as rows label_names <array> : List of name(s) that are labels """ test = data.sample( frac=test_fraction, random_state=random_state, axis=0) train = data.loc[~data.index.isin(test.index)] X_test = test[test.columns.difference(label_names)] y_test = test[label_names] X_train = train[train.columns.difference(label_names)] y_train = train[label_names] feature_labels = [X_test.columns.tolist(), y_test.columns.tolist()] return np.array(X_train), np.array(X_test), np.array(y_train), np.array(y_test), feature_labels def run_mic(args): _id, arg_dict = args[0], args[1] data = arg_dict["data"] print("\tRunning {0}...".format(str(_id))) sys.stdout.flush() X_train, X_test, y_train, y_test, feature_labels = split_data(data, [_id], test_fraction=0.3) results = np.zeros(X_train.shape[1]) counter = 0 for i in range(X_train.shape[1]): mine = MINE(alpha=0.6, c=15) mine.compute_score(X_train[:, i], y_train[:, 0]) results[i] = mine.mic() counter += 1 if counter % 100 == 0: print("\t\t{0}: {1} records processed.".format(_id, counter)) if counter in [1,5,10,15,20,25,50,75]: print("\t\t{0}: {1} records processed.".format(_id, counter)) sys.stdout.flush() output = pd.DataFrame(results, index=feature_labels[0], columns=feature_labels[1]) output.to_csv(os.path.join("..", __output__, _id + ".tsv"), sep="\t") print(_id + " complete.") def run_pools( func, arg_iter, arg_dict): cores = arg_dict['workers'] pools = int(math.ceil(len(arg_iter) / arg_dict['workers'])) if pools < 1: pools = 1 print("Processing {0} pool(s) on {1} core(s)...".format(pools, cores)) it_list = [] range_number = 0 for x in range(pools): it_list.append([iter for iter in arg_iter[range_number:range_number + arg_dict['workers']]]) range_number += arg_dict['workers'] print("Divided data across {0} pool(s).\n".format(pools)) batch_number = 1 for batch in it_list: print("Starting: {0}...".format(str([x[0] for x in batch]))) with concurrent.futures.ProcessPoolExecutor(max_workers=arg_dict['workers']) as executor: for gene in zip(batch, executor.map(func, batch)): print("{0} has been processed.".format(gene[0][0])) print('Processing of batch {0} of {1} complete...\n'.format(batch_number, pools)) batch_number += 1 if __name__ == '__main__': ### Load data and sample metadata data_path = os.path.join( os.getcwd(), "..", "data", "S_cereviseae_compendia_recount_bio" ) print("\nLoading data...") with open( os.path.join(data_path, 'aggregated_metadata.json'), 'r') as jsonfile: metadata_file = json.load(jsonfile) tables = {} for k, v in metadata_file['experiments'].items(): tables[v["accession_code"]] = v metadata =
pd.DataFrame(tables)
pandas.DataFrame
#!/usr/bin/env python # Copyright 2016-2019 Biomedical Imaging Group Rotterdam, Departments of # Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import pandas as pd import logging import os from sklearn.model_selection import train_test_split import xlrd from .parameter_optimization import random_search_parameters import WORC.addexceptions as ae def crossval(config, label_data, image_features, param_grid=None, use_fastr=False, fastr_plugin=None, tempsave=False, fixedsplits=None, ensemble={'Use': False}, outputfolder=None, modus='singlelabel'): """ Constructs multiple individual classifiers based on the label settings Parameters ---------- config: dict, mandatory Dictionary with config settings. See the Github Wiki for the available fields and formatting. label_data: dict, mandatory Should contain the following: patient_IDs (list): IDs of the patients, used to keep track of test and training sets, and label data label (list): List of lists, where each list contains the label status for that patient for each label label_name (list): Contains the different names that are stored in the label object image_features: numpy array, mandatory Consists of a tuple of two lists for each patient: (feature_values, feature_labels) param_grid: dictionary, optional Contains the parameters and their values wich are used in the grid or randomized search hyperparamater optimization. See the construct_classifier function for some examples. use_fastr: boolean, default False If False, parallel execution through Joblib is used for fast execution of the hyperparameter optimization. Especially suited for execution on mutlicore (H)PC's. The settings used are specified in the config.ini file in the IOparser folder, which you can adjust to your system. If True, fastr is used to split the hyperparameter optimization in separate jobs. Parameters for the splitting can be specified in the config file. Especially suited for clusters. fastr_plugin: string, default None Determines which plugin is used for fastr executions. When None, uses the default plugin from the fastr config. tempsave: boolean, default False If True, create a .hdf5 file after each cross validation containing the classifier and results from that that split. This is written to the GSOut folder in your fastr output mount. If False, only the result of all combined cross validations will be saved to a .hdf5 file. This will also be done if set to True. fixedsplits: string, optional By default, random split cross validation is used to train and evaluate the machine learning methods. Optionally, you can provide a .xlsx file containing fixed splits to be used. See the Github Wiki for the format. ensemble: dictionary, optional Contains the configuration for constructing an ensemble. modus: string, default 'singlelabel' Determine whether one-vs-all classification (or regression) for each single label is used ('singlelabel') or if multilabel classification is performed ('multilabel'). 
Returns ---------- panda_data: pandas dataframe Contains all information on the trained classifier. """ if tempsave: import fastr # Define all possible regressors regressors = ['SVR', 'RFR', 'SGDR', 'Lasso', 'ElasticNet'] # Process input data patient_IDs = label_data['patient_IDs'] label_value = label_data['label'] label_name = label_data['label_name'] if outputfolder is None: logfilename = os.path.join(os.getcwd(), 'classifier.log') else: logfilename = os.path.join(outputfolder, 'classifier.log') print("Logging to file " + str(logfilename)) for handler in logging.root.handlers[:]: logging.root.removeHandler(handler) logging.basicConfig(filename=logfilename, level=logging.DEBUG) N_iterations = config['CrossValidation']['N_iterations'] test_size = config['CrossValidation']['test_size'] classifier_labelss = dict() logging.debug('Starting classifier') # We only need one label instance, assuming they are all the sample feature_labels = image_features[0][1] # Check if we need to use fixedsplits: if fixedsplits is not None and '.xlsx' in fixedsplits: # fixedsplits = '/home/mstarmans/Settings/RandomSufflingOfData.xlsx' wb = xlrd.open_workbook(fixedsplits) wb = wb.sheet_by_index(1) if modus == 'singlelabel': print('Performing Single class classification.') logging.debug('Performing Single class classification.') elif modus == 'multilabel': print('Performing Multi label classification.') logging.debug('Performing Multi class classification.') label_value = [label_value] label_name = [label_name] else: m = ('{} is not a valid modus!').format(modus) logging.debug(m) raise ae.WORCKeyError(m) for i_class, i_name in zip(label_value, label_name): if modus == 'singlelabel': i_class_temp = i_class.ravel() save_data = list() for i in range(0, N_iterations): print(('Cross validation iteration {} / {} .').format(str(i + 1), str(N_iterations))) logging.debug(('Cross validation iteration {} / {} .').format(str(i + 1), str(N_iterations))) random_seed = np.random.randint(5000) # Split into test and training set, where the percentage of each # label is maintained if any(clf in regressors for clf in param_grid['classifiers']): # We cannot do a stratified shuffle split with regression stratify = None else: if modus == 'singlelabel': stratify = i_class_temp elif modus == 'multilabel': # Create a stratification object from the labels # Label = 0 means no label equals one # Other label numbers refer to the label name that is 1 stratify = list() for pnum in range(0, len(i_class[0])): plabel = 0 for lnum, slabel in enumerate(i_class): if slabel[pnum] == 1: plabel = lnum + 1 stratify.append(plabel) # Sklearn multiclass requires rows to be objects/patients # i_class = i_class.reshape(i_class.shape[1], i_class.shape[0]) i_class_temp = np.zeros((i_class.shape[1], i_class.shape[0])) for n_patient in range(0, i_class.shape[1]): for n_label in range(0, i_class.shape[0]): i_class_temp[n_patient, n_label] = i_class[n_label, n_patient] i_class_temp = i_class_temp else: raise ae.WORCKeyError('{} is not a valid modus!').format(modus) if fixedsplits is None: # Use Random Split. 
Split per patient, not per sample unique_patient_IDs, unique_indices =\ np.unique(np.asarray(patient_IDs), return_index=True) if any(clf in regressors for clf in param_grid['classifiers']): unique_stratify = None else: unique_stratify = [stratify[i] for i in unique_indices] try: unique_PID_train, indices_PID_test\ = train_test_split(unique_patient_IDs, test_size=test_size, random_state=random_seed, stratify=unique_stratify) except ValueError as e: e = str(e) + ' Increase the size of your validation set.' raise ae.WORCValueError(e) # Check for all IDs if they are in test or training indices_train = list() indices_test = list() patient_ID_train = list() patient_ID_test = list() for num, pid in enumerate(patient_IDs): if pid in unique_PID_train: indices_train.append(num) # Make sure we get a unique ID if pid in patient_ID_train: n = 1 while str(pid + '_' + str(n)) in patient_ID_train: n += 1 pid = str(pid + '_' + str(n)) patient_ID_train.append(pid) else: indices_test.append(num) # Make sure we get a unique ID if pid in patient_ID_test: n = 1 while str(pid + '_' + str(n)) in patient_ID_test: n += 1 pid = str(pid + '_' + str(n)) patient_ID_test.append(pid) # Split features and labels accordingly X_train = [image_features[i] for i in indices_train] X_test = [image_features[i] for i in indices_test] if modus == 'singlelabel': Y_train = i_class_temp[indices_train] Y_test = i_class_temp[indices_test] elif modus == 'multilabel': Y_train = i_class_temp[indices_train, :] Y_test = i_class_temp[indices_test, :] else: raise ae.WORCKeyError('{} is not a valid modus!').format(modus) else: # Use pre defined splits indices = wb.col_values(i) indices = [int(j) for j in indices[1:]] # First element is "Iteration x" train = indices[0:121] test = indices[121:] # Convert the numbers to the correct indices ind_train = list() for j in train: success = False for num, p in enumerate(patient_IDs): if str(j).zfill(3) == p[0:3]: ind_train.append(num) success = True if not success: raise ae.WORCIOError("Patient " + str(j).zfill(3) + " is not included!") ind_test = list() for j in test: success = False for num, p in enumerate(patient_IDs): if str(j).zfill(3) == p[0:3]: ind_test.append(num) success = True if not success: raise ae.WORCIOError("Patient " + str(j).zfill(3) + " is not included!") X_train = np.asarray(image_features)[ind_train].tolist() Y_train = np.asarray(i_class_temp)[ind_train].tolist() patient_ID_train = patient_IDs[ind_train] X_test = np.asarray(image_features)[ind_test].tolist() Y_test = np.asarray(i_class_temp)[ind_test].tolist() patient_ID_test = patient_IDs[ind_test] # Find best hyperparameters and construct classifier config['HyperOptimization']['use_fastr'] = use_fastr config['HyperOptimization']['fastr_plugin'] = fastr_plugin n_cores = config['General']['Joblib_ncores'] trained_classifier = random_search_parameters(features=X_train, labels=Y_train, param_grid=param_grid, n_cores=n_cores, **config['HyperOptimization']) # Create an ensemble if required if ensemble['Use']: trained_classifier.create_ensemble(X_train, Y_train) # We only want to save the feature values and one label array X_train = [x[0] for x in X_train] X_test = [x[0] for x in X_test] temp_save_data = (trained_classifier, X_train, X_test, Y_train, Y_test, patient_ID_train, patient_ID_test, random_seed) save_data.append(temp_save_data) # Create a temporary save if tempsave: panda_labels = ['trained_classifier', 'X_train', 'X_test', 'Y_train', 'Y_test', 'config', 'patient_ID_train', 'patient_ID_test', 'random_seed'] panda_data_temp =\ 
pd.Series([trained_classifier, X_train, X_test, Y_train, Y_test, config, patient_ID_train, patient_ID_test, random_seed], index=panda_labels, name='Constructed crossvalidation') panda_data = pd.DataFrame(panda_data_temp) n = 0 filename = os.path.join(fastr.config.mounts['tmp'], 'GSout', 'RS_' + str(i) + '.hdf5') while os.path.exists(filename): n += 1 filename = os.path.join(fastr.config.mounts['tmp'], 'GSout', 'RS_' + str(i + n) + '.hdf5') if not os.path.exists(os.path.dirname(filename)): os.makedirs(os.path.dirname(filename)) panda_data.to_hdf(filename, 'SVMdata') del panda_data, panda_data_temp [classifiers, X_train_set, X_test_set, Y_train_set, Y_test_set, patient_ID_train_set, patient_ID_test_set, seed_set] =\ zip(*save_data) panda_labels = ['classifiers', 'X_train', 'X_test', 'Y_train', 'Y_test', 'config', 'patient_ID_train', 'patient_ID_test', 'random_seed', 'feature_labels'] panda_data_temp =\ pd.Series([classifiers, X_train_set, X_test_set, Y_train_set, Y_test_set, config, patient_ID_train_set, patient_ID_test_set, seed_set, feature_labels], index=panda_labels, name='Constructed crossvalidation') if modus == 'singlelabel': i_name = ''.join(i_name) elif modus == 'multilabel': i_name = ','.join(i_name) classifier_labelss[i_name] = panda_data_temp panda_data = pd.DataFrame(classifier_labelss) return panda_data def nocrossval(config, label_data_train, label_data_test, image_features_train, image_features_test, param_grid=None, use_fastr=False, fastr_plugin=None, ensemble={'Use': False}, modus='singlelabel'): """ Constructs multiple individual classifiers based on the label settings Arguments: config (Dict): Dictionary with config settings label_data (Dict): should contain: patient_IDs (list): IDs of the patients, used to keep track of test and training sets, and label data label (list): List of lists, where each list contains the label status for that patient for each label label_name (list): Contains the different names that are stored in the label object image_features (numpy array): Consists of a tuple of two lists for each patient: (feature_values, feature_labels) ensemble: dictionary, optional Contains the configuration for constructing an ensemble. modus: string, default 'singlelabel' Determine whether one-vs-all classification (or regression) for each single label is used ('singlelabel') or if multilabel classification is performed ('multilabel'). 
Returns: classifier_data (pandas dataframe) """ patient_IDs_train = label_data_train['patient_IDs'] label_value_train = label_data_train['label'] label_name_train = label_data_train['label_name'] patient_IDs_test = label_data_test['patient_IDs'] if 'label' in label_data_test.keys(): label_value_test = label_data_test['label'] else: label_value_test = [None] * len(patient_IDs_test) logfilename = os.path.join(os.getcwd(), 'classifier.log') logging.basicConfig(filename=logfilename, level=logging.DEBUG) classifier_labelss = dict() print('features') logging.debug('Starting classifier') print(len(image_features_train)) # Determine modus if modus == 'singlelabel': print('Performing Single class classification.') logging.debug('Performing Single class classification.') elif modus == 'multilabel': print('Performing Multi label classification.') logging.debug('Performing Multi class classification.') label_name_train = [label_name_train] else: m = ('{} is not a valid modus!').format(modus) logging.debug(m) raise ae.WORCKeyError(m) # We only need one label instance, assuming they are all the sample feature_labels = image_features_train[0][1] for i_name in label_name_train: save_data = list() random_seed = np.random.randint(5000) # Split into test and training set, where the percentage of each # label is maintained X_train = image_features_train X_test = image_features_test if modus == 'singlelabel': Y_train = label_value_train.ravel() Y_test = label_value_test.ravel() else: # Sklearn multiclass requires rows to be objects/patients Y_train = label_value_train Y_train_temp = np.zeros((Y_train.shape[1], Y_train.shape[0])) for n_patient in range(0, Y_train.shape[1]): for n_label in range(0, Y_train.shape[0]): Y_train_temp[n_patient, n_label] = Y_train[n_label, n_patient] Y_train = Y_train_temp Y_test = label_value_test Y_test_temp = np.zeros((Y_test.shape[1], Y_test.shape[0])) for n_patient in range(0, Y_test.shape[1]): for n_label in range(0, Y_test.shape[0]): Y_test_temp[n_patient, n_label] = Y_test[n_label, n_patient] Y_test = Y_test_temp # Find best hyperparameters and construct classifier config['HyperOptimization']['use_fastr'] = use_fastr config['HyperOptimization']['fastr_plugin'] = fastr_plugin n_cores = config['General']['Joblib_ncores'] trained_classifier = random_search_parameters(features=X_train, labels=Y_train, param_grid=param_grid, n_cores=n_cores, **config['HyperOptimization']) # Create an ensemble if required if ensemble['Use']: trained_classifier.create_ensemble(X_train, Y_train) # Extract the feature values X_train = np.asarray([x[0] for x in X_train]) X_test = np.asarray([x[0] for x in X_test]) temp_save_data = (trained_classifier, X_train, X_test, Y_train, Y_test, patient_IDs_train, patient_IDs_test, random_seed) save_data.append(temp_save_data) [classifiers, X_train_set, X_test_set, Y_train_set, Y_test_set, patient_ID_train_set, patient_ID_test_set, seed_set] =\ zip(*save_data) panda_labels = ['classifiers', 'X_train', 'X_test', 'Y_train', 'Y_test', 'config', 'patient_ID_train', 'patient_ID_test', 'random_seed', 'feature_labels'] panda_data_temp =\ pd.Series([classifiers, X_train_set, X_test_set, Y_train_set, Y_test_set, config, patient_ID_train_set, patient_ID_test_set, seed_set, feature_labels], index=panda_labels, name='Constructed crossvalidation') i_name = ''.join(i_name) classifier_labelss[i_name] = panda_data_temp panda_data =
pd.DataFrame(classifier_labelss)
pandas.DataFrame
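Here pandas.DataFrame assembles one column per label name from a dict of Series that share the same index of result fields; a minimal sketch with placeholder values standing in for the trained objects.

import pandas as pd

panda_labels = ['classifiers', 'X_train', 'X_test']   # trimmed index of result fields

classifier_labelss = {
    'grade': pd.Series(['clf_obj', 'train_feats', 'test_feats'], index=panda_labels),  # hypothetical label name
}
panda_data = pd.DataFrame(classifier_labelss)
print(panda_data)   # rows are the result fields, columns are the label names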
# This script converts Wikiversity multiple-choice questions with choice-specific feedback to a standard Respondus format.

# import package
import pandas as pd

# set display options
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pandas.set_option
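pandas.set_option widens the console display so wide question tables are not truncated; options can also be read back and restored, as sketched below.

import pandas as pd

pd.set_option('display.width', 1000)
print(pd.get_option('display.width'))   # 1000
pd.reset_option('display.width')        # restore the default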
from collections import ( abc, deque, ) from decimal import Decimal from warnings import catch_warnings import numpy as np import pytest import pandas as pd from pandas import ( DataFrame, Index, MultiIndex, PeriodIndex, Series, concat, date_range, ) import pandas._testing as tm from pandas.core.arrays import SparseArray from pandas.core.construction import create_series_with_explicit_dtype from pandas.tests.extension.decimal import to_decimal class TestConcatenate: def test_append_concat(self): # GH#1815 d1 = date_range("12/31/1990", "12/31/1999", freq="A-DEC") d2 = date_range("12/31/2000", "12/31/2009", freq="A-DEC") s1 = Series(np.random.randn(10), d1) s2 = Series(np.random.randn(10), d2) s1 = s1.to_period() s2 = s2.to_period() # drops index result = concat([s1, s2]) assert isinstance(result.index, PeriodIndex) assert result.index[0] == s1.index[0] def test_concat_copy(self, using_array_manager): df = DataFrame(np.random.randn(4, 3)) df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1)) df3 = DataFrame({5: "foo"}, index=range(4)) # These are actual copies. result = concat([df, df2, df3], axis=1, copy=True) for arr in result._mgr.arrays: assert arr.base is None # These are the same. result = concat([df, df2, df3], axis=1, copy=False) for arr in result._mgr.arrays: if arr.dtype.kind == "f": assert arr.base is df._mgr.arrays[0].base elif arr.dtype.kind in ["i", "u"]: assert arr.base is df2._mgr.arrays[0].base elif arr.dtype == object: if using_array_manager: # we get the same array object, which has no base assert arr is df3._mgr.arrays[0] else: assert arr.base is not None # Float block was consolidated. df4 = DataFrame(np.random.randn(4, 1)) result = concat([df, df2, df3, df4], axis=1, copy=False) for arr in result._mgr.arrays: if arr.dtype.kind == "f": if using_array_manager: # this is a view on some array in either df or df4 assert any( np.shares_memory(arr, other) for other in df._mgr.arrays + df4._mgr.arrays ) else: # the block was consolidated, so we got a copy anyway assert arr.base is None elif arr.dtype.kind in ["i", "u"]: assert arr.base is df2._mgr.arrays[0].base elif arr.dtype == object: # this is a view on df3 assert any(np.shares_memory(arr, other) for other in df3._mgr.arrays) def test_concat_with_group_keys(self): # axis=0 df = DataFrame(np.random.randn(3, 4)) df2 = DataFrame(np.random.randn(4, 4)) result = concat([df, df2], keys=[0, 1]) exp_index = MultiIndex.from_arrays( [[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]] ) expected = DataFrame(np.r_[df.values, df2.values], index=exp_index) tm.assert_frame_equal(result, expected) result = concat([df, df], keys=[0, 1]) exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]) expected = DataFrame(np.r_[df.values, df.values], index=exp_index2) tm.assert_frame_equal(result, expected) # axis=1 df = DataFrame(np.random.randn(4, 3)) df2 = DataFrame(np.random.randn(4, 4)) result = concat([df, df2], keys=[0, 1], axis=1) expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index) tm.assert_frame_equal(result, expected) result = concat([df, df], keys=[0, 1], axis=1) expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2) tm.assert_frame_equal(result, expected) def test_concat_keys_specific_levels(self): df = DataFrame(np.random.randn(10, 4)) pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]] level = ["three", "two", "one", "zero"] result = concat( pieces, axis=1, keys=["one", "two", "three"], levels=[level], names=["group_key"], ) 
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key")) tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3])) assert result.columns.names == ["group_key", None] @pytest.mark.parametrize("mapping", ["mapping", "dict"]) def test_concat_mapping(self, mapping, non_dict_mapping_subclass): constructor = dict if mapping == "dict" else non_dict_mapping_subclass frames = constructor( { "foo": DataFrame(np.random.randn(4, 3)), "bar": DataFrame(np.random.randn(4, 3)), "baz": DataFrame(np.random.randn(4, 3)), "qux": DataFrame(np.random.randn(4, 3)), } ) sorted_keys = list(frames.keys()) result = concat(frames) expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys) tm.assert_frame_equal(result, expected) result = concat(frames, axis=1) expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1) tm.assert_frame_equal(result, expected) keys = ["baz", "foo", "bar"] result = concat(frames, keys=keys) expected = concat([frames[k] for k in keys], keys=keys) tm.assert_frame_equal(result, expected) def test_concat_keys_and_levels(self): df = DataFrame(np.random.randn(1, 3)) df2 = DataFrame(np.random.randn(1, 4)) levels = [["foo", "baz"], ["one", "two"]] names = ["first", "second"] result = concat( [df, df2, df, df2], keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")], levels=levels, names=names, ) expected = concat([df, df2, df, df2]) exp_index = MultiIndex( levels=levels + [[0]], codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]], names=names + [None], ) expected.index = exp_index tm.assert_frame_equal(result, expected) # no names result = concat( [df, df2, df, df2], keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")], levels=levels, ) assert result.index.names == (None,) * 3 # no levels result = concat( [df, df2, df, df2], keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")], names=["first", "second"], ) assert result.index.names == ("first", "second", None) tm.assert_index_equal( result.index.levels[0],
Index(["baz", "foo"], name="first")
pandas.Index
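
The test code in this row exercises `concat` with `keys`, `levels`, and `names`. A minimal, self-contained sketch of how `keys` stacks the inputs under an extra index level (toy frames chosen here, not the fixtures used in the test):

import numpy as np
import pandas as pd

df_a = pd.DataFrame(np.arange(6).reshape(3, 2), columns=["x", "y"])
df_b = pd.DataFrame(np.arange(6, 12).reshape(3, 2), columns=["x", "y"])

# keys= adds an outer index level; names= labels the resulting levels.
out = pd.concat([df_a, df_b], keys=["a", "b"], names=["group", None])
print(out.index)     # MultiIndex with outer labels 'a'/'b' and the original 0..2
print(out.loc["b"])  # selecting one key recovers the second frame
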
import requests from bs4 import BeautifulSoup import sys import numpy as np # then add this function lower down from memory_profiler import profile import pandas as pd from sortedcontainers import SortedDict import datetime import bs4 # TODO # http://www.meilleursagents.com/immobilier/recherche/?item_types%5B%5D=369681781&item_types%5B%5D=369681782&transaction_type=369681778&place_ids%5B%5D=32696 # http://www.seloger.com/list.htm?idtt=1&idtypebien=1&cp=75&tri=initial def parse_source(html, encoding='utf-8'): parsed = BeautifulSoup(html, from_encoding=encoding) return parsed def fetch_meilleursagents(): base = 'http://www.meilleursagents.com/immobilier/recherche/?redirect_url=&view_mode=list&sort_mode=ma_contract%7Cdesc&transaction_type=369681778&buyer_search_id=&user_email=&place_ids%5B%5D=138724240&place_title=&item_types%5B%5D=369681781&item_types%5B%5D=369681782&item_area_min=&item_area_max=&budget_min=&budget_max=' resp = requests.get(base, timeout=150) resp.raise_for_status() # <- no-op if status==200 parsed = parse_source(resp.content, resp.encoding) def fetch_solger(): base = 'http://www.seloger.com/list.htm?idtt=1&idtypebien=1&cp=75&tri=initial' resp = requests.get(base, timeout=150) resp.raise_for_status() # <- no-op if status==200 parsed = parse_source(resp.content, resp.encoding) def fetch_pap(): base = 'http://www.pap.fr/annonce/locations-appartement-paris-14e-g37781' try: resp = requests.get(base, timeout=150) resp.raise_for_status() # <- no-op if status==200 resp_comb = resp.content except: pass listing = [] string = {} string[15] = '15e-g37782' string[13] = '13e-g37780' string[14] = '14e-g37781' string[2] = '2e-g37769' string[3] = '3e-g37770' string[4] = '4e-g37771' string[5] = '5e-g37772' string[6] = '6e-g37773' string[7] = '7e-g37774' string[8] = '8e-g37775' string[9] = '9e-g37776' string[10] = '10e-g37777' string[11] = '11e-g37778' string[12] = '12e-g37779' string[16] = '16e-g37783' string[17] = '17e-g37784' string[18] = '18e-g37785' string[19] = '19e-g37786' string[20] = '20e-g37787' for i in np.arange(2, 20): print(i) base2 = 'http://www.pap.fr/annonce/locations-appartement-paris-{}'.format(string[i]) try: resp_ = requests.get(base2, timeout=200) except: break # resp_.raise_for_status() # <- no-op if status==200 if resp_.status_code == 404: break parsed = parse_source(resp_.content, resp_.encoding) listing.append(extract_listings_pap(parsed)) # print(listing) # resp_comb += resp_.content + resp_comb for j in np.arange(1, 7): print(j) base2 = 'http://www.pap.fr/annonce/locations-appartement-paris-{}-{}'.format( string[i], j) try: resp_ = requests.get(base2, timeout=200) except: break # resp_.raise_for_status() # <- no-op if status==200 if resp_.status_code == 404: break # resp_comb += resp_.content + resp_comb parsed = parse_source(resp_.content, resp_.encoding) listing.append(extract_listings_pap(parsed)) # return resp_comb, resp.encoding return listing def fetch_fusac(): base = 'http://ads.fusac.fr/ad-category/housing/' listing = [] try: resp = requests.get(base, timeout=100) resp.raise_for_status() # <- no-op if status==200 resp_comb = resp.content parsed = parse_source(resp.content, resp.encoding) listing.append(extract_listings_fusac(parsed)) except: pass for i in np.arange(2, 6): base2 = 'http://ads.fusac.fr/ad-category/housing/housing-offers/page/{}/'.format(i) try: resp_ = requests.get(base2, timeout=100) except: continue # resp_.raise_for_status() # <- no-op if status==200 if resp_.status_code == 404: break # resp_comb += resp_.content + resp_comb parsed = 
parse_source(resp_.content, resp_.encoding) listing.append(extract_listings_fusac(parsed)) # return resp_comb, resp.encoding return listing # handle response 200 def fetch_search_results( query=None, minAsk=600, maxAsk=1450, bedrooms=None, bundleDuplicates=1, pets_cat=1 ): listing = [] search_params = { key: val for key, val in locals().items() if val is not None } if not search_params: raise ValueError("No valid keywords") base = 'https://paris.craigslist.fr/search/apa' try: resp_ = requests.get(base, params=search_params, timeout=100) resp_.raise_for_status() # <- no-op if status==200 parsed = parse_source(resp_.content, resp_.encoding) listing.append(extract_listings(parsed)) except: return None return listing # def extract_listings(parsed): # listings = parsed.find_all("li", {"class": "result-row"}) # return listings def extract_listings_fusac(parsed): # location_attrs = {'data-latitude': True, 'data-longitude': True} listings = parsed.find_all( 'div', {'class': "prod-cnt prod-box shadow Just-listed"}) extracted = [] for j, listing in enumerate(listings[0:]): # hood = listing.find('span', {'class': 'result-hood'}) # # print(hood) # # location = {key: listing.attrs.get(key, '') for key in location_attrs} # link = listing.find('a', {'class': 'result-title hdrlnk'}) # add this # if link is not None: # descr = link.string.strip() # link_href = link.attrs['href'] price = listing.find('p', {'class': 'post-price'}) if price is not None: price = float(price.string.split()[0].replace(',', '')) # link = listing.find('div', {'class': 'listos'}).find('a',href=True)['href'] # resp = requests.get(link, timeout=10) # resp.raise_for_status() # <- no-op if status==200 desc = listing.find('p', {'class': 'post-desc'} ) if price is not None: desc = desc.string url = listing.find('div', {'class': "post-left"}).find('div', {'class': "grido"}).find('a', href=True).get('href') resp = requests.get(url, timeout=100) resp.raise_for_status() # <- no-op if status==200 parse = parse_source(resp.content, resp.encoding) try: ars = int(parse.find('div', {'class': "single-main"}).find('li', {'class': "acf-details-item"}, id="acf-cp_zipcode").find('span', {'class': 'acf-details-val'}).string[-2:]) except: ars = None this_listing = { # 'location': location, # 'link': link_href, # add this too 'price': price, 'desc': desc, # ==== # 'description': descr, 'pieces': None, 'meters': None, 'chambre': None, 'ars': ars, 'link': None } extracted.append(SortedDict(this_listing)) return extracted def extract_listings_pap(parsed): # location_attrs = {'data-latitude': True, 'data-longitude': True} listings = parsed.find_all( 'div', {'class': "box search-results-item"}) extracted = [] for listing in listings[0:]: # hood = listing.find('span', {'class': 'result-hood'}) # # print(hood) # # location = {key: listing.attrs.get(key, '') for key in location_attrs} # link = listing.find('a', {'class': 'result-title hdrlnk'}) # add this # if link is not None: # descr = link.string.strip() # link_href = link.attrs['href'] price = listing.find('span', {'class': 'price'}) if price is not None: price = float(price.string.split()[0].replace('.', '')) ref = listing.find('div', {'class': 'float-right'}).find('a', href=True)['href'] base = 'http://www.pap.fr/' + ref try: resp = requests.get(base, timeout=100) except: break link = base resp.raise_for_status() # <- no-op if status==200 resp_comb = parse_source(resp.content, resp.encoding) descr = resp_comb.find_all('p', {'class': 'item-description'})[0] desc = ' ' for line in descr.contents: if 
isinstance(line, bs4.element.NavigableString): desc += ' ' + line.string.strip('<\br>').strip('\n') # return resp_comb.find_all( # 'ul', {'class': 'item-summary'}) try: ars = int(resp_comb.find( 'div', {'class': 'item-geoloc'}).find('h2').string.split('e')[0][-2:]) except: break # return resp_comb.find_all('ul', {'class': 'item-summary'})[0].find_all('li') # print(resp_comb.find_all('ul', {'class': 'item-summary'})[0].find_all('li')) temp_dict_ = {} for lines in resp_comb.find_all('ul', {'class': 'item-summary'})[0].find_all('li'): tag = lines.contents[0].split()[0] value = int(lines.find_all('strong')[0].string.split()[0]) temp_dict_[tag] = value try: pieces = temp_dict_[u'Pi\xe8ces'] except: pieces = None try: chambre = temp_dict_[u'Chambre'] except: chambre = None try: square_meters = temp_dict_['Surface'] except: square_meters = None # meters = resp_comb.find_all('ul', {'class': 'item-summary'} # )[0].find_all('strong').string.split()[0] # link = listing.find('div', {'class': 'listos'}).find('a',href=True)['href'] # resp = requests.get(link, timeout=10) # resp.raise_for_status() # <- no-op if status==200 # desc = listing.find('p', {'class': 'post-desc'} # ) # if price is not None: # desc = desc.string # housing = listing.find('span', {'class': 'housing'}) # if housing is not None: # beds = housing.decode_contents().split('br')[0][-1] # rm = housing.decode_contents().split('m<sup>2</sup>')[0] # sqm = [int(s) for s in rm.split() if s.isdigit()] # if len(sqm) == 0: # sqm = None # else: # sqm = int(sqm[0]) this_listing = { # 'location': location, # 'link': link_href, # add this too # 'description': descr, # and this 'price': price, 'desc': desc, 'pieces': pieces, 'meters': square_meters, 'chambre': chambre, 'ars': ars, # 'meters': sqm, # 'beds': beds 'link': link } extracted.append(SortedDict(this_listing)) return extracted def extract_listings_solger(parsed): # location_attrs = {'data-latitude': True, 'data-longitude': True} listings = parsed.find_all( 'article', {'class': "listing life_annuity gold"}) extracted = [] return listings # for listing in listings[0:]: # # hood = listing.find('span', {'class': 'result-hood'}) # # # print(hood) # # # location = {key: listing.attrs.get(key, '') for key in location_attrs} # # link = listing.find('a', {'class': 'result-title hdrlnk'}) # add this # # if link is not None: # # descr = link.string.strip() # # link_href = link.attrs['href'] # price = listing.find('span', {'class': 'price'}) # if price is not None: # price = float(price.string.split()[0].replace('.', '')) # ref = listing.find('div', {'class': 'float-right'}).find('a', href=True)['href'] # base = 'http://www.pap.fr/' + ref # resp = requests.get(base, timeout=20) # link = base # resp.raise_for_status() # <- no-op if status==200 # resp_comb = parse_source(resp.content, resp.encoding) # descr = resp_comb.find_all('p', {'class': 'item-description'})[0] # desc = ' ' # for line in descr.contents: # if isinstance(line, bs4.element.NavigableString): # desc += ' ' + line.string.strip('<\br>').strip('\n') # # return resp_comb.find_all( # # 'ul', {'class': 'item-summary'}) # try: # ars = int(resp_comb.find( # 'div', {'class': 'item-geoloc'}).find('h2').string.split('e')[0][-2:]) # except: # break # # return resp_comb.find_all('ul', {'class': 'item-summary'})[0].find_all('li') # # print(resp_comb.find_all('ul', {'class': 'item-summary'})[0].find_all('li')) # temp_dict_ = {} # for lines in resp_comb.find_all('ul', {'class': 'item-summary'})[0].find_all('li'): # tag = lines.contents[0].split()[0] # 
value = int(lines.find_all('strong')[0].string.split()[0]) # temp_dict_[tag] = value # try: # pieces = temp_dict_[u'Pi\xe8ces'] # except: # pieces = None # try: # chambre = temp_dict_[u'Chambre'] # except: # chambre = None # try: # square_meters = temp_dict_['Surface'] # except: # square_meters = None # # meters = resp_comb.find_all('ul', {'class': 'item-summary'} # # )[0].find_all('strong').string.split()[0] # # link = listing.find('div', {'class': 'listos'}).find('a',href=True)['href'] # # resp = requests.get(link, timeout=10) # # resp.raise_for_status() # <- no-op if status==200 # # desc = listing.find('p', {'class': 'post-desc'} # # ) # # if price is not None: # # desc = desc.string # # housing = listing.find('span', {'class': 'housing'}) # # if housing is not None: # # beds = housing.decode_contents().split('br')[0][-1] # # rm = housing.decode_contents().split('m<sup>2</sup>')[0] # # sqm = [int(s) for s in rm.split() if s.isdigit()] # # if len(sqm) == 0: # # sqm = None # # else: # # sqm = int(sqm[0]) # this_listing = { # # 'location': location, # # 'link': link_href, # add this too # # 'description': descr, # and this # 'price': price, # 'desc': desc, # 'pieces': pieces, # 'meters': square_meters, # 'chambre': chambre, # 'ars': ars, # # 'meters': sqm, # # 'beds': beds # 'link': link # } # extracted.append(SortedDict(this_listing)) # return extracted # parsed.find_all( # ...: 'div', {'class': "box search-results-item"})[0].find('div',{'class':'float-right'}).find('a',href=True)['href'] def extract_listings(parsed): # location_attrs = {'data-latitude': True, 'data-longitude': True} listings = parsed.find_all("li", {"class": "result-row"}) extracted = [] for listing in listings[2:]: hood = listing.find('span', {'class': 'result-hood'}) # print(hood) # location = {key: listing.attrs.get(key, '') for key in location_attrs} link = listing.find('a', {'class': 'result-title hdrlnk'}) # add this if link is not None: descr = link.string.strip() link_href = link.attrs['href'] price = listing.find('span', {'class': 'result-price'}) if price is not None: if price.string is not None: price = int(price.string[1:]) housing = listing.find('span', {'class': 'housing'}) if housing is not None: beds = housing.decode_contents().split('br')[0][-1] rm = housing.decode_contents().split('m<sup>2</sup>')[0] sqm = [int(s) for s in rm.split() if s.isdigit()] if len(sqm) == 0: sqm = None else: sqm = int(sqm[0]) this_listing = { # 'location': location, 'link': link_href, # add this too 'desc': descr, # and this 'price': price, 'meters': sqm, 'chambre': beds, 'pieces': None, 'ars': None } extracted.append(SortedDict(this_listing)) return extracted if __name__ == '__main__': # df = pd.read_pickle('./ipapartment_paris.pk') df = pd.DataFrame resu = [] print('loading fusac') resu.append(fetch_fusac()) print('loading pap') resu.append(fetch_pap()) print('loading craig') resu.append(fetch_search_results()) flat = [item for lis in resu for lis1 in lis for item in lis1] df_new =
pd.DataFrame(flat)
pandas.DataFrame
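
The scraper in this row collects one `SortedDict` per listing, nested inside lists of pages and sources, and flattens them before building a frame. A minimal sketch of that last flattening step with plain dicts and only one level of nesting (field names mirror the ones used above; the values are made up):

import pandas as pd

results = [
    [{"price": 1200.0, "ars": 14, "meters": 30, "desc": "studio", "link": None}],
    [{"price": 950.0, "ars": 11, "meters": 22, "desc": "T1", "link": None}],
]

# Flatten the nested list of listing dicts, then build the frame in one call.
flat = [item for page in results for item in page]
df_new = pd.DataFrame(flat)
print(df_new[["price", "ars", "meters"]])
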
from kfp.components import InputPath, OutputPath
from kfp.v2.dsl import (Artifact, Dataset, Input, Model, Output, Metrics, ClassificationMetrics)


def get_full_adj_prices(
    # adj_price_dataset01_path: InputPath('DataFrame'),
    # adj_price_dataset02_path: InputPath('DataFrame'),
    # adj_price_dataset03_path: InputPath('DataFrame'),
    # adj_price_dataset04_path: InputPath('DataFrame'),
    # adj_price_dataset05_path: InputPath('DataFrame'),
    # full_adj_prices_dataset_path: OutputPath('DataFrame')
    adj_price_dataset01: Input[Dataset],
    adj_price_dataset02: Input[Dataset],
    adj_price_dataset03: Input[Dataset],
    adj_price_dataset04: Input[Dataset],
    adj_price_dataset05: Input[Dataset],
    adj_price_dataset06: Input[Dataset],
    adj_price_dataset07: Input[Dataset],
    adj_price_dataset08: Input[Dataset],
    adj_price_dataset09: Input[Dataset],
    adj_price_dataset10: Input[Dataset],
    adj_price_dataset11: Input[Dataset],
    full_adj_prices_dataset: Output[Dataset]
):
    import pandas as pd

    df_adj_price_01 = pd.read_pickle(adj_price_dataset01.path)
    df_adj_price_02 = pd.read_pickle(adj_price_dataset02.path)
    df_adj_price_03 = pd.read_pickle(adj_price_dataset03.path)
    df_adj_price_04 = pd.read_pickle(adj_price_dataset04.path)
    df_adj_price_05 = pd.read_pickle(adj_price_dataset05.path)
    df_adj_price_06 =
pd.read_pickle(adj_price_dataset06.path)
pandas.read_pickle
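
The Kubeflow component in this row loads eleven pickled price frames and declares a single `full_adj_prices_dataset` output, so presumably they are merged into one frame afterwards. A hypothetical sketch of that merging step in plain pandas; the file names are invented stand-ins for the `Input[Dataset].path` values, and the use of `pd.concat` is an assumption about how the author combines them:

import pandas as pd

# Hypothetical paths standing in for the Input[Dataset].path values.
paths = [f"adj_price_{i:02d}.pkl" for i in range(1, 12)]

frames = [pd.read_pickle(p) for p in paths]
full_adj_prices = pd.concat(frames)  # assumption: row-wise concatenation
full_adj_prices.to_pickle("full_adj_prices.pkl")
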
import pandas as pd import numpy as np import math from tqdm import tqdm from multiprocessing import Process def switch_5(argument): switcher = { 1: "5", 2: "10", 3: "15", 4: "20", 5: "25", 6: "30", 7: "35", 8: "40", 9: "45", 10: "50", 11: "55", 12: "60", 13: "65", 14: "70", 15: "75", 16: "80", 17: "85", 18: "90", 19: "95", 20: "99" } return switcher.get(argument, "0") def Prepare_datasets(log_comb_ite_with_participants_process, process_id, OUTPUT_PATH): # set iki = iki_prev for cases of ite occurrence in order to visualize the graphs correctly #for row in range(log_comb_ite_with_participants_process.shape[0]): # if(log_comb_ite_with_participants_process.iloc[row]['ite_prev'] != 'none'): # log_comb_ite_with_participants_process.loc[log_comb_ite_with_participants_process.index[row],'iki'] = log_comb_ite_with_participants_process.iloc[row]['iki_prev'] #log_combined_ite_1 = pd.read_csv('log_processed_autocorr_and_predict_combined.csv') #log_combined_ite = log_combined_ite_1.loc[log_combined_ite_1['ite'] == 'autocorr_or_predict'] # Add user group column to dataset log_comb_ite_with_participants_process['user_type'] = 0 for row in range(log_comb_ite_with_participants_process.shape[0]): #user_performance_10 = round(log_comb_ite_with_participants.loc[log_comb_ite_with_participants.index[row], 'p_wpm'] / 10) #user_performance_5 = round(log_comb_ite_with_participants.loc[log_comb_ite_with_participants.index[row], 'p_wpm'] / 5) log_comb_ite_with_participants_process.loc[log_comb_ite_with_participants_process.index[row],'user_type'] = switch_5(round(log_comb_ite_with_participants_process.iloc[row]['p_wpm'] / 5)) #log_comb_ite_with_participants_spaces = log_comb_ite_with_participants_process.loc[log_comb_ite_with_participants_process['ite_prev'] == 'none'] #log_comb_ite_with_participants_ites = log_comb_ite_with_participants_process.loc[log_comb_ite_with_participants_process['ite_prev'] != 'none'] # save files # print('Saving files...') log_comb_ite_with_participants_process.to_csv(OUTPUT_PATH + str(process_id) + '.csv', index=False) #log_comb_ite_with_participants_ites.to_csv('log_comb_ite_with_participants_ites.csv') #log_comb_ite_with_participants_spaces.to_csv('log_comb_ite_with_participants_spaces.csv') def Run_B_1(LOG_ID): print(' ----- B1 -----------------------------------------------------------\n') global INPUT_PATH global OUTPUT_PATH INPUT_PATH = r'~\\Desktop\\VB_Schnittstelle\\Dataset\\Data\\Log' + str(LOG_ID) + '\\log_' + str(LOG_ID) + '_valid_processed_new_with_participants.csv' OUTPUT_PATH = r'~\\Desktop\\VB_Schnittstelle\\Dataset\\Data\\Log' + str(LOG_ID) + '\\User_Performance\\log_' + str(LOG_ID) + '_valid_processed_new_with_user_groups_after_process_' # print('Reading files...') log_comb_ite_with_participants = pd.read_csv(INPUT_PATH) # Add multi processing # print('Running processes...') processes = [] logs = {} # create processes num_processes = 40 step = math.ceil(log_comb_ite_with_participants.shape[0]/ num_processes ) for i in range(num_processes): start = i * step if(i + 1 != num_processes): end = start + step else: end = log_comb_ite_with_participants.shape[0] logs[i] =
pd.DataFrame(log_comb_ite_with_participants[start:end], columns=log_comb_ite_with_participants.columns)
pandas.DataFrame
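
`Run_B_1` in this row splits the log into `num_processes` slices by computing `step = ceil(n / num_processes)` and copying each `[start:end)` slice into its own frame. A shorter way to obtain an equivalent partition, shown here only as an alternative sketch (not the author's code), is `np.array_split`:

import numpy as np
import pandas as pd

log = pd.DataFrame({"p_wpm": np.random.uniform(10, 100, size=1000)})

num_processes = 40
chunks = np.array_split(log, num_processes)  # list of roughly equal DataFrame slices

# Each chunk keeps the original columns and can be handed to one worker process.
print(len(chunks), chunks[0].shape)
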
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import tensorflow_datasets as tfds

tfds.disable_progress_bar()


def prepare_titanic(test_size=0.3, random_state=123):
    print('Download or read from disk.')
    ds = tfds.load('titanic', split='train')

    # Turn DataSet adapter into DataFrame
    print('Convert to pandas.DataFrame')
    X = []
    y = []
    for ex in tfds.as_numpy(ds):
        x_i, y_i = ex['features'], ex['survived']
        X.append(x_i)
        y.append(y_i)
    df_X =
pd.DataFrame(X)
pandas.DataFrame
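
`prepare_titanic` above accumulates the TFDS `features` dicts and `survived` labels before converting them to pandas objects and, judging by the signature, splitting them with `train_test_split`. A minimal sketch of that tail end using toy rows instead of the TFDS download (the column names and the split arguments are illustrative assumptions):

import pandas as pd
from sklearn.model_selection import train_test_split

X = [{"age": 22.0, "fare": 7.25, "sex": 0},
     {"age": 38.0, "fare": 71.3, "sex": 1},
     {"age": 26.0, "fare": 7.9, "sex": 1},
     {"age": 35.0, "fare": 53.1, "sex": 1}]
y = [0, 1, 1, 1]

df_X = pd.DataFrame(X)
df_y = pd.Series(y, name="survived")
X_train, X_test, y_train, y_test = train_test_split(
    df_X, df_y, test_size=0.25, random_state=123)
print(X_train.shape, X_test.shape)
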
""" test the scalar Timestamp """ import pytz import pytest import dateutil import calendar import locale import numpy as np from dateutil.tz import tzutc from pytz import timezone, utc from datetime import datetime, timedelta import pandas.util.testing as tm import pandas.util._test_decorators as td from pandas.tseries import offsets from pandas._libs.tslibs import conversion from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz from pandas.errors import OutOfBoundsDatetime from pandas.compat import long, PY3 from pandas.compat.numpy import np_datetime64_compat from pandas import Timestamp, Period, Timedelta, NaT class TestTimestampProperties(object): def test_properties_business(self): ts = Timestamp('2017-10-01', freq='B') control = Timestamp('2017-10-01') assert ts.dayofweek == 6 assert not ts.is_month_start # not a weekday assert not ts.is_quarter_start # not a weekday # Control case: non-business is month/qtr start assert control.is_month_start assert control.is_quarter_start ts = Timestamp('2017-09-30', freq='B') control = Timestamp('2017-09-30') assert ts.dayofweek == 5 assert not ts.is_month_end # not a weekday assert not ts.is_quarter_end # not a weekday # Control case: non-business is month/qtr start assert control.is_month_end assert control.is_quarter_end def test_fields(self): def check(value, equal): # that we are int/long like assert isinstance(value, (int, long)) assert value == equal # GH 10050 ts = Timestamp('2015-05-10 09:06:03.000100001') check(ts.year, 2015) check(ts.month, 5) check(ts.day, 10) check(ts.hour, 9) check(ts.minute, 6) check(ts.second, 3) pytest.raises(AttributeError, lambda: ts.millisecond) check(ts.microsecond, 100) check(ts.nanosecond, 1) check(ts.dayofweek, 6) check(ts.quarter, 2) check(ts.dayofyear, 130) check(ts.week, 19) check(ts.daysinmonth, 31) check(ts.daysinmonth, 31) # GH 13303 ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern') check(ts.year, 2014) check(ts.month, 12) check(ts.day, 31) check(ts.hour, 23) check(ts.minute, 59) check(ts.second, 0) pytest.raises(AttributeError, lambda: ts.millisecond) check(ts.microsecond, 0) check(ts.nanosecond, 0) check(ts.dayofweek, 2) check(ts.quarter, 4) check(ts.dayofyear, 365) check(ts.week, 1) check(ts.daysinmonth, 31) ts = Timestamp('2014-01-01 00:00:00+01:00') starts = ['is_month_start', 'is_quarter_start', 'is_year_start'] for start in starts: assert getattr(ts, start) ts = Timestamp('2014-12-31 23:59:59+01:00') ends = ['is_month_end', 'is_year_end', 'is_quarter_end'] for end in ends: assert getattr(ts, end) # GH 12806 @pytest.mark.parametrize('data', [Timestamp('2017-08-28 23:00:00'), Timestamp('2017-08-28 23:00:00', tz='EST')]) @pytest.mark.parametrize('time_locale', [ None] if tm.get_locales() is None else [None] + tm.get_locales()) def test_names(self, data, time_locale): # GH 17354 # Test .weekday_name, .day_name(), .month_name with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False): assert data.weekday_name == 'Monday' if time_locale is None: expected_day = 'Monday' expected_month = 'August' else: with tm.set_locale(time_locale, locale.LC_TIME): expected_day = calendar.day_name[0].capitalize() expected_month = calendar.month_name[8].capitalize() assert data.day_name(time_locale) == expected_day assert data.month_name(time_locale) == expected_month # Test NaT nan_ts = Timestamp(NaT) assert np.isnan(nan_ts.day_name(time_locale)) assert np.isnan(nan_ts.month_name(time_locale)) @pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 
'Asia/Tokyo']) def test_is_leap_year(self, tz): # GH 13727 dt = Timestamp('2000-01-01 00:00:00', tz=tz) assert dt.is_leap_year assert isinstance(dt.is_leap_year, bool) dt = Timestamp('1999-01-01 00:00:00', tz=tz) assert not dt.is_leap_year dt = Timestamp('2004-01-01 00:00:00', tz=tz) assert dt.is_leap_year dt = Timestamp('2100-01-01 00:00:00', tz=tz) assert not dt.is_leap_year def test_woy_boundary(self): # make sure weeks at year boundaries are correct d = datetime(2013, 12, 31) result = Timestamp(d).week expected = 1 # ISO standard assert result == expected d = datetime(2008, 12, 28) result = Timestamp(d).week expected = 52 # ISO standard assert result == expected d = datetime(2009, 12, 31) result = Timestamp(d).week expected = 53 # ISO standard assert result == expected d = datetime(2010, 1, 1) result = Timestamp(d).week expected = 53 # ISO standard assert result == expected d = datetime(2010, 1, 3) result = Timestamp(d).week expected = 53 # ISO standard assert result == expected result = np.array([Timestamp(datetime(*args)).week for args in [(2000, 1, 1), (2000, 1, 2), ( 2005, 1, 1), (2005, 1, 2)]]) assert (result == [52, 52, 53, 53]).all() class TestTimestampConstructors(object): def test_constructor(self): base_str = '2014-07-01 09:00' base_dt = datetime(2014, 7, 1, 9) base_expected = 1404205200000000000 # confirm base representation is correct import calendar assert (calendar.timegm(base_dt.timetuple()) * 1000000000 == base_expected) tests = [(base_str, base_dt, base_expected), ('2014-07-01 10:00', datetime(2014, 7, 1, 10), base_expected + 3600 * 1000000000), ('2014-07-01 09:00:00.000008000', datetime(2014, 7, 1, 9, 0, 0, 8), base_expected + 8000), ('2014-07-01 09:00:00.000000005', Timestamp('2014-07-01 09:00:00.000000005'), base_expected + 5)] timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9), ('US/Eastern', -4), ('dateutil/US/Pacific', -7), (pytz.FixedOffset(-180), -3), (dateutil.tz.tzoffset(None, 18000), 5)] for date_str, date, expected in tests: for result in [Timestamp(date_str), Timestamp(date)]: # only with timestring assert result.value == expected assert conversion.pydt_to_i8(result) == expected # re-creation shouldn't affect to internal value result = Timestamp(result) assert result.value == expected assert conversion.pydt_to_i8(result) == expected # with timezone for tz, offset in timezones: for result in [Timestamp(date_str, tz=tz), Timestamp(date, tz=tz)]: expected_tz = expected - offset * 3600 * 1000000000 assert result.value == expected_tz assert conversion.pydt_to_i8(result) == expected_tz # should preserve tz result = Timestamp(result) assert result.value == expected_tz assert conversion.pydt_to_i8(result) == expected_tz # should convert to UTC result = Timestamp(result, tz='UTC') expected_utc = expected - offset * 3600 * 1000000000 assert result.value == expected_utc assert conversion.pydt_to_i8(result) == expected_utc def test_constructor_with_stringoffset(self): # GH 7833 base_str = '2014-07-01 11:00:00+02:00' base_dt = datetime(2014, 7, 1, 9) base_expected = 1404205200000000000 # confirm base representation is correct import calendar assert (calendar.timegm(base_dt.timetuple()) * 1000000000 == base_expected) tests = [(base_str, base_expected), ('2014-07-01 12:00:00+02:00', base_expected + 3600 * 1000000000), ('2014-07-01 11:00:00.000008000+02:00', base_expected + 8000), ('2014-07-01 11:00:00.000000005+02:00', base_expected + 5)] timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9), ('US/Eastern', -4), ('dateutil/US/Pacific', 
-7), (pytz.FixedOffset(-180), -3), (dateutil.tz.tzoffset(None, 18000), 5)] for date_str, expected in tests: for result in [Timestamp(date_str)]: # only with timestring assert result.value == expected assert conversion.pydt_to_i8(result) == expected # re-creation shouldn't affect to internal value result = Timestamp(result) assert result.value == expected assert conversion.pydt_to_i8(result) == expected # with timezone for tz, offset in timezones: result = Timestamp(date_str, tz=tz) expected_tz = expected assert result.value == expected_tz assert conversion.pydt_to_i8(result) == expected_tz # should preserve tz result = Timestamp(result) assert result.value == expected_tz assert conversion.pydt_to_i8(result) == expected_tz # should convert to UTC result = Timestamp(result, tz='UTC') expected_utc = expected assert result.value == expected_utc assert conversion.pydt_to_i8(result) == expected_utc # This should be 2013-11-01 05:00 in UTC # converted to Chicago tz result = Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago') assert result.value == Timestamp('2013-11-01 05:00').value expected = "Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')" # noqa assert repr(result) == expected assert result == eval(repr(result)) # This should be 2013-11-01 05:00 in UTC # converted to Tokyo tz (+09:00) result = Timestamp('2013-11-01 00:00:00-0500', tz='Asia/Tokyo') assert result.value == Timestamp('2013-11-01 05:00').value expected = "Timestamp('2013-11-01 14:00:00+0900', tz='Asia/Tokyo')" assert repr(result) == expected assert result == eval(repr(result)) # GH11708 # This should be 2015-11-18 10:00 in UTC # converted to Asia/Katmandu result = Timestamp("2015-11-18 15:45:00+05:45", tz="Asia/Katmandu") assert result.value == Timestamp("2015-11-18 10:00").value expected = "Timestamp('2015-11-18 15:45:00+0545', tz='Asia/Katmandu')" assert repr(result) == expected assert result == eval(repr(result)) # This should be 2015-11-18 10:00 in UTC # converted to Asia/Kolkata result = Timestamp("2015-11-18 15:30:00+05:30", tz="Asia/Kolkata") assert result.value ==
Timestamp("2015-11-18 10:00")
pandas.Timestamp
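
The constructor tests in this row assert, among other things, that a string carrying a fixed UTC offset combined with an explicit `tz=` yields the same instant expressed in that zone. A small standalone illustration of that behaviour, reusing the America/Chicago case from the test (valid for the pandas version these tests target):

from pandas import Timestamp

ts = Timestamp('2013-11-01 00:00:00-0500', tz='America/Chicago')

# Same UTC instant as the naive 05:00 UTC timestamp...
assert ts.value == Timestamp('2013-11-01 05:00').value
# ...but displayed in the requested zone.
print(ts)  # 2013-11-01 00:00:00-05:00 (tz='America/Chicago')
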
from lxml import etree import numpy as np import pandas as pd import re from sklearn.model_selection import train_test_split import Bio from Bio import SeqIO from pathlib import Path import glob #console from tqdm import tqdm as tqdm import re import os import itertools #jupyter #from tqdm import tqdm_notebook as tqdm #not supported in current tqdm version #from tqdm.autonotebook import tqdm #import logging #logging.getLogger('proteomics_utils').addHandler(logging.NullHandler()) #logger=logging.getLogger('proteomics_utils') #for cd-hit import subprocess from sklearn.metrics import f1_score import hashlib #for mhcii datasets from utils.dataset_utils import split_clusters_single,pick_all_members_from_clusters ####################################################################################################### #Parsing all sorts of protein data ####################################################################################################### def parse_uniprot_xml(filename,max_entries=0,parse_features=[]): '''parse uniprot xml file, which contains the full uniprot information (e.g. ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.xml.gz) using custom low-level https://www.ibm.com/developerworks/xml/library/x-hiperfparse/ c.f. for full format https://www.uniprot.org/docs/uniprot.xsd parse_features: a list of strings specifying the kind of features to be parsed such as "modified residue" for phosphorylation sites etc. (see https://www.uniprot.org/help/mod_res) (see the xsd file for all possible entries) ''' context = etree.iterparse(str(filename), events=["end"], tag="{http://uniprot.org/uniprot}entry") context = iter(context) rows =[] for _, elem in tqdm(context): parse_func_uniprot(elem,rows,parse_features=parse_features) elem.clear() while elem.getprevious() is not None: del elem.getparent()[0] if(max_entries > 0 and len(rows)==max_entries): break df=pd.DataFrame(rows).set_index("ID") df['name'] = df.name.astype(str) df['dataset'] = df.dataset.astype('category') df['organism'] = df.organism.astype('category') df['sequence'] = df.sequence.astype(str) return df def parse_func_uniprot(elem, rows, parse_features=[]): '''extracting a single record from uniprot xml''' seqs = elem.findall("{http://uniprot.org/uniprot}sequence") sequence="" #print(seqs) for s in seqs: sequence=s.text #print("sequence",sequence) if sequence =="" or str(sequence)=="None": continue else: break #Sequence & fragment sequence="" fragment_map = {"single":1, "multiple":2} fragment = 0 seqs = elem.findall("{http://uniprot.org/uniprot}sequence") for s in seqs: if 'fragment' in s.attrib: fragment = fragment_map[s.attrib["fragment"]] sequence=s.text if sequence != "": break #print("sequence:",sequence) #print("fragment:",fragment) #dataset dataset=elem.attrib["dataset"] #accession accession = "" accessions = elem.findall("{http://uniprot.org/uniprot}accession") for a in accessions: accession=a.text if accession !="":#primary accession! https://www.uniprot.org/help/accession_numbers!!! 
break #print("accession",accession) #protein existence (PE in plain text) proteinexistence_map = {"evidence at protein level":5,"evidence at transcript level":4,"inferred from homology":3,"predicted":2,"uncertain":1} proteinexistence = -1 accessions = elem.findall("{http://uniprot.org/uniprot}proteinExistence") for a in accessions: proteinexistence=proteinexistence_map[a.attrib["type"]] break #print("protein existence",proteinexistence) #name name = "" names = elem.findall("{http://uniprot.org/uniprot}name") for n in names: name=n.text break #print("name",name) #organism organism = "" organisms = elem.findall("{http://uniprot.org/uniprot}organism") for s in organisms: s1=s.findall("{http://uniprot.org/uniprot}name") for s2 in s1: if(s2.attrib["type"]=='scientific'): organism=s2.text break if organism !="": break #print("organism",organism) #dbReference: PMP,GO,Pfam, EC ids = elem.findall("{http://uniprot.org/uniprot}dbReference") pfams = [] gos =[] ecs = [] pdbs =[] for i in ids: #print(i.attrib["id"],i.attrib["type"]) #cf. http://geneontology.org/external2go/uniprotkb_kw2go for Uniprot Keyword<->GO mapping #http://geneontology.org/ontology/go-basic.obo for List of go terms #https://www.uniprot.org/help/keywords_vs_go keywords vs. go if(i.attrib["type"]=="GO"): tmp1 = i.attrib["id"] for i2 in i: if i2.attrib["type"]=="evidence": tmp2= i2.attrib["value"] gos.append([int(tmp1[3:]),int(tmp2[4:])]) #first value is go code, second eco evidence ID (see mapping below) elif(i.attrib["type"]=="Pfam"): pfams.append(i.attrib["id"]) elif(i.attrib["type"]=="EC"): ecs.append(i.attrib["id"]) elif(i.attrib["type"]=="PDB"): pdbs.append(i.attrib["id"]) #print("PMP: ", pmp) #print("GOs:",gos) #print("Pfams:",pfam) #print("ECs:",ecs) #print("PDBs:",pdbs) #keyword keywords = elem.findall("{http://uniprot.org/uniprot}keyword") keywords_lst = [] #print(keywords) for k in keywords: keywords_lst.append(int(k.attrib["id"][-4:]))#remove the KW- #print("keywords: ",keywords_lst) #comments = elem.findall("{http://uniprot.org/uniprot}comment") #comments_lst=[] ##print(comments) #for c in comments: # if(c.attrib["type"]=="function"): # for c1 in c: # comments_lst.append(c1.text) #print("function: ",comments_lst) #ptm etc if len(parse_features)>0: ptms=[] features = elem.findall("{http://uniprot.org/uniprot}feature") for f in features: if(f.attrib["type"] in parse_features):#only add features of the requested type locs=[] for l in f[0]: locs.append(int(l.attrib["position"])) ptms.append([f.attrib["type"],f.attrib["description"] if 'description' in f.attrib else "NaN",locs, f.attrib['evidence'] if 'evidence' in f.attrib else "NaN"]) #print(ptms) data_dict={"ID": accession, "name": name, "dataset":dataset, "proteinexistence":proteinexistence, "fragment":fragment, "organism":organism, "ecs": ecs, "pdbs": pdbs, "pfams" : pfams, "keywords": keywords_lst, "gos": gos, "sequence": sequence} if len(parse_features)>0: data_dict["features"]=ptms #print("all children:") #for c in elem: # print(c) # print(c.tag) # print(c.attrib) rows.append(data_dict) def parse_uniprot_seqio(filename,max_entries=0): '''parse uniprot xml file using the SeqIO parser (smaller functionality e.g. 
does not extract evidence codes for GO)''' sprot = SeqIO.parse(filename, "uniprot-xml") rows = [] for p in tqdm(sprot): accession = str(p.name) name = str(p.id) dataset = str(p.annotations['dataset']) organism = str(p.annotations['organism']) ecs, pdbs, pfams, gos = [],[],[],[] for ref in p.dbxrefs: k = ref.split(':') if k[0] == 'GO': gos.append(':'.join(k[1:])) elif k[0] == 'Pfam': pfams.append(k[1]) elif k[0] == 'EC': ecs.append(k[1]) elif k[0] == 'PDB': pdbs.append(k[1:]) if 'keywords' in p.annotations.keys(): keywords = p.annotations['keywords'] else: keywords = [] sequence = str(p.seq) row = { 'ID': accession, 'name':name, 'dataset':dataset, 'organism':organism, 'ecs':ecs, 'pdbs':pdbs, 'pfams':pfams, 'keywords':keywords, 'gos':gos, 'sequence':sequence} rows.append(row) if(max_entries>0 and len(rows)==max_entries): break df=pd.DataFrame(rows).set_index("ID") df['name'] = df.name.astype(str) df['dataset'] = df.dataset.astype('category') df['organism'] = df.organism.astype('category') df['sequence'] = df.sequence.astype(str) return df def filter_human_proteome(df_sprot): '''extracts human proteome from swissprot proteines in DataFrame with column organism ''' is_Human = np.char.find(df_sprot.organism.values.astype(str), "Human") !=-1 is_human = np.char.find(df_sprot.organism.values.astype(str), "human") !=-1 is_sapiens = np.char.find(df_sprot.organism.values.astype(str), "sapiens") !=-1 is_Sapiens = np.char.find(df_sprot.organism.values.astype(str), "Sapiens") !=-1 return df_sprot[is_Human|is_human|is_sapiens|is_Sapiens] def filter_aas(df, exclude_aas=["B","J","X","Z"]): '''excludes sequences containing exclude_aas: B = D or N, J = I or L, X = unknown, Z = E or Q''' return df[~df.sequence.apply(lambda x: any([e in x for e in exclude_aas]))] ###################################################################################################### def explode_clusters_df(df_cluster): '''aux. function to convert cluster dataframe from one row per cluster to one row per ID''' df=df_cluster.reset_index(level=0) rows = [] if('repr_accession' in df.columns):#include representative if it exists _ = df.apply(lambda row: [rows.append([nn,row['entry_id'], row['repr_accession']==nn ]) for nn in row.members], axis=1) df_exploded = pd.DataFrame(rows, columns=['ID',"cluster_ID","representative"]).set_index(['ID']) else: _ = df.apply(lambda row: [rows.append([nn,row['entry_id']]) for nn in row.members], axis=1) df_exploded = pd.DataFrame(rows, columns=['ID',"cluster_ID"]).set_index(['ID']) return df_exploded def parse_uniref(filename,max_entries=0,parse_sequence=False, df_selection=None, exploded=True): '''parse uniref (clustered sequences) xml ftp://ftp.ebi.ac.uk/pub/databases/uniprot/uniref/uniref50/uniref50.xml.gz unzipped 100GB file using custom low-level parser https://www.ibm.com/developerworks/xml/library/x-hiperfparse/ max_entries: only return first max_entries entries (0=all) parse_sequences: return also representative sequence df_selection: only include entries with accessions that are present in df_selection.index (None keeps all records) exploded: return one row per ID instead of one row per cluster c.f. 
for full format ftp://ftp.ebi.ac.uk/pub/databases/uniprot/uniref/uniref50/README ''' #issue with long texts https://stackoverflow.com/questions/30577796/etree-incomplete-child-text #wait for end rather than start tag context = etree.iterparse(str(filename), events=["end"], tag="{http://uniprot.org/uniref}entry") context = iter(context) rows =[] for _, elem in tqdm(context): parse_func_uniref(elem,rows,parse_sequence=parse_sequence, df_selection=df_selection) elem.clear() while elem.getprevious() is not None: del elem.getparent()[0] if(max_entries > 0 and len(rows)==max_entries): break df=pd.DataFrame(rows).set_index("entry_id") df["num_members"]=df.members.apply(len) if(exploded): return explode_clusters_df(df) return df def parse_func_uniref(elem, rows, parse_sequence=False, df_selection=None): '''extract a single uniref entry''' #entry ID entry_id = elem.attrib["id"] #print("cluster id",entry_id) #name name = "" names = elem.findall("{http://uniprot.org/uniref}name") for n in names: name=n.text[9:] break #print("cluster name",name) members=[] #representative member repr_accession = "" repr_sequence ="" repr = elem.findall("{http://uniprot.org/uniref}representativeMember") for r in repr: s1=r.findall("{http://uniprot.org/uniref}dbReference") for s2 in s1: for s3 in s2: if s3.attrib["type"]=="UniProtKB accession": if(repr_accession == ""): repr_accession = s3.attrib["value"]#pick primary accession members.append(s3.attrib["value"]) if parse_sequence is True: s1=r.findall("{http://uniprot.org/uniref}sequence") for s2 in s1: repr_sequence = s2.text if repr_sequence !="": break #print("representative member accession:",repr_accession) #print("representative member sequence:",repr_sequence) #all members repr = elem.findall("{http://uniprot.org/uniref}member") for r in repr: s1=r.findall("{http://uniprot.org/uniref}dbReference") for s2 in s1: for s3 in s2: if s3.attrib["type"]=="UniProtKB accession": members.append(s3.attrib["value"]) #add primary and secondary accessions #print("members", members) if(not(df_selection is None)): #apply selection filter members = [y for y in members if y in df_selection.index] #print("all children") #for c in elem: # print(c) # print(c.tag) # print(c.attrib) if(len(members)>0): data_dict={"entry_id": entry_id, "name": name, "repr_accession":repr_accession, "members":members} if parse_sequence is True: data_dict["repr_sequence"]=repr_sequence rows.append(data_dict) ########################################################################################################################### #proteins and peptides from fasta ########################################################################################################################### def parse_uniprot_fasta(fasta_path, max_entries=0): '''parse uniprot from fasta file (which contains less information than the corresponding xml but is also much smaller e.g. 
ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.fasta)''' rows=[] dataset_dict={"sp":"Swiss-Prot","tr":"TrEMBL"} for seq_record in tqdm(SeqIO.parse(fasta_path, "fasta")): sid=seq_record.id.split("|") accession = sid[1] dataset = dataset_dict[sid[0]] name = sid[2] description = seq_record.description sequence=str(seq_record.seq) #print(description) m = re.search('PE=\d', description) pe=int(m.group(0).split("=")[1]) m = re.search('OS=.* (?=OX=)', description) organism=m.group(0).split("=")[1].strip() data_dict={"ID": accession, "name": name, "dataset":dataset, "proteinexistence":pe, "organism":organism, "sequence": sequence} rows.append(data_dict) if(max_entries > 0 and len(rows)==max_entries): break df=pd.DataFrame(rows).set_index("ID") df['name'] = df.name.astype(str) df['dataset'] = df.dataset.astype('category') df['organism'] = df.organism.astype('category') df['sequence'] = df.sequence.astype(str) return df def proteins_from_fasta(fasta_path): '''load proteins (as seqrecords) from fasta (just redirects)''' return seqrecords_from_fasta(fasta_path) def seqrecords_from_fasta(fasta_path): '''load seqrecords from fasta file''' seqrecords = list(SeqIO.parse(fasta_path, "fasta")) return seqrecords def seqrecords_to_sequences(seqrecords): '''converts biopythons seqrecords into a plain list of sequences''' return [str(p.seq) for p in seqrecords] def sequences_to_fasta(sequences, fasta_path, sequence_id_prefix="s"): '''save plain list of sequences to fasta''' with open(fasta_path, "w") as output_handle: for i,s in tqdm(enumerate(sequences)): record = Bio.SeqRecord.SeqRecord(Bio.Seq.Seq(s), id=sequence_id_prefix+str(i), description="") SeqIO.write(record, output_handle, "fasta") def df_to_fasta(df, fasta_path): '''Save column "sequence" from pandas DataFrame to fasta file using the index of the DataFrame as ID. 
Preserves original IDs in contrast to the function sequences_to_fasta()''' with open(fasta_path, "w") as output_handle: for row in df.iterrows(): record = Bio.SeqRecord.SeqRecord(Bio.Seq.Seq(row[1]["sequence"]), id=str(row[0]), description="") SeqIO.write(record, output_handle, "fasta") def sequences_to_df(sequences, sequence_id_prefix="s"): data = {'ID': [(sequence_id_prefix+str(i) if sequence_id_prefix!="" else i) for i in range(len(sequences))], 'sequence': sequences} df=pd.DataFrame.from_dict(data) return df.set_index("ID") def fasta_to_df(fasta_path): seqs=SeqIO.parse(fasta_path, "fasta") res=[] for s in seqs: res.append({"ID":s.id,"sequence":str(s.seq)}) return pd.DataFrame(res) def peptides_from_proteins(protein_seqrecords, miss_cleavage=2,min_length=5,max_length=300): '''extract peptides from proteins seqrecords by trypsin digestion min_length: only return peptides of length min_length or greater (0 for all) max_length: only return peptides of length max_length or smaller (0 for all) ''' peptides = [] for seq in tqdm(protein_seqrecords): peps = trypsin_digest(str(seq.seq), miss_cleavage) peptides.extend(peps) tmp=list(set(peptides)) if(min_length>0 and max_length>0): tmp=[t for t in tmp if (len(t)>=min_length and len(t)<=max_length)] elif(min_length==0 and max_length>0): tmp=[t for t in tmp if len(t)<=max_length] elif(min_length>0 and max_length==0): tmp=[t for t in tmp if len(t)>=min_length] print("Extracted",len(tmp),"unique peptides.") return tmp def trypsin_digest(proseq, miss_cleavage): '''trypsin digestion of protein seqrecords TRYPSIN from https://github.com/yafeng/trypsin/blob/master/trypsin.py''' peptides=[] cut_sites=[0] for i in range(0,len(proseq)-1): if proseq[i]=='K' and proseq[i+1]!='P': cut_sites.append(i+1) elif proseq[i]=='R' and proseq[i+1]!='P': cut_sites.append(i+1) if cut_sites[-1]!=len(proseq): cut_sites.append(len(proseq)) if len(cut_sites)>2: if miss_cleavage==0: for j in range(0,len(cut_sites)-1): peptides.append(proseq[cut_sites[j]:cut_sites[j+1]]) elif miss_cleavage==1: for j in range(0,len(cut_sites)-2): peptides.append(proseq[cut_sites[j]:cut_sites[j+1]]) peptides.append(proseq[cut_sites[j]:cut_sites[j+2]]) peptides.append(proseq[cut_sites[-2]:cut_sites[-1]]) elif miss_cleavage==2: for j in range(0,len(cut_sites)-3): peptides.append(proseq[cut_sites[j]:cut_sites[j+1]]) peptides.append(proseq[cut_sites[j]:cut_sites[j+2]]) peptides.append(proseq[cut_sites[j]:cut_sites[j+3]]) peptides.append(proseq[cut_sites[-3]:cut_sites[-2]]) peptides.append(proseq[cut_sites[-3]:cut_sites[-1]]) peptides.append(proseq[cut_sites[-2]:cut_sites[-1]]) else: #there is no trypsin site in the protein sequence peptides.append(proseq) return list(set(peptides)) ########################################################################### # Processing CD-HIT clusters ########################################################################### def clusters_df_from_sequence_df(df,threshold=[1.0,0.9,0.5],alignment_coverage=[0.0,0.9,0.8],memory=16000, threads=8, exploded=True, verbose=False): '''create clusters df from sequence df (using cd hit) df: dataframe with sequence information threshold: similarity threshold for clustering (pass a list for hierarchical clustering e.g [1.0, 0.9, 0.5]) alignment_coverage: required minimum coverage of the longer sequence (to mimic uniref https://www.uniprot.org/help/uniref) memory: limit available memory threads: limit number of threads exploded: return exploded view of the dataframe (one row for every member vs. 
one row for every cluster) uses CD-HIT for clustering https://github.com/weizhongli/cdhit/wiki/3.-User's-Guide copy cd-hit into ~/bin TODO: extend to psi-cd-hit for thresholds smaller than 0.4 ''' if verbose: print("Exporting original dataframe as fasta...") fasta_file = "cdhit.fasta" df_original_index = list(df.index) #reindex the dataframe since cdhit can only handle 19 letters df = df.reset_index(drop=True) df_to_fasta(df, fasta_file) if(not(isinstance(threshold, list))): threshold=[threshold] alignment_coverage=[alignment_coverage] assert(len(threshold)==len(alignment_coverage)) fasta_files=[] for i,thr in enumerate(threshold): if(thr< 0.4):#use psi-cd-hit here print("thresholds lower than 0.4 require psi-cd-hit.pl require psi-cd-hit.pl (building on BLAST) which is currently not supported") return pd.DataFrame() elif(thr<0.5): wl = 2 elif(thr<0.6): wl = 3 elif(thr<0.7): wl = 4 else: wl = 5 aL = alignment_coverage[i] #cd-hit -i nr -o nr80 -c 0.8 -n 5 #cd-hit -i nr80 -o nr60 -c 0.6 -n 4 #psi-cd-hit.pl -i nr60 -o nr30 -c 0.3 if verbose: print("Clustering using cd-hit at threshold", thr, "using wordlength", wl, "and alignment coverage", aL, "...") fasta_file_new= "cdhit"+str(int(thr*100))+".fasta" command = "cd-hit -i "+fasta_file+" -o "+fasta_file_new+" -c "+str(thr)+" -n "+str(wl)+" -aL "+str(aL)+" -M "+str(memory)+" -T "+str(threads) if(verbose): print(command) process= subprocess.Popen(command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) output, error = process.communicate() if(verbose): print(output) if(error !=""): print(error) fasta_files.append(fasta_file) if(i==len(threshold)-1): fasta_files.append(fasta_file_new) fasta_file= fasta_file_new #join results from all clustering steps if verbose: print("Joining results from different clustering steps...") for i,f in enumerate(reversed(fasta_files[1:])): if verbose: print("Processing",f,"...") if(i==0): df_clusters = parse_cdhit_clstr(f+".clstr",exploded=False) else: df_clusters2 = parse_cdhit_clstr(f+".clstr",exploded=False) for id,row in df_clusters.iterrows(): members = row['members'] new_members = [list(df_clusters2[df_clusters2.repr_accession==y].members)[0] for y in members] new_members = [item for sublist in new_members for item in sublist] #flattened row['members']=new_members df_clusters["members"]=df_clusters["members"].apply(lambda x:[df_original_index[int(y)] for y in x]) df_clusters["repr_accession"]=df_clusters["repr_accession"].apply(lambda x:df_original_index[int(x)]) if(exploded): return explode_clusters_df(df_clusters) return df_clusters def parse_cdhit_clstr(filename, exploded=True): '''Aux. Function (used by clusters_df_from_sequence_df) to parse CD-HITs clstr output file in a similar way as the uniref data for the format see https://github.com/weizhongli/cdhit/wiki/3.-User's-Guide#CDHIT exploded: single row for every ID instead of single for every cluster ''' def save_cluster(rows,members,representative): if(len(members)>0): rows.append({"entry_id":filename[:-6]+"_"+representative, "members":members, "repr_accession":representative}) rows=[] with open(filename, 'r') as f: members=[] representative="" for l in tqdm(f): if(l[0]==">"): save_cluster(rows,members,representative) members=[] representative="" else: member=(l.split(">")[1]).split("...")[0] members.append(member) if "*" in l: representative = member save_cluster(rows,members,representative) df=
pd.DataFrame(rows)
pandas.DataFrame
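
Among the helpers in this row, `trypsin_digest` encodes the standard cleavage rule: cut after K or R unless the next residue is P, optionally also keeping peptides that span missed cleavage sites. A compact sketch of just the zero-missed-cleavage case, written with a regex instead of the explicit cut-site loop (a simplification of the function above, not a replacement for it):

import re

def simple_trypsin_digest(proseq):
    # Split after K or R unless followed by P (zero missed cleavages).
    return [p for p in re.split(r'(?<=[KR])(?!P)', proseq) if p]

print(simple_trypsin_digest("MKWVTFISLLLLFSSAYSRGVFRRDTHKSEIAHRFK"))
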
#!/usr/bin/env python
# coding: utf-8

# In[1]:

import os
project_name = "reco-tut-sor"; branch = "main"; account = "sparsh-ai"
project_path = os.path.join('/content', project_name)

# In[2]:

if not os.path.exists(project_path):
    get_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content')
    import mykeys
    get_ipython().system(u'rm /content/mykeys.py')
    path = "/content/" + project_name; get_ipython().system(u'mkdir "{path}"')
    get_ipython().magic(u'cd "{path}"')
    import sys; sys.path.append(path)
    get_ipython().system(u'git config --global user.email "<EMAIL>"')
    get_ipython().system(u'git config --global user.name "reco-tut"')
    get_ipython().system(u'git init')
    get_ipython().system(u'git remote add origin https://"{mykeys.git_token}":[email protected]/"{account}"/"{project_name}".git')
    get_ipython().system(u'git pull origin "{branch}"')
    get_ipython().system(u'git checkout main')
else:
    get_ipython().magic(u'cd "{project_path}"')

# In[33]:

get_ipython().system(u'git status')

# In[34]:

get_ipython().system(u'git add . && git commit -m \'commit\' && git push origin "{branch}"')

# ---

# In[3]:

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.metrics import f1_score, recall_score, precision_score
import itertools

# In[4]:

df = pd.read_csv('./data/silver/userdata.csv')
df.head()

# In[5]:

def to_categorical(df, columns):
    for col in columns:
        df[col] =
pd.Categorical(df[col])
pandas.Categorical
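
The `to_categorical` helper in this row rewrites selected columns as pandas categoricals; a model typically then consumes the integer codes rather than the raw labels. A minimal sketch of both steps on a toy frame (the `.cat.codes` step is an assumption about what the notebook does next, not part of the row above):

import pandas as pd

df = pd.DataFrame({"gender": ["F", "M", "F"],
                   "zip_code": ["90210", "10001", "90210"]})

for col in ["gender", "zip_code"]:
    df[col] = pd.Categorical(df[col])
    df[col + "_code"] = df[col].cat.codes  # integer codes for embeddings etc.

print(df.dtypes)
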
# -*- coding: utf-8 -*- """ Created on Thu Aug 19 08:55:28 2021 Purpose of the script: Lidar data have been collected on Nergica's site. The purpose of the script is to analyze lidar data availability as a function of environmental parameters. The environmental parameters are collected on 2 met masts, and a Quality Control is performed by Nergica. The lidar data availability is computed by temprature and relavtive humidity, each grouped by bin. Graphes are plotted to analyse the lidar data availability. This code contains 4 sections 1. Importation of python libraries used in the code 2. Lidar data to be analysed is saved as a dataframe and the format of dates is adjusted to be in concordance with dates on the met mast data 3. The lidar data availability is computed monthly 4. MMV1 data are saved as a dataframe, and the quality control is performed. Then, a column is added to store the windspeed corresponding to each timestamp from the lidar. This allow to identify columns containing data on both: Lidar and MMV1 5. The Lidar data availability is computed analyzed, first using MMV1 temperature, second using MMV1 relative humudity 6. Here, step 4. is repeated for MMV2 7. Here, step 5 is repeated for MMV2 8. Figures are plotted for analysis: lidar data availability by months, lidar data availability by temperature comparing MMV1 and MMV2, lidar data availability by relative humidity comparing MMV1 and MMV2, number of points in each bin of temprature and relative humidity Example to run the code : on spyder or other related IDEs -> Run the script on anaconda or other related prompts -> Make sure the file is in the right directory and run this command : python ScriptTask32_LidarDataAvailability_MetMast_Nergica.py @author: chodonou """ ################################################################### ########## 1. Importation of python libraries ##################### ################################################################### import glob import pandas as pd import numpy as np import matplotlib.patches as mpatches import matplotlib.pyplot as plt import time start_time = time.time() # Bins to group temperature or relative humiduty data for analysis, int>0 temp_bin = 1 # temprature bin RHH_bin = 1 # Relative humidity bin # data paths lidar_data_path = "./lidarData/80m_2015_dataWindCube.pkl" mmv1_data_path = "./metMastData/*80m*.csv" mmv2_data_path = "./metMastData/*78m*.csv" # Quality control codes Droped = ["R101","R103","R104","R105","R201","R202","R203","R204","R205","R206"] ################################################################### ########## 2. Extraction of LIDAR data ############################ ################################################################### dataframe_output_2015 = pd.read_pickle(lidar_data_path) print("--- Lidar data extracted ---") # Date format adjustments dataframe_output_2015['TimeStamp'] = pd.to_datetime(dataframe_output_2015.TimeStamp) dataframe_output_2015['Timestamp1'] = dataframe_output_2015['TimeStamp'].dt.strftime('%m/%d/%Y %H:%M') dataframe_output_2015['Month'] = dataframe_output_2015["Timestamp1"].str[:2] ################################################################### ########## 3. 
Lidar data availability by month #################### ################################################################### column_names = ["2015"] Lidar_avail = pd.DataFrame(columns = column_names) List_months=["09", "10", "11" ,"12"] #["01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11" ,"12"] heights=["80"] k=0 for l in List_months: if '80m Wind Speed (m/s)' in dataframe_output_2015.columns: temp2 = dataframe_output_2015.loc[dataframe_output_2015["Month"] == l] temp = temp2.loc[temp2["80m Wind Speed (m/s)"] != 'NaN' ] if len(temp2) > 0: Lidar_avail.loc[k,"2015"] = 100*len(temp)/len(temp2) k=k+1 print("--- Lidar availability by date computed ---") ################################################################### ########## 4. MMV1 data ########################################### ################################################################### # Dictionnary for MMV1 data data_CQ2015={} # MMV1 data as extracted data_CQ2015_cleaned={} # MMV1 data after quality control data_CQ2015_cleaned_Lidar={} # MMV1 data after quality control for timestamps with lidar data # MMV1 data at 80m #for name in glob.glob("./DataCQ_2015_MMV1_empty/*80m*.csv" ): # Test if files are empty for name in glob.glob(mmv1_data_path ): tp0=name.find('mmv1_') tp1=name.find('.csv') data_CQ2015[name[tp0:tp1]]= pd.read_csv(name, delimiter=';') print("--- MMV1 data extracted ---") # Perform quality control for capteur in data_CQ2015: df=data_CQ2015[capteur] for drp in Droped: df[drp].to_numpy() df = df.drop(df[df[drp] == 1].index) data_CQ2015_cleaned[capteur] = df print("--- MMV1 quality control done ---") # Add empty column to store Lidar speed column_names = ["Timestamp1"] dataframe_output_2015_Time = pd.DataFrame(columns = column_names) dataframe_output_2015_Time["Timestamp1"] = dataframe_output_2015["Timestamp1"] for key, df in data_CQ2015_cleaned.items(): df['Timestamp'] = pd.to_datetime(df.Timestamp) df['Timestamp1'] = df['Timestamp'].dt.strftime('%m/%d/%Y %H:%M') df['Month'] = df['Timestamp'].dt.strftime('%m') common = df.merge(dataframe_output_2015_Time,on=['Timestamp1']) df_Time = df["Timestamp1"] common2 = dataframe_output_2015.merge(df_Time,on=['Timestamp1']) common = common.sort_values(by=['Timestamp1']) common2 = common2.sort_values(by=['Timestamp1']) common["Lidar 80m Wind Speed (m/s)"] = common2["80m Wind Speed (m/s)"] data_CQ2015_cleaned_Lidar[key] = common ################################################################### ########## 5. 
Lidar vs MMV1 data analysis ######################### ################################################################### # Lidar availability by temperature column_names = ["temp", "Dispo 2015", "Vitesse Moyemme"] Avail_Lidar_temp = pd.DataFrame(columns = column_names) k=0 df = data_CQ2015_cleaned_Lidar["mmv1_TempUnHt80m0d_20150901_20151231"] for l in range(-25,35,temp_bin): temp2 = df.loc[df["Moyenne"] <= l+temp_bin] temp2 = temp2.loc[temp2["Moyenne"] > l] temp = temp2.loc[temp2["Lidar 80m Wind Speed (m/s)"] != 'NaN' ] temp3 = temp2["Lidar 80m Wind Speed (m/s)"].astype(float) if len(temp2) > 0: Avail_Lidar_temp.loc[k,"temp"] = l #str(l) #+ " to " + str(l+1) Avail_Lidar_temp.loc[k,"Dispo 2015"] = 100*len(temp)/len(temp2) Avail_Lidar_temp.loc[k,"Number"] = temp2["Moyenne"].count() Avail_Lidar_temp.loc[k,"temp mean"] = temp2["Moyenne"].mean() Avail_Lidar_temp.loc[k,"temp stdev"] = temp2["Moyenne"].std() Avail_Lidar_temp.loc[k,"Vitesse Moyemme"] = temp3.mean() k=k+1 # Lidar availability by relative humidity column_names = ["RH", "Dispo 2015", "Vitesse Moyemme"] Avail_Lidar_RH = pd.DataFrame(columns = column_names) k=0 df = data_CQ2015_cleaned_Lidar["mmv1_RHHt80m0d_20150901_20151231"] for l in range(5,100,RHH_bin): temp2 = df.loc[df["Moyenne"] <= l+RHH_bin] temp2 = temp2.loc[temp2["Moyenne"] > l] temp = temp2.loc[temp2["Lidar 80m Wind Speed (m/s)"] != 'NaN' ] temp3 = temp2["Lidar 80m Wind Speed (m/s)"].astype(float) if len(temp2) > 0: Avail_Lidar_RH.loc[k,"RH"] = l #str(l) #+ " to " + str(l+1) Avail_Lidar_RH.loc[k,"Dispo 2015"] = 100*len(temp)/len(temp2) Avail_Lidar_RH.loc[k,"Number"] = temp2["Moyenne"].count() Avail_Lidar_RH.loc[k,"RH mean"] = temp2["Moyenne"].mean() Avail_Lidar_RH.loc[k,"RH stdev"] = temp2["Moyenne"].std() Avail_Lidar_RH.loc[k,"Vitesse Moyemme"] = temp3.mean() k=k+1 print("--- MMV1 done ---") print("--- %s seconds ---" % (time.time() - start_time)) ################################################################### ######################## 6. MMV2 data ############################# ################################################################### # Dictionnary for MMV2 data data_CQ2015={} # MMV2 data as extracted data_CQ2015_cleaned={} # MMV2 data after quality control data_CQ2015_cleaned_Lidar={} # MMV2 data after quality control for timestamps with lidar data # MMV2 data at 78m for name in glob.glob(mmv2_data_path): tp0=name.find('mmv2_') tp1=name.find('.csv') data_CQ2015[name[tp0:tp1]]= pd.read_csv(name, delimiter=';') print("--- MMV2 data extracted ---") # Perform quality control for capteur in data_CQ2015: df=data_CQ2015[capteur] for drp in Droped: df[drp].to_numpy() df = df.drop(df[df[drp] == 1].index) data_CQ2015_cleaned[capteur] = df print("--- MMV2 quality control done ---") # Add empty column to store Lidar speed column_names = ["Timestamp1"] dataframe_output_2015_Time = pd.DataFrame(columns = column_names) dataframe_output_2015_Time["Timestamp1"] = dataframe_output_2015["Timestamp1"] for key, df in data_CQ2015_cleaned.items(): df['Timestamp'] =
pd.to_datetime(df.Timestamp)
pandas.to_datetime
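A minimal, illustrative sketch (not part of the dataset record above) of the pandas.to_datetime call pattern used in the completion; the frame and timestamp values below are made-up stand-ins for the lidar dataframe.
import pandas as pd
demo = pd.DataFrame({"TimeStamp": ["09/01/2015 00:10", "09/01/2015 00:20"]})
demo["TimeStamp"] = pd.to_datetime(demo["TimeStamp"])  # parse strings to datetime64
demo["Timestamp1"] = demo["TimeStamp"].dt.strftime('%m/%d/%Y %H:%M')
print(demo.dtypes)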
import numpy as np import shutil import pandas as pd import os import json import re from sklearn.model_selection import StratifiedKFold RANDOM_SEED = 2018 # Set seed for reproduction datapath = "./kkbox-music-recommendation-challenge/" # !!! Directly using pd.read_csv() leads an error: #rows < 2296833 # songs_df = pd.read_csv(os.path.join(datapath, "songs.csv"), encoding="utf-8", dtype=object) song_list = [] song_header = [] with open(os.path.join(datapath, "songs.csv"), 'r', encoding="utf-8") as fid: k = 0 for line in fid: k += 1 splits = line.strip().split(",") if len(splits) != 7: print(line) splits = splits[0:7] # correction if k == 1: print("headers", splits) song_header = splits else: song_list.append(splits) songs_df = pd.DataFrame(song_list, columns=song_header) print("songs_df shape", songs_df.shape) songs_df['language'] = songs_df['language'].map(lambda x: str(int(float(x))) if not pd.isnull(x) else "") songs_df['genre_ids'] = songs_df['genre_ids'].map(lambda x: x.replace("|", " ") if not pd.isnull(x) else "") song_ids = set(songs_df['song_id'].dropna().unique()) person_names = set(songs_df['artist_name'].dropna().unique()) | set(songs_df['composer'].dropna().unique())\ | set(songs_df['lyricist'].dropna().unique()) def name_tokenize(name_str): persons = re.split(r"[\|\\/&;]", name_str) return [x.replace("\"", "").strip() for x in persons if x.replace("\"", "").strip() != ""] person_set = [] for name_str in person_names: person_set += name_tokenize(name_str) person_set = set(person_set) person_set = sorted(list(person_set)) # sort for reproduction person_dict = dict(list(zip(person_set, range(1, len(person_set) + 1)))) with open("person_id.json", "w", encoding="utf-8") as fout: person_index = dict(list(zip(range(1, len(person_set) + 1), person_set))) json.dump(person_index, fout, indent=4, ensure_ascii=False) del person_index def encode_name(name_str): names = name_tokenize(name_str) names = [str(person_dict[x]) for x in names] return " ".join(names) songs_df['artist_name'] = songs_df['artist_name'].map(lambda x: encode_name(x) if not pd.isnull(x) else "") songs_df['composer'] = songs_df['composer'].map(lambda x: encode_name(x) if not pd.isnull(x) else "") songs_df['lyricist'] = songs_df['lyricist'].map(lambda x: encode_name(x) if not pd.isnull(x) else "") # !!! Directly using pd.read_csv() leads an error: #rows < 2296869 # song_extra_info_df = pd.read_csv(os.path.join(datapath, "song_extra_info.csv"), encoding="utf-8") song_extra_list = [] song_extra_header = [] with open(os.path.join(datapath, "song_extra_info.csv"), 'r', encoding="utf-8") as fid: k = 0 for line in fid: k += 1 splits = line.strip().split(",") if len(splits) != 3: print(line) if k == 1: song_extra_header = splits else: song_extra_list.append(splits) print(k - 1, "lines in song_extra_info.csv") song_extra_info_df = pd.DataFrame(song_extra_list, columns=song_extra_header) print("song_extra_info_df shape", song_extra_info_df.shape) song_ids = song_ids | set(song_extra_info_df['song_id'].dropna().unique()) song_names = set(song_extra_info_df['name'].dropna().unique()) song_names = sorted(list(song_names)) song_name_dict = dict(list(zip(song_names, range(1, len(song_names) + 1)))) song_extra_info_df["name"] = song_extra_info_df["name"].map(lambda x: song_name_dict[x] if not
pd.isnull(x)
pandas.isnull
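A minimal, illustrative sketch (not part of the dataset record above) of pd.isnull guarding a .map() against missing values, as in the 'language' column handling of the completion; the series values are made up.
import numpy as np
import pandas as pd
langs = pd.Series(["52.0", np.nan, "3.0"])
cleaned = langs.map(lambda x: str(int(float(x))) if not pd.isnull(x) else "")
print(cleaned.tolist())  # ['52', '', '3']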
import hydra import random from omegaconf import OmegaConf, DictConfig from upcycle.random.seed import set_all_seeds import time import pandas as pd from online_gp.utils.dkl import pretrain_stem from gpytorch.settings import * from upcycle import cuda def startup(hydra_cfg): if hydra_cfg.seed is None: seed = random.randint(0, 100000) hydra_cfg['seed'] = seed set_all_seeds(seed) logger = hydra.utils.instantiate(hydra_cfg.logger) hydra_cfg = OmegaConf.to_container(hydra_cfg, resolve=True) # Resolve config interpolations hydra_cfg = DictConfig(hydra_cfg) logger.write_hydra_yaml(hydra_cfg) if hydra_cfg.dtype == 'float32': torch.set_default_dtype(torch.float32) elif hydra_cfg.dtype == 'float64': torch.set_default_dtype(torch.float64) print(hydra_cfg.pretty()) print(f"GPU available: {torch.cuda.is_available()}") return hydra_cfg, logger def get_model(config, init_x, init_y, streaming): stem = hydra.utils.instantiate(config.stem) model_kwargs = dict(stem=stem, init_x=init_x, init_y=init_y) model = hydra.utils.instantiate(config.model, **model_kwargs) return cuda.try_cuda(model) def online_regression(batch_model, online_model, train_x, train_y, test_x, test_y, update_stem, batch_size, logger, logging_freq): online_rmse = online_nll = 0 batch_rmse = batch_nll = 0 logger.add_table('online_metrics') num_chunks = train_x.size(-2) // batch_size for t, (x, y) in enumerate(zip(train_x.chunk(num_chunks), train_y.chunk(num_chunks))): start_clock = time.time() from online_gp.settings import detach_interp_coeff with detach_interp_coeff(True): o_rmse, o_nll = online_model.evaluate(x, y) stem_loss, gp_loss = online_model.update(x, y, update_stem=update_stem) step_time = time.time() - start_clock with torch.no_grad(): b_rmse, b_nll = batch_model.evaluate(x, y) online_rmse += o_rmse online_nll += o_nll batch_rmse += b_rmse batch_nll += b_nll regret = online_rmse - batch_rmse num_steps = (t + 1) * batch_size if t % logging_freq == (logging_freq - 1): rmse, nll = online_model.evaluate(test_x, test_y) print(f'T: {t+1}, test RMSE: {rmse:0.4f}, test NLL: {nll:0.4f}') logger.log(dict( stem_loss=stem_loss, gp_loss=gp_loss, batch_rmse=batch_rmse, batch_nll=batch_nll, online_rmse=online_rmse, online_nll=online_nll, regret=regret, test_rmse=rmse, test_nll=nll, noise=online_model.noise.mean().item(), step_time=step_time ), step=num_steps, table_name='online_metrics') logger.write_csv() def regression_trial(config): config, logger = startup(config) datasets = hydra.utils.instantiate(config.dataset) train_x, train_y = datasets.train_dataset[:] test_x, test_y = datasets.test_dataset[:] config.stem.input_dim = config.dataset.input_dim = train_x.size(-1) batch_model = get_model(config, train_x, train_y, streaming=False) if config.pretrain_stem.enabled: print('==== pretraining stem ====') loss_fn = torch.nn.MSELoss() batch_pretrain_stem_metrics = pretrain_stem(batch_model.stem, train_x, train_y, loss_fn, **config.pretrain_stem) logger.add_table('batch_pretrain_stem_metrics', batch_pretrain_stem_metrics) logger.write_csv() pretrain_df =
pd.DataFrame(logger.data['batch_pretrain_stem_metrics'])
pandas.DataFrame
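A minimal, illustrative sketch (not part of the dataset record above) of pandas.DataFrame wrapping a list of per-step metric dicts, mirroring how the completion wraps logger.data['batch_pretrain_stem_metrics']; the metric names here are made up.
import pandas as pd
metrics = [{"epoch": 1, "mse": 0.91}, {"epoch": 2, "mse": 0.64}]
pretrain_df = pd.DataFrame(metrics)  # one row per logged step
print(pretrain_df)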
""" .. module:: mixtures :platform: Unix, Windows :synopsis: a module for defining the class :class:`Mixture`. .. moduleauthor:: <NAME> <<EMAIL>> """ import numpy as np import pandas as pd import mics from mics.funcs import deltaMethod from mics.funcs import diff from mics.funcs import func from mics.utils import InputError from mics.utils import bennett from mics.utils import cases from mics.utils import crypto from mics.utils import errorTitle from mics.utils import info from mics.utils import multimap from mics.utils import propertyDict from mics.utils import stdError class mixture: """ A mixture of independently collected samples (MICS). Parameters ---------- samples : :class:`pooledsample` or list(:class:`sample`) A list of samples. engine : :class:`MICS` or :class:`MBAR` A method for mixture-model analysis. """ def __init__(self, samples, engine): self.samples = samples self.engine = engine m = self.m = len(samples) if mics.verbose: # np.set_printoptions(precision=4, threshold=15, edgeitems=4, suppress=True) info("\n=== Setting up mixture ===") info("Analysis method: ", self.engine.__class__.__name__) info("Number of samples:", m) if m == 0: raise InputError("list of samples is empty") self.n = np.array([len(sample.dataset) for sample in samples]) self.neff = np.array([sample.neff for sample in samples]) names = self.names = list(samples[0].dataset.columns) if mics.verbose: info("Sample sizes:", self.n) info("Effective sample sizes:", self.neff) info("Properties:", ", ".join(names)) potentials = [sample.potential.lambdify() for sample in samples] self.u = [multimap(potentials, sample.dataset) for sample in samples] self.f = bennett(self.u) mics.verbose and info("Initial free-energy guess:", self.f) self.engine.__initialize__(self) # ====================================================================================== def __compute__(self, functions, constants): try: if isinstance(functions, str): funcs = [func(functions, self.names, constants).lambdify()] else: funcs = [func(f, self.names, constants).lambdify() for f in functions] return [multimap(funcs, sample.dataset) for sample in self.samples] except (InputError, KeyError): return None # ====================================================================================== def free_energies(self, reference=0): """ Computes the free energies of all sampled states relative to a given reference state, as well as their standard errors. Parameters ---------- reference : int, optional, default=0 Specifies which sampled state will be considered as a reference for computing free-energy differences. Returns ------- pandas.DataFrame A data frame containing the free-energy differences and their computed standard errors for all sampled states. """ frame = self.samples.__qualifiers__() frame["f"] = self.f - self.f[reference] T = self.Theta frame["df"] = np.sqrt(np.diag(T) - 2*T[:, reference] + T[reference, reference]) return frame # ====================================================================================== def reweighting(self, potential, properties={}, derivatives={}, combinations={}, conditions={}, reference=0, **constants): """ Computes averages of specified properties at target states defined by a given reduced `potential` function with distinct passed parameter values, as well as the free energies of such states with respect to a sampled `reference` state. Also, computes derivatives of these averages and free energies with respect to the mentioned parameters. 
In addition, evaluates combinations of free energies, averages, and derivatives. In all cases, uncertainty propagation is handled automatically by means of the delta method. Parameters ---------- potential : str A mathematical expression defining the reduced potential of the target states. It might depend on the collective variables of the mixture samples, as well as on external parameters whose values will be passed via `conditions` or `constants`, such as explained below. properties : dict(str: str), optional, default={} A dictionary associating names to mathematical expressions, thus defining a set of properties whose averages must be evaluated at the target states. If it is omitted, then only the relative free energies of the target states will be evaluated. The expressions might depend on the same collective variables and parameters mentioned above for `potential`. derivatives : dict(str: (str, str)), optional, default={} A dictionary associating names to (property, parameter) pairs, thus specifying derivatives of average properties at the target states or relative free energies of these states with respect to external parameters. For each pair, property must be either "f" (for free energy) or a name defined in `properties`, while parameter must be an external parameter such as described above for `potential`. combinations : dict(str: str), optional, default={} A dictionary associating names to mathematical expressions, thus defining combinations among average properties at the target states, the relative free energies of these states, and their derivatives with respect to external parameters. The expressions might depend on "f" (for free energy) or on the names defined in `properties`, as well as on external parameters such as described above for `potential`. conditions : pandas.DataFrame or dict, optional, default={} A data frame whose column names are external parameters present in mathematical expressions specified in arguments `potential`, `properties`, and `combinations`. The rows of the data frame contain sets of values of these parameters, in such as way that the reweighting is carried out for every single set. This is a way of defining multiple target states from a single `potential` expression. The same information can be passed as a dictionary associating names to lists of numerical values, provided that all lists are equally sized. If it is empty, then a unique target state will be considered and all external parameters in `potential`, if any, must be passed as keyword arguments. reference : int, optional, default=0 The index of a sampled state to be considered as a reference for computing relative free energies. **constants : keyword arguments A set of keyword arguments passed as name=value, aimed to define external parameter values for the evaluation of mathematical expressions. These values will be repeated at all target states specified via `potential` and `conditions`. Returns ------- pandas.DataFrame A data frame containing the computed quantities, along with their estimated uncertainties, at all target states specified via `potential` and `conditions`. 
""" if mics.verbose: info("\n=== Performing reweighting with %s ===" % self.engine.__class__.__name__) info("Reduced potential:", potential) constants and info("Provided constants: ", constants) freeEnergy = "f" if freeEnergy in properties.keys(): raise InputError("Word % is reserved for free energies" % freeEnergy) condframe = pd.DataFrame(data=conditions) if isinstance(conditions, dict) else conditions propfuncs = list(properties.values()) if not derivatives: propnames = [freeEnergy] + list(properties.keys()) combs = combinations.values() gProps = self.__compute__(propfuncs, constants) if combinations: gDelta = deltaMethod(combs, propnames, constants) results = list() for (index, condition) in cases(condframe): mics.verbose and condition and info("Condition[%s]" % index, condition) consts = dict(condition, **constants) u = self.__compute__(potential, consts) y = gProps if gProps else self.__compute__(propfuncs, consts) (yu, Theta) = self.engine.__reweight__(self, u, y, reference) result = propertyDict(propnames, yu, stdError(Theta)) if combinations: delta = gDelta if gDelta.valid else deltaMethod(combs, propnames, consts) (h, dh) = delta.evaluate(yu, Theta) result.update(propertyDict(combinations.keys(), h, dh)) results.append(result.to_frame(index)) return condframe.join(pd.concat(results)) else: symbols = list(condframe.columns) + list(constants.keys()) parameters = set(x for (y, x) in derivatives.values()) props = dict() for x in parameters: props[crypto(x)] = diff(potential, x, symbols) combs = dict() for (z, (y, x)) in derivatives.items(): if y == freeEnergy: combs[z] = crypto(x) else: dydx = diff(properties[y], x, symbols) props[crypto(z)] = "%s - (%s)*(%s)" % (dydx, props[crypto(x)], properties[y]) combs[z] = "%s + (%s)*(%s)" % (crypto(z), crypto(x), y) unwanted = sum([[x, errorTitle(x)] for x in props.keys()], []) return self.reweighting(potential, dict(properties, **props), {}, dict(combs, **combinations), condframe, reference, **constants).drop(unwanted, axis=1) # ====================================================================================== def pmf(self, potential, property, bins=10, interval=None, **constants): if mics.verbose: info("\n=== Computing PMF with %s ===" % self.engine.__class__.__name__) info("Reduced potential:", potential) u = self.__compute__(potential, constants) z = self.__compute__(property, constants) if interval: (zmin, zmax) = interval else: zmin = min(np.amin(x[0, :]) for x in z) zmax = max(np.amax(x[0, :]) for x in z) delta = (zmax - zmin)/bins ibin = [np.floor((x[0:1, :] - zmin)/delta).astype(int) for x in z] results = list() for i in range(bins): zc = zmin + delta*(i + 0.5) mics.verbose and info("Bin[%d]:" % (i + 1), "%s = %s" % (property, str(zc))) y = [np.equal(x, i).astype(np.float) for x in ibin] (yu, Theta) = self.engine.__reweight__(self, u, y) if yu[1] > 0.0: dyu = np.sqrt(max(0.0, Theta[1, 1])) results.append([zc, -np.log(yu[1]), dyu/yu[1]]) return pd.DataFrame(results, columns=[property, "pmf", errorTitle("pmf")]) # ====================================================================================== def histograms(self, property="u0", bins=100, **constants): if property == "u0": y = self.u0 elif property == "state": w = np.arange(self.m) + 1 wsum = sum(w) y = [wsum*np.average(p, axis=0, weights=w) for p in self.P] elif property == "potential": y = [self.u[i][i, :] for i in range(self.m)] else: y = self.__compute__(property, constants) ymin = min([np.amin(x) for x in y]) ymax = max([np.amax(x) for x in y]) delta = (ymax - ymin)/bins 
center = [ymin + delta*(i + 0.5) for i in range(bins)] frame =
pd.DataFrame({property: center})
pandas.DataFrame
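A minimal, illustrative sketch (not part of the dataset record above) of pandas.DataFrame built from a dict of histogram bin centers, as in the histograms() completion; the bin count and limits are made up.
import pandas as pd
bins, ymin, delta = 5, 0.0, 0.2
center = [ymin + delta * (i + 0.5) for i in range(bins)]  # bin mid-points
frame = pd.DataFrame({"u0": center})
print(frame)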
import numpy as np import pytest from pandas import DataFrame import pandas._testing as tm class TestDataFrameReindexLike: def test_reindex_like(self, float_frame): other = float_frame.reindex(index=float_frame.index[:10], columns=["C", "B"]) tm.assert_frame_equal(other, float_frame.reindex_like(other)) @pytest.mark.parametrize( "method,expected_values", [ ("nearest", [0, 1, 1, 2]), ("pad", [np.nan, 0, 1, 1]), ("backfill", [0, 1, 2, 2]), ], ) def test_reindex_like_methods(self, method, expected_values): df = DataFrame({"x": list(range(5))}) result = df.reindex_like(df, method=method, tolerance=0) tm.assert_frame_equal(df, result) result = df.reindex_like(df, method=method, tolerance=[0, 0, 0, 0]) tm.assert_frame_equal(df, result) def test_reindex_like_subclass(self): # https://github.com/pandas-dev/pandas/issues/31925 class MyDataFrame(DataFrame): pass expected = DataFrame() df = MyDataFrame() result = df.reindex_like(expected)
tm.assert_frame_equal(result, expected)
pandas._testing.assert_frame_equal
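A minimal, illustrative sketch (not part of the dataset record above) of pandas._testing.assert_frame_equal comparing a frame with its own reindex_like() result, in the spirit of the test above.
import pandas as pd
import pandas._testing as tm
df = pd.DataFrame({"x": list(range(5))})
tm.assert_frame_equal(df, df.reindex_like(df))  # raises AssertionError on mismatch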
from collections import defaultdict from datetime import datetime from itertools import product import numpy as np import pytest from pandas import ( DataFrame, MultiIndex, Series, array, concat, merge, ) import pandas._testing as tm from pandas.core.algorithms import safe_sort import pandas.core.common as com from pandas.core.sorting import ( decons_group_index, get_group_index, is_int64_overflow_possible, lexsort_indexer, nargsort, ) class TestSorting: @pytest.mark.slow def test_int64_overflow(self): B = np.concatenate((np.arange(1000), np.arange(1000), np.arange(500))) A = np.arange(2500) df = DataFrame( { "A": A, "B": B, "C": A, "D": B, "E": A, "F": B, "G": A, "H": B, "values": np.random.randn(2500), } ) lg = df.groupby(["A", "B", "C", "D", "E", "F", "G", "H"]) rg = df.groupby(["H", "G", "F", "E", "D", "C", "B", "A"]) left = lg.sum()["values"] right = rg.sum()["values"] exp_index, _ = left.index.sortlevel() tm.assert_index_equal(left.index, exp_index) exp_index, _ = right.index.sortlevel(0) tm.assert_index_equal(right.index, exp_index) tups = list(map(tuple, df[["A", "B", "C", "D", "E", "F", "G", "H"]].values)) tups = com.asarray_tuplesafe(tups) expected = df.groupby(tups).sum()["values"] for k, v in expected.items(): assert left[k] == right[k[::-1]] assert left[k] == v assert len(left) == len(right) def test_int64_overflow_moar(self): # GH9096 values = range(55109) data = DataFrame.from_dict({"a": values, "b": values, "c": values, "d": values}) grouped = data.groupby(["a", "b", "c", "d"]) assert len(grouped) == len(values) arr = np.random.randint(-1 << 12, 1 << 12, (1 << 15, 5)) i = np.random.choice(len(arr), len(arr) * 4) arr = np.vstack((arr, arr[i])) # add sume duplicate rows i = np.random.permutation(len(arr)) arr = arr[i] # shuffle rows df = DataFrame(arr, columns=list("abcde")) df["jim"], df["joe"] = np.random.randn(2, len(df)) * 10 gr = df.groupby(list("abcde")) # verify this is testing what it is supposed to test! 
assert is_int64_overflow_possible(gr.grouper.shape) # manually compute groupings jim, joe = defaultdict(list), defaultdict(list) for key, a, b in zip(map(tuple, arr), df["jim"], df["joe"]): jim[key].append(a) joe[key].append(b) assert len(gr) == len(jim) mi = MultiIndex.from_tuples(jim.keys(), names=list("abcde")) def aggr(func): f = lambda a: np.fromiter(map(func, a), dtype="f8") arr = np.vstack((f(jim.values()), f(joe.values()))).T res = DataFrame(arr, columns=["jim", "joe"], index=mi) return res.sort_index() tm.assert_frame_equal(gr.mean(), aggr(np.mean)) tm.assert_frame_equal(gr.median(), aggr(np.median)) def test_lexsort_indexer(self): keys = [[np.nan] * 5 + list(range(100)) + [np.nan] * 5] # orders=True, na_position='last' result = lexsort_indexer(keys, orders=True, na_position="last") exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110)) tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp)) # orders=True, na_position='first' result = lexsort_indexer(keys, orders=True, na_position="first") exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105)) tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp)) # orders=False, na_position='last' result = lexsort_indexer(keys, orders=False, na_position="last") exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110)) tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp)) # orders=False, na_position='first' result = lexsort_indexer(keys, orders=False, na_position="first") exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1)) tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp)) def test_nargsort(self): # np.argsort(items) places NaNs last items = [np.nan] * 5 + list(range(100)) + [np.nan] * 5 # np.argsort(items2) may not place NaNs first items2 = np.array(items, dtype="O") # mergesort is the most difficult to get right because we want it to be # stable. 
# According to numpy/core/tests/test_multiarray, """The number of # sorted items must be greater than ~50 to check the actual algorithm # because quick and merge sort fall over to insertion sort for small # arrays.""" # mergesort, ascending=True, na_position='last' result = nargsort(items, kind="mergesort", ascending=True, na_position="last") exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110)) tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False) # mergesort, ascending=True, na_position='first' result = nargsort(items, kind="mergesort", ascending=True, na_position="first") exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105)) tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False) # mergesort, ascending=False, na_position='last' result = nargsort(items, kind="mergesort", ascending=False, na_position="last") exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110)) tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False) # mergesort, ascending=False, na_position='first' result = nargsort(items, kind="mergesort", ascending=False, na_position="first") exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1)) tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False) # mergesort, ascending=True, na_position='last' result = nargsort(items2, kind="mergesort", ascending=True, na_position="last") exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110)) tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False) # mergesort, ascending=True, na_position='first' result = nargsort(items2, kind="mergesort", ascending=True, na_position="first") exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105)) tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False) # mergesort, ascending=False, na_position='last' result = nargsort(items2, kind="mergesort", ascending=False, na_position="last") exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110)) tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False) # mergesort, ascending=False, na_position='first' result = nargsort( items2, kind="mergesort", ascending=False, na_position="first" ) exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1)) tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False) class TestMerge: @pytest.mark.slow def test_int64_overflow_issues(self): # #2690, combinatorial explosion df1 = DataFrame(np.random.randn(1000, 7), columns=list("ABCDEF") + ["G1"]) df2 = DataFrame(np.random.randn(1000, 7), columns=list("ABCDEF") + ["G2"]) # it works! 
result = merge(df1, df2, how="outer") assert len(result) == 2000 low, high, n = -1 << 10, 1 << 10, 1 << 20 left = DataFrame(np.random.randint(low, high, (n, 7)), columns=list("ABCDEFG")) left["left"] = left.sum(axis=1) # one-2-one match i = np.random.permutation(len(left)) right = left.iloc[i].copy() right.columns = right.columns[:-1].tolist() + ["right"] right.index = np.arange(len(right)) right["right"] *= -1 out = merge(left, right, how="outer") assert len(out) == len(left) tm.assert_series_equal(out["left"], -out["right"], check_names=False) result = out.iloc[:, :-2].sum(axis=1) tm.assert_series_equal(out["left"], result, check_names=False) assert result.name is None out.sort_values(out.columns.tolist(), inplace=True) out.index = np.arange(len(out)) for how in ["left", "right", "outer", "inner"]: tm.assert_frame_equal(out, merge(left, right, how=how, sort=True)) # check that left merge w/ sort=False maintains left frame order out = merge(left, right, how="left", sort=False) tm.assert_frame_equal(left, out[left.columns.tolist()]) out = merge(right, left, how="left", sort=False) tm.assert_frame_equal(right, out[right.columns.tolist()]) # one-2-many/none match n = 1 << 11 left = DataFrame( np.random.randint(low, high, (n, 7)).astype("int64"), columns=list("ABCDEFG"), ) # confirm that this is checking what it is supposed to check shape = left.apply(Series.nunique).values assert is_int64_overflow_possible(shape) # add duplicates to left frame left = concat([left, left], ignore_index=True) right = DataFrame( np.random.randint(low, high, (n // 2, 7)).astype("int64"), columns=list("ABCDEFG"), ) # add duplicates & overlap with left to the right frame i = np.random.choice(len(left), n) right = concat([right, right, left.iloc[i]], ignore_index=True) left["left"] = np.random.randn(len(left)) right["right"] = np.random.randn(len(right)) # shuffle left & right frames i = np.random.permutation(len(left)) left = left.iloc[i].copy() left.index = np.arange(len(left)) i = np.random.permutation(len(right)) right = right.iloc[i].copy() right.index = np.arange(len(right)) # manually compute outer merge ldict, rdict = defaultdict(list), defaultdict(list) for idx, row in left.set_index(list("ABCDEFG")).iterrows(): ldict[idx].append(row["left"]) for idx, row in right.set_index(list("ABCDEFG")).iterrows(): rdict[idx].append(row["right"]) vals = [] for k, lval in ldict.items(): rval = rdict.get(k, [np.nan]) for lv, rv in product(lval, rval): vals.append( k + ( lv, rv, ) ) for k, rval in rdict.items(): if k not in ldict: for rv in rval: vals.append( k + ( np.nan, rv, ) ) def align(df): df = df.sort_values(df.columns.tolist()) df.index = np.arange(len(df)) return df def verify_order(df): kcols = list("ABCDEFG") tm.assert_frame_equal( df[kcols].copy(), df[kcols].sort_values(kcols, kind="mergesort") ) out = DataFrame(vals, columns=list("ABCDEFG") + ["left", "right"]) out = align(out) jmask = { "left": out["left"].notna(), "right": out["right"].notna(), "inner": out["left"].notna() & out["right"].notna(), "outer": np.ones(len(out), dtype="bool"), } for how in ["left", "right", "outer", "inner"]: mask = jmask[how] frame = align(out[mask].copy()) assert mask.all() ^ mask.any() or how == "outer" for sort in [False, True]: res = merge(left, right, how=how, sort=sort) if sort: verify_order(res) # as in GH9092 dtypes break with outer/right join tm.assert_frame_equal( frame, align(res), check_dtype=how not in ("right", "outer") ) def test_decons(): def testit(codes_list, shape): group_index = get_group_index(codes_list, 
shape, sort=True, xnull=True) codes_list2 = decons_group_index(group_index, shape) for a, b in zip(codes_list, codes_list2): tm.assert_numpy_array_equal(a, b) shape = (4, 5, 6) codes_list = [ np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100).astype(np.int64), np.tile([0, 2, 4, 3, 0, 1, 2, 3], 100).astype(np.int64), np.tile([5, 1, 0, 2, 3, 0, 5, 4], 100).astype(np.int64), ] testit(codes_list, shape) shape = (10000, 10000) codes_list = [ np.tile(np.arange(10000, dtype=np.int64), 5), np.tile(np.arange(10000, dtype=np.int64), 5), ] testit(codes_list, shape) class TestSafeSort: def test_basic_sort(self): values = [3, 1, 2, 0, 4] result = safe_sort(values) expected = np.array([0, 1, 2, 3, 4]) tm.assert_numpy_array_equal(result, expected) values = list("baaacb") result = safe_sort(values) expected = np.array(list("aaabbc"), dtype="object") tm.assert_numpy_array_equal(result, expected) values = [] result = safe_sort(values) expected = np.array([]) tm.assert_numpy_array_equal(result, expected) @pytest.mark.parametrize("verify", [True, False]) def test_codes(self, verify): values = [3, 1, 2, 0, 4] expected = np.array([0, 1, 2, 3, 4]) codes = [0, 1, 1, 2, 3, 0, -1, 4] result, result_codes = safe_sort(values, codes, verify=verify) expected_codes = np.array([3, 1, 1, 2, 0, 3, -1, 4], dtype=np.intp) tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_codes, expected_codes)
pandas._testing.assert_numpy_array_equal
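A minimal, illustrative sketch (not part of the dataset record above) of pandas._testing.assert_numpy_array_equal checking safe_sort output, matching the basic-sort case of the test above.
import numpy as np
import pandas._testing as tm
from pandas.core.algorithms import safe_sort
result = safe_sort(np.array([3, 1, 2, 0, 4]))
tm.assert_numpy_array_equal(result, np.array([0, 1, 2, 3, 4]))  # raises AssertionError on mismatch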
import geopandas as gpd import matplotlib.pyplot as plt import numpy as np from pathlib import Path import pandas as pd import os import h5py from shapely.geometry import point,polygon import icepyx as ipd from datetime import date from dateutil.relativedelta import relativedelta from mpl_toolkits.axes_grid1 import make_axes_locatable from playsound import playsound def get_data(bbox,date_range,path) : try: os.mkdir(path+'/'+date_range[0]+'--'+date_range[1]) except: None path = path+'/'+date_range[0]+'--'+date_range[1] #creating the icepyx object region = ipd.Query('ATL06',bbox,date_range) print(region.avail_granules()) region.granules.avail #logging into earthdata earthdata_uid = input("Enter your Earthdata username:") email = input("Enter your Eathdata email:") region.earthdata_login(earthdata_uid,email) #creating a default variable list region.order_vars.append(defaults=True) #print(region.order_vars.wanted,sep='/n') region.order_vars.remove(all=True) #modifying the default variable list #print(region.order_vars.wanted) region.order_vars.append(var_list=['latitude']) region.order_vars.append(var_list=['longitude']) region.order_vars.append(var_list=['h_li']) region.order_vars.append(var_list=['x_atc']) region.order_vars.append(var_list=['atl06_quality_summary']) print("The requested data is:") print(region.order_vars.wanted) region.subsetparams(Coverage=region.order_vars.wanted) region.reqparams['page_size']=int(input("Enter desired number of granules per order:")) #ordering data email=input("Do you want an email containing information of your order requests(y/n)") email=True if email=='y' else False region.order_granules(email=email) #downloading data region.download_granules(path) def data_to_csv(path_in): group = ['gt1l','gt1r','gt2l','gt2r','gt3l','gt3r'] try: os.mkdir(path_in+'/CSV') except: None path_out = path_in+'/CSV' a=os.listdir(path_in) try: a.remove('.ipynb_checkpoints') except: None for g in group: beam = pd.DataFrame() beam['lat']=[] beam['lon']=[] beam['h_li']=[] beam['x_atc']=[] beam['q_flag']=[] for fname in a: df = pd.DataFrame() fname = path_in+'/'+fname try: with h5py.File(fname,'r') as f: try: df['lat'] = f['/'+g+'/land_ice_segments/latitude'][:] df['lon'] = f['/'+g+'/land_ice_segments/longitude'][:] df['h_li'] = f['/'+g+'/land_ice_segments/h_li'][:] df['x_atc'] = f['/'+g+'/land_ice_segments/ground_track/x_atc'][:] df['q_flag'] = f['/'+g+'/land_ice_segments/atl06_quality_summary'][:] beam=beam.append(df,ignore_index=True) except: print(fname+" has no relevant data") continue except: print(fname+" is not a hdf5 file.") continue beam=beam[beam['h_li']< 8611] beam.to_csv(path_out+'/'+g+'.csv') def h_li_plot(region,end_time): year, month, day = map(int, end_time.split('-')) start_time = date(year, month, day)+relativedelta(months=-3) start_time = str(start_time) print(start_time) date_range=[start_time,end_time] if region in ['Karakoram','West Himalaya','East Himalaya','Central Himalaya']: #Data download try: os.mkdir(os.getcwd().rsplit('/package')+'/'+region) except: None try: os.mkdir(os.getcwd().rsplit('/package')+'/'+region+'/data') except: None basemap=gpd.read_file(os.getcwd().rsplit('/package')+'/'+region+'/shapefile/'+region+'.shp') if (os.path.isdir(os.getcwd().rsplit('/package')+'/'+region+'/data/'+date_range[0]+'--'+date_range[1])==False or len(os.listdir(os.getcwd().rsplit('/package')+'/'+region+'/data/'+date_range[0]+'--'+date_range[1]))<=1): print("Downloding data") path=os.getcwd().rsplit('/package')+'/'+region+'/data' get_data(fname,date_range,path) 
data_to_csv(path) else: print("Data already exists") path=os.getcwd().rsplit('/package')+'/'+region+'/data/'+date_range[0]+'--'+date_range[1] fname=os.getcwd().rsplit('/package')+'/'+region+'/shpfile/'+region+'.shp' df1=pd.read_csv(path+'/CSV/gt1l.csv') df2=
pd.read_csv(path+'/CSV/gt2l.csv')
pandas.read_csv
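A minimal, illustrative sketch (not part of the dataset record above) of the pandas.read_csv pattern used in the completion; a tiny stand-in CSV is written first so the snippet is self-contained (the real per-beam files come from data_to_csv above).
import pandas as pd
pd.DataFrame({"lat": [61.2], "lon": [-140.5], "h_li": [2410.3]}).to_csv("gt2l_demo.csv", index=False)
df2 = pd.read_csv("gt2l_demo.csv")
print(df2.head())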
# <NAME> # python 3.7 """ To calculate the extremes of the carbon fluxes based on carbon flux anomalies in gC. The code is fairly flexible, allowing multiple filters to be passed to it. Output: * Saving the binaries of extremes * Saving the TCE binaries at multiple lags [0-4 months) """ import os import netCDF4 as nc4 import numpy as np import pandas as pd import datetime as dt import seaborn as sns import argparse from scipy import stats from functions import time_dim_dates, index_and_dates_slicing, norm, geo_idx, patch_with_gaps_and_eventsize """ Arguments to input while running the python file --percentile (-per) : percentile under consideration looking at the negative/positive tail of gpp events: {e.g. 1,5,10,90,95,99} --th_type : Thresholds can be computed at each tail i.e. 'ind' or 'common'. 'common' means that the total number of events whose anomaly modulus exceeds the threshold represents the 'per' percentile --sources (-src) : the models that you want to analyze, separated by hyphens or 'all' for all the models --variable (-var) : the variable to analyze gpp/nep/npp/nbp --window (wsize) : time window size in years # Running: run calc_extremes.py -src cesm -var gpp """ print ("Last edit on May 08, 2020") # The abbreviation of the models that will be analyzed: source_code = { 'cesm' : 'CESM2', 'can' : 'CanESM5', 'ipsl' : 'IPSL-CM6A-LR', 'bcc' : 'BCC-CSM2-MR', 'cnrn-e': 'CNRM-ESM2-1', 'cnrn-c': 'CNRM-CM6-1' } parser = argparse.ArgumentParser() parser.add_argument('--percentile' ,'-per' , help = "Threshold Percentile?" , type= int, default= 5 ) parser.add_argument('--th_type' ,'-th' , help = "Threshold type? ind/common" , type= str, default= 'common' ) parser.add_argument('--sources' ,'-src' , help = "Which model(s) to analyse?" , type= str, default= 'all' ) parser.add_argument('--variable' ,'-var' , help = "variable? gpp/npp/nep/nbp" , type= str, default= 'gpp' ) parser.add_argument('--window' ,'-wsize' , help = "window size (25 years)?"
, type= int, default= 25 ) args = parser.parse_args() # The inputs: per = int (args.percentile) th_type = str (args.th_type) src = str (args.sources) variable_run= str (args.variable) window = int (args.window) # Model(s) to analyze: # -------------------- source_selected = [] if len(src.split('-')) >1: source_selected = src.split('-') elif src in ['all', 'a']: source_selected = list(source_code.values() ) elif len(src.split('-')) == 1: if src in source_code.keys(): source_selected = [source_code[src]] else: print (" Enter a valid source id") #running : run calc_extremes.py -per 5 -var nbp -src cesm # Reading the dataframe of the selected files # ------------------------------------------- cori_scratch = '/global/cscratch1/sd/bharat/' # where the anomalies per slave rank are saved in_path = '/global/homes/b/bharat/results/data_processing/' # to read the filters #cmip6_filepath_head = '/global/homes/b/bharat/cmip6_data/CMIP6/' cmip6_filepath_head = '/global/cfs/cdirs/m3522/cmip6/CMIP6/' #web_path = '/project/projectdirs/m2467/www/bharat/' web_path = '/global/homes/b/bharat/results/web/' # exp is actually 'historical + ssp585' but saved as 'ssp585' exp = 'ssp585' # Common members per model # ------------------------ common_members = {} for source_run in source_selected: common_members [source_run] = pd.read_csv (cori_scratch + 'add_cmip6_data/common_members/%s_%s_common_members.csv'%(source_run,exp), header=None).iloc[:,0] # The spreadsheet with all the available data of cmip 6 # ----------------------------------------------------- df_files = pd.read_csv(in_path + 'df_data_selected.csv') temp = df_files.copy(deep = True) # Saving the path of area and lf filepath_areacella = {} filepath_sftlf = {} for s_idx, source_run in enumerate(source_selected): filters = (temp['source_id'] == source_run) & (temp['variable_id'] == variable_run) # original Variable filters_area = (temp['source_id'] == source_run) & (temp['variable_id'] == 'areacella') # areacella filters_lf = (temp['source_id'] == source_run) & (temp['variable_id'] == 'sftlf') # land fraction #passing the filters to the dataframe df_tmp = temp[filters] df_tmp_area = temp[filters_area] df_tmp_lf = temp[filters_lf] for member_run in common_members [source_run]: if source_run == 'BCC-CSM2-MR': filepath_area = "/global/homes/b/bharat/extra_cmip6_data/areacella_fx_BCC-CSM2-MR_hist-resIPO_r1i1p1f1_gn.nc" filepath_lf = "/global/homes/b/bharat/extra_cmip6_data/sftlf_fx_BCC-CSM2-MR_hist-resIPO_r1i1p1f1_gn.nc" else: filters_area = (temp['variable_id'] == 'areacella') & (temp['source_id'] == source_run) filters_lf = (temp['variable_id'] == 'sftlf') & (temp['source_id'] == source_run) filepath_area = cmip6_filepath_head + "/".join(np.array(temp[filters_area].iloc[-1])) filepath_lf = cmip6_filepath_head + "/".join(np.array(temp[filters_lf].iloc[-1])) filepath_areacella [source_run] = filepath_area filepath_sftlf [source_run] = filepath_lf # Extracting the area and land fractions of different models # ========================================================== data_area = {} data_lf = {} for source_run in source_selected: data_area [source_run] = nc4.Dataset (filepath_areacella[source_run]) . variables['areacella'] data_lf [source_run] = nc4.Dataset (filepath_sftlf [source_run]) . variables['sftlf'] # Saving the paths of anomalies # hier. 
: source_id > member_id # ------------------------------------ paths = {} for source_run in source_selected: paths[source_run] = {} for source_run in source_selected: for member_run in common_members [source_run]: saved_ano = cori_scratch + 'add_cmip6_data/%s/%s/%s/%s/'%(source_run,exp,member_run,variable_run) paths[source_run][member_run] = saved_ano del saved_ano # Reading and saving the data: # ---------------------------- nc_ano = {} for source_run in source_selected: nc_ano[source_run] = {} for source_run in source_selected: for member_run in common_members [source_run]: nc_ano[source_run][member_run] = nc4.Dataset(paths[source_run][member_run] + '%s_%s_%s_%s_anomalies_gC.nc'%(source_run,exp,member_run,variable_run)) # Arranging Time Array for plotting and calling # -------------------------------------------- win_len = 12 * window #number of months in window years total_years = 251 #years from 1850 to 2100 total_months= total_years * 12 dates_ar = time_dim_dates( base_date = dt.date(1850,1,1), total_timestamps = 3012 ) start_dates = np.array( [dates_ar[i*win_len] for i in range(int(total_months/win_len))]) #list of start dates of 25 year window end_dates = np.array( [dates_ar[i*win_len+win_len -1] for i in range(int(total_months/win_len))]) #list of end dates of the 25 year window idx_yr_2100 = 3012 # upper open index 2100 from the year 1850 if the data is monthly i.e. for complete TS write ts[:3012] idx_yr_2014 = 1980 # upper open index 2014 from the year 1850 if the data is monthly i.e. for complete TS write ts[:1980] idx_yr_2099 = 3000 # upper open index 2099 from the year 1850 if the data is monthly i.e. for complete TS write ts[:3000] # Initiation: # ----------- def TS_Dates_and_Index (dates_ar = dates_ar,start_dates = start_dates, end_dates=end_dates ): """ Returns the TS of the dates and index of consecutive windows of len 25 years Parameters: ----------- dates_ar : an array of dates in datetime.date format the dates are chosen from this array start_dates: an array of start dates, the start date will decide the dates and index of the first entry for final time series for that window end_dates: similar to start_dates but for end date Returns: -------- dates_win: a 2-d array with len of start dates/ total windows and each row containing the dates between start and end date idx_dates_win : a 2-d array with len of start dates/ total windows and each row containing the index of dates between start and end date """ idx_dates_win = [] #indicies of time in 25yr windows dates_win = [] #sel dates from time variables in win_len windows for i in range(len(start_dates)): idx_loc, dates_loc = index_and_dates_slicing(dates_ar,start_dates[i],end_dates[i]) # see functions.py idx_dates_win . append (idx_loc) dates_win . append (dates_loc) return np.array(dates_win), np.array(idx_dates_win) # Calling the function "ts_dates_and_index"; Universal for rest of the code dates_win, idx_dates_win = TS_Dates_and_Index () # The saving the results in a dictionary # -------------------------------------- Results = {} for source_run in source_selected: Results[source_run] = {} for member_run in common_members [source_run]: Results[source_run][member_run] = {} # Calculation of thresholds (rth percentile at each tail): # ------------------------------------------------------------ def Threshold_and_Binary_Ar(data = nc_ano[source_run][member_run].variables[variable_run][...], per = per): """ In this method the 1 percentile threshold is calculated are both tails of the pdf of anomalies... i.e. 
the same number of values are selected on either tails. returns the global percentile based thresholds and binary arrays of consecutive windows Parameters: ----------- data : The anomalies whose threshold you want to calculate Universal: --------- start_dates, idx_dates_win, per Returns: -------- threshold_neg: the threshold for negative extremes; size = # windows threshold_pos: the threshold for positive extremes; size = # windows bin_ext_neg: the binary array 1's are extremes based on the threshold_neg; shape = same as data bin_ext_pos: the binary array 1's are extremes based on the threshold_pos; shape = same as data """ thresholds_1= [] #thresholds for consecutive windows of defined size for a 'per' percentile thresholds_2= [] #thresholds for consecutive windows of defined size for a '100-per' percentile bin_ext_neg = np.ma.zeros((data.shape)) #3d array to capture the True binaray extmalies w.r.t. gpp loss events bin_ext_pos = np.ma.zeros((data.shape)) #3d array to capture the True binaray extmalies w.r.t. gpp gain events for i in range(len(start_dates)): ano_loc = data[idx_dates_win[i][0]:idx_dates_win[i][-1]+1,:,:] threshold_loc_1 = np.percentile(ano_loc[ano_loc.mask == False],per) # calculation of threshold for the local anomalies thresholds_1 . append(threshold_loc_1) threshold_loc_2 = np.percentile(ano_loc[ano_loc.mask == False],(100-per)) thresholds_2 . append(threshold_loc_2) # Binary arrays: if per <=50: bin_ext_neg[idx_dates_win[i][0]:idx_dates_win[i][-1]+1,:,:] = ano_loc < threshold_loc_1 bin_ext_pos[idx_dates_win[i][0]:idx_dates_win[i][-1]+1,:,:] = ano_loc > threshold_loc_2 else: bin_ext_pos[idx_dates_win[i][0]:idx_dates_win[i][-1]+1,:,:] = ano_loc > threshold_loc_1 bin_ext_neg[idx_dates_win[i][0]:idx_dates_win[i][-1]+1,:,:] = ano_loc < threshold_loc_2 # Thresholds for consecutive windows: if per < 50: threshold_neg = np.ma.array(thresholds_1) threshold_pos = np.ma.array(thresholds_2) elif per > 50: threshold_neg = np.ma.array(thresholds_2) threshold_pos = np.ma.array(thresholds_1) return threshold_neg, threshold_pos, bin_ext_neg, bin_ext_pos # Calculation of thresholds (rth percentile combines for both tails): # ------------------------------------------------------------ def Threshold_and_Binary_Ar_Common(data = nc_ano[source_run][member_run].variables[variable_run][...], per = per ): """ In this method the rth percentile threshold is calculated at sum of both tails of the pdf of anomalies... i.e. total number of elements on left and right tail make up for rth percentile (jakob 2014, anex A2)... 
This can be done by taking a modulus of anomalies and then calcuate the rth percentile th = q Negative extremes: anomalies < -q Positive extremes: anomalies > q Returns the global percentile based thresholds and binary arrays of consecutive windows Parameters: ----------- data : The anomalies whose threshold you want to calculate Universal: --------- start_dates, idx_dates_win, per Returns: -------- threshold_neg: the threshold for negative extremes; size = # windows threshold_pos: the threshold for positive extremes; size = # windows bin_ext_neg: the binary array 1's are extremes based on the threshold_neg; shape = same as data bin_ext_pos: the binary array 1's are extremes based on the threshold_pos; shape = same as data """ thresholds_p= [] #thresholds for consecutive windows of defined size for a 'per' percentile thresholds_n= [] #thresholds for consecutive windows of defined size for a '100-per' percentile bin_ext_neg = np.ma.zeros((data.shape)) #3d array to capture the True binaray extmalies w.r.t. gpp loss events bin_ext_pos = np.ma.zeros((data.shape)) #3d array to capture the True binaray extmalies w.r.t. gpp gain events assert per <50, "Percentile must be less than 50" for i in range(len(start_dates)): ano_loc = data[idx_dates_win[i][0]:idx_dates_win[i][-1]+1,:,:] threshold_loc = np.percentile(np.abs(ano_loc[ano_loc.mask == False]), (100-per) ) # calculation of threshold for the local anomalies # The (100-per) is used because after taking the modulus negative extremes fall along positive on the right hand thresholds_p . append(threshold_loc) thresholds_n . append(-threshold_loc) # Binary arrays: # -------------- bin_ext_neg[idx_dates_win[i][0]:idx_dates_win[i][-1]+1,:,:] = ano_loc < -threshold_loc bin_ext_pos[idx_dates_win[i][0]:idx_dates_win[i][-1]+1,:,:] = ano_loc > threshold_loc # Thresholds for consecutive windows: # ----------------------------------- threshold_neg = np.ma.array(thresholds_n) threshold_pos = np.ma.array(thresholds_p) return threshold_neg, threshold_pos, bin_ext_neg, bin_ext_pos limits = {} limits ['min'] = {} limits ['max'] = {} limits ['min']['th_pos'] = 0 limits ['max']['th_pos'] = 0 limits ['min']['th_neg'] = 0 limits ['max']['th_neg'] = 0 p =0 for source_run in source_selected: for member_run in common_members [source_run]: p = p+1 # threshold at each tail if th_type == 'ind': A,B,C,D = Threshold_and_Binary_Ar(data = nc_ano[source_run][member_run].variables[variable_run][...], per = per ) if th_type == 'common': A,B,C,D = Threshold_and_Binary_Ar_Common(data = nc_ano[source_run][member_run].variables[variable_run][...], per = per ) Results[source_run][member_run]['th_neg'] = A Results[source_run][member_run]['th_pos'] = B Results[source_run][member_run]['bin_ext_neg'] = C Results[source_run][member_run]['bin_ext_pos'] = D Results[source_run][member_run]['ts_th_neg'] = np.array([np.array([A[i]]*win_len) for i in range(len(A))]).flatten() Results[source_run][member_run]['ts_th_pos'] = np.array([np.array([B[i]]*win_len) for i in range(len(B))]).flatten() # Checking if p%3 == 0: print ("Calculating Thresholds ......") elif p%3 == 1: print ("Calculating Thresholds ....") else: print ("Calculating Thresholds ..") del A,B,C,D # Saving the binary data # ---------------------- save_binary_common = 'n' if save_binary_common in ['y','yy','Y','yes']: """ To save the binary matrix of the so that the location and duration of the extremes can be identified. 
If you want to save the binary matrix of extremes as nc files this was done so that this coulld be used as input the attribution analysis """ for source_run in source_selected: for member_run in common_members [source_run]: path_TCE = cori_scratch + 'add_cmip6_data/%s/%s/%s/%s_TCE/'%(source_run,exp,member_run,variable_run) # Check if the directory 'path_TCE' already exists? If not, then create one: if os.path.isdir(path_TCE) == False: os.makedirs(path_TCE) for ext_type in ['neg','pos']: print("Saving the binary matrix for %s,%s,%s"%(source_run,member_run,ext_type)) with nc4.Dataset( path_TCE + '%s_%s_bin_%s.nc'%(source_run,member_run,ext_type), mode = 'w') as dset: dset .createDimension( "time" ,size = nc_ano[source_run][member_run].variables['time'].size) dset .createDimension( "lat" ,size = nc_ano[source_run][member_run].variables['lat'].size) dset .createDimension( "lon" ,size = nc_ano[source_run][member_run].variables['lon'].size) t = dset.createVariable(varname = "time" ,datatype = float, dimensions = ("time"), fill_value = 1e+36) x = dset.createVariable(varname = "lon" ,datatype = float, dimensions = ("lon") , fill_value = 1e+36) y = dset.createVariable(varname = "lat" ,datatype = float, dimensions = ("lat") , fill_value = 1e+36) z = dset.createVariable(varname = variable_run +'_bin' ,datatype = float, dimensions = ("time","lat","lon"),fill_value = 1e+36) #varible = gpp_bin_ext t.axis = "T" x.axis = "X" y.axis = "Y" t[...] = nc_ano[source_run][member_run].variables['time'] [...] x[...] = nc_ano[source_run][member_run].variables['lon'][...] y[...] = nc_ano[source_run][member_run].variables['lat'][...] z[...] = Results[source_run][member_run]['bin_ext_%s'%ext_type] z.missing_value = 1e+36 z.stardard_name = variable_run+" binarys for %s extremes based on %dth percentile"%(ext_type,per) z.units = "0,1" x.units = nc_ano[source_run][member_run].variables['lon'].units x.missing_value = 1e+36 x.setncattr ("standard_name",nc_ano[source_run][member_run].variables['lon'].standard_name) y.units = nc_ano[source_run][member_run].variables['lat'].units y.missing_value = 1e+36 y.setncattr ("standard_name",nc_ano[source_run][member_run].variables['lat'].standard_name) t.units = nc_ano[source_run][member_run].variables['time'].units t.setncattr ("calendar", nc_ano[source_run][member_run].variables['time'].calendar) t.setncattr ("standard_name", nc_ano[source_run][member_run].variables['time'].standard_name) t.missing_value = 1e+36 # TCE: Calculations: # ------------------ lags_TCE = np.asarray([0,1,2,3,4], dtype = int) def Binary_Mat_TCE_Win (bin_ar, win_start_year=2000,lags = lags_TCE, land_frac= data_lf [source_run]): """ Aim: ---- To save the binary matrix of the Time Continuous Extremes(TCEs) so that the location and duration of the extremes can be identified. Returns: -------- bin_TCE_01s: are the binary values of extreme values in a TCE only at qualified locations with gaps ( actual as value 0) [hightlight extreme values] bin_TCE_1s : are the binary values of extreme values in a TCE only at qualified locations with gaps ( 0 replaced with value 1) [selecting full TCE with only 1s] bin_TCE_len : are the len of TCE extreme events, the length of TCE is captured at the trigger locations shape : These matrix are of shape (5,300,192,288) i.e. lags(0-4 months), time(300 months or 25 years {2000-24}), lat(192) and lon(288). 
""" from functions import create_seq_mat for i,date in enumerate(start_dates): if date.year in [win_start_year]: start_yr_idx = i data = bin_ar[start_yr_idx*win_len: (start_yr_idx+1)*win_len] del bin_ar bin_TCE_1s = np.ma.zeros((len(lags), data.shape[0],data.shape[1],data.shape[2])) bin_TCE_01s = np.ma.zeros((len(lags), data.shape[0],data.shape[1],data.shape[2])) bin_TCE_len = np.ma.zeros((len(lags), data.shape[0],data.shape[1],data.shape[2])) for lag in lags: for lat_i in range( data.shape[1] ): for lon_i in range( data.shape[2] ): if land_frac[...][lat_i,lon_i] != 0: #print lag, lat_i, lon_i try: tmp = patch_with_gaps_and_eventsize (data[:,lat_i,lon_i], max_gap =2, min_cont_event_size=3, lag=lag) for idx, trig in enumerate (tmp[1]): bin_TCE_01s [lag, trig:trig+len(tmp[0][idx]), lat_i, lon_i] = tmp[0][idx] bin_TCE_1s [lag, trig:trig+len(tmp[0][idx]), lat_i, lon_i] = np.ones(tmp[0][idx].shape) bin_TCE_len [lag, trig, lat_i, lon_i] = np.sum(np.ones(tmp[0][idx].shape)) except: bin_TCE_01s[lag, :, lat_i, lon_i] = np.ma.masked_all(data.shape[0]) bin_TCE_1s [lag, :, lat_i, lon_i] = np.ma.masked_all(data.shape[0]) bin_TCE_len[lag, :, lat_i, lon_i] = np.ma.masked_all(data.shape[0]) else: bin_TCE_01s[lag, :, lat_i, lon_i] = np.ma.masked_all(data.shape[0]) bin_TCE_1s [lag, :, lat_i, lon_i] = np.ma.masked_all(data.shape[0]) bin_TCE_len[lag, :, lat_i, lon_i] = np.ma.masked_all(data.shape[0]) return bin_TCE_01s, bin_TCE_1s, bin_TCE_len all_win_start_years = np.arange(1850,2100,25) # To do TCE analysis for all windows win_start_years = np.arange(1850,2100,25) # To check only for win starting at 2000 #win_start_years = [2000] # Testing with the year 2000-24 dataset first save_TCE_binary = 'n' if save_TCE_binary in ['y','yy','Y','yes']: """ To save the binary matrix of the Time Continuous Extremes(TCEs) so that the location and duration of the extremes can be identified. If you want to save the binary matrix of extremes as nc files this was done so that this coulld be used as input the attribution analysis """ for start_yr in win_start_years: win_idx = np.where( all_win_start_years == start_yr)[0][0] for source_run in source_selected: for member_run in common_members [source_run]: Binary_Data_TCE = {} # Dictionary to save negative and positive Binary TCEs Binary_Data_TCE ['neg'] = {} Binary_Data_TCE ['pos'] = {} bin_neg = Results[source_run][member_run]['bin_ext_neg'] bin_pos = Results[source_run][member_run]['bin_ext_pos'] # Starting with Negative TCEs first # --------------------------------- Binary_Data_TCE ['neg']['bin_TCE_01s'], Binary_Data_TCE ['neg']['bin_TCE_1s'], Binary_Data_TCE ['neg']['bin_TCE_len'] = Binary_Mat_TCE_Win (bin_ar = bin_neg, win_start_year = start_yr,lags = lags_TCE, land_frac= data_lf [source_run]) Binary_Data_TCE ['pos']['bin_TCE_01s'], Binary_Data_TCE ['pos']['bin_TCE_1s'], Binary_Data_TCE ['pos']['bin_TCE_len'] = Binary_Mat_TCE_Win (bin_ar = bin_pos, win_start_year = start_yr,lags = lags_TCE, land_frac= data_lf [source_run]) path_TCE = cori_scratch + 'add_cmip6_data/%s/%s/%s/%s_TCE/'%(source_run,exp,member_run,variable_run) # Check if the directory 'path_TCE' already exists? 
If not, then create one: if os.path.isdir(path_TCE) == False: os.makedirs(path_TCE) for ext_type in ['neg','pos']: print("Saving the 01 TCE for %s,%s,%d,%s"%(source_run,member_run,start_yr,ext_type)) with nc4.Dataset( path_TCE + 'bin_TCE_01s_'+ext_type+'_%d.nc'%start_yr, mode = 'w') as dset: dset .createDimension( "lag",size = lags_TCE.size) dset .createDimension( "time",size = win_len) dset .createDimension( "lat" ,size = nc_ano[source_run][member_run].variables['lat'].size) dset .createDimension( "lon" ,size = nc_ano[source_run][member_run].variables['lon'].size) w = dset.createVariable(varname = "lag" ,datatype = float, dimensions = ("lag") , fill_value = 1e+36) t = dset.createVariable(varname = "time" ,datatype = float, dimensions = ("time"), fill_value = 1e+36) x = dset.createVariable(varname = "lon" ,datatype = float, dimensions = ("lon") , fill_value = 1e+36) y = dset.createVariable(varname = "lat" ,datatype = float, dimensions = ("lat") , fill_value = 1e+36) z = dset.createVariable(varname = variable_run +'_TCE_01s' ,datatype = float, dimensions = ("lag","time","lat","lon"),fill_value = 1e+36) #varible = gpp_bin_ext w.axis = "T" t.axis = "T" x.axis = "X" y.axis = "Y" w[...] = lags_TCE t[...] = nc_ano[source_run][member_run].variables['time'] [...][win_idx * win_len : (win_idx+1)*win_len] x[...] = nc_ano[source_run][member_run].variables['lon'][...] y[...] = nc_ano[source_run][member_run].variables['lat'][...] z[...] = Binary_Data_TCE [ext_type]['bin_TCE_01s'] z.missing_value = 1e+36 z.stardard_name = variable_run+" binary TCE (01s) matrix for 25 years starting at the year %d"%start_yr z.units = "0,1" x.units = nc_ano[source_run][member_run].variables['lon'].units x.missing_value = 1e+36 x.setncattr ("standard_name",nc_ano[source_run][member_run].variables['lon'].standard_name) y.units = nc_ano[source_run][member_run].variables['lat'].units y.missing_value = 1e+36 y.setncattr ("standard_name",nc_ano[source_run][member_run].variables['lat'].standard_name) t.units = nc_ano[source_run][member_run].variables['time'].units t.setncattr ("calendar", nc_ano[source_run][member_run].variables['time'].calendar) t.setncattr ("standard_name", nc_ano[source_run][member_run].variables['time'].standard_name) t.missing_value = 1e+36 w.units = "month" w.setncattr ("standard_name","lags in months") w.missing_value = 1e+36 print("Saving the 1s TCE for %s,%s,%d,%s"%(source_run,member_run,start_yr,ext_type)) with nc4.Dataset( path_TCE + 'bin_TCE_1s_'+ext_type+'_%d.nc'%start_yr, mode = 'w') as dset: dset .createDimension( "lag",size = lags_TCE.size) dset .createDimension( "time",size = win_len) dset .createDimension( "lat" ,size = nc_ano[source_run][member_run].variables['lat'].size) dset .createDimension( "lon" ,size = nc_ano[source_run][member_run].variables['lon'].size) w = dset.createVariable(varname = "lag" ,datatype = float, dimensions = ("lag") , fill_value = 1e+36) t = dset.createVariable(varname = "time" ,datatype = float, dimensions = ("time"), fill_value = 1e+36) x = dset.createVariable(varname = "lon" ,datatype = float, dimensions = ("lon") , fill_value = 1e+36) y = dset.createVariable(varname = "lat" ,datatype = float, dimensions = ("lat") , fill_value = 1e+36) z = dset.createVariable(varname = variable_run+'_TCE_1s' ,datatype = float, dimensions = ("lag","time","lat","lon"),fill_value = 1e+36) #varible = gpp_bin_ext w.axis = "T" t.axis = "T" x.axis = "X" y.axis = "Y" w[...] = lags_TCE t[...] 
= nc_ano[source_run][member_run].variables['time'] [...][win_idx * win_len : (win_idx+1)*win_len] x[...] = nc_ano[source_run][member_run].variables['lon'][...] y[...] = nc_ano[source_run][member_run].variables['lat'][...] z[...] = Binary_Data_TCE [ext_type]['bin_TCE_1s'] z.missing_value = 1e+36 z.stardard_name = variable_run +" binary TCE (1s) matrix for 25 years starting at the year %d"%start_yr z.units = "0,1" x.units = nc_ano[source_run][member_run].variables['lon'].units x.missing_value = 1e+36 x.setncattr ("standard_name",nc_ano[source_run][member_run].variables['lon'].standard_name) y.units = nc_ano[source_run][member_run].variables['lat'].units y.missing_value = 1e+36 y.setncattr ("standard_name",nc_ano[source_run][member_run].variables['lat'].standard_name) t.units = nc_ano[source_run][member_run].variables['time'].units t.setncattr ("calendar", nc_ano[source_run][member_run].variables['time'].calendar) t.setncattr ("standard_name", nc_ano[source_run][member_run].variables['time'].standard_name) t.missing_value = 1e+36 w.units = "month" w.setncattr ("standard_name","lags in months") w.missing_value = 1e+36 # Calculation of TS of gain or loss of carbon uptake # -------------------------------------------------- def Global_TS_of_Extremes(bin_ar, ano_gC, area = 0, lf = 0): """ Returns the global TS of : 1. total carbon loss/gain associated neg/pos extremes 2. total freq of extremes 3. total area affected by extremes Parameters: ----------- bin_ar : the binary array of extremes (pos/neg) ano_gC : the array which will use the mask or binary arrays to calc the carbon loss/gain Universal: ---------- 2-d area array (nlat, nlon), dates_win (# wins, win_size) Returns: -------- 1d array of length # wins x win_size for all : ext_gC_ts, ext_freq_ts, ext_area_ts """ print (" Calculating Extremes ... " ) ext_ar = bin_ar * ano_gC # extremes array if (area == 0) and (lf == 0) : print ("The area under extreme will not be calculated... \nGrid area input and land fraction is not provided ... \nThe returned area is 0 (zeros)") ext_area_ar = bin_ar * area[...] * lf[...] # area array of extremes ext_gC_ts = [] ext_freq_ts = [] ext_area_ts = [] for i in range(dates_win.flatten().size): ext_gC_ts . append(np.ma.sum(ext_ar[i])) ext_freq_ts . append(np.ma.sum(bin_ar[i])) ext_area_ts . append(np.ma.sum(ext_area_ar[i])) return np.ma.array(ext_gC_ts), np.ma.array(ext_freq_ts),np.ma.array(ext_area_ts) # Calculating the slopes of GPP extremes # -------------------------------------- def Slope_Intercept_Pv_Trend_Increase ( time, ts, until_idx1=2100, until_idx2=None): """ Returns the slope, intercept, r value , p value and trend line points for time period 1850-2100 (as '_21') and 2101-2300 ('_23') Parameters: ----------- One dimentional time series of len 5400 from 1850 through 2299 Returns: -------- single values for slope, intercept, r value , p value, increase percentage** 1d array for same legnth as 'ts' for 'trend' ** it return the percent increase of trend line relavtive to the year 1850 (mean trend line value),.. 
""" until_idx1 = int (until_idx1) if until_idx2 != None: until_idx2 = int (until_idx2) # calculation of the magnitudes of global gpp loss and trend from 1850- until idx-1 slope_1, intercept_1,rv_1,pv_1,std_e1 = stats.linregress(time[...][:until_idx1],ts[:until_idx1]) trend_1 = slope_1*time[...][:until_idx1]+intercept_1 increase_1 = (trend_1[-1]-trend_1[0])*100/trend_1[0] # calculation of the magnitudes of global gpp loss and trend from index-1 to until-idx2 if until_idx2 != None: slope_2, intercept_23,rv_23,pv_23,std_e23 = stats.linregress(time[...][until_idx1:until_idx2],ts[until_idx1:until_idx22]) trend_2 = slope_2*time[...][until_idx1:until_idx2]+intercept_23 increase_2 = (trend_2[-1]-trend_2[0])*100/trend_2[0] increase_2_r1850 = (trend_2[-1]-trend_1[0])*100/trend_1[0] return slope_1,intercept_1,pv_1,trend_1,increase_1,slope_2,intercept_2,pv_2,trend_2,increase_2,increase_2_r1850 else: return slope_1,intercept_1,pv_1,trend_1,increase_1 # Saving the results of TS carbon loss/gain for source_run in source_selected: for member_run in common_members [source_run]: Results[source_run][member_run]['ts_global_gC'] = {} Results[source_run][member_run]['ts_global_area'] = {} Results[source_run][member_run]['ts_global_freq'] = {} Results[source_run][member_run]['ts_global_gC']['neg_ext'] = {} Results[source_run][member_run]['ts_global_gC']['pos_ext'] = {} Results[source_run][member_run]['ts_global_area']['neg_ext']= {} Results[source_run][member_run]['ts_global_area']['pos_ext']= {} Results[source_run][member_run]['ts_global_freq']['neg_ext']= {} Results[source_run][member_run]['ts_global_freq']['pos_ext']= {} for source_run in source_selected: print ("Calculating the global TS of Extremes for %s"%source_run) for member_run in common_members [source_run]: # Negative Extremes: # ------------------ ts_ext , ts_freq, ts_area = Global_TS_of_Extremes(bin_ar = Results[source_run][member_run]['bin_ext_neg'], ano_gC = nc_ano[source_run][member_run].variables[variable_run][...], area = data_area [source_run], lf = data_lf [source_run]) Results[source_run][member_run]['ts_global_gC' ]['neg_ext']['ts'] = ts_ext Results[source_run][member_run]['ts_global_area']['neg_ext']['ts'] = ts_area Results[source_run][member_run]['ts_global_freq']['neg_ext']['ts'] = ts_freq del ts_ext , ts_freq, ts_area # Positive Extremes: # ----------------- ts_ext , ts_freq, ts_area = Global_TS_of_Extremes(bin_ar = Results[source_run][member_run]['bin_ext_pos'], ano_gC = nc_ano[source_run][member_run].variables[variable_run][...], area = data_area [source_run], lf = data_lf [source_run]) Results[source_run][member_run]['ts_global_gC' ]['pos_ext']['ts'] = ts_ext Results[source_run][member_run]['ts_global_area']['pos_ext']['ts'] = ts_area Results[source_run][member_run]['ts_global_freq']['pos_ext']['ts'] = ts_freq del ts_ext , ts_freq, ts_area # ----------------- for source_run in source_selected: for member_run in common_members [source_run]: # Negative Extremes gC: # --------------------- slope,intercept,pv,trend,increase = Slope_Intercept_Pv_Trend_Increase ( time = nc_ano[source_run][member_run].variables['time'], ts = Results[source_run][member_run]['ts_global_gC']['neg_ext']['ts'], until_idx1 = idx_yr_2099) Results[source_run][member_run]['ts_global_gC']['neg_ext']['s21' ] = slope Results[source_run][member_run]['ts_global_gC']['neg_ext']['pv21' ] = pv Results[source_run][member_run]['ts_global_gC']['neg_ext']['trend_21'] = trend Results[source_run][member_run]['ts_global_gC']['neg_ext']['inc_21' ] = increase del 
slope,intercept,pv,trend,increase # Positive Extremes gC: # --------------------- slope,intercept,pv,trend,increase = Slope_Intercept_Pv_Trend_Increase ( time = nc_ano[source_run][member_run].variables['time'], ts = Results[source_run][member_run]['ts_global_gC']['pos_ext']['ts'], until_idx1 = idx_yr_2099) Results[source_run][member_run]['ts_global_gC']['pos_ext']['s21' ] = slope Results[source_run][member_run]['ts_global_gC']['pos_ext']['pv21' ] = pv Results[source_run][member_run]['ts_global_gC']['pos_ext']['trend_21'] = trend Results[source_run][member_run]['ts_global_gC']['pos_ext']['inc_21' ] = increase del slope,intercept,pv,trend,increase # ----------------------------------- # ----------------------------------- # Negative Extremes freq: # ----------------------- slope,intercept,pv,trend,increase = Slope_Intercept_Pv_Trend_Increase ( time = nc_ano[source_run][member_run].variables['time'], ts = Results[source_run][member_run]['ts_global_freq']['neg_ext']['ts'], until_idx1 = idx_yr_2099) Results[source_run][member_run]['ts_global_freq']['neg_ext']['s21' ] = slope Results[source_run][member_run]['ts_global_freq']['neg_ext']['pv21' ] = pv Results[source_run][member_run]['ts_global_freq']['neg_ext']['trend_21']= trend Results[source_run][member_run]['ts_global_freq']['neg_ext']['inc_21' ]= increase del slope,intercept,pv,trend,increase # Positive Extremes freq: # ----------------------- slope,intercept,pv,trend,increase = Slope_Intercept_Pv_Trend_Increase ( time = nc_ano[source_run][member_run].variables['time'], ts = Results[source_run][member_run]['ts_global_freq']['pos_ext']['ts'], until_idx1 = idx_yr_2099) Results[source_run][member_run]['ts_global_freq']['pos_ext']['s21' ] = slope Results[source_run][member_run]['ts_global_freq']['pos_ext']['pv21' ] = pv Results[source_run][member_run]['ts_global_freq']['pos_ext']['trend_21']= trend Results[source_run][member_run]['ts_global_freq']['pos_ext']['inc_21' ]= increase del slope,intercept,pv,trend,increase # ----------------------------------- # ----------------------------------- # Negative Extremes area: # ----------------------- slope,intercept,pv,trend,increase = Slope_Intercept_Pv_Trend_Increase ( time = nc_ano[source_run][member_run].variables['time'], ts = Results[source_run][member_run]['ts_global_area']['neg_ext']['ts'], until_idx1 = idx_yr_2099) Results[source_run][member_run]['ts_global_area']['neg_ext']['s21' ] = slope Results[source_run][member_run]['ts_global_area']['neg_ext']['pv21' ] = pv Results[source_run][member_run]['ts_global_area']['neg_ext']['trend_21']= trend Results[source_run][member_run]['ts_global_area']['neg_ext']['inc_21' ]= increase del slope,intercept,pv,trend,increase # Positive Extremes area: # ----------------------- slope,intercept,pv,trend,increase = Slope_Intercept_Pv_Trend_Increase ( time = nc_ano[source_run][member_run].variables['time'], ts = Results[source_run][member_run]['ts_global_area']['pos_ext']['ts'], until_idx1 = idx_yr_2099) Results[source_run][member_run]['ts_global_area']['pos_ext']['s21' ] = slope Results[source_run][member_run]['ts_global_area']['pos_ext']['pv21' ] = pv Results[source_run][member_run]['ts_global_area']['pos_ext']['trend_21']= trend Results[source_run][member_run]['ts_global_area']['pos_ext']['inc_21' ]= increase del slope,intercept,pv,trend,increase # ----------------------------------- def Sum_and_Diff_of_Fluxes_perWin(ano_gC, bin_ar = None, data_type = 'ext', diff_ref_yr = 1850): """ returns a 2-d array sum of fluxes and difference of the sum of fluxes with 
reference to the ref yr Parameters: ---------- bin_ar: the binary array of extremes (pos/neg) ano_gC : the array which will use the mask or binary arrays to calc the carbon loss/gain diff_ref_yr : the starting year of the reference time window for differencing data_type : do you want to calculate the sum and difference of extremes or original fluxes? ... 'ext' is for extremes and will mask based on the 'bin_ar' in calculation ... otherwise it will not multiply by bin_ar and the original flux difference will be calculated. 'ext' will calculate the extremes and anything else with calc on original flux diff Universal: ---------- start_dates : the start_dates of every 25 year window, size = # wins Returns: -------- sum_flux : shape (# wins, nlat,nlon), sum of fluxes per window diff_flux : shape (# wins, nlat,nlon), difference of sum of fluxes per window and reference window """ if data_type != 'ext': bin_ar = np.ma.ones(ano_gC.shape) sum_ext = [] for i in range(len(start_dates)): ext_gC = bin_ar[idx_dates_win[i][0] : idx_dates_win [i][-1]+1,:,:] * ano_gC[idx_dates_win[i][0] : idx_dates_win [i][-1]+1,:,:] sum_ext . append (np.ma.sum(ext_gC, axis = 0)) sum_ext = np.ma.asarray(sum_ext) #to calculate the index of the reference year starting window: for i,date in enumerate(start_dates): if date.year in [diff_ref_yr]: diff_yr_idx = i diff_ext = [] for i in range(len(start_dates)): diff = sum_ext[i] - sum_ext[diff_yr_idx] diff_ext . append (diff) diff_ext = np.ma.asarray(diff_ext) return sum_ext , diff_ext # ---------------------------------------------------------- # Preparing the storage # ---------------------------------------------------------- for source_run in source_selected: for member_run in common_members [source_run]: # Negative Extremes: sum_neg_ext , diff_neg_ext = Sum_and_Diff_of_Fluxes_perWin ( bin_ar = Results[source_run][member_run]['bin_ext_neg'], ano_gC = nc_ano[source_run][member_run].variables[variable_run][...], data_type = 'ext', diff_ref_yr = 1850) Results[source_run][member_run]['sum_neg_ext'] = sum_neg_ext Results[source_run][member_run]['diff_neg_ext'] = diff_neg_ext # Positive extremes: sum_pos_ext , diff_pos_ext = Sum_and_Diff_of_Fluxes_perWin ( bin_ar = Results[source_run][member_run]['bin_ext_pos'], ano_gC = nc_ano[source_run][member_run].variables[variable_run][...], data_type = 'ext', diff_ref_yr = 1850) Results[source_run][member_run]['sum_pos_ext'] = sum_pos_ext Results[source_run][member_run]['diff_pos_ext'] = diff_pos_ext del sum_neg_ext , diff_neg_ext, sum_pos_ext , diff_pos_ext #Negative Flux/Ori #sum_neg_ori , diff_neg_ori = Sum_and_Diff_of_Fluxes_perWin ( bin_ar = None, # ano_gC = nc_ano[source_run][member_run].variables[variable_run][...], # data_type = 'ori', # diff_ref_yr = 1850) # Results[source_run][member_run]['sum_neg_ori'] = sum_neg_ori # Results[source_run][member_run]['diff_neg_ori'] = diff_neg_ori # Results[source_run][member_run]['sum_pos_ext'] = {} # Results[source_run][member_run]['diff_neg_ext'] = {} # Results[source_run][member_run]['diff_pos_ext'] = {} # Regional analysis # ----------------- import regionmask # Selection the member_run manually member_run = common_members[source_run] [0] lon = nc_ano[source_run][member_run].variables ['lon'] lat = nc_ano[source_run][member_run].variables ['lat'] # for the plotting lon_bounds = nc_ano[source_run][member_run].variables [lon.bounds] lat_bounds = nc_ano[source_run][member_run].variables [lat.bounds] lon_edges = np.hstack (( lon_bounds[:,0], lon_bounds[-1,-1])) lat_edges = np.hstack (( 
lat_bounds[:,0], lat_bounds[-1,-1])) # Creating mask of the regions based on the resolution of the model mask = regionmask.defined_regions.srex.mask(lon[...], lat[...]).values # important information: srex_abr = regionmask.defined_regions.srex.abbrevs srex_names = regionmask.defined_regions.srex.names srex_nums = regionmask.defined_regions.srex.numbers srex_centroids = regionmask.defined_regions.srex.centroids srex_polygons = regionmask.defined_regions.srex.polygons mask_ma = np.ma.masked_invalid(mask) import matplotlib.pyplot as plt import os """ Basemaps not working anymore =========================== #1- Hack to fix missing PROJ4 env var import os import conda conda_file_dir = conda.__file__ conda_dir = conda_file_dir.split('lib')[0] proj_lib = os.path.join(os.path.join(conda_dir, 'share'), 'proj') os.environ["PROJ_LIB"] = proj_lib #-1 Hack end import matplotlib.pyplot as plt from mpl_toolkits.basemap import Basemap """ """ Regional Plots -------------- #fig = plt.figure() #ax = plt.subplot(111, projection=ccrs.PlateCarree()) fig,ax = plt.subplots(tight_layout = True, figsize = (9,5), dpi = 400) bmap = Basemap( projection = 'eck4', lon_0 = 0., resolution = 'c') LON,LAT = np.meshgrid(lon_edges,lat_edges) ax = bmap.pcolormesh(LON,LAT, mask_ma, cmap ='viridis') bmap .drawparallels(np.arange(-90., 90., 30.),fontsize=14, linewidth = .2) bmap .drawmeridians(np.arange(0., 360., 60.),fontsize=14, linewidth = .2) bmap .drawcoastlines(linewidth = .25,color='lightgrey') plt.colorbar(ax, orientation='horizontal', pad=0.04) fig.savefig (web_path + "SREX_regions.pdf") # Cartopy Plotting # ---------------- import cartopy.crs as ccrs from shapely.geometry.polygon import Polygon import cartopy.feature as cfeature # Fixing the error {'GeoAxesSubplot' object has no attribute '_hold'} from matplotlib.axes import Axes from cartopy.mpl.geoaxes import GeoAxes GeoAxes._pcolormesh_patched = Axes.pcolormesh proj_trans = ccrs.PlateCarree() fig = plt.figure(figsize = (9,5)) ax = fig.add_subplot(111, projection=ccrs.PlateCarree()) mask_ma = np.ma.masked_invalid(mask) h = ax.pcolormesh(lon_edges[...], lat_edges[...], mask_ma, transform = proj_trans)#, cmap='viridis') ax.coastlines() plt.colorbar(h, orientation='horizontal', pad=0.04) # Plot the abs at the centroids for idx, abr in enumerate(srex_abr): plt.text ( srex_centroids[idx][0], srex_centroids[idx][-1], srex_abr[idx], horizontalalignment='center', transform = proj_trans) ax.add_geometries([srex_polygons[idx]], crs = proj_trans, facecolor='none', edgecolor='red', alpha=0.8) fig.savefig (web_path + "SREX_regions_cpy.pdf") plt.close(fig) """ # ================================================================================================= # ================================================================================================= ## # ## ######## # # # ## ## ## ## # # # ## # # ## ## ## # ##### ## ## # ================================================================================================= # ================================================================================================= # Creating a lis to Unique colors for multiple models: # --------------------------------------------------- NUM_COLORS = len(source_selected) LINE_STYLES = ['solid', 'dashed', 'dashdot', 'dotted'] NUM_STYLES = len(LINE_STYLES) sns.reset_orig() # get default matplotlib styles back clrs = sns.color_palette('husl', n_colors=NUM_COLORS) # Creating the ticks for x axis (every 25 years): # ---------------------------------------------- tmp_idx = np.arange(0, 3013, 
300) #for x ticks tmp_idx[-1]=tmp_idx[-1]-1 dates_ticks = [] years_ticks = [] for i in tmp_idx: a = dates_win.flatten()[i] dates_ticks.append(a) years_ticks.append(a.year) # Creating the x-axis years (Monthly) # ----------------------------------- x_years = [d.year for d in dates_win.flatten()] # Caption (optional): This dictionary could be used to save the captions of the figures # ------------------------------------------------------------------------------------- Captions = {} # PLOTING THE THRESHOLD FOR QUALIFICATION OF EXTREME EVENTS: fig[1-9] # =================================================================== if th_type == 'ind': fig1,ax2 = plt.subplots(tight_layout = True, figsize = (9,5), dpi = 400) ymin = 400 ymax = 8000 for s_idx, source_run in enumerate(source_selected): for m_idx, member_run in enumerate(common_members [source_run]): # ax2.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_neg'])/10**9, # 'r', label = "Th$-$ %s"%source_run, alpha = .7) # ax2.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_neg'])/10**9, # clrs[s_idx], ls='--', label = "Th$-$ %s"%source_run, alpha = .7) ax2.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_neg'])/10**9, 'r', ls='--', label = "Th$-$ %s"%source_run, alpha = .3) ax2.set_ylabel("Negative Extremes (GgC)", {'color': 'r'},fontsize =14) ax2.set_xlabel("Time", fontsize = 14) ax2.set_ylim([ymin,ymax]) #ax2.set_yticks(np.arange(int(np.floor(ymin/100)*100),int(np.ceil(ymax/100)*100),25)) #ax2.set_yticklabels(-np.arange(int(np.floor(ymin/100)*100),int(np.ceil(ymax/100)*100),25)) #ax2.tick_params(axis='y', colors='red') # ax2.set_xticks(dates_ticks) ax2.grid(which='major', linestyle=':', linewidth='0.3', color='gray') ax1=ax2.twinx() # ax1.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_pos'])/10**9, # 'g', label = "Th+ %s"%source_run, alpha = .7) ax1.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_pos'])/10**9, 'g', label = "Th+ %s"%source_run, alpha = .3) ax1.set_ylabel("Positive Extremes (GgC)", {'color': 'g'},fontsize =14) ax1.set_ylim([ymin,ymax]) #ax1.set_yticks(np.arange(int(np.floor(ymin/100)*100),int(np.ceil(ymax/100)*100),25)) #ax1.tick_params(axis='y', colors='green') # ax1.set_xticks(dates_ticks) # ax1.grid(which='major', linestyle=':', linewidth='0.3', color='gray') lines, labels = ax1.get_legend_handles_labels() lines2, labels2 = ax2.get_legend_handles_labels() labels, ids = np.unique(labels, return_index=True) labels2, ids2 = np.unique(labels2, return_index=True) lines = [lines[i] for i in ids] lines2 = [lines2[i] for i in ids2] # ax2.legend(lines + lines2, labels + labels2, loc= 'best',fontsize =12) #continue fig1.savefig(web_path + 'Threshold/ts_threshold_all_scenario_%s_per_%s.pdf'%(variable_run,int(per))) plt.close(fig1) del fig1 # Threshold per model for the 'th_type' == 'ind' and per = 1.0 # ------------------------------------------------------------- for source_run in source_selected: fig2,ax2 = plt.subplots(tight_layout = True, figsize = (9,5), dpi = 400) pd.plotting.deregister_matplotlib_converters() if source_run == 'CESM2' : ymin = 400 ; ymax = 700 if source_run == 'CanESM5' : ymin = 2000 ; ymax = 8000 if source_run == 'IPSL-CM6A-LR' : ymin = 1700 ; ymax = 2900 if source_run == 'BCC-CSM2-MR' : ymin = 400 ; ymax = 1000 if source_run == 'CNRM-ESM2-1' : ymin = 1000 ; ymax = 1500 if source_run == 'CNRM-CM6-1' : ymin = 1000 ; ymax = 1800 for m_idx, member_run in enumerate(common_members [source_run]): L1= ax2.plot( 
dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_neg'])/10**9, 'r', label = "Th$-$ %s"%member_run, linewidth = 0.3, alpha = .7) L1[0].set_linestyle(LINE_STYLES[m_idx%NUM_STYLES]) ax2.set_ylabel("Negative Extremes (GgC)", {'color': 'r'},fontsize =14) ax2.set_xlabel("Time", fontsize = 14) #ax2.set_xlim([dates_ticks[0],dates_ticks[-1]]) #ax2.set_yticks(np.arange(int(np.floor(ymin/100)*100),int(np.ceil(ymax/100)*100),25)) #ax2.set_yticklabels(-np.arange(int(np.floor(ymin/100)*100),int(np.ceil(ymax/100)*100),25)) #ax2.tick_params(axis='y', colors='red') ax2.grid(which='major', linestyle='--', linewidth='0.3', color='gray') ax1=ax2.twinx() for m_idx, member_run in enumerate(common_members [source_run]): L2= ax1.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_pos'])/10**9, 'g', label = "Th+ %s"%member_run, linewidth = 0.3, alpha = .7) L2[0].set_linestyle(LINE_STYLES[m_idx%NUM_STYLES]) ax1.set_ylabel("Positive Extremes (GgC)", {'color': 'g'},fontsize =14) #ax1.set_yticklabels([]) #ax1.set_yticks(np.arange(int(np.floor(ymin/100)*100),int(np.ceil(ymax/100)*100),25)) #ax1.tick_params(axis='y', colors='green') # ax1.grid(which='major', linestyle='--', linewidth='0.3', color='gray') ax2.set_ylabel("Negative Extremes (GgC)", {'color': 'r'},fontsize =14) ax2.set_xlabel("Time", fontsize = 14) ax1.set_ylabel("Positive Extremes (GgC)", {'color': 'g'},fontsize =14) ax2.set_ylim([ymin,ymax]) ax1.set_ylim([ymin,ymax]) ax1.set_xticks(dates_ticks) ax1.set_xticklabels(years_ticks) lines, labels = ax1.get_legend_handles_labels() lines2, labels2 = ax2.get_legend_handles_labels() ax2.legend(lines + lines2, labels + labels2, loc=0,fontsize =8) fig2.savefig(web_path + 'Threshold/ts_threshold_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per))) # Saving the plots path_save = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/Global_Extremes/non-TCE/Threshold/"%(source_run,member_run, variable_run) if os.path.isdir(path_save) == False: os.makedirs(path_save) fig2.savefig(path_save + 'ts_threshold_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per))) plt.close(fig2) del fig2,ax2 # Plotting thresholds when 'th_type' == 'common': # ----------------------------------------------- if th_type == 'common': fig3 = plt.figure(tight_layout = True, figsize = (9,5), dpi = 400) plt.title("TS of Thresholds for CMIP6 models for percentile = %d"%int(per)) pd.plotting.deregister_matplotlib_converters() for s_idx, source_run in enumerate(source_selected): for m_idx, member_run in enumerate(common_members [source_run]): plt.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_neg'])/10**9, color=clrs[s_idx], ls='-', label = "$q$ %s"%source_run, alpha = .8, linewidth = .7) plt.ylabel("Thresholds (GgC)", {'color': 'k'},fontsize =14) plt.xlabel("Time", fontsize = 14) plt.grid(which='major', linestyle=':', linewidth='0.3', color='gray') plt.legend() break #Plotting only the first ensemble member fig3.savefig(web_path + 'Threshold/ts_thresholdc_all_models_%s_per_%s.pdf'%(variable_run,int(per))) plt.close(fig3) del fig3 # Threshold per model for the 'th_type' == 'common' and per = 5.0 # --------------------------------------------------------------- for s_idx, source_run in enumerate(source_selected): fig4 = plt.figure(tight_layout = True, figsize = (9,5), dpi = 400) plt.title("TS of %d percentile Thresholds of %s for the model %s"%(per, variable_run.upper(), source_run)) pd.plotting.deregister_matplotlib_converters() if variable_run == 'gpp': if source_run == 'CESM2' : ymin = 250 
; ymax = 400 if source_run == 'CanESM5' : ymin = 1500 ; ymax = 4500 if source_run == 'IPSL-CM6A-LR' : ymin = 1200 ; ymax = 2100 if source_run == 'BCC-CSM2-MR' : ymin = 300 ; ymax = 600 if source_run == 'CNRM-ESM2-1' : ymin = 700 ; ymax = 900 if source_run == 'CNRM-CM6-1' : ymin = 600 ; ymax = 1100 if variable_run == 'nbp': if source_run == 'CESM2' : ymin = 130 ; ymax = 230 if variable_run == 'ra': if source_run == 'CESM2' : ymin = 180 ; ymax = 240 if variable_run == 'rh': if source_run == 'CESM2' : ymin = 100 ; ymax = 170 for m_idx, member_run in enumerate(common_members [source_run]): plt.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_neg'])/10**9, color=clrs[s_idx], ls='-', label = "$q$ %s"%source_run, alpha = 1, linewidth = 1) break #Plotting only the first ensemble member plt.ylim ((ymin,ymax)) plt.ylabel("Thresholds (GgC)", {'color': 'k'},fontsize =14) plt.xlabel("Time", fontsize = 14) plt.grid(which='major', linestyle=':', linewidth='0.4', color='gray') plt.legend() fig4.savefig(web_path + 'Threshold/ts_thresholdc_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per))) # Saving the plots path_save = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/Global_Extremes/non-TCE/Threshold/"%(source_run,member_run, variable_run) if os.path.isdir(path_save) == False: os.makedirs(path_save) fig4.savefig(path_save + 'ts_thresholdc_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per))) plt.close(fig4) del fig4 # PLOTING THE GLOBAL TIMESERIES OF THE EXTREME EVENTS : fig[11-19] # ====================================================================================== for s_idx, source_run in enumerate(source_selected): fig11 = plt.figure(tight_layout = True, figsize = (9,5), dpi = 400) plt.style.use("classic") plt.title ("TS global %s extremes for %s when percentile is %d"%(variable_run.upper(), source_run, per)) pd.plotting.deregister_matplotlib_converters() if variable_run == 'gpp': if source_run == 'CESM2' : ymin = -1.2 ; ymax = 1.2 if source_run == 'CanESM5' : ymin = -1.5 ; ymax = 1.5 if source_run == 'IPSL-CM6A-LR' : ymin = -0.5 ; ymax = 0.5 if source_run == 'BCC-CSM2-MR' : ymin = -1.6 ; ymax = 1.6 if source_run == 'CNRM-ESM2-1' : ymin = -0.8 ; ymax = 0.8 if source_run == 'CNRM-CM6-1' : ymin = -1.7 ; ymax = 1.7 if variable_run == 'nbp': if source_run == 'CESM2' : ymin = -.7 ; ymax = .7 if variable_run == 'ra': if source_run == 'CESM2' : ymin = -.7 ; ymax = .7 if variable_run == 'rh': if source_run == 'CESM2' : ymin = -.4 ; ymax = .4 for m_idx, member_run in enumerate(common_members [source_run]): plt.plot( dates_win.flatten(), Results[source_run][member_run]['ts_global_gC']['neg_ext']['ts'] / 10**15, 'r', label = "Negative Extremes" , linewidth = 0.5, alpha=0.7 ) plt.plot( dates_win.flatten(), Results[source_run][member_run]['ts_global_gC']['pos_ext']['ts'] / 10**15, 'g', label = "Positive Extremes" , linewidth = 0.5, alpha=0.7 ) plt.plot( dates_win.flatten() [:idx_yr_2099], Results[source_run][member_run]['ts_global_gC']['neg_ext']['trend_21'] /10**15, 'k--', label = "Neg Trend 21", linewidth = 0.5, alpha=0.9 ) plt.plot( dates_win.flatten() [:idx_yr_2099], Results[source_run][member_run]['ts_global_gC']['pos_ext']['trend_21'] /10**15, 'k--', label = "Pos Trend 21", linewidth = 0.5, alpha=0.9 ) break #Plotting only the first ensemble member plt.ylim ((ymin,ymax)) #| waiting for the first set of graphs to remove this comment plt.xlabel( 'Time', fontsize = 14) plt.xticks(ticks = dates_ticks, labels = years_ticks, fontsize = 12) plt.ylabel( "Intensity of Extremes 
(PgC/mon)", fontsize = 14) plt.grid( which='major', linestyle=':', linewidth='0.3', color='gray') plt.text( dates_win.flatten()[900], ymin+0.2,"Slope = %d %s"%(int(Results[source_run][member_run]['ts_global_gC']['neg_ext']['s21']/10**6), 'MgC/month'), size =14, color = 'r' ) plt.text( dates_win.flatten()[900], ymax-0.2,"Slope = %d %s"%(int(Results[source_run][member_run]['ts_global_gC']['pos_ext']['s21']/10**6), 'MgC/month'), size =14, color = 'g' ) fig11.savefig(web_path + 'Intensity/ts_global_carbon_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per))) # Saving the plots path_save = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/Global_Extremes/non-TCE/Intensity/"%(source_run,member_run, variable_run) if os.path.isdir(path_save) == False: os.makedirs(path_save) fig11.savefig(path_save + 'ts_global_carbon_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per))) fig11.savefig(path_save + 'ts_global_carbon_%s_source_%s_per_%s.png'%(source_run,variable_run,int(per))) plt.close(fig11) del fig11 # Rolling mean of annual losses and gains # --------------------------------------- def RM_Nyearly_4m_Mon(ts, rm_years = 5): """ The rolling mean is calculated to the right end value The first 4 years will not be reported in the output of 5 year rolling mean """ ts = np.array(ts) yr = np.array([np.sum(ts[i:i+12]) for i in range(ts.size//12)]) yr_rm = pd.Series(yr).rolling(rm_years).mean() return yr_rm[rm_years-1:] # Ploting 5 Year Rolling Mean figures # ----------------------------------- for s_idx, source_run in enumerate(source_selected): fig12 = plt.figure(tight_layout = True, figsize = (9,5), dpi = 400) plt.title ("5yr RM of TS annual global %s for %s when percentile is %d"%(variable_run.upper(), source_run, per)) pd.plotting.deregister_matplotlib_converters() for m_idx, member_run in enumerate(common_members [source_run]): print (source_run,member_run) plt.plot(np.arange(1854,2100), RM_Nyearly_4m_Mon(Results[source_run][member_run]['ts_global_gC']['neg_ext']['ts'] / 10**15), 'r', label = "Negative Extremes" , linewidth = 0.5, alpha=0.7 ) plt.plot(np.arange(1854,2100), RM_Nyearly_4m_Mon(Results[source_run][member_run]['ts_global_gC']['pos_ext']['ts'] / 10**15), 'g', label = "Positive Extremes" , linewidth = 0.5, alpha=0.7 ) break #Plotting only the first ensemble member plt.xlabel( 'Time', fontsize = 14) plt.ylabel( "Intensity of Extremes (PgC/mon)", fontsize = 14) plt.grid( which='major', linestyle=':', linewidth='0.3', color='gray') fig12.savefig(web_path + 'Intensity/ts_rm5yr_global_carbon_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per))) # Saving the plots path_save = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/Global_Extremes/non-TCE/Intensity/"%(source_run,member_run, variable_run) if os.path.isdir(path_save) == False: os.makedirs(path_save) fig12.savefig(path_save + 'ts_rm5yr_global_carbon_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per))) plt.close(fig12) del fig12 # Frequency of extremes: # ====================== # Ploting 5 Year Rolling Mean figures: # ------------------------------------ for s_idx, source_run in enumerate(source_selected): fig14 = plt.figure(tight_layout = True, figsize = (9,5), dpi = 400) plt.title ("5yr RM of annual TS of global frequency of %s extremes\nfor %s when percentile is %d"%(variable_run.upper(),source_run, per)) pd.plotting.deregister_matplotlib_converters() for m_idx, member_run in enumerate(common_members [source_run]): print (source_run,member_run) plt.plot(np.arange(1854,2100), 
RM_Nyearly_4m_Mon(Results[source_run][member_run]['ts_global_freq']['neg_ext']['ts'] ), 'r', label = "Negative Extremes" , linewidth = 0.5, alpha=0.7 ) plt.plot(np.arange(1854,2100), RM_Nyearly_4m_Mon(Results[source_run][member_run]['ts_global_freq']['pos_ext']['ts'] ), 'g', label = "Positive Extremes" , linewidth = 0.5, alpha=0.7 ) break #Plotting only the first ensemble member plt.xlabel( 'Time', fontsize = 14) plt.ylabel( "Frequency of Extremes (count/yr)", fontsize = 14) plt.grid( which='major', linestyle=':', linewidth='0.3', color='gray') fig14.savefig(web_path + 'Freq/ts_rm5yr_global_freq_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per))) # Saving the plots path_save = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/Global_Extremes/non-TCE/Freq/"%(source_run,member_run, variable_run) if os.path.isdir(path_save) == False: os.makedirs(path_save) fig14.savefig(path_save + 'ts_rm5yr_global_freq_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per))) plt.close(fig14) del fig14 Captions['fig14'] = " 5 year moving average of the frequency (counts) under positive and negative extremes.\ All ensemble members have same values" # Ploting 5 Year Rolling Mean figures (normalized) - pending: # ------------------------------------------------- # Function to normalize positive and negative freq def Norm_Two_TS(ts1, ts2): ts = np.concatenate((ts1,ts2)) norm_ts = norm(ts) norm_ts1 = norm_ts[:len(ts1)] norm_ts2 = norm_ts[len(ts1):] return norm_ts, norm_ts1, norm_ts2 # TEST p = np.array([ 8, 6, 7, 8, 6, 5, 4, 6]) n = np.array([ 5, 6, 6, 4, 5, 7, 8, 6]) _,norm_p, norm_n = Norm_Two_TS(p,n) norm_np = norm_n/norm_p norm_pn = norm_p/norm_n mask_np = np.ma.masked_greater(norm_np,1) mask_pn = np.ma.masked_greater(norm_pn,1) fig = plt.figure(tight_layout = True, figsize = (9,5), dpi = 400) #plt.plot( mask_np) #plt.plot( -mask_pn) #plt.plot(_) #plt.plot(norm_np) plt.plot(p/n) fig.savefig(web_path + 'ratio_test.pdf') # Dict to capture the ts of ratios pos to neg extremes of models ts_ratio_freq = {} for s_idx, source_run in enumerate(source_selected): fig15 = plt.figure(tight_layout = True, figsize = (9,5), dpi = 400) plt.title ("5yr Ratio of RM of TS annual global frequency (n/p) for %s when percentile is %d"%(source_run, per)) pd.plotting.deregister_matplotlib_converters() for m_idx, member_run in enumerate(common_members [source_run]): print (source_run,member_run) ts_ratio = np.divide ( RM_Nyearly_4m_Mon(Results[source_run][member_run]['ts_global_freq']['neg_ext']['ts'],10) , RM_Nyearly_4m_Mon(Results[source_run][member_run]['ts_global_freq']['pos_ext']['ts'],10) ) ts_ratio_freq[source_run] = ts_ratio plt.plot ( np.arange(1859,2100), ts_ratio, 'k', label = "Pos/Neg Extremes" , linewidth = 0.5, alpha=0.7 ) break #Plotting only the first ensemble member plt.xlabel( 'Time', fontsize = 14) #plt.xticks(ticks = dates_ticks, labels = years_ticks, fontsize = 12) plt.ylabel( "Frequency of Extremes (count/yr)", fontsize = 14) plt.grid( which='major', linestyle=':', linewidth='0.3', color='gray') fig15.savefig(web_path + 'Freq/ts_ratio_rm5yr_global_freq_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per))) # Saving the plots path_save = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/Global_Extremes/non-TCE/Freq/"%(source_run,member_run, variable_run) if os.path.isdir(path_save) == False: os.makedirs(path_save) fig15.savefig(path_save + 'ts_ratio_rm5yr_global_freq_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per))) plt.close(fig15) del fig15 Captions['fig15'] = " Shows the ratio the 
frequency of negative to positive extremes. Before taking the ratio a moving\ average of 10 years was taken. " # Area Affected by extremes: # ========================== # Ploting 5 Year Rolling Mean figures: # ------------------------------------ for s_idx, source_run in enumerate(source_selected): fig16 = plt.figure(tight_layout = True, figsize = (9,5), dpi = 400) plt.title ("5yr RM of annual TS of global area affected by\n %s extremes for %s when percentile is %d"%(variable_run.upper(), source_run, per)) pd.plotting.deregister_matplotlib_converters() for m_idx, member_run in enumerate(common_members [source_run]): print (source_run,member_run) plt.plot(np.arange(1854,2100), RM_Nyearly_4m_Mon(Results[source_run][member_run]['ts_global_area']['neg_ext']['ts'] /10**15), 'r', label = "Negative Extremes" , linewidth = 0.5, alpha=0.7 ) plt.plot(np.arange(1854,2100), RM_Nyearly_4m_Mon(Results[source_run][member_run]['ts_global_area']['pos_ext']['ts'] /10**15), 'g', label = "Positive Extremes" , linewidth = 0.5, alpha=0.7 ) break #Plotting only the first ensemble member plt.xlabel( 'Time', fontsize = 14) #plt.xticks(ticks = dates_ticks, labels = years_ticks, fontsize = 12) plt.ylabel( "Area Under Extremes ($10^{15}$ $m^2$)", fontsize = 14) plt.grid( which='major', linestyle=':', linewidth='0.3', color='gray') fig16.savefig(web_path + 'Area/ts_rm5yr_global_area_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per))) # Saving the plots path_save = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/Global_Extremes/non-TCE/Area/"%(source_run,member_run, variable_run) if os.path.isdir(path_save) == False: os.makedirs(path_save) fig16.savefig(path_save + 'ts_rm5yr_global_area_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per))) plt.close(fig16) del fig16 Captions['fig16'] = " 5 year moving average of the area under positive and negative extremes.\ All ensemble members have same values" # Ploting 10 Year Rolling Mean ratio (Pos/Neg) of area under extremes: # ------------------------------------------------------------------- # Dict to capture the ts of ratios pos to neg extremes of models ts_ratio_area = {} rm_ratio_yr = 10 for s_idx, source_run in enumerate(source_selected): fig17 = plt.figure(tight_layout = True, figsize = (9,5), dpi = 400) plt.title ("Ratio of RM (%d years) of annual TS of global area under\n%s extremes (n/p) for %s when percentile is %d"%(rm_ratio_yr, variable_run.upper(), source_run, per))
pd.plotting.deregister_matplotlib_converters()
pandas.plotting.deregister_matplotlib_converters
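# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original dataset row above): a
# minimal, self-contained example of the API this row completes,
# pandas.plotting.deregister_matplotlib_converters(). The call removes
# pandas' datetime/period unit converters from matplotlib, so later
# plots fall back to matplotlib's own date handling -- which is why the
# plotting loops above call it once before drawing time series with
# explicit datetime tick labels. All data and names below are invented
# for illustration only.
# ----------------------------------------------------------------------
import matplotlib
matplotlib.use("Agg")  # headless backend, keeps the sketch runnable anywhere
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

dates = pd.date_range("1850-01-01", periods=120, freq="MS")
values = np.random.randn(120).cumsum()

# With pandas' converters registered, Timestamp data is converted
# automatically when passed to matplotlib.
pd.plotting.register_matplotlib_converters()
fig, ax = plt.subplots()
ax.plot(dates, values)
plt.close(fig)

# After deregistering, matplotlib's original converters are restored;
# plain datetime objects still plot fine without pandas' unit handling.
pd.plotting.deregister_matplotlib_converters()
fig, ax = plt.subplots()
ax.plot(dates.to_pydatetime(), values)
plt.close(fig)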
#!/usr/bin/env python
# coding: utf-8

# #### Author : <NAME>
# #### Topic  : Multiple Linear Regression : Car Price Prediction
# #### Email  : <EMAIL>

# Multiple linear regression is the extension of simple linear regression that predicts a response using two or more features. Mathematically:
#
# Consider a dataset with n observations, p features (independent variables) and one response y (dependent variable). The regression line for p features is
#
# h(x_i) = b_0 + b_1*x_i1 + b_2*x_i2 + ... + b_p*x_ip
#
# Here, h(x_i) is the predicted response value and b_0, b_1, b_2, ..., b_p are the regression coefficients.
#
# Multiple linear regression models always include the error in the data, known as the residual error, which changes the calculation as follows:
#
# h(x_i) = b_0 + b_1*x_i1 + b_2*x_i2 + ... + b_p*x_ip + e_i
#
# We can also write the above equation as
#
# y_i = h(x_i) + e_i,   or   e_i = y_i - h(x_i)

# #### Import Libraries

# In[ ]:

import numpy as np
import pandas as pd

# #### Import Datasets

# In[ ]:

df = pd.read_csv('datasets_794035_1363047_carprices.csv')
df

# In[ ]:

### Dummy Variables
dummies =
pd.get_dummies(df['Car Model'])
pandas.get_dummies
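# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original dataset row above):
# one-hot encoding a categorical column with pandas.get_dummies and
# dropping one indicator column to avoid perfect multicollinearity
# (the "dummy variable trap") in a linear model. Only 'Car Model' comes
# from the row above; the other column names and values are assumptions
# made up for this example.
# ----------------------------------------------------------------------
import pandas as pd
from sklearn.linear_model import LinearRegression

cars = pd.DataFrame({
    "Car Model": ["BMW X5", "Audi A5", "Mercedes Benz C class", "BMW X5"],
    "Mileage": [69000, 35000, 57000, 22500],
    "Sell Price($)": [18000, 31500, 26100, 40000],
})

# k categories -> k-1 indicator columns when drop_first=True
dummies = pd.get_dummies(cars["Car Model"], drop_first=True)

X = pd.concat([cars[["Mileage"]], dummies], axis=1)
y = cars["Sell Price($)"]

model = LinearRegression().fit(X, y)
print(model.coef_, model.intercept_)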
# -*- coding: utf-8 -*- """ Created on Mon Nov 23 16:23:54 2020 @author: huangyuyao """ import torch from torch.utils.data import Dataset from sklearn.preprocessing import MaxAbsScaler from torch.utils.data import DataLoader import os import numpy as np import pandas as pd import scipy from glob import glob from scipy.io import mmread from sklearn.preprocessing import LabelEncoder import time from torchvision import transforms, datasets from torch import nn, optim from torch.nn import init from tqdm import trange class SingleCellDataset(Dataset): def __init__(self, path, low = 0, high = 0.9, min_peaks = 0, transpose = False, transforms=[]): self.data, self.peaks, self.barcode = load_data(path, transpose) if min_peaks > 0: self.filter_cell(min_peaks) self.filter_peak(low, high) for transform in transforms: self.data = transform(self.data) self.n_cells, self.n_peaks = self.data.shape self.shape = self.data.shape def __len__(self): return self.data.shape[0] def __getitem__(self, index): data = self.data[index]; if type(data) is not np.ndarray: data = data.toarray().squeeze() return data def info(self): print("Dataset Info") print('Cell number: {}\nPeak number: {}'.format(self.n_cells, self.n_peaks)) def filter_peak(self, low=0, high=0.9): total_cells = self.data.shape[0] count = np.array((self.data >0).sum(0)).squeeze() indices = np.where((count > low*total_cells) & (count < high*total_cells))[0] self.data = self.data[:, indices] self.peaks = self.peaks[indices] print('filterpeak------') def filter_cell(self, min_peaks=0): if min_peaks < 1: min_peaks = len(self.peaks)*min_peaks indices = np.where(np.sum(self.data>0, 1)>=min_peaks)[0] self.data = self.data[indices] self.barcode = self.barcode[indices] p = type(self.barcode) print('filtercell------') print(p) def write_data(self,path): print('tmp dataset saving') data_ = self.data data1 = data_.todense() data =data1.T #print(type(data)) recon_x = pd.DataFrame(data, index=self.peaks, columns=self.barcode) recon_x.to_csv(os.path.join(path, 'tmp_data.txt'), sep='\t') def load_data(path, transpose=False): print("Loading data ...") t0 = time.time() if os.path.isdir(path): count, peaks, barcode = read_mtx(path) elif os.path.isfile(path): count, peaks, barcode = read_csv(path) else: raise ValueError("File {} not exists".format(path)) if transpose: count = count.transpose() print('Original data contains {} cells x {} peaks'.format(*count.shape)) assert (len(barcode), len(peaks)) == count.shape print("Finished loading takes {:.2f} min".format((time.time()-t0)/60)) return count, peaks, barcode def read_mtx(path): for filename in glob(path+'/*'): basename = os.path.basename(filename) if (('count' in basename) or ('matrix' in basename)) and ('mtx' in basename): count = mmread(filename).T.tocsr().astype('float32') elif 'barcode' in basename: barcode = pd.read_csv(filename, sep='\t', header=None)[0].values elif 'gene' in basename or 'peak' in basename: feature =
pd.read_csv(filename, sep='\t', header=None)
pandas.read_csv
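# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original dataset row above):
# reading a headerless, tab-separated single-column file (e.g. a
# 10x-style barcodes.tsv) with pandas.read_csv, in the same way the
# read_mtx() helper above pulls barcodes and peak names. The file is
# written to a temporary directory so the sketch is self-contained;
# the path and barcode strings are illustrative.
# ----------------------------------------------------------------------
import os
import tempfile
import pandas as pd

tmp_dir = tempfile.mkdtemp()
barcode_file = os.path.join(tmp_dir, "barcodes.tsv")
with open(barcode_file, "w") as fh:
    fh.write("AAACCTGAGAAACCAT-1\nAAACCTGAGAAACCGC-1\nAAACCTGAGAAACCTA-1\n")

# header=None: the file has no header row; sep='\t': tab-separated.
# Column 0 holds the barcodes; .values returns them as a numpy array.
barcodes = pd.read_csv(barcode_file, sep="\t", header=None)[0].values
print(barcodes, barcodes.shape)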
# Part of the psychopy_ext library # Copyright 2010-2014 <NAME> # The program is distributed under the terms of the GNU General Public License, # either version 3 of the License, or (at your option) any later version. """Creates reports""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys, glob, os, shutil #from cStringIO import StringIO import seaborn as sns class Report(object): def __init__(self, info=None, rp=None, path='', imgdir='img', imgext='svg', actions='make', output='html'): self.info = info self.rp = rp self.path = path self.imgdir = imgdir self.imgext = imgext self.actions = actions self.replist = [] src = os.path.abspath(os.path.dirname(__file__)) self.resources = os.path.join(src, 'resources/') def open(self, reports=None): if not os.path.isdir(self.path): os.makedirs(self.path) else: for root, dirs, files in os.walk(self.path): for d in dirs: try: shutil.rmtree(os.path.join(root, d)) except: pass # for f in glob.glob(self.resources + '*'): # if os.path.isfile(f): # if os.path.basename(f) != 'index.html': # shutil.copy2(f) # else: # dst = os.path.join(self.path, os.path.basename(f)) # shutil.copytree(f, dst, # ignore=shutil.ignore_patterns('index.html')) with open(self.resources + 'index.html', 'rb') as tmp: self.temp_begin, self.temp_end = tmp.read().split('####REPLACE####') self.htmlfile = open(self.path + 'index.html', 'wb') self.write(self.temp_begin) # old_stdout = sys.stdout # mystdout = StringIO() # sys.stdout = mystdout # # if reports is None: # self.report() # else: # for name, report in reports: # self.writeh(name, h='h1') # report.report() def close(self): # sys.stdout = old_stdout self.write(self.temp_end) self.htmlfile.close() def write(self, text): self.htmlfile.write(text) def writeh(self, text, h='h1'): if isinstance(h, int): h = 'h' + str(h) self.htmlfile.write('<%s>%s</%s>\n' % (h, text, h)) def writeimg(self, names, caption=None, win=None): if isinstance(names, (str, unicode)): names = [names] fname = '_'.join(names) img_path = os.path.join(self.path, self.imgdir) fpath = os.path.join(img_path, fname + '.' + self.imgext) relpath = os.path.join(self.imgdir, fname + '.' + self.imgext) if not os.path.isdir(img_path): os.makedirs(img_path) if win is not None: win.saveMovieFrames(fpath) else: sns.plt.savefig(fpath, dpi=300, bbox_inches='tight') if caption is None: caption = ' '.join(names) self.htmlfile.write( '<figure>\n' ' <img src="%s" />\n' ' <figcaption><strong>Figure.</strong> %s</figcaption>\n' '</figure>\n' % (relpath, caption) ) def writetable(self, agg, caption='', fmt=None): if fmt is None: fmt = '%.3f' fmt_lam = lambda x: fmt % x import pandas table =
pandas.DataFrame(agg)
pandas.DataFrame
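# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original dataset row above):
# wrapping an aggregated result in a pandas.DataFrame and rendering it
# as an HTML table with a fixed float format, roughly what a
# writetable()-style helper could do with its `agg` argument and `fmt`
# string. The data, index labels, and column names are invented.
# ----------------------------------------------------------------------
import numpy as np
import pandas as pd

agg = {"cond_A": np.array([0.1234, 0.5678]), "cond_B": np.array([0.9012, 1.0])}
table = pd.DataFrame(agg, index=["subj1", "subj2"])

fmt = "%.3f"
fmt_lam = lambda x: fmt % x                 # same idea as fmt_lam above
html = table.to_html(float_format=fmt_lam)  # to_html accepts a callable formatter
print(html.splitlines()[0])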
# -*- coding: utf-8 -*- """ This module contains all the remote tests. The data for these tests is requested to ESA NEOCC portal. * Project: NEOCC portal Python interface * Property: European Space Agency (ESA) * Developed by: Elecnor Deimos * Author: <NAME> * Date: 02-11-2021 © Copyright [European Space Agency][2021] All rights reserved """ import io import os import re import random import pytest import pandas as pd import pandas.testing as pdtesting import pandas.api.types as ptypes import requests from astroquery.esa.neocc.__init__ import conf from astroquery.esa.neocc import neocc, lists, tabs import astropy # Import BASE URL and TIMEOUT API_URL = conf.API_URL DATA_DIR = os.path.join(os.path.dirname(__file__), 'data') TIMEOUT = conf.TIMEOUT VERIFICATION = conf.SSL_CERT_VERIFICATION @pytest.mark.remote_data class TestLists: """Class which contains the unitary tests for lists module. """ # Dictionary for lists lists_dict = { "nea_list": 'allneo.lst', "updated_nea": 'updated_nea.lst', "monthly_update": 'monthly_update.done', "risk_list": 'esa_risk_list', "risk_list_special": 'esa_special_risk_list', "close_approaches_upcoming": 'esa_upcoming_close_app', "close_approaches_recent": 'esa_recent_close_app', "priority_list": 'esa_priority_neo_list', "priority_list_faint": 'esa_faint_neo_list', "close_encounter" : 'close_encounter2.txt', "impacted_objects" : 'impactedObjectsList.txt', "neo_catalogue_current" : 'neo_kc.cat', "neo_catalogue_middle" : 'neo_km.cat' } def test_get_list_url(self): """Test for checking the URL termination for requested lists. Check invalid list name raise KeyError. """ # Valid inputs valid_names = ["nea_list", "updated_nea", "monthly_update", "risk_list", "risk_list_special", "close_approaches_upcoming", "close_approaches_recent", "priority_list", "priority_list_faint", "close_encounter", "impacted_objects"] # Invalid inputs bad_names = ["ASedfe", "%&$", "ÁftR+", 154] # Assert for valid names for element in valid_names: assert lists.get_list_url(element) == \ self.lists_dict[element] # Assert for invalid names for elements in bad_names: with pytest.raises(KeyError): lists.get_list_url(elements) def test_get_list_data(self): """Check data obtained is pandas.DataFrame or pandas.Series """ # Check pd.Series output list_series = ["nea_list", "updated_nea", "monthly_update"] for series in list_series: assert isinstance(lists.get_list_data(self.\ lists_dict[series], series), pd.Series) # Check pd.DataFrame output list_dfs = ["risk_list", "risk_list_special", "close_approaches_upcoming", "close_approaches_recent", "priority_list", "close_encounter", "priority_list_faint", "impacted_objects"] for dfs in list_dfs: assert isinstance(lists.get_list_data(self.\ lists_dict[dfs], dfs), pd.DataFrame) def test_parse_list(self): """Check data obtained is pandas.DataFrame or pandas.Series """ # Check pd.Series output url_series = ["nea_list", "updated_nea", "monthly_update"] for url in url_series: # Get data from URL data_list = requests.get(API_URL + self.lists_dict[url], timeout=TIMEOUT, verify=VERIFICATION).content # Decode the data using UTF-8 data_list_d = io.StringIO(data_list.decode('utf-8')) assert isinstance(lists.parse_list(url, data_list_d), pd.Series) # Check pd.DataFrame output url_dfs = ["risk_list", "risk_list_special", "close_approaches_upcoming", "close_approaches_recent", "priority_list", "close_encounter", "priority_list_faint", "impacted_objects"] for url in url_dfs: # Get data from URL data_list = requests.get(API_URL + self.lists_dict[url], timeout=TIMEOUT, 
verify=VERIFICATION).content # Decode the data using UTF-8 data_list_d = io.StringIO(data_list.decode('utf-8')) assert isinstance(lists.parse_list(url, data_list_d), pd.DataFrame) # Invalid inputs bad_names = ["ASedfe", "%&$", "ÁftR+", 154] # Assert for invalid names for elements in bad_names: with pytest.raises(KeyError): lists.parse_list(elements, data_list_d) def test_parse_nea(self): """Check data: nea list, updated nea list and monthly update """ url_series = ["nea_list", "updated_nea", "monthly_update"] for url in url_series: # Get data from URL data_list = requests.get(API_URL + self.lists_dict[url], timeout=TIMEOUT, verify=VERIFICATION).content # Decode the data using UTF-8 data_list_d = io.StringIO(data_list.decode('utf-8')) # Parse using parse_nea new_list = lists.parse_nea(data_list_d) # Assert is a pandas Series assert isinstance(new_list, pd.Series) # Assert is not empty assert not new_list.empty # List of all NEAs if url == "nea_list": filename = os.path.join(DATA_DIR, self.lists_dict[url]) content = open(filename, 'r') nea_list = pd.read_csv(content, header=None) # Remove whitespaces nea_list = nea_list[0].str.strip().replace(r'\s+', ' ', regex=True)\ .str.replace('# ', '') # Check size of the data frame assert len(new_list.index) > 20000 # Check 74 first elements are equal from reference # data (since provisional designator may change) pdtesting.assert_series_equal(new_list[0:74], nea_list[0:74]) else: # Check date format DDD MMM DD HH:MM:SS UTC YYYY assert re.match(r'\w{3} \w{3} \d{2} \d{2}:\d{2}:\d{2} ' r'\w{3} \d{4}', new_list.iloc[0]) def test_parse_risk(self): """Check data: risk_list, risk_list_special """ url_risks = ['risk_list', 'risk_list_special'] # Columns of risk lists risk_columns = ['Object Name', 'Diameter in m', '*=Y', 'Date/Time', 'IP max', 'PS max', 'TS', 'Vel in km/s', 'First year', 'Last year', 'IP cum', 'PS cum'] risk_special_columns = risk_columns[0:8] for url in url_risks: # Get data from URL data_list = requests.get(API_URL + self.lists_dict[url], timeout=TIMEOUT, verify=VERIFICATION).content # Decode the data using UTF-8 data_list_d = io.StringIO(data_list.decode('utf-8')) # Parse using parse_nea new_list = lists.parse_risk(data_list_d) # Assert is a pandas DataFrame assert isinstance(new_list, pd.DataFrame) if url == 'risk_list': # Assert dataframe is not empty, columns names, length assert not new_list.empty assert (new_list.columns == risk_columns).all() assert len(new_list.index) > 1000 # Assert columns data types # Floats float_cols = ['Diameter in m', 'IP max', 'PS max', 'Vel in km/s', 'IP cum', 'PS cum'] assert all(ptypes.is_float_dtype(new_list[cols1])\ for cols1 in float_cols) # int64 int_cols = ['TS', 'First year', 'Last year'] assert all(ptypes.is_int64_dtype(new_list[cols2])\ for cols2 in int_cols) # Object object_cols = ['Object Name', '*=Y'] assert all(ptypes.is_object_dtype(new_list[cols3])\ for cols3 in object_cols) # Datetime assert ptypes.is_datetime64_ns_dtype( new_list['Date/Time']) else: # Currently risk special list is empty assert new_list.empty assert (new_list.columns == risk_special_columns).all() def test_parse_clo(self): """Check data: close_approaches_upcoming, close_approaches_recent """ url_close = ['close_approaches_upcoming', 'close_approaches_recent'] # Columns of close approaches lists close_columns = ['Object Name', 'Date', 'Miss Distance in km', 'Miss Distance in au', 'Miss Distance in LD', 'Diameter in m', '*=Yes', 'H', 'Max Bright', 'Rel. 
vel in km/s'] for url in url_close: # Get data from URL data_list = requests.get(API_URL + self.lists_dict[url], timeout=TIMEOUT, verify=VERIFICATION).content # Decode the data using UTF-8 data_list_d = io.StringIO(data_list.decode('utf-8')) # Parse using parse_nea new_list = lists.parse_clo(data_list_d) # Assert is a pandas DataFrame assert isinstance(new_list, pd.DataFrame) # Assert dataframe is not empty, columns names and length assert not new_list.empty assert (new_list.columns == close_columns).all() assert len(new_list.index) > 100 # Assert Connection Error. In case of internal server error # the request provided an empty file foo_error = io.StringIO('This site cant be reached\n' 'domain.com regused to connect\n' 'Search Google for domain\n' 'ERR_CONNECTION_REFUSED') with pytest.raises(ConnectionError): lists.parse_clo(foo_error) # Assert columns data types # Floats float_cols = ['Miss Distance in au', 'Miss Distance in LD', 'Diameter in m', 'H', 'Max Bright', 'Rel. vel in km/s'] assert all(ptypes.is_float_dtype(new_list[cols1])\ for cols1 in float_cols) # int64 assert ptypes.is_int64_dtype(new_list['Miss Distance in km']) # Object object_cols = ['Object Name', '*=Yes'] assert all(ptypes.is_object_dtype(new_list[cols3])\ for cols3 in object_cols) # Datetime assert ptypes.is_datetime64_ns_dtype(new_list['Date']) def test_parse_pri(self): """Check data: priority_list, priority_list_faint """ url_priority = ['priority_list', 'priority_list_faint'] # Columns of close approaches lists priority_columns = ['Priority', 'Object', 'R.A. in arcsec', 'Decl. in deg', 'Elong. in deg', 'V in mag', 'Sky uncert.', 'End of Visibility'] for url in url_priority: # Get data from URL data_list = requests.get(API_URL + self.lists_dict[url], timeout=TIMEOUT, verify=VERIFICATION).content # Decode the data using UTF-8 data_list_d = io.StringIO(data_list.decode('utf-8')) # Parse using parse_nea new_list = lists.parse_pri(data_list_d) # Assert is a pandas DataFrame assert isinstance(new_list, pd.DataFrame) # Assert dataframe is not empty, columns names and length assert not new_list.empty assert (new_list.columns == priority_columns).all() assert len(new_list.index) > 100 # Assert columns data types # Floats float_cols = ['R.A. in arcsec', 'Decl. in deg', 'V in mag'] assert all(ptypes.is_float_dtype(new_list[cols1])\ for cols1 in float_cols) # int64 int_cols = ['Priority', 'Elong. 
in deg', 'Sky uncert.'] assert all(ptypes.is_int64_dtype(new_list[cols2])\ for cols2 in int_cols) # Object assert ptypes.is_object_dtype(new_list['Object']) # Datetime assert ptypes.is_datetime64_ns_dtype( new_list['End of Visibility']) def test_parse_encounter(self): """Check data: close_encounter """ url = 'close_encounter' # Columns of close approaches lists encounter_columns = ['Name/desig', 'Planet', 'Date', 'Time approach', 'Time uncert', 'Distance', 'Minimum distance', 'Distance uncertainty', 'Width', 'Stretch', 'Probability', 'Velocity', 'Max Mag'] # Get data from URL data_list = requests.get(API_URL + self.lists_dict[url], timeout=TIMEOUT, verify=VERIFICATION).content # Decode the data using UTF-8 data_list_d = io.StringIO(data_list.decode('utf-8')) # Parse using parse_nea new_list = lists.parse_encounter(data_list_d) # Assert is a pandas DataFrame assert isinstance(new_list, pd.DataFrame) # Assert dataframe is not empty, columns names and length assert not new_list.empty assert (new_list.columns == encounter_columns).all() assert len(new_list.index) > 100000 # Assert columns data types # Floats float_cols = encounter_columns[3:] assert all(
ptypes.is_float_dtype(new_list[cols1])
pandas.api.types.is_float_dtype
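# A minimal, self-contained sketch of the dtype assertions used in the tests
# above, run against a small hand-made frame; the column names and values here
# are illustrative only and are not taken from the NEOCC lists.
import pandas as pd
import pandas.api.types as ptypes

toy = pd.DataFrame({
    "Diameter in m": [12.5, 310.0],
    "TS": [0, 1],
    "Object Name": ["2021 AB", "99942 Apophis"],
    "Date/Time": pd.to_datetime(["2029-04-13", "2036-04-13"]),
})

assert ptypes.is_float_dtype(toy["Diameter in m"])
assert ptypes.is_int64_dtype(toy["TS"])
assert ptypes.is_object_dtype(toy["Object Name"])
assert ptypes.is_datetime64_ns_dtype(toy["Date/Time"])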
# -*- coding:utf-8 -*- # /usr/bin/env python """ Date: 2020/4/18 21:27 Desc: 东方财富-数据中心-年报季报 东方财富-数据中心-年报季报-业绩预告 http://data.eastmoney.com/bbsj/202003/yjyg.html 东方财富-数据中心-年报季报-预约披露时间 http://data.eastmoney.com/bbsj/202003/yysj.html """ import demjson import pandas as pd import requests from tqdm import tqdm def stock_em_yjkb(date: str = "20200331") -> pd.DataFrame: """ 东方财富-数据中心-年报季报-业绩快报 http://data.eastmoney.com/bbsj/202003/yjkb.html :param date: "20200331", "20200630", "20200930", "20201231"; 从 20100331 开始 :type date: str :return: 业绩快报 :rtype: pandas.DataFrame """ url = "http://datacenter.eastmoney.com/api/data/get" params = { 'st': 'UPDATE_DATE,SECURITY_CODE', 'sr': '-1,-1', 'ps': '5000', 'p': '1', 'type': 'RPT_FCI_PERFORMANCEE', 'sty': 'ALL', 'token': '<KEY>', 'filter': f"(REPORT_DATE='{'-'.join([date[:4], date[4:6], date[6:]])}')" } r = requests.get(url, params=params) data_json = r.json() temp_df = pd.DataFrame(data_json["result"]["data"]) temp_df.reset_index(inplace=True) temp_df['index'] = range(1, len(temp_df)+1) temp_df.columns = [ '序号', '股票代码', '股票简称', '市场板块', '_', '证券类型', '_', '_', '公告日期', '每股收益', '营业收入-营业收入', '营业收入-去年同期', '净利润-净利润', '净利润-去年同期', '每股净资产', '净资产收益率', '营业收入-同比增长', '净利润-同比增长', '营业收入-季度环比增长', '净利润-季度环比增长', '所处行业', '_', '_', '_', '_', '_', '_', '_', '_', ] temp_df = temp_df[[ '序号', '股票代码', '股票简称', '每股收益', '营业收入-营业收入', '营业收入-去年同期', '营业收入-同比增长', '营业收入-季度环比增长', '净利润-净利润', '净利润-去年同期', '净利润-同比增长', '净利润-季度环比增长', '每股净资产', '净资产收益率', '所处行业', '公告日期', '市场板块', '证券类型', ]] return temp_df def stock_em_yjyg(date: str = "20200331") -> pd.DataFrame: """ 东方财富-数据中心-年报季报-业绩预告 http://data.eastmoney.com/bbsj/202003/yjyg.html :param date: "2020-03-31", "2020-06-30", "2020-09-30", "2020-12-31"; 从 2008-12-31 开始 :type date: str :return: 业绩预告 :rtype: pandas.DataFrame """ url = "http://datacenter.eastmoney.com/securities/api/data/v1/get" params = { 'sortColumns': 'NOTICE_DATE,SECURITY_CODE', 'sortTypes': '-1,-1', 'pageSize': '50', 'pageNumber': '1', 'reportName': 'RPT_PUBLIC_OP_NEWPREDICT', 'columns': 'ALL', 'token': '<KEY>', 'filter': f" (REPORT_DATE='{'-'.join([date[:4], date[4:6], date[6:]])}')" } r = requests.get(url, params=params) data_json = r.json() big_df = pd.DataFrame() total_page = data_json['result']['pages'] for page in tqdm(range(1, total_page+1)): params = { 'sortColumns': 'NOTICE_DATE,SECURITY_CODE', 'sortTypes': '-1,-1', 'pageSize': '50', 'pageNumber': page, 'reportName': 'RPT_PUBLIC_OP_NEWPREDICT', 'columns': 'ALL', 'token': '<KEY>', 'filter': f" (REPORT_DATE='{'-'.join([date[:4], date[4:6], date[6:]])}')" } r = requests.get(url, params=params) data_json = r.json() temp_df = pd.DataFrame(data_json["result"]["data"]) big_df = big_df.append(temp_df, ignore_index=True) big_df.reset_index(inplace=True) big_df['index'] = range(1, len(big_df)+1) big_df.columns = [ '序号', '_', '股票代码', '股票简称', '_', '公告日期', '报告日期', '_', '预测指标', '_', '_', '_', '_', '业绩变动', '业绩变动原因', '预告类型', '上年同期值', '_', '_', '_', '_', '业绩变动幅度', '预测数值', '_', '_', ] big_df = big_df[[ '序号', '股票代码', '股票简称', '预测指标', '业绩变动', '预测数值', '业绩变动幅度', '业绩变动原因', '预告类型', '上年同期值', '公告日期', ]] return big_df def stock_em_yysj(date: str = "20200331") -> pd.DataFrame: """ 东方财富-数据中心-年报季报-预约披露时间 http://data.eastmoney.com/bbsj/202003/yysj.html :param date: "20190331", "20190630", "20190930", "20191231"; 从 20081231 开始 :type date: str :return: 指定时间的上市公司预约披露时间数据 :rtype: pandas.DataFrame """ url = "http://dcfm.eastmoney.com/em_mutisvcexpandinterface/api/js/get" params = { "type": "YJBB21_YYPL", "token": "<PASSWORD>f2f4f091e459a279469fe49eca5", "st": 
"frdate", "sr": "1", "p": "1", "ps": "5000", "js": "var HXutCoUP={pages:(tp),data: (x),font:(font)}", "filter": f"(securitytypecode='058001001')(reportdate=^{'-'.join([date[:4], date[4:6], date[6:]])}^)", "rt": "52907209", } r = requests.get(url, params=params) data_text = r.text data_json = demjson.decode(data_text[data_text.find("{"):]) temp_df =
pd.DataFrame(data_json["data"])
pandas.DataFrame
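# A hedged sketch of the JSON-records-to-DataFrame pattern used in the scraper
# above; the records and the renamed (Chinese) column labels are invented for
# illustration and do not come from the Eastmoney API.
import pandas as pd

records = [
    {"SECURITY_CODE": "000001", "SECURITY_NAME_ABBR": "平安银行", "BASIC_EPS": 0.45},
    {"SECURITY_CODE": "000002", "SECURITY_NAME_ABBR": "万科A", "BASIC_EPS": 0.32},
]
temp_df = pd.DataFrame(records)
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.columns = ["序号", "股票代码", "股票简称", "每股收益"]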
# -*- coding: utf-8 -*- """ Created on Wed Jun 27 19:49:20 2018 @author: Nagasudhir pandas joing dataframes https://pandas.pydata.org/pandas-docs/stable/merging.html#joining-on-index https://www.dataquest.io/blog/excel-and-pandas/ """ import sys # making the parent directory as the main path for imports sys.path.append("..") from report_fetch_modules import login_logout as login from report_fetch_modules import psp_fetch import pandas as pd import datetime as dt import os import numpy as np inputFileFolder = r'C:\Users\Nagasudhir\Documents\Python Projects\python_web_reports_cient\app\ui_modules' # get the directory of the script file # print(os.path.dirname(os.path.realpath(__file__))) if('__file__' in globals()): inputFileFolder = os.path.dirname(os.path.realpath(__file__)) inputFilename = os.path.join(inputFileFolder, 'input_file.xlsx') # get the analysis Key Values analysisKeyValuesInputDF = pd.read_excel(inputFilename,sheetname='input') # read the array of input dfs for analysis analysisKeyValuesInputDFArray = [] inputSheetNames = [] xlsx = pd.ExcelFile(inputFilename) for sheetName in xlsx.sheet_names: if(sheetName.startswith("input")): # sheet name starts with input, hence push to inout dfs array analysisKeyValuesInputDFArray.append(pd.read_excel(inputFilename,sheetname=sheetName)) inputSheetNames.append(sheetName) # read the config params from the input.xlsx file configDF =
pd.read_excel(inputFilename,sheetname='config')
pandas.read_excel
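# A small usage sketch of the per-sheet reading done above; note that recent
# pandas spells the keyword `sheet_name=` where this script passes `sheetname=`.
# The workbook name and sheet names are placeholders.
import pandas as pd

xlsx = pd.ExcelFile("input_file.xlsx")  # placeholder path
input_dfs = {
    name: pd.read_excel(xlsx, sheet_name=name)
    for name in xlsx.sheet_names
    if name.startswith("input")
}
config_df = pd.read_excel(xlsx, sheet_name="config")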
""" Module parse to/from Excel """ # --------------------------------------------------------------------- # ExcelFile class import abc from datetime import date, datetime, time, timedelta from distutils.version import LooseVersion from io import UnsupportedOperation import os from textwrap import fill import warnings import numpy as np import pandas._libs.json as json import pandas.compat as compat from pandas.compat import ( OrderedDict, add_metaclass, lrange, map, range, string_types, u, zip) from pandas.errors import EmptyDataError from pandas.util._decorators import Appender, deprecate_kwarg from pandas.core.dtypes.common import ( is_bool, is_float, is_integer, is_list_like) from pandas.core import config from pandas.core.frame import DataFrame from pandas.io.common import ( _NA_VALUES, _is_url, _stringify_path, _urlopen, _validate_header_arg, get_filepath_or_buffer) from pandas.io.formats.printing import pprint_thing from pandas.io.parsers import TextParser __all__ = ["read_excel", "ExcelWriter", "ExcelFile"] _writer_extensions = ["xlsx", "xls", "xlsm"] _writers = {} _read_excel_doc = """ Read an Excel table into a pandas DataFrame Parameters ---------- io : string, path object (pathlib.Path or py._path.local.LocalPath), file-like object, pandas ExcelFile, or xlrd workbook. The string could be a URL. Valid URL schemes include http, ftp, s3, gcs, and file. For file URLs, a host is expected. For instance, a local file could be file://localhost/path/to/workbook.xlsx sheet_name : string, int, mixed list of strings/ints, or None, default 0 Strings are used for sheet names, Integers are used in zero-indexed sheet positions. Lists of strings/integers are used to request multiple sheets. Specify None to get all sheets. str|int -> DataFrame is returned. list|None -> Dict of DataFrames is returned, with keys representing sheets. Available Cases * Defaults to 0 -> 1st sheet as a DataFrame * 1 -> 2nd sheet as a DataFrame * "Sheet1" -> 1st sheet as a DataFrame * [0,1,"Sheet5"] -> 1st, 2nd & 5th sheet as a dictionary of DataFrames * None -> All sheets as a dictionary of DataFrames sheetname : string, int, mixed list of strings/ints, or None, default 0 .. deprecated:: 0.21.0 Use `sheet_name` instead header : int, list of ints, default 0 Row (0-indexed) to use for the column labels of the parsed DataFrame. If a list of integers is passed those row positions will be combined into a ``MultiIndex``. Use None if there is no header. names : array-like, default None List of column names to use. If file contains no header row, then you should explicitly pass header=None index_col : int, list of ints, default None Column (0-indexed) to use as the row labels of the DataFrame. Pass None if there is no such column. If a list is passed, those columns will be combined into a ``MultiIndex``. If a subset of data is selected with ``usecols``, index_col is based on the subset. parse_cols : int or list, default None .. deprecated:: 0.21.0 Pass in `usecols` instead. usecols : int, str, list-like, or callable default None * If None, then parse all columns, * If int, then indicates last column to be parsed .. deprecated:: 0.24.0 Pass in a list of ints instead from 0 to `usecols` inclusive. * If string, then indicates comma separated list of Excel column letters and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of both sides. * If list of ints, then indicates list of column numbers to be parsed. * If list of strings, then indicates list of column names to be parsed. .. 
versionadded:: 0.24.0 * If callable, then evaluate each column name against it and parse the column if the callable returns ``True``. .. versionadded:: 0.24.0 squeeze : boolean, default False If the parsed data only contains one column then return a Series dtype : Type name or dict of column -> type, default None Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32} Use `object` to preserve data as stored in Excel and not interpret dtype. If converters are specified, they will be applied INSTEAD of dtype conversion. .. versionadded:: 0.20.0 engine : string, default None If io is not a buffer or path, this must be set to identify io. Acceptable values are None or xlrd converters : dict, default None Dict of functions for converting values in certain columns. Keys can either be integers or column labels, values are functions that take one input argument, the Excel cell content, and return the transformed content. true_values : list, default None Values to consider as True .. versionadded:: 0.19.0 false_values : list, default None Values to consider as False .. versionadded:: 0.19.0 skiprows : list-like Rows to skip at the beginning (0-indexed) nrows : int, default None Number of rows to parse .. versionadded:: 0.23.0 na_values : scalar, str, list-like, or dict, default None Additional strings to recognize as NA/NaN. If dict passed, specific per-column NA values. By default the following values are interpreted as NaN: '""" + fill("', '".join(sorted(_NA_VALUES)), 70, subsequent_indent=" ") + """'. keep_default_na : bool, default True If na_values are specified and keep_default_na is False the default NaN values are overridden, otherwise they're appended to. verbose : boolean, default False Indicate number of NA values placed in non-numeric columns thousands : str, default None Thousands separator for parsing string columns to numeric. Note that this parameter is only necessary for columns stored as TEXT in Excel, any numeric columns will automatically be parsed, regardless of display format. comment : str, default None Comments out remainder of line. Pass a character or characters to this argument to indicate comments in the input file. Any data between the comment string and the end of the current line is ignored. skip_footer : int, default 0 .. deprecated:: 0.23.0 Pass in `skipfooter` instead. skipfooter : int, default 0 Rows at the end to skip (0-indexed) convert_float : boolean, default True convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric data will be read in as floats: Excel stores all numbers as floats internally mangle_dupe_cols : boolean, default True Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than 'X'...'X'. Passing in False will cause data to be overwritten if there are duplicate names in the columns. Returns ------- parsed : DataFrame or Dict of DataFrames DataFrame from the passed in Excel file. See notes in sheet_name argument for more information on when a dict of DataFrames is returned. Examples -------- An example DataFrame written to a local file >>> df_out = pd.DataFrame([('string1', 1), ... ('string2', 2), ... ('string3', 3)], ... 
columns=['Name', 'Value']) >>> df_out Name Value 0 string1 1 1 string2 2 2 string3 3 >>> df_out.to_excel('tmp.xlsx') The file can be read using the file name as string or an open file object: >>> pd.read_excel('tmp.xlsx') Name Value 0 string1 1 1 string2 2 2 string3 3 >>> pd.read_excel(open('tmp.xlsx','rb')) Name Value 0 string1 1 1 string2 2 2 string3 3 Index and header can be specified via the `index_col` and `header` arguments >>> pd.read_excel('tmp.xlsx', index_col=None, header=None) 0 1 2 0 NaN Name Value 1 0.0 string1 1 2 1.0 string2 2 3 2.0 string3 3 Column types are inferred but can be explicitly specified >>> pd.read_excel('tmp.xlsx', dtype={'Name':str, 'Value':float}) Name Value 0 string1 1.0 1 string2 2.0 2 string3 3.0 True, False, and NA values, and thousands separators have defaults, but can be explicitly specified, too. Supply the values you would like as strings or lists of strings! >>> pd.read_excel('tmp.xlsx', ... na_values=['string1', 'string2']) Name Value 0 NaN 1 1 NaN 2 2 string3 3 Comment lines in the excel input file can be skipped using the `comment` kwarg >>> df = pd.DataFrame({'a': ['1', '#2'], 'b': ['2', '3']}) >>> df.to_excel('tmp.xlsx', index=False) >>> pd.read_excel('tmp.xlsx') a b 0 1 2 1 #2 3 >>> pd.read_excel('tmp.xlsx', comment='#') a b 0 1 2 """ def register_writer(klass): """Adds engine to the excel writer registry. You must use this method to integrate with ``to_excel``. Also adds config options for any new ``supported_extensions`` defined on the writer.""" if not compat.callable(klass): raise ValueError("Can only register callables as engines") engine_name = klass.engine _writers[engine_name] = klass for ext in klass.supported_extensions: if ext.startswith('.'): ext = ext[1:] if ext not in _writer_extensions: config.register_option("io.excel.{ext}.writer".format(ext=ext), engine_name, validator=str) _writer_extensions.append(ext) def _get_default_writer(ext): _default_writers = {'xlsx': 'openpyxl', 'xlsm': 'openpyxl', 'xls': 'xlwt'} try: import xlsxwriter # noqa _default_writers['xlsx'] = 'xlsxwriter' except ImportError: pass return _default_writers[ext] def get_writer(engine_name): try: return _writers[engine_name] except KeyError: raise ValueError("No Excel writer '{engine}'" .format(engine=engine_name)) @Appender(_read_excel_doc) @deprecate_kwarg("parse_cols", "usecols") @deprecate_kwarg("skip_footer", "skipfooter") def read_excel(io, sheet_name=0, header=0, names=None, index_col=None, usecols=None, squeeze=False, dtype=None, engine=None, converters=None, true_values=None, false_values=None, skiprows=None, nrows=None, na_values=None, parse_dates=False, date_parser=None, thousands=None, comment=None, skipfooter=0, convert_float=True, mangle_dupe_cols=True, **kwds): # Can't use _deprecate_kwarg since sheetname=None has a special meaning if is_integer(sheet_name) and sheet_name == 0 and 'sheetname' in kwds: warnings.warn("The `sheetname` keyword is deprecated, use " "`sheet_name` instead", FutureWarning, stacklevel=2) sheet_name = kwds.pop("sheetname") if 'sheet' in kwds: raise TypeError("read_excel() got an unexpected keyword argument " "`sheet`") if not isinstance(io, ExcelFile): io = ExcelFile(io, engine=engine) return io.parse( sheet_name=sheet_name, header=header, names=names, index_col=index_col, usecols=usecols, squeeze=squeeze, dtype=dtype, converters=converters, true_values=true_values, false_values=false_values, skiprows=skiprows, nrows=nrows, na_values=na_values, parse_dates=parse_dates, date_parser=date_parser, thousands=thousands, 
comment=comment, skipfooter=skipfooter, convert_float=convert_float, mangle_dupe_cols=mangle_dupe_cols, **kwds) class ExcelFile(object): """ Class for parsing tabular excel sheets into DataFrame objects. Uses xlrd. See read_excel for more documentation Parameters ---------- io : string, path object (pathlib.Path or py._path.local.LocalPath), file-like object or xlrd workbook If a string or path object, expected to be a path to xls or xlsx file engine : string, default None If io is not a buffer or path, this must be set to identify io. Acceptable values are None or xlrd """ def __init__(self, io, **kwds): err_msg = "Install xlrd >= 1.0.0 for Excel support" try: import xlrd except ImportError: raise ImportError(err_msg) else: if xlrd.__VERSION__ < LooseVersion("1.0.0"): raise ImportError(err_msg + ". Current version " + xlrd.__VERSION__) # could be a str, ExcelFile, Book, etc. self.io = io # Always a string self._io = _stringify_path(io) engine = kwds.pop('engine', None) if engine is not None and engine != 'xlrd': raise ValueError("Unknown engine: {engine}".format(engine=engine)) # If io is a url, want to keep the data as bytes so can't pass # to get_filepath_or_buffer() if _is_url(self._io): io =
_urlopen(self._io)
pandas.io.common._urlopen
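# pandas.io.common._urlopen is a private pandas helper; at the user level the
# same URL branch is normally exercised by passing the URL directly to
# read_excel. The URL below is a placeholder, not taken from the source.
import pandas as pd

workbook_url = "https://example.com/tmp.xlsx"  # placeholder
# df = pd.read_excel(workbook_url)  # pandas fetches the remote workbook itself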
import pandas as pd import os import matplotlib.pyplot as plt import seaborn as sns from minder_utils.util import formatting_plots from minder_utils.configurations import visual_config sns.set() class Visual_Sleep: def __init__(self, path, style='age', filename='imperial_dementia_20211026'): ''' Visualise the sleep data Parameters ---------- path: str, path to the sleep data style: str, plot style - age: lineplot, hue = age - joint: lineplot, hue = age, style = Sleep Time - face: facegrid - re: relation plot ''' self.config = visual_config['sleep'] self.style = style if 'imperial' in filename: self.data = pd.read_csv(os.path.join(path, filename + '.csv'), delimiter=';') else: self.data = pd.read_csv(os.path.join(path, filename + '.csv')) # Divide the data by time self.data.start_date = pd.to_datetime(self.data.start_date) self.data['Sleep Time'] = 'Late' index = pd.DatetimeIndex(self.data.start_date) self.data['Sleep Time'].iloc[index.indexer_between_time('10:00', '21:00')] = 'Early' if 'imperial' in filename: self.data['age'] = 2021 - pd.to_datetime(self.data['birthdate']).dt.year self.data = self.data[self.data['age'] >= 60] # Group by ages self.data.age[self.data.age <= 50] = 0 self.data.age[(self.data.age > 50) & (self.data.age <= 60)] = 1 self.data.age[(self.data.age > 60) & (self.data.age <= 70)] = 2 self.data.age[(self.data.age > 70) & (self.data.age <= 80)] = 3 self.data.age[self.data.age > 80] = 4 mapping = { 0: '<=50', 1: '50-60', 2: '60-70', 3: '70-80', 4: '>80' } self.data.age = self.data.age.map(mapping) new_cols = [] for col in self.data.columns: append = True for ele in self.config['stages']: if col in ele: new_cols.append(ele) append = False if append: new_cols.append(col) self.data.columns = new_cols df = self.data[self.config['stages']] for col in self.config['stages']: if col not in ['Sleep Time', 'age', 'user_id']: df = self.filter_df(df, col) df[col] /= 3600 df['Sleep'] = df['light_duration (s)'] + df['deep_duration (s)'] + df['rem_duration (s)'] df = df[['user_id', 'awake_duration (s)', 'Sleep Time', 'age', 'Sleep']] df = df.melt(id_vars=['user_id', 'Sleep Time', 'age'], var_name='State', value_name='Duration (H)') mapping = { 'awake_duration (s)': 'Awake in bed', 'Sleep': 'Sleep' } df['State'] = df['State'].map(mapping) self.df = df @formatting_plots(save_path=visual_config['sleep']['save_path'], rotation=90, legend=False) def lineplot(self): self.plt_func(sns.lineplot) @formatting_plots(save_path=visual_config['sleep']['save_path'], rotation=90, legend=False) def violinplot(self): self.plt_func(sns.violinplot) @formatting_plots(title='Duration of different states', save_path=visual_config['sleep']['save_path'], rotation=0, legend=False) def boxplot_separate(self): self.plt_func(sns.boxplot) @formatting_plots(title='Duration of different states', save_path=visual_config['sleep']['save_path'], rotation=0, legend=False) def boxplot_joint(self): style = self.style self.style = 'no_hue' self.plt_func(sns.boxplot) self.style = style @staticmethod def filter_df(df, col, width=1.5): # Computing IQR new_df = [] for age in df.age.unique(): Q1 = df[df.age == age][col].quantile(0.25) Q3 = df[df.age == age][col].quantile(0.75) IQR = Q3 - Q1 indices = (df[df.age == age][col] >= Q1 - width * IQR) & (df[df.age == age][col] <= Q3 + width * IQR) new_df.append(df[df.age == age].loc[indices]) # Filtering Values between Q1-1.5IQR and Q3+1.5IQR return pd.concat(new_df) def plt_func(self, func, x_name='State', y_name='Duration (H)', hue_name='age'): if self.style == 'age': length = 
len(self.df[hue_name].unique()) func(x=x_name, y=y_name, hue=hue_name, data=self.df, hue_order=self.config['hue_order'][-length:]) elif self.style == 'joint': try: func(x=x_name, y=y_name, hue=hue_name, style='Sleep Time', data=self.df, hue_order=self.config['hue_order']) except TypeError: func(x=x_name, y=y_name, hue=hue_name, data=self.df, hue_order=self.config['hue_order']) elif self.style == 'face': g = sns.FacetGrid(self.df, col=hue_name, row='Sleep Time', col_order=self.config['hue_order']) g.map(func, x_name, y_name) for axes in g.axes.flat: _ = axes.set_xticklabels(axes.get_xticklabels(), rotation=90) elif self.style == 're': sns.relplot( data=self.df, x=x_name, y=y_name, hue="Sleep Time", col=hue_name, kind="line", ) else: func(x=x_name, y=y_name, data=self.df, hue_order=self.config['hue_order']) @formatting_plots(save_path=visual_config['sleep']['save_path'], rotation=90, legend=False) def visual_phy(self): df = self.data[self.config['phy_stages']] for col in self.config['phy_stages']: if col not in ['age']: df[col] = df[col].apply(lambda x: float('.'.join(str(x).split(',')))) df[col] = self.filter_df(df, col) df[col] /= df[col].max() df = df.melt(id_vars='age', var_name='Other Data', value_name='Value') sns.boxplot(x='Other Data', y='Value', hue='age', hue_order=self.config['hue_order'], data=df) @formatting_plots(save_path=visual_config['sleep']['save_path'], rotation=90, legend=False, title='Time of participants went to bed') def visualise_counts(self): df = self.data[['age', 'Sleep Time', 'start_date']] df.start_date = pd.to_datetime(df.start_date) df['Time'] = df.start_date.dt.hour df['Percentage'] = 1 df = df.groupby(by=['Sleep Time', 'age', 'Time'])['Percentage'].sum().reset_index() for a in df.age.unique(): df['Percentage'][df.age == a] /= sum(df[df.age == a]['Percentage']) sns.lineplot(x='Time', y='Percentage', hue='age', linestyle='--', hue_order=self.config['hue_order'], data=df) plt.ylim(0, 0.35) ticks = [] for i in range(24): ticks.append('{}.00'.format(str(i).zfill(2))) plt.xticks(range(24), ticks) plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), fancybox=True, shadow=True, ncol=5) if __name__ == '__main__': vs = Visual_Sleep('/Users/mozzie/Desktop/DATA.nosync/sleep_mat', 'age') vs.visualise_counts() vs.boxplot_joint() # vs.boxplot_separate() vs_with = Visual_Sleep('/Users/mozzie/Desktop/DATA.nosync/sleep_mat', 'age', filename='withings_sleep_dataset') # Joint, Controlled age group minder = vs.df minder['Dataset'] = 'Dementia' withings = vs_with.df withings_ages = dict(withings.age.value_counts()) minder_ages = dict(minder.age.value_counts()) min_times = 100000 for age in minder_ages: times = withings_ages[age] / minder_ages[age] if times < min_times: min_times = times withings_df = [] mappings = {} for age in minder_ages: num = minder_ages[age] * min_times mappings[age] = age + ' (' + str(round(minder_ages[age] / len(minder) * 100))[:2] + '%' + ')' withings_df.append(withings[withings.age == age].sample(n=int(num))) withings = pd.concat(withings_df) withings['Dataset'] = 'Aged Matched Control Group' withings.age = withings.age.map(mappings) minder.age = minder.age.map(mappings) df =
pd.concat([minder, withings])
pandas.concat
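# A minimal sketch of the concat-with-a-label pattern used above to pool the
# dementia cohort with the age-matched control group; the toy durations and
# labels below are invented.
import pandas as pd

a = pd.DataFrame({"Duration (H)": [7.2, 6.8]})
a["Dataset"] = "Dementia"
b = pd.DataFrame({"Duration (H)": [7.9, 8.1]})
b["Dataset"] = "Aged Matched Control Group"
combined = pd.concat([a, b], ignore_index=True)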
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test for the core code.
"""
import pandas as pd
import pandas.testing as tm
import pytest

from dpipe import Pipeline, Filter, ExprAssign, ResetIndex


def test_pipeline_list():
    df = pd.DataFrame({'a': [1, 2, 3, -1, 0]})
    pipeline = Pipeline([Filter('a', lambda x: x >= 0),
                         ExprAssign('b', 'a * 2'),
                         ResetIndex()])
    df_actual = pipeline.apply(df)
    df_expected = pd.DataFrame({'a': [1, 2, 3, 0], 'b': [2, 4, 6, 0]})
    tm.assert_frame_equal(df_actual, df_expected)


def test_pipeline_add():
    df = pd.DataFrame({'a': [1, 2, 3, -1, 0]})
    pipeline = (Filter('a', lambda x: x >= 0)
                + ExprAssign('b', 'a * 2')
                + ResetIndex())
    df_actual = pipeline.apply(df)
    df_expected = pd.DataFrame({'a': [1, 2, 3, 0], 'b': [2, 4, 6, 0]})
    tm.assert_frame_equal(df_actual, df_expected)


def test_field_check():
    df =
pd.DataFrame({'a': [1, 2, 3, -1, 0]})
pandas.DataFrame
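# For reference, a plain-pandas sketch of the transformation the dpipe test
# expects (dpipe's own Pipeline/Filter/ExprAssign API is taken from the test,
# not reimplemented here).
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3, -1, 0]})
expected = (
    df[df["a"] >= 0]
    .assign(b=lambda d: d["a"] * 2)
    .reset_index(drop=True)
)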
"""Utility functions for plotting human mobility data from Baidu Huiyan.""" import geopandas as gpd import pandas as pd def plot_choropleth_map(ax, data_geo, data_color, col_color, col_merge='province', def_val=None, c_missing='lightgrey', **kwargs): """ Plot the choropleth map with a given data for coloring. Params ------ ax (matplotlib.axes.Axes): Axes to plot. data_geo (pandas.DataFrame): Geographical data for the choropleth map. data_color (pandas.DataFrame): Dataset including the variable for coloring. col_color (str): Column name of the variable for coloring. col_merge (str): Column on which the two datasets are merged. Default to 'province'. def_val (float): Default value to replace NaNs if not set to None. c_missing (str): Color for missing data. Default to 'lightgrey'. """ data = data_geo.merge(data_color, on=col_merge, how='left') if def_val is not None: # Replace NaNs to a chosen default value data.loc[data[col_color].isna(), col_color] = def_val # Plot the choropleth map for non-missing data data.plot(column=col_color, ax=ax, **kwargs) else: isnan = pd.isna(data[col_color]) # Plot missing data as regions of a given color if isnan.any(): data.loc[isnan, :].plot(color=c_missing, ax=ax) # Plot the choropleth map for non-missing data data.loc[~isnan, :].plot(column=col_color, ax=ax, **kwargs) def read_geojson(path='../data/'): """Return geo-data loaded from the geojson on github/d3cn/data.""" data = gpd.read_file(path + 'china-province.geojson') # Add province labels used in the line list cols = ('ADM1_ZH', 'province') table = pd.read_csv(path + 'region_translation.csv').loc[:, cols] table = table.sort_values(by='ADM1_ZH').reset_index() names = data['NAME'].sort_values().reset_index() labels =
pd.concat([names, table], axis=1)
pandas.concat
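# A minimal sketch of the axis=1 concat used above to pair the sorted province
# names with the translation table; the two-row toy data is invented.
import pandas as pd

names = pd.Series(["Anhui", "Beijing"], name="NAME").reset_index()
table = pd.DataFrame({"ADM1_ZH": ["安徽", "北京"], "province": ["Anhui", "Beijing"]})
labels = pd.concat([names, table], axis=1)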
from typing import List import pandas as pd from utils import request_to_json, get_repo_names from github_pr import GitHubPR from github_users import GitHubUsers # temporary - to minimize the number of requests REPO_NAMES = [ "dyvenia", "elt_workshop", "git-workshop", "gitflow", "notebooks", "timeflow", "timeflow_ui", "timelogs", "viadot", ] class GitHubFlow: """ For getting all informations per contributor. """ def __init__(self): # self.repo_names = get_repo_names() self.contributor_info = GitHubUsers() self.pr_info = GitHubPR() def get_prs_per_user(self, contributor: str = None, repo: str = None) -> dict: """ List all pull requests per pointed user from repository. Args: contributor (str, optional): Contributor name. Defaults to None. repo (str, optional): Repo name. Defaults to None. Returns: dict: Dictionary included all PRs per contributor from specific repository. """ url = f"https://api.github.com/search/issues?q=is:pr+repo:dyvenia/{repo}+author:{contributor}" pr_info = request_to_json(url) final_dict_per_user = {} try: for ind in range(len(pr_info["items"])): dict_per_user = { "contributor": contributor, "repo": repo, "number": pr_info["items"][ind]["number"], "title": pr_info["items"][ind]["title"], } final_dict_per_user[pr_info["items"][ind]["id"]] = dict_per_user except KeyError as e: print(f"For {contributor} : {e} is not found") return final_dict_per_user def list_all_pr_per_contributors(self, dict_repo_login: dict = None) -> List[dict]: """ List combined pull requests per every Args: dict_repo_login (dict, optional): Each contribution that occurs in a given organization. The contributor is the key. The value is the repository list that the user contributes. Defaults to None. Returns: List[dict]: List of dictionaries. Key is the PR id. Info about specific PR in a value. """ list_of_dict_prs = [] for key, value in dict_repo_login.items(): for repo in value: dict_pr = self.get_prs_per_user(key, repo) list_of_dict_prs.append(dict_pr) return list_of_dict_prs def create_pairs_contributor_repo(self, df_repo_login: pd.DataFrame = None) -> dict: """ Create pairs contributor-repository. Pairing for each contribution that occurs in a given organization. Args: df_repo_login (pd.DataFrame, optional): Each contribution that occurs in a given organization. Defaults to None. Returns: dict: The contributor is the key. The value is the repository list that the user contributes. """ dict_repo_login = {} dict_repo_login_raw = df_repo_login.to_dict("records") for dct in dict_repo_login_raw: try: dict_repo_login[dct["login"]].append(dct["repo"]) except KeyError: dict_repo_login[dct["login"]] = [dct["repo"]] return dict_repo_login def run_pr_info(self) -> pd.DataFrame: """ Method to generate DataFrame with information about all pull requests. DataFrame contains information about PR name and PR number per user and repository where he contributes. Returns: pd.DataFrame: Data Frame["contributor", "repo", "number", "title"]. 
""" df_all_contributions = self.contributor_info.get_all_contributions(REPO_NAMES) dict_repo_login = self.create_pairs_contributor_repo( df_all_contributions[["repo", "login"]] ) list_of_dict_prs = self.list_all_pr_per_contributors(dict_repo_login) df_transformed = pd.DataFrame( [list_of.values() for list_of in list_of_dict_prs] ) df = pd.DataFrame() for x in df_transformed.columns: df = pd.concat([df, df_transformed[x].apply(pd.Series)]) return df[["contributor", "repo", "number", "title"]].dropna() def run_commit_info(self) -> pd.DataFrame: """ Method to generate DataFrame with information about all commits from pull requests. DataFrame contains information about author, PR number, message and date_commit. Returns: pd.DataFrame: Data Frame["author", "pr_number", "date_commit", "message", "comment_count"]. """ df = self.run_pr_info() dict_pr_number_repo = {row["number"]: row["repo"] for _, row in df.iterrows()} df_combined = pd.DataFrame() try: for pr_number, repo in dict_pr_number_repo.items(): df_commits = self.pr_info.commits_to_df( self.pr_info.get_commits_from_pr(repo, pr_number) ) df_combined = pd.concat([df_combined, df_commits]) except Exception as e: print("Exception in get_commits_from_pr - ", e) df_combined = pd.concat([df_combined, pd.DataFrame()]) return df_combined def run_files_info(self) -> pd.DataFrame: """ Method to generate DataFrame with information about all files changed in pull requests. DataFrame contains information about filename, path_to_file, pr_number, repo name, status what happend with the file (additions, deletions, changes). Returns: pd.DataFrame: Data Frame["filename", "path_to_file", "pr_number", "repo", "status", "additions","deletions", "changes"]. """ df = self.run_pr_info() dict_pr_number_repo = {row["number"]: row["repo"] for _, row in df.iterrows()} df_combined =
pd.DataFrame()
pandas.DataFrame
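# The empty-frame accumulator above works, but collecting pieces in a list and
# concatenating once at the end is the more idiomatic (and faster) pattern; a
# sketch with placeholder PR numbers:
import pandas as pd

pieces = []
for pr_number in (101, 102):  # placeholder PR numbers
    pieces.append(pd.DataFrame({"pr_number": [pr_number]}))
df_combined = pd.concat(pieces, ignore_index=True) if pieces else pd.DataFrame()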
import pytest from datetime import datetime import pandas as pd from tadpole_algorithms.transformations import convert_to_year_month, \ convert_to_year_month_day, map_string_diagnosis def test_forecastDf_date_conversion(): forecastDf = pd.DataFrame([{'Forecast Date': '2019-07'}]) assert pd.api.types.is_string_dtype(forecastDf.dtypes) # original conversion code forecastDf['Forecast Date'] = [datetime.strptime(x, '%Y-%m') for x in forecastDf['Forecast Date']] # considers every month estimate to be the actual first day 2017-01 print(forecastDf.dtypes) assert pd.api.types.is_datetime64_ns_dtype(forecastDf['Forecast Date']) # new conversion code # from string forecastDf_new1 = pd.DataFrame([{'Forecast Date': '2019-07'}]) forecastDf_new1['Forecast Date'] = convert_to_year_month(forecastDf_new1['Forecast Date']) assert pd.api.types.is_datetime64_ns_dtype(forecastDf_new1['Forecast Date']) # from date object forecastDf_new2 = pd.DataFrame([{'Forecast Date': datetime(2019, 7, 1, 0, 0, 0, 0)}]) forecastDf_new2['Forecast Date'] = convert_to_year_month(forecastDf_new2['Forecast Date']) assert pd.api.types.is_datetime64_ns_dtype(forecastDf_new2['Forecast Date']) assert forecastDf['Forecast Date'].equals(forecastDf_new1['Forecast Date']) assert forecastDf_new1['Forecast Date'].equals(forecastDf_new2['Forecast Date']) def test_d4Df_date_conversions(): d4Df =
pd.DataFrame([{'ScanDate': '2019-07-10'}])
pandas.DataFrame
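# A small sketch of the date-string conversion this test exercises, expressed
# with plain pandas; convert_to_year_month_day itself lives in
# tadpole_algorithms and is not reproduced here.
import pandas as pd

d4Df = pd.DataFrame([{"ScanDate": "2019-07-10"}])
d4Df["ScanDate"] = pd.to_datetime(d4Df["ScanDate"], format="%Y-%m-%d")
assert pd.api.types.is_datetime64_ns_dtype(d4Df["ScanDate"])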
""" Earth Engine loading utils ------------------------------ Function to load and export from earth engine """ from pathlib import Path import ee import pandas as pd def authenticate_google_service_account( json_path: Path, service_account: str = '<EMAIL>', ) -> None: credentials = ee.ServiceAccountCredentials(service_account, str(json_path)) ee.Initialize(credentials) def load_collection(collection): """ Load collection from Earth Engine :param collection: String. :return: ee.ImageCollection """ if collection in ('MODIS/006/MCD12Q1', 'MODIS/006/MOD11A1','COPERNICUS/S5P/OFFL/L3_CH4'): return ee.ImageCollection(collection) elif collection in ('USGS/SRTMGL1_003'): return ee.Image(collection) else: raise ValueError('Unknown collection') def ee_array_to_df(arr, list_of_bands): """Transforms client-side ee.Image.getRegion array to pandas.DataFrame. :param arr: ee.Array :list_of_bands: List[str] :return: pd.DataFrame """ df = pd.DataFrame(arr) # Rearrange the header. headers = df.iloc[0] df =
pd.DataFrame(df.values[1:], columns=headers)
pandas.DataFrame
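# A sketch of the header-promotion step above: ee.Image.getRegion returns a
# list of lists whose first row is the header; the toy array below is invented
# rather than fetched from Earth Engine.
import pandas as pd

arr = [["id", "longitude", "latitude", "time", "LST_Day_1km"],
       ["0", 7.45, 46.95, 1609459200000, 13450]]
df = pd.DataFrame(arr)
headers = df.iloc[0]
df = pd.DataFrame(df.values[1:], columns=headers)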
#!/usr/bin/env python
"""
Purpose: collect coverage of junctional reads in sexed-tissues for certain species
"""
import sharedinfo
import focaljunction
import pandas as pd


def write_junction_summary_to_table(species, locations, genesymbol):
    dcts = list()
    for location in locations:
        dct = focaljunction.get_junction_of_species_by_location(species, location)
        dcts.append(dct)
    mdct = focaljunction.merge_dcts(dcts)
    mpd =
pd.DataFrame.from_dict(mdct)
pandas.DataFrame.from_dict
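# A minimal DataFrame.from_dict sketch; the nested junction-count dict below
# merely stands in for whatever focaljunction.merge_dcts returns.
import pandas as pd

mdct = {"chr2L:1000-2000": {"male": 12, "female": 7},
        "chr3R:500-900": {"male": 3, "female": 9}}
mpd = pd.DataFrame.from_dict(mdct)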
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 mouse=a import matplotlib matplotlib.rcParams['figure.facecolor'] = '1.' matplotlib.use('Agg') import ants import numpy as np import pandas as pd import os import imageio import nipype.pipeline.engine as pe import nipype.interfaces.utility as niu import nibabel as nib import shutil import ntpath import nipype.pipeline.engine as pe import nipype.interfaces.utility as niu import nipype.interfaces.io as nio import matplotlib.pyplot as plt import seaborn as sns import inspect import json import re import time import matplotlib.animation as animation from skimage.feature import canny from nibabel.processing import resample_to_output from sklearn.metrics import normalized_mutual_info_score from sklearn.ensemble import IsolationForest from sklearn.cluster import DBSCAN from sklearn.neighbors import LocalOutlierFactor from sklearn.svm import OneClassSVM from skimage.filters import threshold_otsu from math import sqrt, log, ceil from os import getcwd from os.path import basename from sys import argv, exit from glob import glob from src.outlier import kde, MAD from sklearn.neighbors import LocalOutlierFactor from src.utils import concat_df from nipype.interfaces.base import (TraitedSpec, File, traits, InputMultiPath, BaseInterface, OutputMultiPath, BaseInterfaceInputSpec, isdefined) from scipy.ndimage.filters import gaussian_filter from nipype.utils.filemanip import (load_json, save_json, split_filename, fname_presuffix, copyfile) _file_dir, fn =os.path.split( os.path.abspath(__file__) ) def load_3d(fn, t=0): print('Reading Frame %d'%t,'from', fn) img = nib.load(fn) vol = img.get_fdata() hd = img.header if len(vol.shape) == 4 : vol = vol[:,:,:,t] vol = vol.reshape(vol.shape[0:3] ) img = nib.Nifti1Image(vol, img.affine) return img, vol def get_spacing(aff, i) : return aff[i, np.argmax(np.abs(aff[i,0:3]))] ###################### # Group-level QC # ###################### #datasink for dist metrics #check how the calc outlier measure node is implemented, may need to be reimplemented final_dir="qc" def group_level_qc(opts, args): #setup workflow workflow = pe.Workflow(name=qc_err+opts.preproc_dir) workflow.base_dir = opts.targetDir #Datasink datasink=pe.Node(interface=nio.DataSink(), name=qc_err+"output") datasink.inputs.base_directory= opts.targetDir +os.sep +"qc" datasink.inputs.substitutions = [('_cid_', ''), ('sid_', '')] outfields=['coreg_metrics','tka_metrics','pvc_metrics'] paths={'coreg_metrics':"*/coreg_qc_metrics/*_metric.csv", 'tka_metrics':"*/results_tka/*_3d.csv",'pvc_metrics':"*/pvc_qc_metrics/*qc_metric.csv"} #If any one of the sets of metrics does not exist because it has not been run at the scan level, then #remove it from the list of outfields and paths that the datagrabber will look for. 
for outfield, path in paths.items(): # zip(paths, outfields): full_path = opts.targetDir + os.sep + opts.preproc_dir + os.sep + path print(full_path) if len(glob(full_path)) == 0 : outfields.remove(outfield) paths.pop(outfield) #Datagrabber datasource = pe.Node( interface=nio.DataGrabber( outfields=outfields, raise_on_empty=True, sort_filelist=False), name=qc_err+"datasource") datasource.inputs.base_directory = opts.targetDir + os.sep +opts.preproc_dir datasource.inputs.template = '*' datasource.inputs.field_template = paths #datasource.inputs.template_args = dict( coreg_metrics = [['preproc_dir']] ) ################## # Coregistration # ################## qc_err='' if opts.pvc_label_name != None : qc_err += "_"+opts.pvc_label_name if opts.quant_label_name != None : qc_err += "_"+opts.quant_label_name if opts.results_label_name != None : qc_err += "_"+opts.results_label_name qc_err += "_" if 'coreg_metrics' in outfields: #Concatenate distance metrics concat_coreg_metricsNode=pe.Node(interface=concat_df(), name=qc_err+"concat_coreg_metrics") concat_coreg_metricsNode.inputs.out_file="coreg_qc_metrics.csv" workflow.connect(datasource, 'coreg_metrics', concat_coreg_metricsNode, 'in_list') workflow.connect(concat_coreg_metricsNode, "out_file", datasink, 'coreg/metrics') #Plot Coregistration Metrics plot_coreg_metricsNode=pe.Node(interface=plot_qcCommand(), name=qc_err+"plot_coreg_metrics") workflow.connect(concat_coreg_metricsNode, "out_file", plot_coreg_metricsNode, 'in_file') workflow.connect(plot_coreg_metricsNode, "out_file", datasink, 'coreg/metrics_plot') #Calculate Coregistration outlier measures outlier_measureNode = pe.Node(interface=outlier_measuresCommand(), name=qc_err+"coregistration_outlier_measure") workflow.connect(concat_coreg_metricsNode, 'out_file', outlier_measureNode, 'in_file') workflow.connect(outlier_measureNode, "out_file", datasink, 'coreg/outlier') #Plot coregistration outlier measures plot_coreg_measuresNode=pe.Node(interface=plot_qcCommand(),name=qc_err+"plot_coreg_measures") workflow.connect(outlier_measureNode,"out_file",plot_coreg_measuresNode,'in_file') workflow.connect(plot_coreg_measuresNode,"out_file",datasink,'coreg/measures_plot') ####### # PVC # ####### if 'pvc_metrics' in outfields: #Concatenate PVC metrics concat_pvc_metricsNode=pe.Node(interface=concat_df(), name=qc_err+"concat_pvc_metrics") concat_pvc_metricsNode.inputs.out_file="pvc_qc_metrics.csv" workflow.connect(datasource, 'pvc_metrics', concat_pvc_metricsNode, 'in_list') workflow.connect(concat_pvc_metricsNode, "out_file", datasink, 'pvc/metrics') #Plot PVC Metrics plot_pvc_metricsNode=pe.Node(interface=plot_qcCommand(), name=qc_err+"plot_pvc_metrics") workflow.connect(concat_pvc_metricsNode, "out_file", plot_pvc_metricsNode, 'in_file') workflow.connect(plot_pvc_metricsNode, "out_file", datasink, 'pvc/metrics_plot') #Calculate PVC outlier measures pvc_outlier_measureNode = pe.Node(interface=outlier_measuresCommand(), name=qc_err+"pvc_outlier_measure") workflow.connect(concat_pvc_metricsNode, 'out_file', pvc_outlier_measureNode, 'in_file') workflow.connect(pvc_outlier_measureNode, "out_file", datasink, 'pvc/outlier') #Plot PVC outlier measures plot_pvc_measuresNode=pe.Node(interface=plot_qcCommand(), name=qc_err+"plot_pvc_measures") workflow.connect(pvc_outlier_measureNode,"out_file",plot_pvc_measuresNode,'in_file') workflow.connect(plot_pvc_measuresNode, "out_file", datasink, 'pvc/measures_plot') ####### # TKA # ####### if 'tka_metrics' in outfields: #Concatenate TKA metrics 
concat_tka_metricsNode=pe.Node(interface=concat_df(), name=qc_err+"concat_tka_metrics") concat_tka_metricsNode.inputs.out_file="tka_qc_metrics.csv" workflow.connect(datasource, 'tka_metrics', concat_tka_metricsNode, 'in_list') workflow.connect(concat_tka_metricsNode, "out_file", datasink, 'tka/metrics') #Plot TKA Metrics plot_tka_metricsNode=pe.Node(interface=plot_qcCommand(), name=qc_err+"plot_tka_metrics") workflow.connect(concat_tka_metricsNode, "out_file", plot_tka_metricsNode, 'in_file') workflow.connect(plot_tka_metricsNode, "out_file", datasink, 'tka/metrics_plot') #Calculate TKA outlier measures tka_outlier_measureNode = pe.Node(interface=outlier_measuresCommand(), name=qc_err+"tka_outlier_measure") workflow.connect(concat_tka_metricsNode, 'out_file', tka_outlier_measureNode, 'in_file') workflow.connect(tka_outlier_measureNode, "out_file", datasink, 'tka/outlier') #Plot PVC outlier measures plot_tka_measuresNode=pe.Node(interface=plot_qcCommand(), name=qc_err+"plot_tka_measures") workflow.connect(tka_outlier_measureNode,"out_file",plot_tka_measuresNode,'in_file') workflow.connect(plot_tka_measuresNode, "out_file", datasink, 'tka/measures_plot') workflow.run() #################### # Distance Metrics # #################### __NBINS=-1 import copy def pvc_mse(pvc_fn, pve_fn, fwhm): pvc = nib.load(pvc_fn) pvc.data = pvc.get_data() pve = nib.load(pve_fn) pve.data = pve.get_data() mse = 0 if len(pvc.data.shape) > 3 :#if volume has more than 3 dimensions t = int(pvc.data.shape[3]/2) #for t in range(pvc.sizes[0]): pve_frame = pve.data[:,:,:,t] pvc_frame = pvc.data[:,:,:,t] n = np.sum(pve.data[t,:,:,:]) # np.prod(pve.data.shape[0:4]) pvc_blur = gaussian_filter(pvc_frame,fwhm) m = np.sum(np.sqrt((pve_frame - pvc_blur)**2)) mse += m print(t, m) else : #volume has 3 dimensions n = np.sum(pve.data) # np.prod(pve.data.shape[0:3]) pvc_blur = gaussian_filter(pvc.data,fwhm) m = np.sum(np.sqrt((pve.data - pvc_blur)**2)) mse += m mse = -mse / n #np.sum(pve.data) print("PVC MSE:", mse) return mse #################### # Outlier Measures # #################### def _IsolationForest(X): X = np.array(X) if len(X.shape) == 1 : X=X.reshape(-1,1) rng = np.random.RandomState(42) clf = IsolationForest(max_samples=X.shape[0], random_state=rng) return clf.fit(X).predict(X) def _LocalOutlierFactor(X): X = np.array(X) if len(X.shape) == 1 : X=X.reshape(-1,1) n=int(round(X.shape[0]*0.2)) clf = LocalOutlierFactor(n_neighbors=n) clf.fit_predict(X) return clf.negative_outlier_factor_ def _OneClassSVM(X): clf = OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1) clf.fit(X) return clf.predict(X) def _dbscan(X): db = DBSCAN(eps=0.3) return db.fit_predict(X) ########### # Globals # ########### global distance_metrics global outlier_measures global metric_columns global outlier_columns outlier_measures={"KDE":kde, "LOF": _LocalOutlierFactor, "IsolationForest":_IsolationForest, "MAD":MAD} #, "DBSCAN":_dbscan, "OneClassSVM":_OneClassSVM } metric_columns = ['analysis', 'sub','ses','task','run','acq','rec','roi','metric','value'] outlier_columns = ['analysis', 'sub','ses','task','roi','metric','measure','value'] ####################### ### Outlier Metrics ### ####################### ### PVC Metrics class pvc_qc_metricsOutput(TraitedSpec): out_file = traits.File(desc="Output file") class pvc_qc_metricsInput(BaseInterfaceInputSpec): pve = traits.File(exists=True, mandatory=True, desc="Input PVE PET image") pvc = traits.File(exists=True, mandatory=True, desc="Input PVC PET") fwhm = traits.List(desc='FWHM of the scanner') sub = 
traits.Str("Subject ID") task = traits.Str("Task") ses = traits.Str("Ses") run = traits.Str("Run") rec = traits.Str("Reconstruction") acq = traits.Str("Acquisition") out_file = traits.File(desc="Output file") class pvc_qc_metrics(BaseInterface): input_spec = pvc_qc_metricsInput output_spec = pvc_qc_metricsOutput def _gen_output(self, sid, ses, task,run,acq,rec, fname ="pvc_qc_metric.csv"): dname = os.getcwd() fn = dname+os.sep+'sub-'+sid+'_ses-'+ses+'_task-'+task if isdefined(run) : fn += '_run-'+str(run) fn += "_acq-"+str(acq)+"_rec-"+str(rec)+fname return fn def _run_interface(self, runtime): sub = self.inputs.sub ses = self.inputs.ses task = self.inputs.task fwhm = self.inputs.fwhm run = self.inputs.run rec = self.inputs.rec acq = self.inputs.acq df = pd.DataFrame([], columns=metric_columns) pvc_metrics={'mse':pvc_mse } for metric_name, metric_function in pvc_metrics.items(): mse = pvc_mse(self.inputs.pvc, self.inputs.pve, fwhm) temp = pd.DataFrame([['pvc', sub,ses,task,run,acq,rec,'02',metric_name,mse]], columns=metric_columns) df = pd.concat([df, temp]) df.fillna(0, inplace=True) if not isdefined(self.inputs.out_file): self.inputs.out_file = self._gen_output(self.inputs.sub, self.inputs.ses, self.inputs.task, self.inputs.run, self.inputs.acq, self.inputs.rec) df.to_csv(self.inputs.out_file, index=False) return runtime def _list_outputs(self): outputs = self.output_spec().get() if not isdefined(self.inputs.out_file): self.inputs.out_file = self.inputs._gen_output(self.inputs.sid,self.inputs.ses, self.inputs.task, self.inputs.run, self.inputs.acq, self.inputs.rec) outputs["out_file"] = self.inputs.out_file return outputs ### Coregistration Metrics class coreg_qc_metricsOutput(TraitedSpec): out_file = traits.File(desc="Output file") class coreg_qc_metricsInput(BaseInterfaceInputSpec): pet = traits.File(exists=True, mandatory=True, desc="Input PET image") t1 = traits.File(exists=True, mandatory=True, desc="Input T1 MRI") brain_mask_space_mri = traits.File(exists=True, mandatory=True, desc="Input T1 MRI") pet_brain_mask = traits.File(exists=True, mandatory=True, desc="Input T1 MRI") sid = traits.Str(desc="Subject") ses = traits.Str(desc="Session") task = traits.Str(desc="Task") run = traits.Str(desc="Run") rec = traits.Str(desc="Reconstruction") acq = traits.Str(desc="Acquisition") study_prefix = traits.Str(desc="Study Prefix") out_file = traits.File(desc="Output file") clobber = traits.Bool(desc="Overwrite output file", default=False) class coreg_qc_metricsCommand(BaseInterface): input_spec = coreg_qc_metricsInput output_spec = coreg_qc_metricsOutput def _gen_output(self, sid, ses, task, run, rec, acq, fname ="distance_metric.csv"): dname = os.getcwd() fn = dname+os.sep+'sub-'+sid+'_ses-'+ses+'_task-'+task if isdefined(run) : fn += '_run-'+str(run) fn += "_acq-"+str(acq)+"_rec-"+str(rec)+fname return fn def _run_interface(self, runtime): sub_df=pd.DataFrame(columns=metric_columns ) pet = self.inputs.pet t1 = self.inputs.t1 sid = self.inputs.sid ses = self.inputs.ses task = self.inputs.task run = self.inputs.run rec = self.inputs.rec acq = self.inputs.acq brain_mask_space_mri = self.inputs.brain_mask_space_mri pet_brain_mask = self.inputs.pet_brain_mask coreg_metrics=['MattesMutualInformation'] path, ext = os.path.splitext(pet) base=basename(path) param=base.split('_')[-1] param_type=base.split('_')[-2] df=pd.DataFrame(columns=metric_columns ) def image_read(fn) : img, vol = load_3d(fn) vol = vol.astype(float) aff = img.affine origin = [ aff[0,3], aff[1,3], aff[2,3]] spacing = [ 
get_spacing(aff, 0), get_spacing(aff, 1), get_spacing(aff, 2) ] return ants.from_numpy( vol, origin=origin, spacing=spacing ) for metric in coreg_metrics : print("t1 ",t1) fixed = image_read( t1 ) moving = image_read( pet ) try : metric_val = ants.create_ants_metric( fixed = fixed, moving= moving, fixed_mask=ants.image_read( brain_mask_space_mri ), moving_mask=ants.image_read( pet_brain_mask ), metric_type=metric ).get_value() except RuntimeError : metric_val = np.NaN temp =
pd.DataFrame([['coreg',sid,ses,task,run,acq,rec,'01',metric,metric_val]],columns=df.columns )
pandas.DataFrame
""" Process PMLV2 ET data for basins """ import fnmatch import os import numpy as np import pandas as pd import datetime def trans_8day_pmlv2_to_camels_format(pmlv2_dir, output_dir, gage_dict, region, year): """ Transform 8-day PMLV2 data downloaded from GEE to the format in CAMELS. If you can read Chinese, and prefer Python code, you can see here: https://github.com/wangmengyun1998/hydroGIS/blob/master/GEE/4-geepy-gallery.ipynb Parameters ---------- pmlv2_dir the original data's directory output_dir the transformed data's directory gage_dict a dict containing gage's ids and the correspond HUC02 ids region we named the file downloaded from GEE as daymet_<region>_mean_<year>.csv, because we use GEE code to generate data for each year for each shape file (region) containing some basins. For example, if we use the basins' shpfile in CAMELS, the region is "camels". year we use GEE code to generate data for each year, so each year for each region has one data file. Returns ------- None """ # you can add features or delete features, or change the order, which depends on your txt content pmlv2_dataset = ["hru_id", "system:time_start", "GPP", "Ec", "Es", "Ei", "ET_water"] camels_format_index = [ "Year", "Mnth", "Day", "Hr", "GPP(gC/m2/d)", "Ec(mm/d)", "Es(mm/d)", "Ei(mm/d)", "ET_water(mm/d)", ] if "STAID" in gage_dict.keys(): gage_id_key = "STAID" elif "gauge_id" in gage_dict.keys(): gage_id_key = "gauge_id" elif "gage_id" in gage_dict.keys(): gage_id_key = "gage_id" else: raise NotImplementedError("No such gage id name") if "HUC02" in gage_dict.keys(): huc02_key = "HUC02" elif "huc_02" in gage_dict.keys(): huc02_key = "huc_02" else: raise NotImplementedError("No such huc02 id") # because this function only work for one year and one region, it's better to chose avg and sum files at first for f_name in os.listdir(pmlv2_dir): if fnmatch.fnmatch(f_name, "PML_V2_" + region + "_mean_" + str(year) + "*.csv"): pmlv2_data_file = os.path.join(pmlv2_dir, f_name) data_temp = pd.read_csv(pmlv2_data_file, sep=",", dtype={pmlv2_dataset[0]: str}) for i_basin in range(len(gage_dict[gage_id_key])): basin_data = data_temp[ data_temp[pmlv2_dataset[0]].values.astype(int) == int(gage_dict[gage_id_key][i_basin]) ] if basin_data.shape[0] == 0: raise ArithmeticError("chosen basins' number is zero") # get Year,Month,Day,Hour info # if system:time_start is millisecond tmp_times = [ datetime.datetime.fromtimestamp(tmp) for tmp in basin_data[pmlv2_dataset[1]].values / 1000 ] csv_date = pd.to_datetime(tmp_times) # csv_date = pd.to_datetime(basin_data[pmlv2_dataset[1]]) # the hour is set to 12, as 12 is the average hour of a day year_month_day_hour = pd.DataFrame( [[dt.year, dt.month, dt.day, 12] for dt in csv_date], columns=camels_format_index[0:4], ) data_df = pd.DataFrame( basin_data.iloc[:, 2:].values, columns=camels_format_index[4:] ) # concat new_data_df = pd.concat([year_month_day_hour, data_df], axis=1) # output the result huc_id = gage_dict[huc02_key][i_basin] output_huc_dir = os.path.join(output_dir, huc_id) if not os.path.isdir(output_huc_dir): os.makedirs(output_huc_dir) output_file = os.path.join( output_huc_dir, gage_dict[gage_id_key][i_basin] + "_lump_pmlv2_et.txt" ) print( "output pmlv2 et data of", gage_dict[gage_id_key][i_basin], "year", str(year), ) if os.path.isfile(output_file): data_old =
pd.read_csv(output_file)
pandas.read_csv
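# A minimal read_csv sketch mirroring the dtype handling above: the basin id
# column is forced to str so leading zeros survive. The file name is a
# placeholder, not a real GEE export.
import pandas as pd

data_temp = pd.read_csv("PML_V2_camels_mean_2010.csv", sep=",", dtype={"hru_id": str})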
import pandas as pd

from cellphonedb.src.core.methods import cpdb_statistical_analysis_helper
from cellphonedb.src.core.core_logger import core_logger
from cellphonedb.src.core.models.interaction import interaction_filter


def call(meta: pd.DataFrame,
         counts: pd.DataFrame,
         interactions: pd.DataFrame,
         iterations: int = 1000,
         threshold: float = 0.1,
         threads: int = 4,
         debug_seed: int = -1,
         result_precision: int = 3
         ) -> (pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame):
    core_logger.info(
        '[Cluster Statistical Analysis Simple] '
        'Threshold:{} Iterations:{} Debug-seed:{} Threads:{} Precision:{}'.format(
            threshold, iterations, debug_seed, threads, result_precision))
    if debug_seed >= 0:
        pd.np.random.seed(debug_seed)
        core_logger.warning('Debug random seed enabled. Set to {}'.format(debug_seed))

    interactions_filtered, counts_filtered = prefilters(counts, interactions)

    if interactions_filtered.empty or counts_filtered.empty:
        return pd.DataFrame(),
pd.DataFrame()
pandas.DataFrame
import os
import gc
import sys
import time
import click
import random
import sklearn
import numpy as np
import pandas as pd
import lightgbm as lgb
from tqdm import tqdm
from pprint import pprint
from functools import reduce
from lightgbm import LGBMClassifier
from sklearn.metrics import roc_auc_score, roc_curve

from config import read_config, KEY_FEATURE_MAP, KEY_MODEL_MAP
from utils import timer
from features.base import Base
from features.stacking import StackingFeaturesWithPasses

import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)


def get_train_test(conf):
    df = Base.get_df(conf)  # pd.DataFrame

    feature_classes = [KEY_FEATURE_MAP[key] for key in conf.features]
    features = [df]
    for feature in feature_classes:
        with timer(f"load (or create) {feature.__name__}"):
            f = feature.get_df(conf)
            features.append(f)

    with timer("join on SK_ID_CURR"):
        df = reduce(lambda lhs, rhs: lhs.merge(rhs, how='left', on='SK_ID_CURR'), features)
    del features
    gc.collect()

    train_df = df[df['TARGET'].notnull()].copy()
    test_df = df[df['TARGET'].isnull()].copy()
    del df
    gc.collect()
    return train_df, test_df


def get_feature_importances(data, shuffle, seed=None):
    # Gather real features
    train_features = [f for f in data.columns if f not in ([
        'TARGET', 'SK_ID_CURR', 'SK_ID_BUREAU', 'SK_ID_PREV', 'index'
    ])]

    # Go over fold and keep track of CV score (train and valid) and feature importances
    # Shuffle target if required
    y = data['TARGET'].copy()
    if shuffle:
        # Here you could as well use a binomial distribution
        y = data['TARGET'].copy().sample(frac=1.0)

    # Fit LightGBM in RF mode, yes it's quicker than sklearn RandomForest
    dtrain = lgb.Dataset(data[train_features], y, free_raw_data=False, silent=True)
    lgb_params = {
        'objective': 'binary',
        'boosting_type': 'rf',
        'subsample': 0.623,
        'colsample_bytree': 0.7,
        'num_leaves': 127,
        'max_depth': 8,
        'seed': seed,
        'bagging_freq': 1,
        'num_threads': 4,
        'verbose': -1
    }

    # Fit the model
    clf = lgb.train(params=lgb_params, train_set=dtrain, num_boost_round=600)

    # Get feature importances
    imp_df = pd.DataFrame()
    imp_df["feature"] = list(train_features)
    imp_df["importance_gain"] = clf.feature_importance(importance_type='gain')
    imp_df["importance_split"] = clf.feature_importance(importance_type='split')
    imp_df['trn_score'] = roc_auc_score(y, clf.predict(data[train_features]))
    return imp_df


def score_feature_selection(df=None, train_features=None, target=None):
    # Fit LightGBM
    dtrain = lgb.Dataset(df[train_features], target, free_raw_data=False, silent=True)
    lgb_params = {
        'objective': 'binary',
        'boosting_type': 'gbdt',
        'learning_rate': .1,
        'subsample': 0.8,
        'colsample_bytree': 0.8,
        'num_leaves': 31,
        'max_depth': -1,
        'seed': 13,
        'num_threads': 4,
        'min_split_gain': .00001,
        'reg_alpha': .00001,
        'reg_lambda': .00001,
        'metric': 'auc'
    }

    # Fit the model
    hist = lgb.cv(
        params=lgb_params,
        train_set=dtrain,
        num_boost_round=2000,
        nfold=5,
        stratified=True,
        shuffle=True,
        early_stopping_rounds=50,
        verbose_eval=500,
        seed=47
    )
    # Return the last mean / std values
    return hist['auc-mean'][-1], hist['auc-stdv'][-1]


@click.command()
@click.option('--config_file', type=str, default='./configs/lgbm_0.json')
def main(config_file):
    np.random.seed(47)
    conf = read_config(config_file)
    print("config:")
    pprint(conf)

    data, _ = get_train_test(conf)

    with timer("calc actual importance"):
        if os.path.exists("misc/actual_imp_df.pkl"):
            actual_imp_df = pd.read_pickle("misc/actual_imp_df.pkl")
        else:
            actual_imp_df = get_feature_importances(data=data, shuffle=False)
            actual_imp_df.to_pickle("misc/actual_imp_df.pkl")
        print(actual_imp_df.head())

    with timer("calc null importance"):
        nb_runs = 100
        if os.path.exists(f"misc/null_imp_df_run{nb_runs}time.pkl"):
            null_imp_df = pd.read_pickle(f"misc/null_imp_df_run{nb_runs}time.pkl")
        else:
            null_imp_df =
pd.DataFrame()
pandas.DataFrame
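For reference, the prompt above stops where the null-importance frame is created. Below is a minimal sketch of how the shuffled-target runs are commonly accumulated and scored against the actual importances, assuming the get_feature_importances helper from the script; the quantile-based gain score is a common convention, not taken from this script.

import numpy as np
import pandas as pd

def build_null_importances(data, nb_runs=100, get_feature_importances=None):
    # Sketch (assumption): repeat the importance calculation with a shuffled target
    # to build a null distribution per feature.
    runs = []
    for run in range(nb_runs):
        imp_df = get_feature_importances(data=data, shuffle=True)
        imp_df["run"] = run + 1
        runs.append(imp_df)
    return pd.concat(runs, axis=0)

def score_features(actual_imp_df, null_imp_df):
    # Sketch (assumption): score each feature by how far its actual gain sits above
    # the 75th percentile of its null-importance gains.
    scores = []
    for feature in actual_imp_df["feature"].unique():
        null_gain = null_imp_df.loc[null_imp_df["feature"] == feature, "importance_gain"].values
        actual_gain = actual_imp_df.loc[actual_imp_df["feature"] == feature, "importance_gain"].mean()
        gain_score = np.log(1e-10 + actual_gain / (1 + np.percentile(null_gain, 75)))
        scores.append((feature, gain_score))
    return pd.DataFrame(scores, columns=["feature", "gain_score"])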
# # Copyright 2013 Quantopian, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import division import collections from datetime import ( datetime, timedelta, ) import logging import operator import unittest from nose_parameterized import parameterized import nose.tools as nt import pytz import itertools import pandas as pd import numpy as np from six.moves import range, zip import zipline.utils.factory as factory import zipline.finance.performance as perf from zipline.finance.performance import position_tracker from zipline.finance.slippage import Transaction, create_transaction import zipline.utils.math_utils as zp_math from zipline.gens.composites import date_sorted_sources from zipline.finance.trading import SimulationParameters from zipline.finance.blotter import Order from zipline.finance.commission import PerShare, PerTrade, PerDollar from zipline.finance.trading import TradingEnvironment from zipline.utils.factory import create_simulation_parameters from zipline.utils.serialization_utils import ( loads_with_persistent_ids, dumps_with_persistent_ids ) import zipline.protocol as zp from zipline.protocol import Event, DATASOURCE_TYPE from zipline.sources.data_frame_source import DataPanelSource logger = logging.getLogger('Test Perf Tracking') onesec = timedelta(seconds=1) oneday = timedelta(days=1) tradingday = timedelta(hours=6, minutes=30) # nose.tools changed name in python 3 if not hasattr(nt, 'assert_count_equal'): nt.assert_count_equal = nt.assert_items_equal def check_perf_period(pp, gross_leverage, net_leverage, long_exposure, longs_count, short_exposure, shorts_count): perf_data = pp.to_dict() np.testing.assert_allclose( gross_leverage, perf_data['gross_leverage'], rtol=1e-3) np.testing.assert_allclose( net_leverage, perf_data['net_leverage'], rtol=1e-3) np.testing.assert_allclose( long_exposure, perf_data['long_exposure'], rtol=1e-3) np.testing.assert_allclose( longs_count, perf_data['longs_count'], rtol=1e-3) np.testing.assert_allclose( short_exposure, perf_data['short_exposure'], rtol=1e-3) np.testing.assert_allclose( shorts_count, perf_data['shorts_count'], rtol=1e-3) def check_account(account, settled_cash, equity_with_loan, total_positions_value, regt_equity, available_funds, excess_liquidity, cushion, leverage, net_leverage, net_liquidation): # this is a long only portfolio that is only partially invested # so net and gross leverage are equal. 
np.testing.assert_allclose(settled_cash, account['settled_cash'], rtol=1e-3) np.testing.assert_allclose(equity_with_loan, account['equity_with_loan'], rtol=1e-3) np.testing.assert_allclose(total_positions_value, account['total_positions_value'], rtol=1e-3) np.testing.assert_allclose(regt_equity, account['regt_equity'], rtol=1e-3) np.testing.assert_allclose(available_funds, account['available_funds'], rtol=1e-3) np.testing.assert_allclose(excess_liquidity, account['excess_liquidity'], rtol=1e-3) np.testing.assert_allclose(cushion, account['cushion'], rtol=1e-3) np.testing.assert_allclose(leverage, account['leverage'], rtol=1e-3) np.testing.assert_allclose(net_leverage, account['net_leverage'], rtol=1e-3) np.testing.assert_allclose(net_liquidation, account['net_liquidation'], rtol=1e-3) def create_txn(trade_event, price, amount): """ Create a fake transaction to be filled and processed prior to the execution of a given trade event. """ mock_order = Order(trade_event.dt, trade_event.sid, amount, id=None) return create_transaction(trade_event, mock_order, price, amount) def benchmark_events_in_range(sim_params, env): return [ Event({'dt': dt, 'returns': ret, 'type': zp.DATASOURCE_TYPE.BENCHMARK, # We explicitly rely on the behavior that benchmarks sort before # any other events. 'source_id': '1Abenchmarks'}) for dt, ret in env.benchmark_returns.iteritems() if dt.date() >= sim_params.period_start.date() and dt.date() <= sim_params.period_end.date() ] def calculate_results(sim_params, env, benchmark_events, trade_events, dividend_events=None, splits=None, txns=None): """ Run the given events through a stripped down version of the loop in AlgorithmSimulator.transform. IMPORTANT NOTE FOR TEST WRITERS/READERS: This loop has some wonky logic for the order of event processing for datasource types. This exists mostly to accomodate legacy tests accomodate existing tests that were making assumptions about how events would be sorted. In particular: - Dividends passed for a given date are processed PRIOR to any events for that date. - Splits passed for a given date are process AFTER any events for that date. Tests that use this helper should not be considered useful guarantees of the behavior of AlgorithmSimulator on a stream containing the same events unless the subgroups have been explicitly re-sorted in this way. """ txns = txns or [] splits = splits or [] perf_tracker = perf.PerformanceTracker(sim_params, env) if dividend_events is not None: dividend_frame = pd.DataFrame( [ event.to_series(index=zp.DIVIDEND_FIELDS) for event in dividend_events ], ) perf_tracker.update_dividends(dividend_frame) # Raw trades trade_events = sorted(trade_events, key=lambda ev: (ev.dt, ev.source_id)) # Add a benchmark event for each date. trades_plus_bm = date_sorted_sources(trade_events, benchmark_events) # Filter out benchmark events that are later than the last trade date. filtered_trades_plus_bm = (filt_event for filt_event in trades_plus_bm if filt_event.dt <= trade_events[-1].dt) grouped_trades_plus_bm = itertools.groupby(filtered_trades_plus_bm, lambda x: x.dt) results = [] bm_updated = False for date, group in grouped_trades_plus_bm: for txn in filter(lambda txn: txn.dt == date, txns): # Process txns for this date. 
perf_tracker.process_transaction(txn) for event in group: if event.type == zp.DATASOURCE_TYPE.TRADE: perf_tracker.process_trade(event) elif event.type == zp.DATASOURCE_TYPE.DIVIDEND: perf_tracker.process_dividend(event) elif event.type == zp.DATASOURCE_TYPE.BENCHMARK: perf_tracker.process_benchmark(event) bm_updated = True elif event.type == zp.DATASOURCE_TYPE.COMMISSION: perf_tracker.process_commission(event) for split in filter(lambda split: split.dt == date, splits): # Process splits for this date. perf_tracker.process_split(split) if bm_updated: msg = perf_tracker.handle_market_close_daily() msg['account'] = perf_tracker.get_account(True) results.append(msg) bm_updated = False return results def check_perf_tracker_serialization(perf_tracker): scalar_keys = [ 'emission_rate', 'txn_count', 'market_open', 'last_close', '_dividend_count', 'period_start', 'day_count', 'capital_base', 'market_close', 'saved_dt', 'period_end', 'total_days', ] p_string = dumps_with_persistent_ids(perf_tracker) test = loads_with_persistent_ids(p_string, env=perf_tracker.env) for k in scalar_keys: nt.assert_equal(getattr(test, k), getattr(perf_tracker, k), k) for period in test.perf_periods: nt.assert_true(hasattr(period, '_position_tracker')) class TestSplitPerformance(unittest.TestCase): def setUp(self): self.env = TradingEnvironment() self.env.write_data(equities_identifiers=[1]) self.sim_params = create_simulation_parameters(num_days=2) # start with $10,000 self.sim_params.capital_base = 10e3 self.benchmark_events = benchmark_events_in_range(self.sim_params, self.env) def test_split_long_position(self): events = factory.create_trade_history( 1, [20, 20], [100, 100], oneday, self.sim_params, env=self.env ) # set up a long position in sid 1 # 100 shares at $20 apiece = $2000 position txns = [create_txn(events[0], 20, 100)] # set up a split with ratio 3 occurring at the start of the second # day. splits = [ factory.create_split( 1, 3, events[1].dt, ), ] results = calculate_results(self.sim_params, self.env, self.benchmark_events, events, txns=txns, splits=splits) # should have 33 shares (at $60 apiece) and $20 in cash self.assertEqual(2, len(results)) latest_positions = results[1]['daily_perf']['positions'] self.assertEqual(1, len(latest_positions)) # check the last position to make sure it's been updated position = latest_positions[0] self.assertEqual(1, position['sid']) self.assertEqual(33, position['amount']) self.assertEqual(60, position['cost_basis']) self.assertEqual(60, position['last_sale_price']) # since we started with $10000, and we spent $2000 on the # position, but then got $20 back, we should have $8020 # (or close to it) in cash. # we won't get exactly 8020 because sometimes a split is # denoted as a ratio like 0.3333, and we lose some digits # of precision. thus, make sure we're pretty close. daily_perf = results[1]['daily_perf'] self.assertTrue( zp_math.tolerant_equals(8020, daily_perf['ending_cash'], 1)) # Validate that the account attributes were updated. account = results[1]['account'] self.assertEqual(float('inf'), account['day_trades_remaining']) # this is a long only portfolio that is only partially invested # so net and gross leverage are equal. 
np.testing.assert_allclose(0.198, account['leverage'], rtol=1e-3) np.testing.assert_allclose(0.198, account['net_leverage'], rtol=1e-3) np.testing.assert_allclose(8020, account['regt_equity'], rtol=1e-3) self.assertEqual(float('inf'), account['regt_margin']) np.testing.assert_allclose(8020, account['available_funds'], rtol=1e-3) self.assertEqual(0, account['maintenance_margin_requirement']) np.testing.assert_allclose(10000, account['equity_with_loan'], rtol=1e-3) self.assertEqual(float('inf'), account['buying_power']) self.assertEqual(0, account['initial_margin_requirement']) np.testing.assert_allclose(8020, account['excess_liquidity'], rtol=1e-3) np.testing.assert_allclose(8020, account['settled_cash'], rtol=1e-3) np.testing.assert_allclose(10000, account['net_liquidation'], rtol=1e-3) np.testing.assert_allclose(0.802, account['cushion'], rtol=1e-3) np.testing.assert_allclose(1980, account['total_positions_value'], rtol=1e-3) self.assertEqual(0, account['accrued_interest']) for i, result in enumerate(results): for perf_kind in ('daily_perf', 'cumulative_perf'): perf_result = result[perf_kind] # prices aren't changing, so pnl and returns should be 0.0 self.assertEqual(0.0, perf_result['pnl'], "day %s %s pnl %s instead of 0.0" % (i, perf_kind, perf_result['pnl'])) self.assertEqual(0.0, perf_result['returns'], "day %s %s returns %s instead of 0.0" % (i, perf_kind, perf_result['returns'])) class TestCommissionEvents(unittest.TestCase): def setUp(self): self.env = TradingEnvironment() self.env.write_data( equities_identifiers=[0, 1, 133] ) self.sim_params = create_simulation_parameters(num_days=5) logger.info("sim_params: %s" % self.sim_params) self.sim_params.capital_base = 10e3 self.benchmark_events = benchmark_events_in_range(self.sim_params, self.env) def test_commission_event(self): events = factory.create_trade_history( 1, [10, 10, 10, 10, 10], [100, 100, 100, 100, 100], oneday, self.sim_params, env=self.env ) # Test commission models and validate result # Expected commission amounts: # PerShare commission: 1.00, 1.00, 1.50 = $3.50 # PerTrade commission: 5.00, 5.00, 5.00 = $15.00 # PerDollar commission: 1.50, 3.00, 4.50 = $9.00 # Total commission = $3.50 + $15.00 + $9.00 = $27.50 # Create 3 transactions: 50, 100, 150 shares traded @ $20 transactions = [create_txn(events[0], 20, i) for i in [50, 100, 150]] # Create commission models and validate that produce expected # commissions. models = [PerShare(cost=0.01, min_trade_cost=1.00), PerTrade(cost=5.00), PerDollar(cost=0.0015)] expected_results = [3.50, 15.0, 9.0] for model, expected in zip(models, expected_results): total_commission = 0 for trade in transactions: total_commission += model.calculate(trade)[1] self.assertEqual(total_commission, expected) # Verify that commission events are handled correctly by # PerformanceTracker. cash_adj_dt = events[0].dt cash_adjustment = factory.create_commission(1, 300.0, cash_adj_dt) events.append(cash_adjustment) # Insert a purchase order. txns = [create_txn(events[0], 20, 1)] results = calculate_results(self.sim_params, self.env, self.benchmark_events, events, txns=txns) # Validate that we lost 320 dollars from our cash pool. self.assertEqual(results[-1]['cumulative_perf']['ending_cash'], 9680) # Validate that the cost basis of our position changed. self.assertEqual(results[-1]['daily_perf']['positions'] [0]['cost_basis'], 320.0) # Validate that the account attributes were updated. 
account = results[1]['account'] self.assertEqual(float('inf'), account['day_trades_remaining']) np.testing.assert_allclose(0.001, account['leverage'], rtol=1e-3, atol=1e-4) np.testing.assert_allclose(9680, account['regt_equity'], rtol=1e-3) self.assertEqual(float('inf'), account['regt_margin']) np.testing.assert_allclose(9680, account['available_funds'], rtol=1e-3) self.assertEqual(0, account['maintenance_margin_requirement']) np.testing.assert_allclose(9690, account['equity_with_loan'], rtol=1e-3) self.assertEqual(float('inf'), account['buying_power']) self.assertEqual(0, account['initial_margin_requirement']) np.testing.assert_allclose(9680, account['excess_liquidity'], rtol=1e-3) np.testing.assert_allclose(9680, account['settled_cash'], rtol=1e-3) np.testing.assert_allclose(9690, account['net_liquidation'], rtol=1e-3) np.testing.assert_allclose(0.999, account['cushion'], rtol=1e-3) np.testing.assert_allclose(10, account['total_positions_value'], rtol=1e-3) self.assertEqual(0, account['accrued_interest']) def test_commission_zero_position(self): """ Ensure no div-by-zero errors. """ events = factory.create_trade_history( 1, [10, 10, 10, 10, 10], [100, 100, 100, 100, 100], oneday, self.sim_params, env=self.env ) # Buy and sell the same sid so that we have a zero position by the # time of events[3]. txns = [ create_txn(events[0], 20, 1), create_txn(events[1], 20, -1), ] # Add a cash adjustment at the time of event[3]. cash_adj_dt = events[3].dt cash_adjustment = factory.create_commission(1, 300.0, cash_adj_dt) events.append(cash_adjustment) results = calculate_results(self.sim_params, self.env, self.benchmark_events, events, txns=txns) # Validate that we lost 300 dollars from our cash pool. self.assertEqual(results[-1]['cumulative_perf']['ending_cash'], 9700) def test_commission_no_position(self): """ Ensure no position-not-found or sid-not-found errors. """ events = factory.create_trade_history( 1, [10, 10, 10, 10, 10], [100, 100, 100, 100, 100], oneday, self.sim_params, env=self.env ) # Add a cash adjustment at the time of event[3]. cash_adj_dt = events[3].dt cash_adjustment = factory.create_commission(1, 300.0, cash_adj_dt) events.append(cash_adjustment) results = calculate_results(self.sim_params, self.env, self.benchmark_events, events) # Validate that we lost 300 dollars from our cash pool. 
self.assertEqual(results[-1]['cumulative_perf']['ending_cash'], 9700) class TestDividendPerformance(unittest.TestCase): @classmethod def setUpClass(cls): cls.env = TradingEnvironment() cls.env.write_data(equities_identifiers=[1, 2]) @classmethod def tearDownClass(cls): del cls.env def setUp(self): self.sim_params = create_simulation_parameters(num_days=6) self.sim_params.capital_base = 10e3 self.benchmark_events = benchmark_events_in_range(self.sim_params, self.env) def test_market_hours_calculations(self): # DST in US/Eastern began on Sunday March 14, 2010 before = datetime(2010, 3, 12, 14, 31, tzinfo=pytz.utc) after = factory.get_next_trading_dt( before, timedelta(days=1), self.env, ) self.assertEqual(after.hour, 13) def test_long_position_receives_dividend(self): # post some trades in the market events = factory.create_trade_history( 1, [10, 10, 10, 10, 10], [100, 100, 100, 100, 100], oneday, self.sim_params, env=self.env ) dividend = factory.create_dividend( 1, 10.00, # declared date, when the algorithm finds out about # the dividend events[0].dt, # ex_date, the date before which the algorithm must hold stock # to receive the dividend events[1].dt, # pay date, when the algorithm receives the dividend. events[2].dt ) # Simulate a transaction being filled prior to the ex_date. txns = [create_txn(events[0], 10.0, 100)] results = calculate_results( self.sim_params, self.env, self.benchmark_events, events, dividend_events=[dividend], txns=txns, ) self.assertEqual(len(results), 5) cumulative_returns = \ [event['cumulative_perf']['returns'] for event in results] self.assertEqual(cumulative_returns, [0.0, 0.0, 0.1, 0.1, 0.1]) daily_returns = [event['daily_perf']['returns'] for event in results] self.assertEqual(daily_returns, [0.0, 0.0, 0.10, 0.0, 0.0]) cash_flows = [event['daily_perf']['capital_used'] for event in results] self.assertEqual(cash_flows, [-1000, 0, 1000, 0, 0]) cumulative_cash_flows = \ [event['cumulative_perf']['capital_used'] for event in results] self.assertEqual(cumulative_cash_flows, [-1000, -1000, 0, 0, 0]) cash_pos = \ [event['cumulative_perf']['ending_cash'] for event in results] self.assertEqual(cash_pos, [9000, 9000, 10000, 10000, 10000]) def test_long_position_receives_stock_dividend(self): # post some trades in the market events = [] for sid in (1, 2): events.extend( factory.create_trade_history( sid, [10, 10, 10, 10, 10], [100, 100, 100, 100, 100], oneday, self.sim_params, env=self.env) ) dividend = factory.create_stock_dividend( 1, payment_sid=2, ratio=2, # declared date, when the algorithm finds out about # the dividend declared_date=events[0].dt, # ex_date, the date before which the algorithm must hold stock # to receive the dividend ex_date=events[1].dt, # pay date, when the algorithm receives the dividend. 
pay_date=events[2].dt ) txns = [create_txn(events[0], 10.0, 100)] results = calculate_results( self.sim_params, self.env, self.benchmark_events, events, dividend_events=[dividend], txns=txns, ) self.assertEqual(len(results), 5) cumulative_returns = \ [event['cumulative_perf']['returns'] for event in results] self.assertEqual(cumulative_returns, [0.0, 0.0, 0.2, 0.2, 0.2]) daily_returns = [event['daily_perf']['returns'] for event in results] self.assertEqual(daily_returns, [0.0, 0.0, 0.2, 0.0, 0.0]) cash_flows = [event['daily_perf']['capital_used'] for event in results] self.assertEqual(cash_flows, [-1000, 0, 0, 0, 0]) cumulative_cash_flows = \ [event['cumulative_perf']['capital_used'] for event in results] self.assertEqual(cumulative_cash_flows, [-1000] * 5) cash_pos = \ [event['cumulative_perf']['ending_cash'] for event in results] self.assertEqual(cash_pos, [9000] * 5) def test_long_position_purchased_on_ex_date_receives_no_dividend(self): # post some trades in the market events = factory.create_trade_history( 1, [10, 10, 10, 10, 10], [100, 100, 100, 100, 100], oneday, self.sim_params, env=self.env ) dividend = factory.create_dividend( 1, 10.00, events[0].dt, # Declared date events[1].dt, # Exclusion date events[2].dt # Pay date ) # Simulate a transaction being filled on the ex_date. txns = [create_txn(events[1], 10.0, 100)] results = calculate_results( self.sim_params, self.env, self.benchmark_events, events, dividend_events=[dividend], txns=txns, ) self.assertEqual(len(results), 5) cumulative_returns = \ [event['cumulative_perf']['returns'] for event in results] self.assertEqual(cumulative_returns, [0, 0, 0, 0, 0]) daily_returns = [event['daily_perf']['returns'] for event in results] self.assertEqual(daily_returns, [0, 0, 0, 0, 0]) cash_flows = [event['daily_perf']['capital_used'] for event in results] self.assertEqual(cash_flows, [0, -1000, 0, 0, 0]) cumulative_cash_flows = \ [event['cumulative_perf']['capital_used'] for event in results] self.assertEqual(cumulative_cash_flows, [0, -1000, -1000, -1000, -1000]) def test_selling_before_dividend_payment_still_gets_paid(self): # post some trades in the market events = factory.create_trade_history( 1, [10, 10, 10, 10, 10], [100, 100, 100, 100, 100], oneday, self.sim_params, env=self.env ) dividend = factory.create_dividend( 1, 10.00, events[0].dt, # Declared date events[1].dt, # Exclusion date events[3].dt # Pay date ) buy_txn = create_txn(events[0], 10.0, 100) sell_txn = create_txn(events[2], 10.0, -100) txns = [buy_txn, sell_txn] results = calculate_results( self.sim_params, self.env, self.benchmark_events, events, dividend_events=[dividend], txns=txns, ) self.assertEqual(len(results), 5) cumulative_returns = \ [event['cumulative_perf']['returns'] for event in results] self.assertEqual(cumulative_returns, [0, 0, 0, 0.1, 0.1]) daily_returns = [event['daily_perf']['returns'] for event in results] self.assertEqual(daily_returns, [0, 0, 0, 0.1, 0]) cash_flows = [event['daily_perf']['capital_used'] for event in results] self.assertEqual(cash_flows, [-1000, 0, 1000, 1000, 0]) cumulative_cash_flows = \ [event['cumulative_perf']['capital_used'] for event in results] self.assertEqual(cumulative_cash_flows, [-1000, -1000, 0, 1000, 1000]) def test_buy_and_sell_before_ex(self): # post some trades in the market events = factory.create_trade_history( 1, [10, 10, 10, 10, 10, 10], [100, 100, 100, 100, 100, 100], oneday, self.sim_params, env=self.env ) dividend = factory.create_dividend( 1, 10.00, events[3].dt, events[4].dt, events[5].dt ) buy_txn = 
create_txn(events[1], 10.0, 100) sell_txn = create_txn(events[2], 10.0, -100) txns = [buy_txn, sell_txn] results = calculate_results( self.sim_params, self.env, self.benchmark_events, events, dividend_events=[dividend], txns=txns, ) self.assertEqual(len(results), 6) cumulative_returns = \ [event['cumulative_perf']['returns'] for event in results] self.assertEqual(cumulative_returns, [0, 0, 0, 0, 0, 0]) daily_returns = [event['daily_perf']['returns'] for event in results] self.assertEqual(daily_returns, [0, 0, 0, 0, 0, 0]) cash_flows = [event['daily_perf']['capital_used'] for event in results] self.assertEqual(cash_flows, [0, -1000, 1000, 0, 0, 0]) cumulative_cash_flows = \ [event['cumulative_perf']['capital_used'] for event in results] self.assertEqual(cumulative_cash_flows, [0, -1000, 0, 0, 0, 0]) def test_ending_before_pay_date(self): # post some trades in the market events = factory.create_trade_history( 1, [10, 10, 10, 10, 10], [100, 100, 100, 100, 100], oneday, self.sim_params, env=self.env ) pay_date = self.sim_params.first_open # find pay date that is much later. for i in range(30): pay_date = factory.get_next_trading_dt(pay_date, oneday, self.env) dividend = factory.create_dividend( 1, 10.00, events[0].dt, events[0].dt, pay_date ) txns = [create_txn(events[1], 10.0, 100)] results = calculate_results( self.sim_params, self.env, self.benchmark_events, events, dividend_events=[dividend], txns=txns, ) self.assertEqual(len(results), 5) cumulative_returns = \ [event['cumulative_perf']['returns'] for event in results] self.assertEqual(cumulative_returns, [0, 0, 0, 0.0, 0.0]) daily_returns = [event['daily_perf']['returns'] for event in results] self.assertEqual(daily_returns, [0, 0, 0, 0, 0]) cash_flows = [event['daily_perf']['capital_used'] for event in results] self.assertEqual(cash_flows, [0, -1000, 0, 0, 0]) cumulative_cash_flows = \ [event['cumulative_perf']['capital_used'] for event in results] self.assertEqual( cumulative_cash_flows, [0, -1000, -1000, -1000, -1000] ) def test_short_position_pays_dividend(self): # post some trades in the market events = factory.create_trade_history( 1, [10, 10, 10, 10, 10], [100, 100, 100, 100, 100], oneday, self.sim_params, env=self.env ) dividend = factory.create_dividend( 1, 10.00, # declare at open of test events[0].dt, # ex_date same as trade 2 events[2].dt, events[3].dt ) txns = [create_txn(events[1], 10.0, -100)] results = calculate_results( self.sim_params, self.env, self.benchmark_events, events, dividend_events=[dividend], txns=txns, ) self.assertEqual(len(results), 5) cumulative_returns = \ [event['cumulative_perf']['returns'] for event in results] self.assertEqual(cumulative_returns, [0.0, 0.0, 0.0, -0.1, -0.1]) daily_returns = [event['daily_perf']['returns'] for event in results] self.assertEqual(daily_returns, [0.0, 0.0, 0.0, -0.1, 0.0]) cash_flows = [event['daily_perf']['capital_used'] for event in results] self.assertEqual(cash_flows, [0, 1000, 0, -1000, 0]) cumulative_cash_flows = \ [event['cumulative_perf']['capital_used'] for event in results] self.assertEqual(cumulative_cash_flows, [0, 1000, 1000, 0, 0]) def test_no_position_receives_no_dividend(self): # post some trades in the market events = factory.create_trade_history( 1, [10, 10, 10, 10, 10], [100, 100, 100, 100, 100], oneday, self.sim_params, env=self.env ) dividend = factory.create_dividend( 1, 10.00, events[0].dt, events[1].dt, events[2].dt ) results = calculate_results( self.sim_params, self.env, self.benchmark_events, events, dividend_events=[dividend], ) 
self.assertEqual(len(results), 5) cumulative_returns = \ [event['cumulative_perf']['returns'] for event in results] self.assertEqual(cumulative_returns, [0.0, 0.0, 0.0, 0.0, 0.0]) daily_returns = [event['daily_perf']['returns'] for event in results] self.assertEqual(daily_returns, [0.0, 0.0, 0.0, 0.0, 0.0]) cash_flows = [event['daily_perf']['capital_used'] for event in results] self.assertEqual(cash_flows, [0, 0, 0, 0, 0]) cumulative_cash_flows = \ [event['cumulative_perf']['capital_used'] for event in results] self.assertEqual(cumulative_cash_flows, [0, 0, 0, 0, 0]) def test_no_dividend_at_simulation_end(self): # post some trades in the market events = factory.create_trade_history( 1, [10, 10, 10, 10, 10], [100, 100, 100, 100, 100], oneday, self.sim_params, env=self.env ) dividend = factory.create_dividend( 1, 10.00, # declared date, when the algorithm finds out about # the dividend events[-3].dt, # ex_date, the date before which the algorithm must hold stock # to receive the dividend events[-2].dt, # pay date, when the algorithm receives the dividend. # This pays out on the day after the last event self.env.next_trading_day(events[-1].dt) ) # Set the last day to be the last event self.sim_params.period_end = events[-1].dt self.sim_params.update_internal_from_env(self.env) # Simulate a transaction being filled prior to the ex_date. txns = [create_txn(events[0], 10.0, 100)] results = calculate_results( self.sim_params, self.env, self.benchmark_events, events, dividend_events=[dividend], txns=txns, ) self.assertEqual(len(results), 5) cumulative_returns = \ [event['cumulative_perf']['returns'] for event in results] self.assertEqual(cumulative_returns, [0.0, 0.0, 0.0, 0.0, 0.0]) daily_returns = [event['daily_perf']['returns'] for event in results] self.assertEqual(daily_returns, [0.0, 0.0, 0.0, 0.0, 0.0]) cash_flows = [event['daily_perf']['capital_used'] for event in results] self.assertEqual(cash_flows, [-1000, 0, 0, 0, 0]) cumulative_cash_flows = \ [event['cumulative_perf']['capital_used'] for event in results] self.assertEqual(cumulative_cash_flows, [-1000, -1000, -1000, -1000, -1000]) class TestDividendPerformanceHolidayStyle(TestDividendPerformance): # The holiday tests begins the simulation on the day # before Thanksgiving, so that the next trading day is # two days ahead. Any tests that hard code events # to be start + oneday will fail, since those events will # be skipped by the simulation. 
def setUp(self): self.dt = datetime(2003, 11, 30, tzinfo=pytz.utc) self.end_dt = datetime(2004, 11, 25, tzinfo=pytz.utc) self.sim_params = SimulationParameters( self.dt, self.end_dt, env=self.env) self.sim_params.capital_base = 10e3 self.benchmark_events = benchmark_events_in_range(self.sim_params, self.env) class TestPositionPerformance(unittest.TestCase): @classmethod def setUpClass(cls): cls.env = TradingEnvironment() cls.env.write_data(equities_identifiers=[1, 2]) @classmethod def tearDownClass(cls): del cls.env def setUp(self): self.sim_params = create_simulation_parameters(num_days=4) self.finder = self.env.asset_finder self.benchmark_events = benchmark_events_in_range(self.sim_params, self.env) def test_long_short_positions(self): """ start with $1000 buy 100 stock1 shares at $10 sell short 100 stock2 shares at $10 stock1 then goes down to $9 stock2 goes to $11 """ trades_1 = factory.create_trade_history( 1, [10, 10, 10, 9], [100, 100, 100, 100], onesec, self.sim_params, env=self.env ) trades_2 = factory.create_trade_history( 2, [10, 10, 10, 11], [100, 100, 100, 100], onesec, self.sim_params, env=self.env ) txn1 = create_txn(trades_1[1], 10.0, 100) txn2 = create_txn(trades_2[1], 10.0, -100) pt = perf.PositionTracker(self.env.asset_finder) pp = perf.PerformancePeriod(1000.0, self.env.asset_finder) pp.position_tracker = pt pt.execute_transaction(txn1) pp.handle_execution(txn1) pt.execute_transaction(txn2) pp.handle_execution(txn2) for trade in itertools.chain(trades_1[:-2], trades_2[:-2]): pt.update_last_sale(trade) pp.calculate_performance() check_perf_period( pp, gross_leverage=2.0, net_leverage=0.0, long_exposure=1000.0, longs_count=1, short_exposure=-1000.0, shorts_count=1) # Validate that the account attributes were updated. account = pp.as_account() check_account(account, settled_cash=1000.0, equity_with_loan=1000.0, total_positions_value=0.0, regt_equity=1000.0, available_funds=1000.0, excess_liquidity=1000.0, cushion=1.0, leverage=2.0, net_leverage=0.0, net_liquidation=1000.0) # now simulate stock1 going to $9 pt.update_last_sale(trades_1[-1]) # and stock2 going to $11 pt.update_last_sale(trades_2[-1]) pp.calculate_performance() # Validate that the account attributes were updated. account = pp.as_account() check_perf_period( pp, gross_leverage=2.5, net_leverage=-0.25, long_exposure=900.0, longs_count=1, short_exposure=-1100.0, shorts_count=1) check_account(account, settled_cash=1000.0, equity_with_loan=800.0, total_positions_value=-200.0, regt_equity=1000.0, available_funds=1000.0, excess_liquidity=1000.0, cushion=1.25, leverage=2.5, net_leverage=-0.25, net_liquidation=800.0) def test_levered_long_position(self): """ start with $1,000, then buy 1000 shares at $10. price goes to $11 """ # post some trades in the market trades = factory.create_trade_history( 1, [10, 10, 10, 11], [100, 100, 100, 100], onesec, self.sim_params, env=self.env ) txn = create_txn(trades[1], 10.0, 1000) pt = perf.PositionTracker(self.env.asset_finder) pp = perf.PerformancePeriod(1000.0, self.env.asset_finder) pp.position_tracker = pt pt.execute_transaction(txn) pp.handle_execution(txn) for trade in trades[:-2]: pt.update_last_sale(trade) pp.calculate_performance() check_perf_period( pp, gross_leverage=10.0, net_leverage=10.0, long_exposure=10000.0, longs_count=1, short_exposure=0.0, shorts_count=0) # Validate that the account attributes were updated. 
account = pp.as_account() check_account(account, settled_cash=-9000.0, equity_with_loan=1000.0, total_positions_value=10000.0, regt_equity=-9000.0, available_funds=-9000.0, excess_liquidity=-9000.0, cushion=-9.0, leverage=10.0, net_leverage=10.0, net_liquidation=1000.0) # now simulate a price jump to $11 pt.update_last_sale(trades[-1]) pp.calculate_performance() check_perf_period( pp, gross_leverage=5.5, net_leverage=5.5, long_exposure=11000.0, longs_count=1, short_exposure=0.0, shorts_count=0) # Validate that the account attributes were updated. account = pp.as_account() check_account(account, settled_cash=-9000.0, equity_with_loan=2000.0, total_positions_value=11000.0, regt_equity=-9000.0, available_funds=-9000.0, excess_liquidity=-9000.0, cushion=-4.5, leverage=5.5, net_leverage=5.5, net_liquidation=2000.0) def test_long_position(self): """ verify that the performance period calculates properly for a single buy transaction """ # post some trades in the market trades = factory.create_trade_history( 1, [10, 10, 10, 11], [100, 100, 100, 100], onesec, self.sim_params, env=self.env ) txn = create_txn(trades[1], 10.0, 100) pt = perf.PositionTracker(self.env.asset_finder) pp = perf.PerformancePeriod(1000.0, self.env.asset_finder) pp.position_tracker = pt pt.execute_transaction(txn) pp.handle_execution(txn) # This verifies that the last sale price is being correctly # set in the positions. If this is not the case then returns can # incorrectly show as sharply dipping if a transaction arrives # before a trade. This is caused by returns being based on holding # stocks with a last sale price of 0. self.assertEqual(pp.positions[1].last_sale_price, 10.0) for trade in trades: pt.update_last_sale(trade) pp.calculate_performance() self.assertEqual( pp.period_cash_flow, -1 * txn.price * txn.amount, "capital used should be equal to the opposite of the transaction \ cost of sole txn in test" ) self.assertEqual( len(pp.positions), 1, "should be just one position") self.assertEqual( pp.positions[1].sid, txn.sid, "position should be in security with id 1") self.assertEqual( pp.positions[1].amount, txn.amount, "should have a position of {sharecount} shares".format( sharecount=txn.amount ) ) self.assertEqual( pp.positions[1].cost_basis, txn.price, "should have a cost basis of 10" ) self.assertEqual( pp.positions[1].last_sale_price, trades[-1]['price'], "last sale should be same as last trade. \ expected {exp} actual {act}".format( exp=trades[-1]['price'], act=pp.positions[1].last_sale_price) ) self.assertEqual( pp.ending_value, 1100, "ending value should be price of last trade times number of \ shares in position" ) self.assertEqual(pp.pnl, 100, "gain of 1 on 100 shares should be 100") check_perf_period( pp, gross_leverage=1.0, net_leverage=1.0, long_exposure=1100.0, longs_count=1, short_exposure=0.0, shorts_count=0) # Validate that the account attributes were updated. 
account = pp.as_account() check_account(account, settled_cash=0.0, equity_with_loan=1100.0, total_positions_value=1100.0, regt_equity=0.0, available_funds=0.0, excess_liquidity=0.0, cushion=0.0, leverage=1.0, net_leverage=1.0, net_liquidation=1100.0) def test_short_position(self): """verify that the performance period calculates properly for a \ single short-sale transaction""" trades = factory.create_trade_history( 1, [10, 10, 10, 11, 10, 9], [100, 100, 100, 100, 100, 100], onesec, self.sim_params, env=self.env ) trades_1 = trades[:-2] txn = create_txn(trades[1], 10.0, -100) pt = perf.PositionTracker(self.env.asset_finder) pp = perf.PerformancePeriod(1000.0, self.env.asset_finder) pp.position_tracker = pt pt.execute_transaction(txn) pp.handle_execution(txn) for trade in trades_1: pt.update_last_sale(trade) pp.calculate_performance() self.assertEqual( pp.period_cash_flow, -1 * txn.price * txn.amount, "capital used should be equal to the opposite of the transaction\ cost of sole txn in test" ) self.assertEqual( len(pp.positions), 1, "should be just one position") self.assertEqual( pp.positions[1].sid, txn.sid, "position should be in security from the transaction" ) self.assertEqual( pp.positions[1].amount, -100, "should have a position of -100 shares" ) self.assertEqual( pp.positions[1].cost_basis, txn.price, "should have a cost basis of 10" ) self.assertEqual( pp.positions[1].last_sale_price, trades_1[-1]['price'], "last sale should be price of last trade" ) self.assertEqual( pp.ending_value, -1100, "ending value should be price of last trade times number of \ shares in position" ) self.assertEqual(pp.pnl, -100, "gain of 1 on 100 shares should be 100") # simulate additional trades, and ensure that the position value # reflects the new price trades_2 = trades[-2:] # simulate a rollover to a new period pp.rollover() for trade in trades_2: pt.update_last_sale(trade) pp.calculate_performance() self.assertEqual( pp.period_cash_flow, 0, "capital used should be zero, there were no transactions in \ performance period" ) self.assertEqual( len(pp.positions), 1, "should be just one position" ) self.assertEqual( pp.positions[1].sid, txn.sid, "position should be in security from the transaction" ) self.assertEqual( pp.positions[1].amount, -100, "should have a position of -100 shares" ) self.assertEqual( pp.positions[1].cost_basis, txn.price, "should have a cost basis of 10" ) self.assertEqual( pp.positions[1].last_sale_price, trades_2[-1].price, "last sale should be price of last trade" ) self.assertEqual( pp.ending_value, -900, "ending value should be price of last trade times number of \ shares in position") self.assertEqual( pp.pnl, 200, "drop of 2 on -100 shares should be 200" ) # now run a performance period encompassing the entire trade sample. 
ptTotal = perf.PositionTracker(self.env.asset_finder) ppTotal = perf.PerformancePeriod(1000.0, self.env.asset_finder) ppTotal.position_tracker = pt for trade in trades_1: ptTotal.update_last_sale(trade) ptTotal.execute_transaction(txn) ppTotal.handle_execution(txn) for trade in trades_2: ptTotal.update_last_sale(trade) ppTotal.calculate_performance() self.assertEqual( ppTotal.period_cash_flow, -1 * txn.price * txn.amount, "capital used should be equal to the opposite of the transaction \ cost of sole txn in test" ) self.assertEqual( len(ppTotal.positions), 1, "should be just one position" ) self.assertEqual( ppTotal.positions[1].sid, txn.sid, "position should be in security from the transaction" ) self.assertEqual( ppTotal.positions[1].amount, -100, "should have a position of -100 shares" ) self.assertEqual( ppTotal.positions[1].cost_basis, txn.price, "should have a cost basis of 10" ) self.assertEqual( ppTotal.positions[1].last_sale_price, trades_2[-1].price, "last sale should be price of last trade" ) self.assertEqual( ppTotal.ending_value, -900, "ending value should be price of last trade times number of \ shares in position") self.assertEqual( ppTotal.pnl, 100, "drop of 1 on -100 shares should be 100" ) check_perf_period( pp, gross_leverage=0.8181, net_leverage=-0.8181, long_exposure=0.0, longs_count=0, short_exposure=-900.0, shorts_count=1) # Validate that the account attributes. account = ppTotal.as_account() check_account(account, settled_cash=2000.0, equity_with_loan=1100.0, total_positions_value=-900.0, regt_equity=2000.0, available_funds=2000.0, excess_liquidity=2000.0, cushion=1.8181, leverage=0.8181, net_leverage=-0.8181, net_liquidation=1100.0) def test_covering_short(self): """verify performance where short is bought and covered, and shares \ trade after cover""" trades = factory.create_trade_history( 1, [10, 10, 10, 11, 9, 8, 7, 8, 9, 10], [100, 100, 100, 100, 100, 100, 100, 100, 100, 100], onesec, self.sim_params, env=self.env ) short_txn = create_txn( trades[1], 10.0, -100, ) cover_txn = create_txn(trades[6], 7.0, 100) pt = perf.PositionTracker(self.env.asset_finder) pp = perf.PerformancePeriod(1000.0, self.env.asset_finder) pp.position_tracker = pt pt.execute_transaction(short_txn) pp.handle_execution(short_txn) pt.execute_transaction(cover_txn) pp.handle_execution(cover_txn) for trade in trades: pt.update_last_sale(trade) pp.calculate_performance() short_txn_cost = short_txn.price * short_txn.amount cover_txn_cost = cover_txn.price * cover_txn.amount self.assertEqual( pp.period_cash_flow, -1 * short_txn_cost - cover_txn_cost, "capital used should be equal to the net transaction costs" ) self.assertEqual( len(pp.positions), 1, "should be just one position" ) self.assertEqual( pp.positions[1].sid, short_txn.sid, "position should be in security from the transaction" ) self.assertEqual( pp.positions[1].amount, 0, "should have a position of -100 shares" ) self.assertEqual( pp.positions[1].cost_basis, 0, "a covered position should have a cost basis of 0" ) self.assertEqual( pp.positions[1].last_sale_price, trades[-1].price, "last sale should be price of last trade" ) self.assertEqual( pp.ending_value, 0, "ending value should be price of last trade times number of \ shares in position" ) self.assertEqual( pp.pnl, 300, "gain of 1 on 100 shares should be 300" ) check_perf_period( pp, gross_leverage=0.0, net_leverage=0.0, long_exposure=0.0, longs_count=0, short_exposure=0.0, shorts_count=0) account = pp.as_account() check_account(account, settled_cash=1300.0, 
equity_with_loan=1300.0, total_positions_value=0.0, regt_equity=1300.0, available_funds=1300.0, excess_liquidity=1300.0, cushion=1.0, leverage=0.0, net_leverage=0.0, net_liquidation=1300.0) def test_cost_basis_calc(self): history_args = ( 1, [10, 11, 11, 12], [100, 100, 100, 100], onesec, self.sim_params, self.env ) trades = factory.create_trade_history(*history_args) transactions = factory.create_txn_history(*history_args) pt = perf.PositionTracker(self.env.asset_finder) pp = perf.PerformancePeriod(1000.0, self.env.asset_finder) pp.position_tracker = pt average_cost = 0 for i, txn in enumerate(transactions): pt.execute_transaction(txn) pp.handle_execution(txn) average_cost = (average_cost * i + txn.price) / (i + 1) self.assertEqual(pp.positions[1].cost_basis, average_cost) for trade in trades: pt.update_last_sale(trade) pp.calculate_performance() self.assertEqual( pp.positions[1].last_sale_price, trades[-1].price, "should have a last sale of 12, got {val}".format( val=pp.positions[1].last_sale_price) ) self.assertEqual( pp.positions[1].cost_basis, 11, "should have a cost basis of 11" ) self.assertEqual( pp.pnl, 400 ) down_tick = factory.create_trade( 1, 10.0, 100, trades[-1].dt + onesec) sale_txn = create_txn( down_tick, 10.0, -100) pp.rollover() pt.execute_transaction(sale_txn) pp.handle_execution(sale_txn) pt.update_last_sale(down_tick) pp.calculate_performance() self.assertEqual( pp.positions[1].last_sale_price, 10, "should have a last sale of 10, was {val}".format( val=pp.positions[1].last_sale_price) ) self.assertEqual( pp.positions[1].cost_basis, 11, "should have a cost basis of 11" ) self.assertEqual(pp.pnl, -800, "this period goes from +400 to -400") pt3 = perf.PositionTracker(self.env.asset_finder) pp3 = perf.PerformancePeriod(1000.0, self.env.asset_finder) pp3.position_tracker = pt3 average_cost = 0 for i, txn in enumerate(transactions): pt3.execute_transaction(txn) pp3.handle_execution(txn) average_cost = (average_cost * i + txn.price) / (i + 1) self.assertEqual(pp3.positions[1].cost_basis, average_cost) pt3.execute_transaction(sale_txn) pp3.handle_execution(sale_txn) trades.append(down_tick) for trade in trades: pt3.update_last_sale(trade) pp3.calculate_performance() self.assertEqual( pp3.positions[1].last_sale_price, 10, "should have a last sale of 10" ) self.assertEqual( pp3.positions[1].cost_basis, 11, "should have a cost basis of 11" ) self.assertEqual( pp3.pnl, -400, "should be -400 for all trades and transactions in period" ) def test_cost_basis_calc_close_pos(self): history_args = ( 1, [10, 9, 11, 8, 9, 12, 13, 14], [200, -100, -100, 100, -300, 100, 500, 400], onesec, self.sim_params, self.env ) cost_bases = [10, 10, 0, 8, 9, 9, 13, 13.5] trades = factory.create_trade_history(*history_args) transactions = factory.create_txn_history(*history_args) pt = perf.PositionTracker(self.env.asset_finder) pp = perf.PerformancePeriod(1000.0, self.env.asset_finder) pp.position_tracker = pt for txn, cb in zip(transactions, cost_bases): pt.execute_transaction(txn) pp.handle_execution(txn) self.assertEqual(pp.positions[1].cost_basis, cb) for trade in trades: pt.update_last_sale(trade) pp.calculate_performance() self.assertEqual(pp.positions[1].cost_basis, cost_bases[-1]) class TestPerformanceTracker(unittest.TestCase): @classmethod def setUpClass(cls): cls.env = TradingEnvironment() cls.env.write_data(equities_identifiers=[1, 2, 133, 134]) @classmethod def tearDownClass(cls): del cls.env NumDaysToDelete = collections.namedtuple( 'NumDaysToDelete', ('start', 'middle', 'end')) 
@parameterized.expand([ ("Don't delete any events", NumDaysToDelete(start=0, middle=0, end=0)), ("Delete first day of events", NumDaysToDelete(start=1, middle=0, end=0)), ("Delete first two days of events", NumDaysToDelete(start=2, middle=0, end=0)), ("Delete one day of events from the middle", NumDaysToDelete(start=0, middle=1, end=0)), ("Delete two events from the middle", NumDaysToDelete(start=0, middle=2, end=0)), ("Delete last day of events", NumDaysToDelete(start=0, middle=0, end=1)), ("Delete last two days of events", NumDaysToDelete(start=0, middle=0, end=2)), ("Delete all but one event.", NumDaysToDelete(start=2, middle=1, end=2)), ]) def test_tracker(self, parameter_comment, days_to_delete): """ @days_to_delete - configures which days in the data set we should remove, used for ensuring that we still return performance messages even when there is no data. """ # This date range covers Columbus day, # however Columbus day is not a market holiday # # October 2008 # Su Mo Tu We Th Fr Sa # 1 2 3 4 # 5 6 7 8 9 10 11 # 12 13 14 15 16 17 18 # 19 20 21 22 23 24 25 # 26 27 28 29 30 31 start_dt = datetime(year=2008, month=10, day=9, tzinfo=pytz.utc) end_dt = datetime(year=2008, month=10, day=16, tzinfo=pytz.utc) trade_count = 6 sid = 133 price = 10.1 price_list = [price] * trade_count volume = [100] * trade_count trade_time_increment = timedelta(days=1) sim_params = SimulationParameters( period_start=start_dt, period_end=end_dt, env=self.env, ) benchmark_events = benchmark_events_in_range(sim_params, self.env) trade_history = factory.create_trade_history( sid, price_list, volume, trade_time_increment, sim_params, source_id="factory1", env=self.env ) sid2 = 134 price2 = 12.12 price2_list = [price2] * trade_count trade_history2 = factory.create_trade_history( sid2, price2_list, volume, trade_time_increment, sim_params, source_id="factory2", env=self.env ) # 'middle' start of 3 depends on number of days == 7 middle = 3 # First delete from middle if days_to_delete.middle: del trade_history[middle:(middle + days_to_delete.middle)] del trade_history2[middle:(middle + days_to_delete.middle)] # Delete start if days_to_delete.start: del trade_history[:days_to_delete.start] del trade_history2[:days_to_delete.start] # Delete from end if days_to_delete.end: del trade_history[-days_to_delete.end:] del trade_history2[-days_to_delete.end:] sim_params.capital_base = 1000.0 sim_params.frame_index = [ 'sid', 'volume', 'dt', 'price', 'changed'] perf_tracker = perf.PerformanceTracker( sim_params, self.env ) events = date_sorted_sources(trade_history, trade_history2) events = [event for event in self.trades_with_txns(events, trade_history[0].dt)] # Extract events with transactions to use for verification. 
txns = [event for event in events if event.type == zp.DATASOURCE_TYPE.TRANSACTION] orders = [event for event in events if event.type == zp.DATASOURCE_TYPE.ORDER] all_events = date_sorted_sources(events, benchmark_events) filtered_events = [filt_event for filt_event in all_events if filt_event.dt <= end_dt] filtered_events.sort(key=lambda x: x.dt) grouped_events = itertools.groupby(filtered_events, lambda x: x.dt) perf_messages = [] for date, group in grouped_events: for event in group: if event.type == zp.DATASOURCE_TYPE.TRADE: perf_tracker.process_trade(event) elif event.type == zp.DATASOURCE_TYPE.ORDER: perf_tracker.process_order(event) elif event.type == zp.DATASOURCE_TYPE.BENCHMARK: perf_tracker.process_benchmark(event) elif event.type == zp.DATASOURCE_TYPE.TRANSACTION: perf_tracker.process_transaction(event) msg = perf_tracker.handle_market_close_daily() perf_messages.append(msg) self.assertEqual(perf_tracker.txn_count, len(txns)) self.assertEqual(perf_tracker.txn_count, len(orders)) positions = perf_tracker.cumulative_performance.positions if len(txns) == 0: self.assertNotIn(sid, positions) else: expected_size = len(txns) / 2 * -25 cumulative_pos = positions[sid] self.assertEqual(cumulative_pos.amount, expected_size) self.assertEqual(len(perf_messages), sim_params.days_in_period) check_perf_tracker_serialization(perf_tracker) def trades_with_txns(self, events, no_txn_dt): for event in events: # create a transaction for all but # first trade in each sid, to simulate None transaction if event.dt != no_txn_dt: order = Order( sid=event.sid, amount=-25, dt=event.dt ) order.source_id = 'MockOrderSource' yield order yield event txn = Transaction( sid=event.sid, amount=-25, dt=event.dt, price=10.0, commission=0.50, order_id=order.id ) txn.source_id = 'MockTransactionSource' yield txn else: yield event def test_minute_tracker(self): """ Tests minute performance tracking.""" start_dt = self.env.exchange_dt_in_utc(datetime(2013, 3, 1, 9, 31)) end_dt = self.env.exchange_dt_in_utc(datetime(2013, 3, 1, 16, 0)) foosid = 1 barsid = 2 sim_params = SimulationParameters( period_start=start_dt, period_end=end_dt, emission_rate='minute', env=self.env, ) tracker = perf.PerformanceTracker(sim_params, env=self.env) foo_event_1 = factory.create_trade(foosid, 10.0, 20, start_dt) order_event_1 = Order(sid=foo_event_1.sid, amount=-25, dt=foo_event_1.dt) bar_event_1 = factory.create_trade(barsid, 100.0, 200, start_dt) txn_event_1 = Transaction(sid=foo_event_1.sid, amount=-25, dt=foo_event_1.dt, price=10.0, commission=0.50, order_id=order_event_1.id) benchmark_event_1 = Event({ 'dt': start_dt, 'returns': 0.01, 'type': zp.DATASOURCE_TYPE.BENCHMARK }) foo_event_2 = factory.create_trade( foosid, 11.0, 20, start_dt + timedelta(minutes=1)) bar_event_2 = factory.create_trade( barsid, 11.0, 20, start_dt + timedelta(minutes=1)) benchmark_event_2 = Event({ 'dt': start_dt + timedelta(minutes=1), 'returns': 0.02, 'type': zp.DATASOURCE_TYPE.BENCHMARK }) events = [ foo_event_1, order_event_1, benchmark_event_1, txn_event_1, bar_event_1, foo_event_2, benchmark_event_2, bar_event_2, ] grouped_events = itertools.groupby( events, operator.attrgetter('dt')) messages = {} for date, group in grouped_events: tracker.set_date(date) for event in group: if event.type == zp.DATASOURCE_TYPE.TRADE: tracker.process_trade(event) elif event.type == zp.DATASOURCE_TYPE.BENCHMARK: tracker.process_benchmark(event) elif event.type == zp.DATASOURCE_TYPE.ORDER: tracker.process_order(event) elif event.type == zp.DATASOURCE_TYPE.TRANSACTION: 
tracker.process_transaction(event) msg, _ = tracker.handle_minute_close(date) messages[date] = msg self.assertEquals(2, len(messages)) msg_1 = messages[foo_event_1.dt] msg_2 = messages[foo_event_2.dt] self.assertEquals(1, len(msg_1['minute_perf']['transactions']), "The first message should contain one " "transaction.") # Check that transactions aren't emitted for previous events. self.assertEquals(0, len(msg_2['minute_perf']['transactions']), "The second message should have no " "transactions.") self.assertEquals(1, len(msg_1['minute_perf']['orders']), "The first message should contain one orders.") # Check that orders aren't emitted for previous events. self.assertEquals(0, len(msg_2['minute_perf']['orders']), "The second message should have no orders.") # Ensure that period_close moves through time. # Also, ensure that the period_closes are the expected dts. self.assertEquals(foo_event_1.dt, msg_1['minute_perf']['period_close']) self.assertEquals(foo_event_2.dt, msg_2['minute_perf']['period_close']) # In this test event1 transactions arrive on the first bar. # This leads to no returns as the price is constant. # Sharpe ratio cannot be computed and is None. # In the second bar we can start establishing a sharpe ratio. self.assertIsNone(msg_1['cumulative_risk_metrics']['sharpe']) self.assertIsNotNone(msg_2['cumulative_risk_metrics']['sharpe']) check_perf_tracker_serialization(tracker) def test_close_position_event(self): pt = perf.PositionTracker(asset_finder=self.env.asset_finder) dt = pd.Timestamp("1984/03/06 3:00PM") pos1 = perf.Position(1, amount=np.float64(120.0), last_sale_date=dt, last_sale_price=3.4) pos2 = perf.Position(2, amount=np.float64(-100.0), last_sale_date=dt, last_sale_price=3.4) pt.update_positions({1: pos1, 2: pos2}) event_type = DATASOURCE_TYPE.CLOSE_POSITION index = [dt + timedelta(days=1)] pan = pd.Panel({1: pd.DataFrame({'price': 1, 'volume': 0, 'type': event_type}, index=index), 2: pd.DataFrame({'price': 1, 'volume': 0, 'type': event_type}, index=index), 3: pd.DataFrame({'price': 1, 'volume': 0, 'type': event_type}, index=index)}) source = DataPanelSource(pan) for i, event in enumerate(source): txn = pt.maybe_create_close_position_transaction(event) if event.sid == 1: # Test owned long self.assertEqual(-120, txn.amount) elif event.sid == 2: # Test owned short self.assertEqual(100, txn.amount) elif event.sid == 3: # Test not-owned SID self.assertIsNone(txn) def test_handle_sid_removed_from_universe(self): # post some trades in the market sim_params = create_simulation_parameters(num_days=5) events = factory.create_trade_history( 1, [10, 10, 10, 10, 10], [100, 100, 100, 100, 100], oneday, sim_params, env=self.env ) # Create a tracker and a dividend perf_tracker = perf.PerformanceTracker(sim_params, env=self.env) dividend = factory.create_dividend( 1, 10.00, # declared date, when the algorithm finds out about # the dividend events[0].dt, # ex_date, the date before which the algorithm must hold stock # to receive the dividend events[1].dt, # pay date, when the algorithm receives the dividend. 
events[2].dt ) dividend_frame = pd.DataFrame( [dividend.to_series(index=zp.DIVIDEND_FIELDS)], ) perf_tracker.update_dividends(dividend_frame) # Ensure that the dividend is in the tracker self.assertIn(1, perf_tracker.dividend_frame['sid'].values) # Inform the tracker that sid 1 has been removed from the universe perf_tracker.handle_sid_removed_from_universe(1) # Ensure that the dividend for sid 1 has been removed from dividend # frame self.assertNotIn(1, perf_tracker.dividend_frame['sid'].values) def test_serialization(self): start_dt = datetime(year=2008, month=10, day=9, tzinfo=pytz.utc) end_dt = datetime(year=2008, month=10, day=16, tzinfo=pytz.utc) sim_params = SimulationParameters( period_start=start_dt, period_end=end_dt, env=self.env, ) perf_tracker = perf.PerformanceTracker( sim_params, env=self.env ) check_perf_tracker_serialization(perf_tracker) class TestPosition(unittest.TestCase): def setUp(self): pass def test_serialization(self): dt = pd.Timestamp("1984/03/06 3:00PM") pos = perf.Position(10, amount=np.float64(120.0), last_sale_date=dt, last_sale_price=3.4) p_string = dumps_with_persistent_ids(pos) test = loads_with_persistent_ids(p_string, env=None) nt.assert_dict_equal(test.__dict__, pos.__dict__) class TestPositionTracker(unittest.TestCase): @classmethod def setUpClass(cls): cls.env = TradingEnvironment() futures_metadata = {3: {'contract_multiplier': 1000}, 4: {'contract_multiplier': 1000}} cls.env.write_data(equities_identifiers=[1, 2], futures_data=futures_metadata) @classmethod def tearDownClass(cls): del cls.env def test_empty_positions(self): """ make sure all the empty position stats return a numeric 0 Originally this bug was due to np.dot([], []) returning np.bool_(False) """ pt = perf.PositionTracker(self.env.asset_finder) pos_stats = position_tracker.calc_position_stats(pt) stats = [ 'net_value', 'net_exposure', 'gross_value', 'gross_exposure', 'short_value', 'short_exposure', 'shorts_count', 'long_value', 'long_exposure', 'longs_count', ] for name in stats: val = getattr(pos_stats, name) self.assertEquals(val, 0) self.assertNotIsInstance(val, (bool, np.bool_)) def test_update_last_sale(self): pt = perf.PositionTracker(self.env.asset_finder) dt =
pd.Timestamp("1984/03/06 3:00PM")
pandas.Timestamp
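For reference, the completion above targets pandas.Timestamp with a loosely formatted string; a small standalone example of how that parses and how a timezone can be attached (plain pandas, independent of the zipline internals above):

import pandas as pd

dt = pd.Timestamp("1984/03/06 3:00PM")   # parses to Timestamp('1984-03-06 15:00:00')
dt_utc = dt.tz_localize("UTC")           # attach a timezone explicitly
later = dt_utc + pd.Timedelta(days=1)    # Timestamp arithmetic via Timedelta
print(dt, dt_utc, later)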
#from inspect import getmembers, isfunction
import pandas as pd
import numpy as np
from pandas import DataFrame as df
import glob

preppin_data_scripts = glob.glob("20*.py")

pandas_df = pd.DataFrame()
pandas_df['Function'] = dir(pd)
pandas_df['Package'] = 'Pandas'

dataframe_df = pd.DataFrame()
a = set(dir(df))
b = set(dir(np))
c = list(a - b)
print(c)
dataframe_df['Function'] = c
dataframe_df['Package'] = 'Pandas DataFrame'

numpy_df =
pd.DataFrame()
pandas.DataFrame
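For reference, the snippet above is cut off at numpy_df =; a minimal sketch, under the assumption that a NumPy catalog is built the same way and the per-package frames are then concatenated (the 'NumPy' label and the concat step are assumptions, not taken from the snippet):

import numpy as np
import pandas as pd

# Mirror the pattern from the snippet above for the NumPy package.
pandas_df = pd.DataFrame({'Function': dir(pd)})
pandas_df['Package'] = 'Pandas'

numpy_df = pd.DataFrame({'Function': dir(np)})
numpy_df['Package'] = 'NumPy'

# Assumption: stack the per-package listings into one catalog.
catalog = pd.concat([pandas_df, numpy_df], ignore_index=True)
print(catalog.groupby('Package')['Function'].size())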
''' This code will clean the OB datasets and combine all the cleaned data into one Dataset name: O-17-Mateus Bavaresco 1. two excel files for room 1 and room 2 2. each excel file has multiple sheets in it 3. extract different information from the excel file 4. store data in the templates ''' import os import glob import datetime import pandas as pd # specify the path data_path = "D:/yapan_office_D/Data/Annex-79-OB-Database/2021-05-28-1130-raw-data/Annex 79 Data Collection/O-17-Mateus Bavaresco/_yapan_processing/" template_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/OB Database Consolidation/Templates/' begin_time = datetime.datetime.now() ''' 1. read the two excel files into pandas and clean the data ''' # read the data from excel all in one combined_room1 = pd.ExcelFile(data_path + 'Room 01 modified.xlsx') combined_room2 = pd.ExcelFile(data_path + 'Room 02 modified.xlsx') # parse the data sheet_names1 = combined_room1.sheet_names # get the sheet names in the excel file sheet_names2 = combined_room2.sheet_names # get the sheet names in the excel file # filter out the desired data and combine them window1 = list(filter(lambda name: 'Wind-' in name, sheet_names1)) light1 = list(filter(lambda name: 'Light-' in name, sheet_names1)) ac1 = list(filter(lambda name: 'AC-' in name, sheet_names1)) indoor1 = list(filter(lambda name: 'Central' in name, sheet_names1)) outdoor1 = list(filter(lambda name: 'Outdoor' in name, sheet_names1)) window2 = list(filter(lambda name: 'Window' in name, sheet_names2)) light2 = list(filter(lambda name: 'Light-' in name, sheet_names2)) indoor2 = list(filter(lambda name: 'Central' in name, sheet_names2)) outdoor2 = list(filter(lambda name: 'Outdoor' in name, sheet_names2)) ''' 2. Data Processing''' # read templates into pandas template_window = pd.read_csv(template_path+'Window_Status.csv') template_light = pd.read_csv(template_path+'Ligthing_Status.csv') template_hvac = pd.read_csv(template_path+'HVAC_Measurement.csv') template_indoor = pd.read_csv(template_path+'Indoor_Measurement.csv') template_outdoor = pd.read_csv(template_path+'Outdoor_Measurement.csv') ''' 2.1 Window_Status.csv ''' # read and combine data by category and add IDs when combining window_combined = pd.DataFrame() # combine data from room 1 and assign room ID for index, name in enumerate(window1): temp_df = pd.read_excel(combined_room1, sheet_name=name) temp_df['Window_ID'] = index+1 temp_df['Room_ID'] = 1 window_combined = pd.concat([window_combined, temp_df], ignore_index=True) # print(index) # combine data from room 2 and assign room ID for index, name in enumerate(window2): temp_df = pd.read_excel(combined_room2, sheet_name=name) temp_df['Window_ID'] = index+1 temp_df['Room_ID'] = 2 window_combined = pd.concat([window_combined, temp_df], ignore_index=True) # print(index) # this column has mixed datetime and string data, convert all to datetime window_combined.DATE = pd.to_datetime(window_combined['DATE'], infer_datetime_format=True) # combine date and time columns together window_combined['Date_Time'] = window_combined['DATE'].astype(str) + ' ' + window_combined['TIME'].astype(str) # add date and time window_combined['Date_Time'] = pd.to_datetime(window_combined['Date_Time'], format="%Y-%m-%d %H:%M:%S") # convert to datetime window_combined = window_combined[['Date_Time', 'STATUS', 'Window_ID', 'Room_ID']] # re-order columns window_combined.columns = ['Date_Time', 'Window_Status', 'Window_ID', 'Room_ID'] # rename the column names window_combined = window_combined.replace(['OPEN', 'open', 
'CLOSED', 'Closed'], [1, 1, 0, 0], inplace=False) # convert window status to values window_combined['Window_Status'].unique() # check if all the text has been replaced # concat the combined data to the template template_window = pd.concat([template_window, window_combined], ignore_index=True) # assign data type to each columns # template_window.dtypes template_window['Window_Status_ID'] = '' template_window['Window_Status'] = template_window['Window_Status'].astype(int) template_window['Window_ID'] = template_window['Window_ID'].astype(int) template_window['Room_ID'] = template_window['Room_ID'].astype(int) # sort the dataframe # cannot sort by three columns by ascending, because of the Date_Time # template_window.sort_values(by=['Date_Time', 'Window_ID', 'Room_ID'], ascending=True) # check missing values, and sum missing value count by column print('Check missing values in : window_combined') print(template_window.isnull().sum()) # save # save Window_Status.csv template_window.to_csv(data_path+'Window_Status.csv ', index=False) ''' 2.2 Ligthing_Status.csv ''' # read and combine data by category and add IDs when combining light_combined = pd.DataFrame() # combine data from room 1 and assign room ID for index, name in enumerate(light1): temp_df = pd.read_excel(combined_room1, sheet_name=name) temp_df['Lighting_Zone_ID'] = index+1 temp_df['Room_ID'] = 1 light_combined = pd.concat([light_combined, temp_df], ignore_index=True) # print(index) # combine data from room 2 and assign room ID for index, name in enumerate(light2): temp_df = pd.read_excel(combined_room2, sheet_name=name) temp_df['Lighting_Zone_ID'] = index+1 temp_df['Room_ID'] = 2 light_combined = pd.concat([light_combined, temp_df], ignore_index=True) # print(index) # this column has mixed datetime and string data, convert all to datetime light_combined.DATE = pd.to_datetime(light_combined['DATE'], infer_datetime_format=True) # combine date and time columns together light_combined['Date_Time'] = light_combined['DATE'].astype(str) + ' ' + light_combined['TIME'].astype(str) # add date and time light_combined['Date_Time'] = pd.to_datetime(light_combined['Date_Time'], format="%Y-%m-%d %H:%M:%S") # convert to datetime light_combined = light_combined[['Date_Time', 'STATUS', 'Lighting_Zone_ID', 'Room_ID']] # re-order columns light_combined.columns = ['Date_Time', 'Ligthing_Status', 'Lighting_Zone_ID', 'Room_ID'] # rename the column names light_combined['Ligthing_Status'].unique() # check if all the text has been replaced light_combined = light_combined.replace(['ON', 'OFF'], [1, 0], inplace=False) # convert window status to values light_combined['Ligthing_Status'].unique() # check if all the text has been replaced # concat the combined data to the template template_light = pd.concat([template_light, light_combined], ignore_index=True) # assign data type to each columns # template_light.dtypes template_light['Lighting_Status_ID'] = '' template_light['Ligthing_Status'] = template_light['Ligthing_Status'].astype(int) template_light['Lighting_Zone_ID'] = template_light['Lighting_Zone_ID'].astype(int) template_light['Room_ID'] = template_light['Room_ID'].astype(int) # sort the dataframe # cannot sort by three columns by ascending, because of the Date_Time # template_light.sort_values(by=['Date_Time', 'Window_ID', 'Room_ID'], ascending=True) # check missing values, and sum missing value count by column print('Check missing values in : light_combined') print(template_light.isnull().sum()) # save # save Window_Status.csv 
template_light.to_csv(data_path+'Ligthing_Status.csv ', index=False) ''' 2.3 HVAC_Measurement.csv ''' # template_hvac; 'HVAC_Measurement.csv' # only room 1 has hvac measurement data # read and combine data by category and add IDs when combining hvac_combined = pd.DataFrame() # combine data from room 1 and assign room ID for index, name in enumerate(ac1): temp_df = pd.read_excel(combined_room1, sheet_name=name) temp_df['HVAC_Zone_ID'] = int(name[-1]) # ac 1,2,4; ac3 is missing temp_df['Room_ID'] = 1 hvac_combined = pd.concat([hvac_combined, temp_df], ignore_index=True) # print(index) # this column has mixed datetime and string data, convert all to datetime hvac_combined.DATE = pd.to_datetime(hvac_combined['DATE'], infer_datetime_format=True) # combine date and time columns together hvac_combined['Date_Time'] = hvac_combined['DATE'].astype(str) + ' ' + hvac_combined['TIME'].astype(str) # add date and time hvac_combined['Date_Time'] = pd.to_datetime(hvac_combined['Date_Time'], format="%Y-%m-%d %H:%M:%S") # convert to datetime hvac_combined = hvac_combined[['Date_Time', 'STATUS', 'HVAC_Zone_ID', 'Room_ID']] # re-order columns hvac_combined.columns = ['Date_Time', 'Cooling_Status', 'HVAC_Zone_ID', 'Room_ID'] # rename the column names hvac_combined['Cooling_Status'].unique() # check if all the text has been replaced hvac_combined = hvac_combined.replace(['ON', 'OFF'], [1, 0], inplace=False) # convert window status to values hvac_combined['Cooling_Status'].unique() # check if all the text has been replaced # concat the combined data to the template template_hvac = pd.concat([template_hvac, hvac_combined], ignore_index=True) # check missing values, and sum missing value count by column print('Check missing values in : hvac_combined') print(template_hvac.isnull().sum()) # no missing values in the combined raw data # assign data type to each columns # template_hvac.dtypes template_hvac = template_hvac.fillna('') template_hvac['Cooling_Status'] = template_hvac['Cooling_Status'].astype(int) template_hvac['HVAC_Zone_ID'] = template_hvac['HVAC_Zone_ID'].astype(int) template_hvac['Room_ID'] = template_hvac['Room_ID'].astype(int) # sort the dataframe # cannot sort by three columns by ascending, because of the Date_Time # template_hvac.sort_values(by=['Date_Time', 'Window_ID', 'Room_ID'], ascending=True) # check missing values, and sum missing value count by column print('Check missing values in : hvac_combined') print(template_hvac.isnull().sum()) # save # save Window_Status.csv template_hvac.to_csv(data_path+'HVAC_Measurement.csv ', index=False) ''' 2.4 Indoor_Measurement.csv ''' # template_indoor; 'Indoor_Measurement.csv' # read and combine data by category and add IDs when combining indoor_combined = pd.DataFrame() # combine data from room 1 and assign room ID for index, name in enumerate(indoor1): temp_df = pd.read_excel(combined_room1, sheet_name=name) temp_df.columns = ['DATE', 'TIME', 'Indoor_Temp', 'Indoor_RH'] # indoor 1 and indoor 2 have different column names temp_df['Room_ID'] = 1 indoor_combined = pd.concat([indoor_combined, temp_df], ignore_index=True) # print(index) ''' Room 1''' # indoor 1 TIME column has different format of timestamp # format time indoor_combined['DATE'] = pd.to_datetime(indoor_combined['DATE'], infer_datetime_format=True) indoor_combined.isnull().sum() # check null values indoor_combined.dropna(subset=["Indoor_Temp"], inplace=True) # drop rows have null values # check how many hours of data in one day indoor_combined['DATE'].value_counts() # most have 96 rows of data 
which is 15 minutes interval, 24 hours' data days = list(indoor_combined['DATE'].value_counts().index) # indoor_combined[indoor_combined['DATE'] == days[1]]['TIME'] # change the time to desired time format for day in days: print(f'Day: {day}') # process one day's data at one time time_one_day = indoor_combined[indoor_combined['DATE'] == day]['TIME'].copy() time_one_day.reset_index(drop=True, inplace=True) # check firt row of data, get the hour start_hour = int(time_one_day[0].split('h')[0]) # 2019-04-26 starts at afternoon # 2019-12-12 starts at afternoon # 2019-06-11 starts with 12 at the morning if start_hour != 12: for index, i in enumerate(time_one_day): time_row = i old_hour = time_row.split('h')[0] new_hour = str(int(old_hour)+12) # afternoon time time_one_day[index] = i.replace(old_hour+'h', new_hour+'h') # assign data back to the dataframe, using vlues of the series, avoid index matching problem indoor_combined.loc[indoor_combined['DATE'] == day, ['TIME']] = time_one_day.values else: # start_hour = 12 am_flag = 0 pm_flag = 0 # 0 is morning, 1 is afternoon for index, i in enumerate(time_one_day): time_row = i old_hour = time_row.split('h')[0] if not am_flag: if int(old_hour) == 12: # if it is early mooring new_hour = str(int(old_hour)-12) # early morning time time_one_day[index] = i.replace(old_hour, new_hour) else: # if it is am before noon am_flag = 1 elif not pm_flag: if int(old_hour) == 12: # noon, change pm_flag to 1 pm_flag = 1 elif pm_flag: # afternoon if int(old_hour) < 12: new_hour = str(int(old_hour)+12) # afternoon time time_one_day[index] = i.replace(old_hour+'h', new_hour+'h') # assign data back to the dataframe indoor_combined.loc[indoor_combined['DATE'] == day, ['TIME']] = time_one_day.values # this column has mixed datetime and string data, convert all to datetime # format time indoor_combined['TIME'] = indoor_combined['TIME'].str.replace('h', ':') indoor_combined['TIME'] = indoor_combined['TIME'].str.replace('min', ':') indoor_combined['TIME'] = indoor_combined['TIME'].str.replace('s', '') ''' Room 2''' # DATE TIME TEMPERATURE (°C) RELATIVE HUMIDITY (%) ILLUMINANCE - HORIZONTAL (LUX) ILLUMINANCE - VERTICAL (LUX) # combine data from room 2 and assign room ID for index, name in enumerate(indoor2): temp_df = pd.read_excel(combined_room2, sheet_name=name) temp_df.drop(['ILLUMINANCE - VERTICAL (LUX)'], axis=1, inplace=True) temp_df.columns = ['DATE', 'TIME', 'Indoor_Temp', 'Indoor_RH', 'Indoor_Illuminance'] # indoor 1 and indoor 2 have different column names temp_df['Room_ID'] = 2 indoor_combined = pd.concat([indoor_combined, temp_df], ignore_index=True) # print(index) # indoor_combined.columns # format time indoor_combined['DATE'] = pd.to_datetime(indoor_combined['DATE'], infer_datetime_format=True) indoor_combined.isnull().sum() # check null values # combine the date and time indoor_combined['Date_Time'] = indoor_combined['DATE'].dt.date.astype(str) + ' ' + indoor_combined['TIME'].astype(str) # add date and time indoor_combined['Date_Time'] = pd.to_datetime(indoor_combined['Date_Time'], format="%Y-%m-%d %H:%M:%S") # convert to datetime indoor_combined = indoor_combined[['Date_Time', 'Indoor_Temp', 'Indoor_RH', 'Indoor_Illuminance', 'Room_ID']] # re-order columns # indoor_combined.columns # concat the combined data to the template template_indoor = pd.concat([template_indoor, indoor_combined], ignore_index=True) # check missing values, and sum missing value count by column print('Check missing values in : indoor_combined') print(template_indoor.isnull().sum()) # no 
missing values in the combined raw data # assign data type to each columns template_indoor.dtypes template_indoor = template_indoor.fillna('') template_indoor['Indoor_Temp'] = template_indoor['Indoor_Temp'].astype(float) template_indoor['Indoor_RH'] = template_indoor['Indoor_RH'].astype(float) # template_indoor['Indoor_Illuminance'] = template_indoor['Indoor_Illuminance'].astype(float) # template_indoor['Room_ID'] = template_indoor['Room_ID'].astype(int) # check missing values, and sum missing value count by column print('Check missing values in : indoor_combined') print(template_indoor.isnull().sum()) # save # save Window_Status.csv template_indoor.to_csv(data_path+'Indoor_Measurement.csv ', index=False) ''' 2.5 Outdoor_Measurement.csv ''' # template_outdoor; 'Outdoor_Measurement.csv' outdoor_combined = pd.DataFrame() # combine data from room 1 and assign room ID for index, name in enumerate(outdoor1): temp_df = pd.read_excel(combined_room1, sheet_name=name) temp_df['Room_ID'] = 1 outdoor_combined = pd.concat([outdoor_combined, temp_df], ignore_index=True) # print(index) # combine data from room 2 and assign room ID for index, name in enumerate(outdoor2): temp_df = pd.read_excel(combined_room2, sheet_name=name) temp_df['Room_ID'] = 2 outdoor_combined = pd.concat([outdoor_combined, temp_df], ignore_index=True) # print(index) outdoor_combined.columns # this column has mixed datetime and string data, convert all to datetime outdoor_combined['DATE (MM/DD/YY)'] =
pd.to_datetime(outdoor_combined['DATE (MM/DD/YY)'], infer_datetime_format=True)
pandas.to_datetime
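The record above repeatedly combines separate DATE and TIME columns into one Date_Time column before renaming and reordering. A minimal sketch of that pattern, using a toy frame rather than the Room 01/02 workbooks (the column values here are illustrative only):

import pandas as pd

# Toy stand-in for one parsed Excel sheet; the real workbooks carry more columns.
df = pd.DataFrame({
    "DATE": ["2019-04-26", "2019-04-26"],
    "TIME": ["13:15:00", "13:30:00"],
    "STATUS": ["OPEN", "CLOSED"],
})

# Same recipe as the record: parse DATE, join it to the TIME text,
# then convert the combined string into a real datetime column.
df["DATE"] = pd.to_datetime(df["DATE"])
df["Date_Time"] = df["DATE"].dt.date.astype(str) + " " + df["TIME"].astype(str)
df["Date_Time"] = pd.to_datetime(df["Date_Time"], format="%Y-%m-%d %H:%M:%S")
print(df[["Date_Time", "STATUS"]])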
""" I/O functions of the aecg package: tools for annotated ECG HL7 XML files This module implements helper functions to parse and read annotated electrocardiogram (ECG) stored in XML files following HL7 specification. See authors, license and disclaimer at the top level directory of this project. """ # Imports ===================================================================== from typing import Dict, Tuple from lxml import etree from aecg import validate_xpath, new_validation_row, VALICOLS, \ TIME_CODES, SEQUENCE_CODES, \ Aecg, AecgLead, AecgAnnotationSet import copy import logging import pandas as pd import re import zipfile # Python logging ============================================================== logger = logging.getLogger(__name__) def parse_annotations(xml_filename: str, zip_filename: str, aecg_doc: etree._ElementTree, aecgannset: AecgAnnotationSet, path_prefix: str, annsset_xmlnode_path: str, valgroup: str = "RHYTHM", log_validation: bool = False) -> Tuple[ AecgAnnotationSet, pd.DataFrame]: """Parses `aecg_doc` XML document and extracts annotations Args: xml_filename (str): Filename of the aECG XML file. zip_filename (str): Filename of zip file containint the aECG XML file. If '', then xml file is not stored in a zip file. aecg_doc (etree._ElementTree): XML document of the aECG XML file. aecgannset (AecgAnnotationSet): Annotation set to which append found annotations. path_prefix (str): Prefix of xml path from which start searching for annotations. annsset_xmlnode_path (str): Path to xml node of the annotation set containing the annotations. valgroup (str, optional): Indicates whether to search annotations in rhythm or derived waveform. Defaults to "RHYTHM". log_validation (bool, optional): Indicates whether to maintain the validation results in `aecg.validatorResults`. Defaults to False. Returns: Tuple[AecgAnnotationSet, pd.DataFrame]: Annotation set updated with found annotations and dataframe with results of validation. 
""" anngrpid = 0 # Annotations stored within a beat beatnodes = aecg_doc.xpath(( path_prefix + "/component/annotation/code[@code=\'MDC_ECG_BEAT\']").replace( '/', '/ns:'), namespaces={'ns': 'urn:hl7-org:v3'}) beatnum = 0 valpd = pd.DataFrame() if len(beatnodes) > 0: logger.info( f'{xml_filename},{zip_filename},' f'{valgroup} {len(beatnodes)} annotated beats found') for beatnode in beatnodes: for rel_path in ["../component/annotation/" "code[contains(@code, \"MDC_ECG_\")]"]: annsnodes = beatnode.xpath(rel_path.replace('/', '/ns:'), namespaces={'ns': 'urn:hl7-org:v3'}) rel_path2 = "../value" for annsnode in annsnodes: ann = {"anngrpid": anngrpid, "beatnum": "", "code": "", "codetype": "", "wavecomponent": "", "wavecomponent2": "", "timecode": "", "value": "", "value_unit": "", "low": "", "low_unit": "", "high": "", "high_unit": "", "lead": ""} # Annotation code valrow2 = validate_xpath( annsnode, ".", "urn:hl7-org:v3", "code", new_validation_row(xml_filename, valgroup, "ANNSET_BEAT_ANNS"), failcat="WARNING") valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path if valrow2["VALIOUT"] == "PASSED": ann["code"] = valrow2["VALUE"] # Annotation type from top level value valrow2 = validate_xpath(annsnode, "../value", "urn:hl7-org:v3", "code", new_validation_row( xml_filename, valgroup, "ANNSET_BEAT_ANNS"), failcat="WARNING") valrow2["XPATH"] = annsset_xmlnode_path + "/value" if log_validation: valpd = valpd.append(pd.DataFrame( [valrow2], columns=VALICOLS), ignore_index=True) if valrow2["VALIOUT"] == "PASSED": ann["codetype"] = valrow2["VALUE"] # Annotations type valrow2 = validate_xpath( annsnode, rel_path2, "urn:hl7-org:v3", "code", new_validation_row(xml_filename, valgroup, "ANNSET_BEAT_ANNS"), failcat="WARNING") valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path + \ "/" + rel_path2 if valrow2["VALIOUT"] == "PASSED": ann["beatnum"] = beatnum ann["codetype"] = valrow2["VALUE"] if log_validation: valpd = valpd.append( pd.DataFrame([valrow2], columns=VALICOLS), ignore_index=True) subannsnodes = annsnode.xpath( rel_path.replace('/', '/ns:'), namespaces={'ns': 'urn:hl7-org:v3'}) if len(subannsnodes) == 0: subannsnodes = [annsnode] else: subannsnodes += [annsnode] # Exclude annotations reporting interval values only subannsnodes = [ sa for sa in subannsnodes if not sa.get("code").startswith("MDC_ECG_TIME_PD_")] for subannsnode in subannsnodes: # Annotations type valrow2 = validate_xpath(subannsnode, rel_path2, "urn:hl7-org:v3", "code", new_validation_row( xml_filename, valgroup, "ANNSET_BEAT_ANNS"), failcat="WARNING") valrow2["XPATH"] = annsset_xmlnode_path + "/" + \ rel_path + "/" + rel_path2 if valrow2["VALIOUT"] == "PASSED": ann["wavecomponent"] = valrow2["VALUE"] if log_validation: valpd = valpd.append( pd.DataFrame([valrow2], columns=VALICOLS), ignore_index=True) # Annotations value valrow2 = validate_xpath(subannsnode, rel_path2, "urn:hl7-org:v3", "value", new_validation_row( xml_filename, valgroup, "ANNSET_BEAT_ANNS"), failcat="WARNING") valrow2["XPATH"] = annsset_xmlnode_path + "/" + \ rel_path + "/" + rel_path2 if valrow2["VALIOUT"] == "PASSED": ann["value"] = valrow2["VALUE"] if log_validation: valpd = valpd.append( pd.DataFrame([valrow2], columns=VALICOLS), ignore_index=True) # Annotations value units valrow2 = validate_xpath(subannsnode, rel_path2, "urn:hl7-org:v3", "unit", new_validation_row( xml_filename, valgroup, "ANNSET_BEAT_ANNS"), failcat="WARNING") valrow2["XPATH"] = annsset_xmlnode_path + "/" + \ rel_path + "/" + rel_path2 if valrow2["VALIOUT"] == "PASSED": 
ann["value_unit"] = valrow2["VALUE"] if log_validation: valpd = valpd.append( pd.DataFrame([valrow2], columns=VALICOLS), ignore_index=True) # annotations info from supporting ROI rel_path3 = "../support/supportingROI/component/"\ "boundary/value" for n in ["", "low", "high"]: if n != "": rp = rel_path3 + "/" + n else: rp = rel_path3 valrow3 = validate_xpath( subannsnode, rp, "urn:hl7-org:v3", "value", new_validation_row(xml_filename, valgroup, "ANNSET_BEAT_ANNS"), failcat="WARNING") valrow3["XPATH"] = annsset_xmlnode_path + "/" + \ rel_path + "/" + rp if valrow3["VALIOUT"] == "PASSED": if n != "": ann[n] = valrow3["VALUE"] else: ann["value"] = valrow3["VALUE"] if log_validation: valpd = valpd.append( pd.DataFrame([valrow3], columns=VALICOLS), ignore_index=True) valrow3 = validate_xpath( subannsnode, rp, "urn:hl7-org:v3", "unit", new_validation_row(xml_filename, valgroup, "ANNSET_BEAT_ANNS"), failcat="WARNING") valrow3["XPATH"] = annsset_xmlnode_path + "/" + \ rel_path + "/" + rp if valrow3["VALIOUT"] == "PASSED": if n != "": ann[n + "_unit"] = valrow3["VALUE"] else: ann["value_unit"] = valrow3["VALUE"] if log_validation: valpd = valpd.append( pd.DataFrame([valrow3], columns=VALICOLS), ignore_index=True) # annotations time encoding, lead and other info used # by value and supporting ROI rel_path4 = "../support/supportingROI/component/"\ "boundary/code" roinodes = subannsnode.xpath( rel_path4.replace('/', '/ns:'), namespaces={'ns': 'urn:hl7-org:v3'}) for roinode in roinodes: valrow4 = validate_xpath( roinode, ".", "urn:hl7-org:v3", "code", new_validation_row(xml_filename, valgroup, "ANNSET_BEAT_ANNS"), failcat="WARNING") valrow4["XPATH"] = annsset_xmlnode_path + "/" + \ rel_path + "/" + rel_path4 if valrow4["VALIOUT"] == "PASSED": if valrow4["VALUE"] in ["TIME_ABSOLUTE", "TIME_RELATIVE"]: ann["timecode"] = valrow4["VALUE"] else: ann["lead"] = valrow4["VALUE"] if log_validation: valpd = valpd.append( pd.DataFrame([valrow4], columns=VALICOLS), ignore_index=True) aecgannset.anns.append(copy.deepcopy(ann)) else: # Annotations type valrow2 = validate_xpath(annsnode, ".", "urn:hl7-org:v3", "code", new_validation_row(xml_filename, valgroup, "ANNSET_BEAT_" "ANNS"), failcat="WARNING") valrow2["XPATH"] = annsset_xmlnode_path + "/" + rel_path +\ "/" + rel_path2 if valrow2["VALIOUT"] == "PASSED": ann["beatnum"] = beatnum ann["codetype"] = valrow2["VALUE"] if log_validation: valpd = valpd.append( pd.DataFrame([valrow2], columns=VALICOLS), ignore_index=True) # Annotations value valrow2 = validate_xpath(annsnode, rel_path2, "urn:hl7-org:v3", "value", new_validation_row( xml_filename, valgroup, "ANNSET_BEAT_ANNS"), failcat="WARNING") valrow2["XPATH"] = annsset_xmlnode_path + "/" + \ rel_path + "/" + rel_path2 if valrow2["VALIOUT"] == "PASSED": ann["value"] = valrow2["VALUE"] if log_validation: valpd = valpd.append( pd.DataFrame([valrow2], columns=VALICOLS), ignore_index=True) # Annotations value units valrow2 = validate_xpath(annsnode, rel_path2, "urn:hl7-org:v3", "unit", new_validation_row( xml_filename, valgroup, "ANNSET_BEAT_ANNS"), failcat="WARNING") valrow2["XPATH"] = annsset_xmlnode_path + "/" + \ rel_path + "/" + rel_path2 if valrow2["VALIOUT"] == "PASSED": ann["value_unit"] = valrow2["VALUE"] if log_validation: valpd = valpd.append( pd.DataFrame([valrow2], columns=VALICOLS), ignore_index=True) # annotations time encoding, lead and other info used # by value and supporting ROI rel_path4 = "../support/supportingROI/component/" \ "boundary/code" roinodes = annsnode.xpath( rel_path4.replace('/', 
'/ns:'), namespaces={'ns': 'urn:hl7-org:v3'}) for roinode in roinodes: valrow4 = validate_xpath(roinode, ".", "urn:hl7-org:v3", "code", new_validation_row( xml_filename, valgroup, "ANNSET_BEAT_ANNS"), failcat="WARNING") valrow4["XPATH"] = annsset_xmlnode_path + "/" + \ rel_path + "/" + rel_path4 if valrow4["VALIOUT"] == "PASSED": if valrow4["VALUE"] in ["TIME_ABSOLUTE", "TIME_RELATIVE"]: ann["timecode"] = valrow4["VALUE"] else: ann["lead"] = valrow4["VALUE"] if log_validation: valpd = valpd.append( pd.DataFrame([valrow4], columns=VALICOLS), ignore_index=True) aecgannset.anns.append(copy.deepcopy(ann)) else: if log_validation: valpd = valpd.append( pd.DataFrame([valrow2], columns=VALICOLS), ignore_index=True) anngrpid = anngrpid + 1 beatnum = beatnum + 1 if len(beatnodes) > 0: logger.info( f'{xml_filename},{zip_filename},' f'{valgroup} {beatnum} annotated beats and {anngrpid} ' f'annotations groups found') anngrpid_from_beats = anngrpid # Annotations stored without an associated beat for codetype_path in ["/component/annotation/code[" "(contains(@code, \"MDC_ECG_\") and" " not (@code=\'MDC_ECG_BEAT\'))]"]: annsnodes = aecg_doc.xpath( (path_prefix + codetype_path).replace('/', '/ns:'), namespaces={'ns': 'urn:hl7-org:v3'}) rel_path2 = "../value" for annsnode in annsnodes: ann = {"anngrpid": anngrpid, "beatnum": "", "code": "", "codetype": "", "wavecomponent": "", "wavecomponent2": "", "timecode": "", "value": "", "value_unit": "", "low": "", "low_unit": "", "high": "", "high_unit": "", "lead": ""} # Annotations code valrow2 = validate_xpath(annsnode, ".", "urn:hl7-org:v3", "code", new_validation_row(xml_filename, valgroup, "ANNSET_NOBEAT_ANNS"), failcat="WARNING") valrow2["XPATH"] = annsset_xmlnode_path if log_validation: valpd = valpd.append(pd.DataFrame([valrow2], columns=VALICOLS), ignore_index=True) if valrow2["VALIOUT"] == "PASSED": ann["code"] = valrow2["VALUE"] # Annotation type from top level value valrow2 = validate_xpath(annsnode, "../value", "urn:hl7-org:v3", "code", new_validation_row(xml_filename, valgroup, "ANNSET_NOBEAT_ANNS"), failcat="WARNING") valrow2["XPATH"] = annsset_xmlnode_path + "/value" if log_validation: valpd = valpd.append(pd.DataFrame([valrow2], columns=VALICOLS), ignore_index=True) if valrow2["VALIOUT"] == "PASSED": ann["codetype"] = valrow2["VALUE"] subannsnodes = annsnode.xpath( (".." + codetype_path).replace('/', '/ns:'), namespaces={'ns': 'urn:hl7-org:v3'}) if len(subannsnodes) == 0: subannsnodes = [annsnode] for subannsnode in subannsnodes: subsubannsnodes = subannsnode.xpath( (".." + codetype_path).replace('/', '/ns:'), namespaces={'ns': 'urn:hl7-org:v3'}) tmpnodes = [subannsnode] if len(subsubannsnodes) > 0: tmpnodes = tmpnodes + subsubannsnodes for subsubannsnode in tmpnodes: ann["wavecomponent"] = "" ann["wavecomponent2"] = "" ann["timecode"] = "" ann["value"] = "" ann["value_unit"] = "" ann["low"] = "" ann["low_unit"] = "" ann["high"] = "" ann["high_unit"] = "" roi_base = "../support/supportingROI/component/boundary" rel_path3 = roi_base + "/value" valrow2 = validate_xpath( subsubannsnode, ".", "urn:hl7-org:v3", "code", new_validation_row(xml_filename, valgroup, "ANNSET_NOBEAT_" "ANNS"), failcat="WARNING") valrow2["XPATH"] = annsset_xmlnode_path + "/.." 
+ \ codetype_path + "/code" if valrow2["VALIOUT"] == "PASSED": if not ann["codetype"].endswith("WAVE"): ann["codetype"] = valrow2["VALUE"] if log_validation: valpd = valpd.append( pd.DataFrame([valrow2], columns=VALICOLS), ignore_index=True) # Annotations type valrow2 = validate_xpath( subsubannsnode, rel_path2, "urn:hl7-org:v3", "code", new_validation_row(xml_filename, valgroup, "ANNSET_NOBEAT_" "ANNS"), failcat="WARNING") valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \ codetype_path + "/" + rel_path2 if valrow2["VALIOUT"] == "PASSED": ann["wavecomponent"] = valrow2["VALUE"] # if ann["wavecomponent"] == "": # ann["wavecomponent"] = valrow2["VALUE"] # else: # ann["wavecomponent2"] = valrow2["VALUE"] if log_validation: valpd = valpd.append( pd.DataFrame([valrow2], columns=VALICOLS), ignore_index=True) # Annotations value valrow2 = validate_xpath( subsubannsnode, rel_path2, "urn:hl7-org:v3", "", new_validation_row(xml_filename, valgroup, "ANNSET_NOBEAT_" "ANNS"), failcat="WARNING") valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \ codetype_path + "/" + rel_path2 if valrow2["VALIOUT"] == "PASSED": ann["value"] = valrow2["VALUE"] if log_validation: valpd = valpd.append( pd.DataFrame([valrow2], columns=VALICOLS), ignore_index=True) # Annotations value as attribute valrow2 = validate_xpath( subsubannsnode, rel_path2, "urn:hl7-org:v3", "value", new_validation_row(xml_filename, valgroup, "ANNSET_NOBEAT_" "ANNS"), failcat="WARNING") valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \ codetype_path + "/" + rel_path2 if valrow2["VALIOUT"] == "PASSED": ann["value"] = valrow2["VALUE"] if log_validation: valpd = valpd.append( pd.DataFrame([valrow2], columns=VALICOLS), ignore_index=True) # Annotations value units valrow2 = validate_xpath( subsubannsnode, rel_path2, "urn:hl7-org:v3", "unit", new_validation_row(xml_filename, valgroup, "ANNSET_NOBEAT_" "ANNS"), failcat="WARNING") valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \ codetype_path + "/" + rel_path2 if valrow2["VALIOUT"] == "PASSED": ann["value_unit"] = valrow2["VALUE"] if log_validation: valpd = valpd.append( pd.DataFrame([valrow2], columns=VALICOLS), ignore_index=True) # annotations info from supporting ROI for n in ["", "low", "high"]: if n != "": rp = rel_path3 + "/" + n else: rp = rel_path3 valrow3 = validate_xpath( subsubannsnode, rp, "urn:hl7-org:v3", "value", new_validation_row(xml_filename, valgroup, "ANNSET_NOBEAT_" "ANNS"), failcat="WARNING") valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \ codetype_path + "/" + rp if valrow3["VALIOUT"] == "PASSED": if n != "": ann[n] = valrow3["VALUE"] else: ann["value"] = valrow3["VALUE"] else: roi_base = "../component/annotation/support/"\ "supportingROI/component/boundary" # Annotations type valrow2 = validate_xpath(subsubannsnode, "../component/annotation/" "value", "urn:hl7-org:v3", "code", new_validation_row( xml_filename, valgroup, "ANNSET_NOBEAT_ANNS"), failcat="WARNING") valrow2["XPATH"] = annsset_xmlnode_path + "/.." + \ codetype_path + "/" + \ "../component/annotation/value" if valrow2["VALIOUT"] == "PASSED": ann["wavecomponent2"] = valrow2["VALUE"] if log_validation: valpd = valpd.append( pd.DataFrame([valrow2], columns=VALICOLS), ignore_index=True) # annotation values if n != "": rp = roi_base + "/value/" + n else: rp = roi_base + "/value" valrow3 = validate_xpath(subsubannsnode, rp, "urn:hl7-org:v3", "value", new_validation_row( xml_filename, valgroup, "ANNSET_NOBEAT_ANNS"), failcat="WARNING") valrow3["XPATH"] = annsset_xmlnode_path + "/.." 
+ \ codetype_path + "/" + rp if valrow3["VALIOUT"] == "PASSED": if n != "": ann[n] = valrow3["VALUE"] else: ann["value"] = valrow3["VALUE"] if log_validation: valpd = valpd.append( pd.DataFrame([valrow3], columns=VALICOLS), ignore_index=True) valrow3 = validate_xpath( subsubannsnode, rp, "urn:hl7-org:v3", "unit", new_validation_row(xml_filename, valgroup, "ANNSET_NOBEAT" "_ANNS"), failcat="WARNING") valrow3["XPATH"] = annsset_xmlnode_path + "/.." + \ codetype_path + "/" + rp if valrow3["VALIOUT"] == "PASSED": if n != "": ann[n + "_unit"] = valrow3["VALUE"] else: ann["value_unit"] = valrow3["VALUE"] if log_validation: valpd = valpd.append( pd.DataFrame([valrow3], columns=VALICOLS), ignore_index=True) # annotations time encoding, lead and other info used by # value and supporting ROI for rel_path4 in ["../support/supportingROI/component/" "boundary", "../component/annotation/support/" "supportingROI/component/boundary"]: roinodes = subsubannsnode.xpath( rel_path4.replace('/', '/ns:'), namespaces={'ns': 'urn:hl7-org:v3'}) for roinode in roinodes: valrow4 = validate_xpath(roinode, "./code", "urn:hl7-org:v3", "code", new_validation_row( xml_filename, valgroup, "ANNSET_NOBEAT_ANNS"), failcat="WARNING") valrow4["XPATH"] = annsset_xmlnode_path + "/.." + \ codetype_path + "/" + rel_path4 if valrow4["VALIOUT"] == "PASSED": if valrow4["VALUE"] in ["TIME_ABSOLUTE", "TIME_RELATIVE"]: ann["timecode"] = valrow4["VALUE"] else: ann["lead"] = valrow4["VALUE"] if log_validation: valpd = valpd.append( pd.DataFrame([valrow4], columns=VALICOLS), ignore_index=True) aecgannset.anns.append(copy.deepcopy(ann)) anngrpid = anngrpid + 1 logger.info( f'{xml_filename},{zip_filename},' f'{valgroup} {anngrpid-anngrpid_from_beats} annotations groups' f' without an associated beat found') return aecgannset, valpd def parse_generalinfo(aecg_doc: etree._ElementTree, aecg: Aecg, log_validation: bool = False) -> Aecg: """Parses `aecg_doc` XML document and extracts general information This function parses the `aecg_doc` xml document searching for general information that includes in the returned `Aecg`: unique identifier (UUID), ECG date and time of collection (EGDTC), and device information. Args: aecg_doc (etree._ElementTree): aECG XML document aecg (Aecg): The aECG object to update log_validation (bool, optional): Indicates whether to maintain the validation results in `aecg.validatorResults`. Defaults to False. Returns: Aecg: `aecg` updated with the information found in the xml document. 
""" # ======================================= # UUID # ======================================= valrow = validate_xpath(aecg_doc, "./*[local-name() = \"id\"]", "", "root", new_validation_row(aecg.filename, "GENERAL", "UUID")) if log_validation: aecg.validatorResults = aecg.validatorResults.append( pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True) if valrow["VALIOUT"] == "PASSED": logger.info( f'{aecg.filename},{aecg.zipContainer},' f'UUID found: {valrow["VALUE"]}') aecg.UUID = valrow["VALUE"] else: logger.critical( f'{aecg.filename},{aecg.zipContainer},' f'UUID not found') valrow = validate_xpath(aecg_doc, "./*[local-name() = \"id\"]", "", "extension", new_validation_row(aecg.filename, "GENERAL", "UUID")) if log_validation: aecg.validatorResults = aecg.validatorResults.append( pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True) if valrow["VALIOUT"] == "PASSED": logger.debug( f'{aecg.filename},{aecg.zipContainer},' f'UUID extension found: {valrow["VALUE"]}') aecg.UUID += valrow["VALUE"] logger.info( f'{aecg.filename},{aecg.zipContainer},' f'UUID updated to: {aecg.UUID}') else: logger.debug( f'{aecg.filename},{aecg.zipContainer},' f'UUID extension not found') # ======================================= # EGDTC # ======================================= valpd = pd.DataFrame() egdtc_found = False for n in ["low", "center", "high"]: valrow = validate_xpath(aecg_doc, "./*[local-name() = \"effectiveTime\"]/" "*[local-name() = \"" + n + "\"]", "", "value", new_validation_row(aecg.filename, "GENERAL", "EGDTC_" + n), "WARNING") if valrow["VALIOUT"] == "PASSED": egdtc_found = True logger.info( f'{aecg.filename},{aecg.zipContainer},' f'EGDTC {n} found: {valrow["VALUE"]}') aecg.EGDTC[n] = valrow["VALUE"] if log_validation: valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True) if not egdtc_found: logger.critical( f'{aecg.filename},{aecg.zipContainer},' f'EGDTC not found') if log_validation: aecg.validatorResults = aecg.validatorResults.append(valpd, ignore_index=True) # ======================================= # DEVICE # ======================================= # DEVICE = {"manufacturer": "", "model": "", "software": ""} valrow = validate_xpath(aecg_doc, "./component/series/author/" "seriesAuthor/manufacturerOrganization/name", "urn:hl7-org:v3", "", new_validation_row(aecg.filename, "GENERAL", "DEVICE_manufacturer"), "WARNING") if valrow["VALIOUT"] == "PASSED": tmp = valrow["VALUE"].replace("\n", "|") logger.info( f'{aecg.filename},{aecg.zipContainer},' f'DEVICE manufacturer found: {tmp}') aecg.DEVICE["manufacturer"] = valrow["VALUE"] else: logger.warning( f'{aecg.filename},{aecg.zipContainer},' f'DEVICE manufacturer not found') if log_validation: aecg.validatorResults = aecg.validatorResults.append( pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True) valrow = validate_xpath(aecg_doc, "./component/series/author/" "seriesAuthor/manufacturedSeriesDevice/" "manufacturerModelName", "urn:hl7-org:v3", "", new_validation_row(aecg.filename, "GENERAL", "DEVICE_model"), "WARNING") if valrow["VALIOUT"] == "PASSED": tmp = valrow["VALUE"].replace("\n", "|") logger.info( f'{aecg.filename},{aecg.zipContainer},' f'DEVICE model found: {tmp}') aecg.DEVICE["model"] = valrow["VALUE"] else: logger.warning( f'{aecg.filename},{aecg.zipContainer},' f'DEVICE model not found') if log_validation: aecg.validatorResults = aecg.validatorResults.append( pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True) valrow = validate_xpath(aecg_doc, "./component/series/author/" 
"seriesAuthor/manufacturedSeriesDevice/" "softwareName", "urn:hl7-org:v3", "", new_validation_row(aecg.filename, "GENERAL", "DEVICE_software"), "WARNING") if valrow["VALIOUT"] == "PASSED": tmp = valrow["VALUE"].replace("\n", "|") logger.info( f'{aecg.filename},{aecg.zipContainer},' f'DEVICE software found: {tmp}') aecg.DEVICE["software"] = valrow["VALUE"] else: logger.warning( f'{aecg.filename},{aecg.zipContainer},' f'DEVICE software not found') if log_validation: aecg.validatorResults = aecg.validatorResults.append( pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True) return aecg def parse_subjectinfo(aecg_doc: etree._ElementTree, aecg: Aecg, log_validation: bool = False) -> Aecg: """Parses `aecg_doc` XML document and extracts subject information This function parses the `aecg_doc` xml document searching for subject information that includes in the returned `Aecg`: subject unique identifier (USUBJID), gender, birthtime, and race. Args: aecg_doc (etree._ElementTree): aECG XML document aecg (Aecg): The aECG object to update log_validation (bool, optional): Indicates whether to maintain the validation results in `aecg.validatorResults`. Defaults to False. Returns: Aecg: `aecg` updated with the information found in the xml document. """ # ======================================= # USUBJID # ======================================= valpd = pd.DataFrame() for n in ["root", "extension"]: valrow = validate_xpath(aecg_doc, "./componentOf/timepointEvent/componentOf/" "subjectAssignment/subject/trialSubject/id", "urn:hl7-org:v3", n, new_validation_row(aecg.filename, "SUBJECTINFO", "USUBJID_" + n)) if valrow["VALIOUT"] == "PASSED": logger.info( f'{aecg.filename},{aecg.zipContainer},' f'DM.USUBJID ID {n} found: {valrow["VALUE"]}') aecg.USUBJID[n] = valrow["VALUE"] else: if n == "root": logger.warning( f'{aecg.filename},{aecg.zipContainer},' f'DM.USUBJID ID {n} not found') else: logger.warning( f'{aecg.filename},{aecg.zipContainer},' f'DM.USUBJID ID {n} not found') if log_validation: valpd = valpd.append(pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True) if (aecg.USUBJID["root"] == "") and (aecg.USUBJID["extension"] == ""): logger.error( f'{aecg.filename},{aecg.zipContainer},' f'DM.USUBJID cannot be established.') if log_validation: aecg.validatorResults = aecg.validatorResults.append(valpd, ignore_index=True) # ======================================= # SEX / GENDER # ======================================= valrow = validate_xpath(aecg_doc, "./componentOf/timepointEvent/componentOf/" "subjectAssignment/subject/trialSubject/" "subjectDemographicPerson/" "administrativeGenderCode", "urn:hl7-org:v3", "code", new_validation_row(aecg.filename, "SUBJECTINFO", "SEX"), failcat="WARNING") if valrow["VALIOUT"] == "PASSED": logger.info( f'{aecg.filename},{aecg.zipContainer},' f'DM.SEX found: {valrow["VALUE"]}') aecg.SEX = valrow["VALUE"] else: logger.debug( f'{aecg.filename},{aecg.zipContainer},' f'DM.SEX not found') if log_validation: aecg.validatorResults = aecg.validatorResults.append( pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True) # ======================================= # BIRTHTIME # ======================================= valrow = validate_xpath(aecg_doc, "./componentOf/timepointEvent/componentOf/" "subjectAssignment/subject/trialSubject/" "subjectDemographicPerson/birthTime", "urn:hl7-org:v3", "value", new_validation_row(aecg.filename, "SUBJECTINFO", "BIRTHTIME"), failcat="WARNING") if valrow["VALIOUT"] == "PASSED": logger.info( f'{aecg.filename},{aecg.zipContainer},' 
f'DM.BIRTHTIME found.') aecg.BIRTHTIME = valrow["VALUE"] # age_in_years = aecg.subject_age_in_years() else: logger.debug( f'{aecg.filename},{aecg.zipContainer},' f'DM.BIRTHTIME not found') if log_validation: aecg.validatorResults = aecg.validatorResults.append( pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True) # ======================================= # RACE # ======================================= valrow = validate_xpath(aecg_doc, "./componentOf/timepointEvent/componentOf/" "subjectAssignment/subject/trialSubject/" "subjectDemographicPerson/raceCode", "urn:hl7-org:v3", "code", new_validation_row(aecg.filename, "SUBJECTINFO", "RACE"), failcat="WARNING") if valrow["VALIOUT"] == "PASSED": logger.info( f'{aecg.filename},{aecg.zipContainer},' f'DM.RACE found: {valrow["VALUE"]}') else: logger.debug( f'{aecg.filename},{aecg.zipContainer},' f'DM.RACE not found') aecg.RACE = valrow["VALUE"] if log_validation: aecg.validatorResults = aecg.validatorResults.append( pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True) return aecg def parse_trtainfo(aecg_doc: etree._ElementTree, aecg: Aecg, log_validation: bool = False) -> Aecg: """Parses `aecg_doc` XML document and extracts subject information This function parses the `aecg_doc` xml document searching for treatment information that includes in the returned `Aecg`. Args: aecg_doc (etree._ElementTree): aECG XML document aecg (Aecg): The aECG object to update log_validation (bool, optional): Indicates whether to maintain the validation results in `aecg.validatorResults`. Defaults to False. Returns: Aecg: `aecg` updated with the information found in the xml document. """ valrow = validate_xpath(aecg_doc, "./componentOf/timepointEvent/componentOf/" "subjectAssignment/definition/" "treatmentGroupAssignment/code", "urn:hl7-org:v3", "code", new_validation_row(aecg.filename, "STUDYINFO", "TRTA"), failcat="WARNING") if valrow["VALIOUT"] == "PASSED": logger.info( f'{aecg.filename},{aecg.zipContainer},' f'TRTA information found: {valrow["VALUE"]}') aecg.TRTA = valrow["VALUE"] else: logger.debug( f'{aecg.filename},{aecg.zipContainer},' f'TRTA information not found') if log_validation: aecg.validatorResults = aecg.validatorResults.append( pd.DataFrame([valrow], columns=VALICOLS), ignore_index=True) return aecg def parse_studyinfo(aecg_doc: etree._ElementTree, aecg: Aecg, log_validation: bool = False) -> Aecg: """Parses `aecg_doc` XML document and extracts study information This function parses the `aecg_doc` xml document searching for study information that includes in the returned `Aecg`: study unique identifier (STUDYID), and study title. Args: aecg_doc (etree._ElementTree): aECG XML document aecg (Aecg): The aECG object to update log_validation (bool, optional): Indicates whether to maintain the validation results in `aecg.validatorResults`. Defaults to False. Returns: Aecg: `aecg` updated with the information found in the xml document. """ valpd = pd.DataFrame() for n in ["root", "extension"]: valrow = validate_xpath(aecg_doc, "./componentOf/timepointEvent/componentOf/" "subjectAssignment/componentOf/" "clinicalTrial/id", "urn:hl7-org:v3", n, new_validation_row(aecg.filename, "STUDYINFO", "STUDYID_" + n), failcat="WARNING") if valrow["VALIOUT"] == "PASSED": logger.info( f'{aecg.filename},{aecg.zipContainer},' f'STUDYID {n} found: {valrow["VALUE"]}') aecg.STUDYID[n] = valrow["VALUE"] else: logger.debug( f'{aecg.filename},{aecg.zipContainer},' f'STUDYID {n} not found') if log_validation: valpd = valpd.append(
pd.DataFrame([valrow], columns=VALICOLS)
pandas.DataFrame
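The aecg record above grows its validation tables with repeated DataFrame.append calls; that method was removed in pandas 2.0, so a sketch of the equivalent collect-then-concat pattern may be useful (VALICOLS and the row dicts below are stand-ins, not the package's real values):

import pandas as pd

# Stand-ins for the package's VALICOLS and validation rows.
VALICOLS = ["XPATH", "VALIOUT", "VALUE"]
rows = [
    {"XPATH": "./id", "VALIOUT": "PASSED", "VALUE": "abc"},
    {"XPATH": "./code", "VALIOUT": "WARNING", "VALUE": ""},
]

# Collect the per-check frames and concatenate once instead of calling
# DataFrame.append inside the loop.
valpd = pd.concat(
    [pd.DataFrame([row], columns=VALICOLS) for row in rows],
    ignore_index=True,
)
print(valpd)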
import pandas as pd
import pytest
from django.core.exceptions import ObjectDoesNotExist

from zoo_checks.ingest import (
    ExcelUploadError,
    create_animals,
    create_enclosures,
    create_groups,
    create_species,
    find_animals_groups,
    get_animal_attributes,
    get_changesets,
    handle_upload,
    ingest_changesets,
    read_xlsx_data,
    validate_accession_numbers,
)
from zoo_checks.models import Animal, Enclosure, Group, Species

INPUT_EXAMPLE = "test_data/example.xlsx"
INPUT_EMPTY = "test_data/empty_data.xlsx"
INPUT_WRONG_COL = "test_data/wrong_column.xlsx"
INPUT_MALFORMED = "test_data/malformed.xlsx"
INPUT_ACCESSIONS_BAD = "test_data/too_many_digits_access_num.xlsx"
ONLY_GROUPS_EXAMPLE = "test_data/only_groups.xlsx"
ONLY_ANIMALS_EXAMPLE = "test_data/only_animals.xlsx"


def test_read_xlsx_data():
    with pytest.raises(ExcelUploadError, match="No data found in file"):
        read_xlsx_data(INPUT_EMPTY)

    with pytest.raises(
        ExcelUploadError,
        match="Not all columns found in file",
    ):
        read_xlsx_data(INPUT_WRONG_COL)

    with pytest.raises(ExcelUploadError, match="Unable to read file"):
        read_xlsx_data(INPUT_MALFORMED)

    df = read_xlsx_data(INPUT_EXAMPLE)
    assert df.shape[0] == 5


def test_validate_input():
    df = read_xlsx_data(INPUT_ACCESSIONS_BAD)
    with pytest.raises(
        ExcelUploadError, match="Accession numbers should only have 6 characters"
    ):
        validate_accession_numbers(df)

    df_simple_bad = pd.DataFrame({"Accession": "12345"}, index=[0])
    with pytest.raises(
        ExcelUploadError, match="Accession numbers should only have 6 characters"
    ):
        validate_accession_numbers(df_simple_bad)

    df_simple_good =
pd.DataFrame([{"Accession": "654321"}])
pandas.DataFrame
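The tests above expect validate_accession_numbers to reject anything that is not a six-character accession number. The real implementation lives in zoo_checks.ingest and is not shown in this record; a minimal sketch of that kind of check, with a stand-in exception class, could look like:

import pandas as pd

class ExcelUploadError(Exception):
    """Stand-in for zoo_checks.ingest.ExcelUploadError."""

def validate_accession_numbers(df: pd.DataFrame) -> pd.DataFrame:
    # Sketch only: flag any accession number that is not exactly 6 characters.
    bad = df["Accession"].astype(str).str.len() != 6
    if bad.any():
        raise ExcelUploadError("Accession numbers should only have 6 characters")
    return df

validate_accession_numbers(pd.DataFrame([{"Accession": "654321"}]))  # passes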
import pandas as pd
import argparse

csvs = ["decade/box_scores_1_1_2010_1_1_2011.csv",
        "decade/box_scores_1_1_2011_1_1_2012.csv",
        "decade/box_scores_1_1_2012_1_1_2013.csv",
        "decade/box_scores_1_1_2013_1_1_2014.csv",
        "decade/box_scores_1_1_2014_1_1_2015.csv",
        "decade/box_scores_1_1_2015_1_1_2016.csv",
        "decade/box_scores_1_1_2016_1_1_2017.csv",
        "decade/box_scores_1_1_2017_1_1_2018.csv",
        "decade/box_scores_1_1_2018_1_1_2019.csv",
        ]

if __name__ == "__main__":
    # grabbing command line args
    parser = argparse.ArgumentParser()
    parser.add_argument('out', type=str, help='output file name')
    args = parser.parse_args()
    out = args.out

    # df = pd.read_csv(target, index_col=0)
    # df.reset_index()

    dfs = [
pd.read_csv(f, index_col=0)
pandas.read_csv
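The box-score script above stops just as it starts filling dfs; the usual next step with this pattern is to concatenate the per-season frames and write the result to the out path. A short sketch under that assumption (two season files stand in for the full list, and the output filename stands in for out):

import pandas as pd

# Two of the per-season files stand in for the full `csvs` list above.
csvs = ["decade/box_scores_1_1_2010_1_1_2011.csv",
        "decade/box_scores_1_1_2011_1_1_2012.csv"]

dfs = [pd.read_csv(f, index_col=0) for f in csvs]
combined = pd.concat(dfs, ignore_index=True)
combined.to_csv("combined_box_scores.csv", index=False)  # stand-in for `out`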
# license: Creative Commons License
# Title: Big data strategies seminar. Challenge 1. www.iaac.net
# Created by: <NAME>
#
# is licensed under a license Creative Commons Attribution 4.0 International License.
# http://creativecommons.org/licenses/by/4.0/
# This script uses pandas for data management; for more information visit pandas.pydata.org/
# The tasks for joins and merges are here: https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html
# The options for scatterplots with seaborn: https://seaborn.pydata.org/generated/seaborn.scatterplot.html
#

import pandas as pd
from pandas import plotting
import matplotlib.pyplot as plt
import seaborn as sns

plotting.register_matplotlib_converters()

######################################################
# Read the different files starting with the last file
irf_2007 = pd.read_csv('../data/opendatabcn/2007_distribucio_territorial_renda_familiar.csv')
irf_2008 = pd.read_csv('../data/opendatabcn/2008_distribucio_territorial_renda_familiar.csv')
irf_2009 = pd.read_csv('../data/opendatabcn/2009_distribucio_territorial_renda_familiar.csv')
irf_2010 = pd.read_csv('../data/opendatabcn/2010_distribucio_territorial_renda_familiar.csv')
irf_2011 = pd.read_csv('../data/opendatabcn/2011_distribucio_territorial_renda_familiar.csv')
irf_2012 = pd.read_csv('../data/opendatabcn/2012_distribucio_territorial_renda_familiar.csv')
irf_2013 = pd.read_csv('../data/opendatabcn/2013_distribucio_territorial_renda_familiar.csv')
irf_2014 = pd.read_csv('../data/opendatabcn/2014_distribucio_territorial_renda_familiar.csv')
irf_2015 =
pd.read_csv('../data/opendatabcn/2015_distribucio_territorial_renda_familiar.csv')
pandas.read_csv
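The nine near-identical read_csv calls in the record above differ only in the year embedded in the filename, so a dict comprehension over the years yields the same frames with less repetition; this assumes every yearly file follows the naming pattern shown:

import pandas as pd

# Assumes each yearly file follows the pattern shown in the record above.
irf = {
    year: pd.read_csv(
        f"../data/opendatabcn/{year}_distribucio_territorial_renda_familiar.csv"
    )
    for year in range(2007, 2016)
}
print(irf[2015].head())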
import matplotlib.pyplot as plt import seaborn as sns import numpy as np import pandas as pd import scipy as sc import pickle import os from . import preprocess from scipy.sparse import vstack, csr_matrix, csc_matrix, lil_matrix from sklearn.metrics.pairwise import cosine_similarity from sklearn.preprocessing import normalize from . import builders class Dataset(object): @staticmethod def load(): train = pd.read_csv('data/train_final.csv', delimiter='\t') playlists = pd.read_csv('data/playlists_final.csv', delimiter='\t') target_playlists = pd.read_csv('data/target_playlists.csv', delimiter='\t') target_tracks = pd.read_csv('data/target_tracks.csv', delimiter = '\t') tracks = pd.read_csv('data/tracks_final.csv', delimiter='\t') return Dataset(train, tracks, playlists, target_tracks, target_playlists) def __init__(self, train, tracks, playlists, target_tracks, target_playlists): self.train = train self.tracks = tracks self.playlists = playlists self.target_tracks = target_tracks self.target_playlists = target_playlists def _normalize_train_dataset(self): self.track_to_num = pd.Series(self.tracks.index) self.track_to_num.index = self.tracks['track_id_tmp'] self.playlist_to_num = pd.Series(self.playlists.index) self.playlist_to_num.index = self.playlists['playlist_id_tmp'] self.train['track_id'] = self.train['track_id'].apply(lambda x : self.track_to_num[x]) self.train['playlist_id'] = self.train['playlist_id'].apply(lambda x : self.playlist_to_num[x]) def _normalize_tracks(self): # Convert track id self.tracks['track_id_tmp'] = self.tracks['track_id'] self.tracks['track_id'] = self.tracks.index self.num_to_tracks = pd.Series(self.tracks['track_id_tmp']) self.tracks.tags = self.tracks.tags.apply(lambda s: np.array(eval(s), dtype=int)) # Substitute each bad album (i.e. 
an illformed album such as -1, None, etc) with the 0 album def transform_album_1(alb): ar = eval(alb) if len(ar) == 0 or (len(ar) > 0 and (ar[0] == None or ar[0] == -1)): ar = [0] return ar[0] self.tracks.album = self.tracks.album.apply(lambda alb: transform_album_1(alb)) # Substitute each 0 album with a brand new album last_album = self.tracks.album.max() class AlbumTransformer(object): def __init__(self, last_album): self.next_album_id = last_album def __call__(self, alb): if alb == 0: alb = self.next_album_id self.next_album_id += 1 return alb # self.tracks.album = self.tracks.album.apply(lambda alb: transform_album_2(alb)) self.tracks.album = self.tracks.album.apply(AlbumTransformer(last_album+1)) def _normalize_playlists(self): self.playlists['playlist_id_tmp'] = self.playlists['playlist_id'] self.playlists['playlist_id'] = self.playlists.index self.playlist_to_num = pd.Series(self.playlists.index) self.playlist_to_num.index = self.playlists['playlist_id_tmp'] def _normalize_target_playlists(self): # Convert target playlist id self.target_playlists['playlist_id_tmp'] = self.target_playlists['playlist_id'] self.target_playlists['playlist_id'] = self.target_playlists['playlist_id'].apply(lambda x : self.playlist_to_num[x]) self.target_playlists = self.target_playlists.astype(int) def _normalize_target_tracks(self): # Convert target tracks id self.target_tracks['track_id_tmp'] = self.target_tracks['track_id'] self.target_tracks['track_id'] = self.target_tracks['track_id'].apply(lambda x : self.track_to_num[x]) self.target_tracks = self.target_tracks.astype(int) def _compute_mappings(self): # Create a dataframe that maps a playlist to the set of its tracks self.playlist_tracks = pd.DataFrame(self.train['playlist_id'].drop_duplicates()) self.playlist_tracks.index = self.train['playlist_id'].unique() self.playlist_tracks['track_ids'] = self.train.groupby('playlist_id').apply(lambda x : x['track_id'].values) self.playlist_tracks = self.playlist_tracks.sort_values('playlist_id') # Create a dataframe that maps a track to the set of the playlists it appears into self.track_playlists = pd.DataFrame(self.train['track_id'].drop_duplicates()) self.track_playlists.index = self.train['track_id'].unique() self.track_playlists['playlist_ids'] = self.train.groupby('track_id').apply(lambda x : x['playlist_id'].values) self.track_playlists = self.track_playlists.sort_values('track_id') def _add_owners(self): self.tracks['owners'] = self.track_playlists['playlist_ids'].apply(lambda x : self.playlists.loc[x]['owner'].values) null_owners = self.tracks[~self.tracks.owners.notnull()] for i in range(len(null_owners)): self.tracks.set_value(null_owners.track_id.iloc[i], 'owners', np.array([])) def split_holdout(self, test_size=1, min_playlist_tracks=13): self.train_orig = self.train.copy() self.target_tracks_orig = self.target_tracks.copy() self.target_playlists_orig = self.target_playlists.copy() self.train, self.test, self.target_playlists, self.target_tracks = train_test_split(self.train, test_size, min_playlist_tracks, target_playlists=self.target_playlists_orig) self.target_playlists = self.target_playlists.astype(int) self.target_tracks = self.target_tracks.astype(int) self.train = self.train.astype(int) self.test = self.test.astype(int) def normalize(self): self._normalize_tracks() self._normalize_playlists() self._normalize_train_dataset() self._normalize_target_tracks() self._normalize_target_playlists() self._compute_mappings() self._add_owners() def build_urm(self, 
urm_builder=builders.URMBuilder(norm="no")): self.urm = urm_builder.build(self) self.urm = csr_matrix(self.urm) def evaluate(test, recommendations, should_transform_test=True): """ - "test" is: if should_transform_test == False: a dataframe with columns "playlist_id" and "track_id". else: a dict with "playlist_id" as key and a list of "track_id" as value. - "recommendations" is a dataframe with "playlist_id" and "track_id" as numpy.ndarray value. """ if should_transform_test: # Tranform "test" in a dict: # key: playlist_id # value: list of track_ids test_df = preprocess.get_playlist_track_list2(test) else: test_df = test mean_ap = 0 for _,row in recommendations.iterrows(): pl_id = row['playlist_id'] tracks = row['track_ids'] correct = 0 ap = 0 for it, t in enumerate(tracks): if t in test_df.loc[pl_id]['track_ids']: correct += 1 ap += correct / (it+1) if len(tracks) > 0: ap /= len(tracks) mean_ap += ap return mean_ap / len(recommendations) def train_test_split(train, test_size=0.3, min_playlist_tracks=7, target_playlists=None): if target_playlists is None: playlists = train.groupby('playlist_id').count() else: playlists = train[train.playlist_id.isin(target_playlists.playlist_id)].groupby('playlist_id').count() # Only playlists with at least "min_playlist_tracks" tracks are considered. # If "min_playlists_tracks" = 7, then 28311 out of 45649 playlists in "train" are considered. to_choose_playlists = playlists[playlists['track_id'] >= min_playlist_tracks].index.values # Among these playlists, "test_size * len(to_choose_playlists)" distinct playlists are chosen for testing. # If "test_size" = 0.3, then 8493 playlists are chosen for testing. # It's a numpy array that contains playlis_ids. target_playlists = np.random.choice(to_choose_playlists, replace=False, size=int(test_size * len(to_choose_playlists))) target_tracks = np.array([]) indexes = np.array([]) for p in target_playlists: # Choose 5 random tracks of such playlist: since we selected playlists with at least "min_playlist_tracks" # tracks, if "min_playlist_tracks" is at least 5, we are sure to find them. selected_df = train[train['playlist_id'] == p].sample(5) selected_tracks = selected_df['track_id'].values target_tracks = np.union1d(target_tracks, selected_tracks) indexes = np.union1d(indexes, selected_df.index.values) test = train.loc[indexes].copy() train = train.drop(indexes) return train, test, pd.DataFrame(target_playlists, columns=['playlist_id']), pd.DataFrame(target_tracks, columns=['track_id']) def dot_with_top(m1, m2, def_rows_g, top=-1, row_group=1, similarity="dot", shrinkage=0.000001, alpha=1): """ Produces the product between matrices m1 and m2. Possible similarities: "dot", "cosine". By default it goes on "dot". NB: Shrinkage is not implemented... Code taken from https://stackoverflow.com/questions/29647326/sparse-matrix-dot-product-keeping-only-n-max-values-per-result-row and optimized for smart dot products. 
""" m2_transposed = m2.transpose() l2 = m2.sum(axis=0) # by cols if top > 0: final_rows = [] row_id = 0 while row_id < m1.shape[0]: last_row = row_id + row_group if row_id + row_group <= m1.shape[0] else m1.shape[0] rows = m1[row_id:last_row] if rows.count_nonzero() > 0: if similarity == "cosine-old": res_rows = cosine_similarity(rows, m2_transposed, dense_output=False) elif similarity == "cosine": res_rows = csr_matrix((np.dot(rows,m2) / (np.sqrt(rows.sum(axis=1)) * np.sqrt(l2) + shrinkage))) elif similarity == "cosine-asym": res_rows = csr_matrix((np.dot(rows,m2) / (np.power(rows.sum(axis=1),alpha) * np.power(m2.sum(axis=0),(1-alpha)) + shrinkage))) elif similarity == "dot-old": res_rows = rows.dot(m2) else: res_rows = (np.dot(rows,m2) + shrinkage).toarray() if res_rows.count_nonzero() > 0: for res_row in res_rows: if res_row.nnz > top: args_ids = np.argsort(res_row.data)[-top:] data = res_row.data[args_ids] cols = res_row.indices[args_ids] final_rows.append(csr_matrix((data, (np.zeros(top), cols)), shape=res_row.shape)) else: args_ids = np.argsort(res_row.data)[-top:] data = res_row.data[args_ids] cols = res_row.indices[args_ids] final_rows.append(csr_matrix((data, (np.zeros(len(args_ids)), cols)), shape=res_row.shape)) #print("Less than top: {0}".format(len(args_ids))) #final_rows.append(def_rows_g[0]) else: print("Add empty 2") for res_row in res_rows: final_rows.append(def_rows_g[0]) else: print("Add empty 3") final_rows.append(def_rows_g) row_id += row_group if row_id % row_group == 0: print(row_id) return vstack(final_rows, 'csr') return m1.dot(m2) def from_num_to_id(df, row_num, column = 'track_id'): """ df must have a 'track_id' column """ return df.iloc[row_num][column] def from_id_to_num(df, tr_id, column='track_id'): """ df must have a 'track_id' column """ return np.where(df[column].values == tr_id)[0][0] def from_prediction_matrix_to_dataframe(pred_matrix, dataset, keep_best=5, map_tracks=False): pred_matrix_csr = pred_matrix.tocsr() predictions = pd.DataFrame(dataset.target_playlists[:pred_matrix.shape[0]]) predictions.index = dataset.target_playlists['playlist_id'][:pred_matrix.shape[0]] predictions['track_ids'] = [np.array([]) for i in range(len(predictions))] for target_row,pl_id in enumerate(dataset.target_playlists.playlist_id[:pred_matrix.shape[0]]): row_start = pred_matrix_csr.indptr[target_row] row_end = pred_matrix_csr.indptr[target_row+1] row_columns = pred_matrix_csr.indices[row_start:row_end] row_data = pred_matrix_csr.data[row_start:row_end] best_indexes = row_data.argsort()[::-1][:keep_best] pred = row_columns[best_indexes] if map_tracks: pred = np.array([dataset.num_to_tracks[t] for t in pred]) predictions.loc[pl_id] = predictions.loc[pl_id].set_value('track_ids', pred) return predictions def build_id_to_num_map(df, column): a = pd.Series(np.arange(len(df))) a.index = df[column] return a def build_num_to_id_map(df, column): a =
pd.Series(df[column])
pandas.Series
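The record ends inside build_num_to_id_map, the mirror of build_id_to_num_map defined just above it: both are thin pd.Series wrappers that map between an id column and positional row numbers. A tiny self-contained illustration with toy track ids (not the real dataset):

import pandas as pd

# Toy frame standing in for dataset.tracks.
tracks = pd.DataFrame({"track_id": [105, 220, 307]})

# id -> row number (the idea behind build_id_to_num_map)
id_to_num = pd.Series(range(len(tracks)), index=tracks["track_id"])
# row number -> id (the idea behind build_num_to_id_map)
num_to_id = pd.Series(tracks["track_id"].values)

assert id_to_num[220] == 1
assert num_to_id[1] == 220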
import os import sys import gc import numpy as np import pandas as pd import math import scipy.stats as scst import scipy as sp import scipy.linalg as la from bgen_reader import read_bgen import qtl_loader_utils import pdb from glimix_core.lmm import LMM def run_QTL_analysis_load_intersect_phenotype_covariates_kinship_sample_mapping(pheno_filename, anno_filename, geno_prefix, plinkGenotype, minimum_test_samples= 10, relatedness_score=None, cis_mode=True, skipAutosomeFiltering = False, snps_filename=None, feature_filename=None, snp_feature_filename=None, selection='all', covariates_filename=None, randomeff_filename=None, sample_mapping_filename=None, extended_anno_filename=None, feature_variant_covariate_filename=None): # pheno_filename = "/Users/chaaya/dhonveli_dkfz/hipsci_pipeline/geuvadis_CEU_test_data/Expression/Geuvadis_CEU_YRI_Expr.txt.gz" # anno_filename = "/Users/chaaya/dhonveli_dkfz/hipsci_pipeline/geuvadis_CEU_test_data/Expression/Geuvadis_CEU_Annot.txt" # geno_prefix = "/Users/chaaya/dhonveli_dkfz/limix_qtl/limix_qtl-master/Limix_QTL/test_data/Genotypes/Geuvadis" # plinkGenotype = "/Users/chaaya/dhonveli_dkfz/limix_qtl/limix_qtl-master/Limix_QTL/test_data/Genotypes/Geuvadis" # minimum_test_samples = 10 # relatedness_score = 0.95 # cis_mode = True # skipAutosomeFiltering = False # snps_filename = None # feature_filename = None # snp_feature_filename = None # selection = 'all' # covariates_filename = "/Users/chaaya/dhonveli_dkfz/limix_qtl/limix_qtl-master/Limix_QTL/test_data/Expression/Geuvadis_CEU_YRI_covariates.txt" # randomeff_filename = "/Users/chaaya/dhonveli_dkfz/limix_qtl/limix_qtl-master/Limix_QTL/test_data/Genotypes/Geuvadis_chr1_kinship.normalized.txt,/Users/chaaya/dhonveli_dkfz/limix_qtl/limix_qtl-master/Limix_QTL/test_data/Genotypes/Geuvadis_readdepth.txt" # sample_mapping_filename = "/Users/chaaya/dhonveli_dkfz/limix_qtl/limix_qtl-master/Limix_QTL/test_data/Geuvadis_CEU_gte.txt" # extended_anno_filename = None # feature_variant_covariate_filename = None # output_dir = "/Users/chaaya/dhonveli_dkfz/limix_qtl/limix_qtl-master/Limix_QTL/test_data/Output2/" # window_size = 250000 # min_maf = 0.05 # min_hwe_P = 0.001 # min_call_rate = 0.95 # blocksize = 1000 # gaussianize_method = None # genetic_range = "all" # seed = np.random.randint(40000) # n_perm = 0 # write_permutations = False # regressCovariatesUpfront = False # write_feature_top_permutations = False # selection based on coordinates selectionStart = None selectionEnd = None if(":" in selection): parts = selection.split(":") if("-" not in parts[1]): print("No correct sub selection.") print("Given in: "+selection) print("Expected format: (chr number):(start location)-(stop location)") sys.exit() chromosome = parts[0] if("-" in parts[1]): parts2 = parts[1].split("-") selectionStart = int(parts2[0]) selectionEnd = int(parts2[1]) else : chromosome=selection ''' function to take input and intersect sample and genotype.''' #Load input data files & filter for relevant data #Load input data filesf # loading phenotype and annotation files phenotype_df = qtl_loader_utils.get_phenotype_df(pheno_filename) annotation_df = qtl_loader_utils.get_annotation_df(anno_filename) phenotype_df.columns = phenotype_df.columns.astype("str") phenotype_df.index = phenotype_df.index.astype("str") annotation_df.columns = annotation_df.columns.astype("str") annotation_df.index = annotation_df.index.astype("str") # loading genotype if(plinkGenotype): bim,fam,bed = qtl_loader_utils.get_genotype_data(geno_prefix) bgen=None else : bgen = 
read_bgen(geno_prefix+'.bgen', verbose=False) bed=None fam =bgen['samples'] fam = fam.to_frame("iid") fam.index=fam["iid"] bim = bgen['variants'].compute() bim = bim.assign(i = range(bim.shape[0])) bim['id'] = bim['rsid'] bim = bim.rename(index=str, columns={"id": "snp"}) bim['a1'] = bim['allele_ids'].str.split(",", expand=True)[0] bim.index = bim["snp"].astype(str).values bim.index.name = "candidate" ##Fix chromosome ids bim['chrom'].replace('^chr','',regex=True,inplace=True) bim['chrom'].replace(['X', 'Y', 'XY', 'MT'], ['23', '24', '25', '26'],inplace=True) ##Remove non-biallelic & non-ploidy 2 (to be sure). print("Warning, the current software only supports biallelic SNPs and ploidy 2") bim.loc[np.logical_and(bim['nalleles']<3,bim['nalleles']>0),:] # converting chromsome names annotation_df.replace(['X', 'Y', 'XY', 'MT'], ['23', '24', '25', '26'],inplace=True) if chromosome=='X' : chromosome = '23' elif chromosome=='Y': chromosome = '24' elif chromosome=='XY': chromosome='25' elif chromosome=='MT': chromosome='26' print("Intersecting data.") if(annotation_df.shape[0] != annotation_df.groupby(annotation_df.index).first().shape[0]): print("Only one location per feature supported. If multiple locations are needed please look at: --extended_anno_file") sys.exit() ##Make sure that there is only one entry per feature id!. sample2individual_df = qtl_loader_utils.get_samplemapping_df(sample_mapping_filename,list(phenotype_df.columns),'sample') sample2individual_df.index = sample2individual_df.index.astype('str') sample2individual_df = sample2individual_df.astype('str') sample2individual_df['sample']=sample2individual_df.index sample2individual_df = sample2individual_df.drop_duplicates(); ##Filter first the linking files! #Subset linking to relevant genotypes. orgSize = sample2individual_df.shape[0] sample2individual_df = sample2individual_df.loc[sample2individual_df['iid'].map(lambda x: x in list(map(str, fam.index))),:] diff = orgSize- sample2individual_df.shape[0] orgSize = sample2individual_df.shape[0] print("Dropped: "+str(diff)+" samples because they are not present in the genotype file.") #Subset linking to relevant phenotypes. sample2individual_df = sample2individual_df.loc[np.intersect1d(sample2individual_df.index,phenotype_df.columns),:] diff = orgSize- sample2individual_df.shape[0] orgSize = sample2individual_df.shape[0] print("Dropped: "+str(diff)+" samples because they are not present in the phenotype file.") #Subset linking vs kinship. kinship_df = None readdepth_df = None if randomeff_filename is not None: kinship_df,readdepth_df = qtl_loader_utils.get_randeff_df(randomeff_filename) if kinship_df is not None: #Filter from individual2sample_df & sample2individual_df since we don't want to filter from the genotypes. sample2individual_df = sample2individual_df[sample2individual_df['iid'].map(lambda x: x in list(map(str, kinship_df.index)))] diff = orgSize- sample2individual_df.shape[0] orgSize = sample2individual_df.shape[0] print("Dropped: "+str(diff)+" samples because they are not present in the kinship file.") if readdepth_df is not None: #This needs to come from the covariate site not the genotype side! #Filter from individual2sample_df & sample2individual_df since we don't want to filter from the genotypes. 
sample2individual_df = sample2individual_df[sample2individual_df['sample'].map(lambda x: x in list(map(str, readdepth_df.index)))] diff = orgSize- sample2individual_df.shape[0] orgSize = sample2individual_df.shape[0] print("Dropped: "+str(diff)+" samples because they are not present in the second random effect file.") #Subset linking vs covariates. covariate_df = qtl_loader_utils.get_covariate_df(covariates_filename) if covariate_df is not None: if np.nansum(covariate_df==1,0).max()<covariate_df.shape[0]: covariate_df.insert(0, 'ones', np.ones(covariate_df.shape[0])) sample2individual_df = sample2individual_df.loc[list(set(sample2individual_df.index) & set(covariate_df.index)),:] diff = orgSize- sample2individual_df.shape[0] orgSize = sample2individual_df.shape[0] print("Dropped: "+str(diff)+" samples because they are not present in the covariate file.") ### print("Number of samples with genotype & phenotype data: " + str(sample2individual_df.shape[0])) if(sample2individual_df.shape[0]<minimum_test_samples): print("Not enough samples with both genotype & phenotype data.") sys.exit() ##Filter now the actual data! #Filter phenotype data based on the linking files. phenotype_df = phenotype_df.loc[list(set(phenotype_df.index)&set(annotation_df.index)),sample2individual_df.index.values] #Filter kinship data based on the linking files. genetically_unique_individuals = None if kinship_df is not None: kinship_df = kinship_df.loc[np.intersect1d(kinship_df.index,sample2individual_df['iid']),np.intersect1d(kinship_df.index,sample2individual_df['iid'])] if (kinship_df is not None) and (relatedness_score is not None): genetically_unique_individuals = get_unique_genetic_samples(kinship_df, relatedness_score); #Filter covariate data based on the linking files. snp_feature_filter_df= qtl_loader_utils.get_snp_feature_df(snp_feature_filename) try: feature_filter_df = qtl_loader_utils.get_snp_df(feature_filename) except: if feature_filename is not None: feature_filter_df=pd.DataFrame(index=feature_filename) #Do filtering on features. if feature_filter_df is not None: phenotype_df = phenotype_df.loc[feature_filter_df.index,:] ##Filtering on features to test. if snp_feature_filter_df is not None: lst3 = set(phenotype_df.index).intersection(np.unique(snp_feature_filter_df['feature_id'])) phenotype_df = phenotype_df.loc[lst3,:] ##Filtering on features to test from the combined feature snp filter. if ((not cis_mode) and len(set(bim['chrom']))<22) : print("Warning, running a trans-analysis on snp data from less than 22 chromosomes.\nTo merge data later the permutation P-values need to be written out.") if(cis_mode): #Remove features from the annotation that are on chromosomes which are not present anyway. annotation_df = annotation_df[np.in1d(annotation_df['chromosome'],list(set(bim['chrom'])))] #Prepare to filter on snps. snp_filter_df = qtl_loader_utils.get_snp_df(snps_filename) if snp_filter_df is not None: toSelect = set(snp_filter_df.index).intersection(set(bim['snp'])) bim = bim.loc[bim['snp'].isin(toSelect)] ##Filtering on SNPs to test from the snp filter. if snp_feature_filter_df is not None: toSelect = set(np.unique(snp_feature_filter_df['snp_id'])).intersection(set(bim['snp'])) bim = bim.loc[bim['snp'].isin(toSelect)] ##Filtering on features to test from the combined feature snp filter. #Filtering for sites on non allosomes. 
if not skipAutosomeFiltering : annotation_df = annotation_df[annotation_df['chromosome'].map(lambda x: x in list(map(str, range(1, 23))))] #Determine features to be tested if chromosome=='all': feature_list = list(set(annotation_df.index)&set(phenotype_df.index)) else: if not selectionStart is None : lowest = min([selectionStart,selectionEnd]) highest = max([selectionStart,selectionEnd]) annotation_df['mean'] = ((annotation_df["start"] + annotation_df["end"])/2) feature_list = list(set(annotation_df.iloc[(annotation_df['chromosome'].values==chromosome) & (annotation_df['mean'].values>=lowest) & (annotation_df["mean"].values<highest)].index.values)&set(phenotype_df.index)) del annotation_df['mean'] else : feature_list = list(set(annotation_df[annotation_df['chromosome']==chromosome].index)&set(phenotype_df.index)) #Drop not used feature information. phenotype_df = phenotype_df.loc[feature_list,:] gc.collect() print("Number of features to be tested: " + str(len(feature_list))) print("Total number of variants to be considered, before variante QC and feature intersection: " + str(bim.shape[0])) if(phenotype_df.shape[1]<minimum_test_samples): print("Not enough samples with both genotype & phenotype data, for current number of covariates.") sys.exit() if extended_anno_filename is not None: complete_annotation_df = pd.read_csv(extended_anno_filename,sep='\t',index_col=0) annotation_df['index']=annotation_df.index complete_annotation_df['index']=complete_annotation_df.index complete_annotation_df =
pd.concat([annotation_df,complete_annotation_df])
pandas.concat
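# A minimal, hypothetical sketch of the pd.concat pattern used in the snippet
# above: stacking a base annotation table with an extended one, then keeping a
# single entry per feature. The frames and feature ids here are made up.
import pandas as pd

annotation_df = pd.DataFrame(
    {"chromosome": ["1", "2"], "start": [100, 200], "end": [150, 260]},
    index=["featA", "featB"])
extended_annotation_df = pd.DataFrame(
    {"chromosome": ["2"], "start": [210], "end": [270]},
    index=["featB"])

combined = pd.concat([annotation_df, extended_annotation_df])
combined = combined.groupby(combined.index).first()  # one location per feature id
print(combined)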
import ta import collections import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import researchpy as rp from ta.momentum import RSIIndicator from ta.trend import MACD from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix as cm from pandas.plotting import register_matplotlib_converters from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split register_matplotlib_converters() DESKTOP_PATH = r'C:/Users/<NAME>/Desktop' FOLDER_PATH = r'/quant-finance-main' CIK_CSV = r'/cik_ticker.csv' FINAL_PATH = DESKTOP_PATH + '/Proof_Of_Concept/2020' XLSX_NAME = FINAL_PATH + r'/Pricing_Data2.xlsx' WEIGHT_NAME = FINAL_PATH + r'/Weight.xlsx' if __name__ == "__main__": all_df = pd.read_excel(XLSX_NAME) weight_df = pd.read_excel(WEIGHT_NAME, index_col=0) all_df = all_df.replace(np.nan, 0) date_df = all_df.iloc[:, 0:1] replace_list = [',', '(', ')', '\''] for i in replace_list: date_df['Date'] = date_df['Date'].str.replace(i, '') high_df = all_df.filter(regex='High') low_df = all_df.filter(regex='Low') open_df = all_df.filter(regex='Open') close_df = all_df.filter(regex='Close') volume_df = all_df.filter(regex='Volume') final_price = [] # List for the FINAL_PRICE of the ACTUAL DATASET for the ACTUAL PRICE # Market Cap Weighting Index Value for index2 in range(0, 3670): total_wp = 0 for index in range(0, 442): if index != 0: close_name = 'Close' + '.' + str(index) else: close_name = 'Close' weighted_price = close_df[close_name][index2] * weight_df['Weight'][index] total_wp += weighted_price final_price.append(total_wp) final_price_df = pd.DataFrame(final_price, columns=['Close']) # Close, Open, High, Low, Volume, Return for Portfolio. I hate this shit man. portfolio_df = collections.defaultdict(list) for index2 in range(0, 3670): close_wp = 0 open_wp = 0 high_wp = 0 low_wp = 0 volume_wp = 0 return_wp = 0 for index in range(0, 442): if index != 0: close_name = 'Close' + '.' + str(index) open_name = 'Open' + '.' + str(index) high_name = 'High' + '.' + str(index) low_name = 'Low' + '.' + str(index) volume_name = 'Volume' + '.' 
+ str(index) else: close_name = 'Close' open_name = 'Open' high_name = 'High' low_name = 'Low' volume_name = 'Volume' close_port = close_df[close_name][index2] * weight_df['Weight'][index] open_port = open_df[open_name][index2] * weight_df['Weight'][index] high_port = high_df[high_name][index2] * weight_df['Weight'][index] low_port = low_df[low_name][index2] * weight_df['Weight'][index] volume_port = volume_df[volume_name][index2] * weight_df['Weight'][index] close_wp += close_port open_wp += open_port high_wp += high_port low_wp += low_port volume_wp += volume_port # return_wp = np.log((close_wp - open_wp)/open_wp) return_wp = np.log(close_wp/open_wp) portfolio_df['Close'].append(close_wp) portfolio_df['Open'].append(open_wp) portfolio_df['High'].append(high_wp) portfolio_df['Low'].append(low_wp) portfolio_df['Volume'].append(volume_wp) portfolio_df['Return'].append(return_wp) independent_portfolio_df = pd.DataFrame.from_dict(portfolio_df) independent_portfolio_df2 = independent_portfolio_df independent_portfolio_df2['Open-Close'] = independent_portfolio_df['Open'] - independent_portfolio_df['Close'] independent_portfolio_df2['High-Low'] = independent_portfolio_df['High'] - independent_portfolio_df['Low'] # ec = ta.volume.OnBalanceVolumeIndicator(independent_portfolio_df['Close'], independent_portfolio_df['Volume']) # independent_portfolio_df2['OBV'] = ec.on_balance_volume() independent_portfolio_df2['RSI'] = ta.momentum.rsi(independent_portfolio_df['Close']) independent_portfolio_df2['MACD'] = ta.trend.macd(independent_portfolio_df['Close']) # independent_portfolio_df['MACD_Difference'] = ta.trend.macd_diff(independent_portfolio_df['Close']) # independent_portfolio_df['MACD_Signal'] = ta.trend.macd_signal(independent_portfolio_df['Close']) independent_portfolio_df2 = independent_portfolio_df2.set_index(date_df['Date']) independent_portfolio_df2 = independent_portfolio_df2.dropna() X = independent_portfolio_df2.iloc[:, 6:] target_variable = np.where(independent_portfolio_df2['Close'].shift(-1) > independent_portfolio_df2['Close'], 1, -1) target_list = target_variable.tolist() split_percentage = 0.7 split = int(split_percentage * len(X)) X_train = X[:split] Y_train = target_variable[:split] X_test = X[split:] Y_test = target_variable[split:] knn = KNeighborsClassifier(n_neighbors=15) fitted_model = knn.fit(X_train, Y_train) knn.fit(X_train, Y_train) predictions = fitted_model.predict(X_test) confusion_matrix = cm(Y_test, predictions) print(confusion_matrix) accuracy_train = accuracy_score(Y_train, knn.predict(X_train)) accuracy_test = accuracy_score(Y_test, knn.predict(X_test)) print('Train data Accuracy: %.2f' % accuracy_train) print('Test data Accuracy: %.2f' % accuracy_test) independent_portfolio_df2['Predicted_Signal'] = knn.predict(X) Cum_Returns = independent_portfolio_df2[split:]['Return'].cumsum() * 100 independent_portfolio_df2['Strategy_Returns'] = independent_portfolio_df2['Return'] * independent_portfolio_df2[ 'Predicted_Signal'].shift(1) Cum_Strategy_Returns = independent_portfolio_df2[split:]['Strategy_Returns'].cumsum() * 100 plt.figure(figsize=(16, 13)) plt.plot(Cum_Returns, color='r', label='Returns') plt.plot(Cum_Strategy_Returns, color='g', label='Strategy Returns') plt.xlabel('Dates', fontsize=12) plt.ylabel('Return', fontsize=12) plt.legend() plt.show() summary, results = rp.ttest(group1=Cum_Returns, group1_name='Return', group2=Cum_Strategy_Returns, group2_name='Strategy_Returns') print(summary) print(results) dff = pd.DataFrame() dff['a'] = Cum_Returns dff['b'] = 
Cum_Strategy_Returns
    df1 = dff['a'].sample(n=30)
    df2 = dff['b'].sample(n=30)

    summary, results = rp.ttest(group1=df1, group1_name='Return',
                                group2=df2, group2_name='Strategy_Returns')
    print(summary)
    print(results)

    dff =
pd.DataFrame()
pandas.DataFrame
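# A small, hypothetical sketch of the pd.DataFrame usage above: collecting two
# cumulative-return series into one frame and drawing equal-sized samples for a
# t-test. Random series and scipy's t-test stand in for the real market data
# and the researchpy call.
import numpy as np
import pandas as pd
from scipy import stats

rng = np.random.default_rng(0)
cum_returns = pd.Series(rng.normal(size=100)).cumsum()
cum_strategy_returns = pd.Series(rng.normal(size=100)).cumsum()

dff = pd.DataFrame()
dff['a'] = cum_returns
dff['b'] = cum_strategy_returns

t_stat, p_val = stats.ttest_ind(dff['a'].sample(n=30, random_state=1),
                                dff['b'].sample(n=30, random_state=1))
print(t_stat, p_val)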
from flask import render_template, request, redirect, url_for, session from app import app from model import * from model.main import * import json import pandas as pd import numpy as np class DataStore(): model=None model_month=None sale_model=None data = DataStore() @app.route('/', methods=["GET"]) def home(): percent=percentageMethod() total_month=totalMonth() file1=pd.read_json('total_month.json',orient='index') month_index=np.array(file1['month_year']) month_data=np.array(file1['total']) with open('percent.json') as f: file2 = json.load(f) labels=file2['index'] data=file2['data'] if "username" in session: return render_template('index.html', last_year=lastYear(), last_month=lastMonth(),dataset=data, label=labels, percent=percent, month_index=month_index, month_data=month_data) else: return render_template('login.html') # Register new user @app.route('/register', methods=["GET", "POST"]) def register(): if request.method == "GET": return render_template("register.html") elif request.method == "POST": registerUser() return redirect(url_for("login")) #Check if email already exists in the registratiion page @app.route('/checkusername', methods=["POST"]) def check(): return checkusername() # Everything Login (Routes to renderpage, check if username exist and also verifypassword through Jquery AJAX request) @app.route('/login', methods=["GET"]) def login(): if request.method == "GET": if "username" not in session: return render_template("login.html") else: return redirect(url_for("home")) @app.route('/checkloginusername', methods=["POST"]) def checkUserlogin(): return checkloginusername() @app.route('/checkloginpassword', methods=["POST"]) def checkUserpassword(): return checkloginpassword() #The admin logout @app.route('/logout', methods=["GET"]) # URL for logout def logout(): # logout function session.pop('username', None) # remove user session return redirect(url_for("home")) # redirect to home page with message #Forgot Password @app.route('/forgot-password', methods=["GET"]) def forgotpassword(): return render_template('forgot-password.html') #404 Page @app.route('/404', methods=["GET"]) def errorpage(): return render_template("404.html") #Blank Page @app.route('/blank', methods=["GET"]) def blank(): return render_template('blank.html') @app.route('/totalyear', methods=["GET"]) def total_year(): total_year=totalYear() file1=pd.read_json('total_year.json',orient='index') year_index=np.array(file1['year']) year_data=np.array(file1['total']) return render_template("total_year.html",year_index=year_index, year_data=year_data) @app.route('/totalmonth', methods=["GET"]) def total_month(): total_month=totalMonth() file1=pd.read_json('total_month.json',orient='index') month_index=np.array(file1['month_year']) month_data=np.array(file1['total']) num=6 # Fit model model=fit_model() data.model_month=model predict_rs, fitted_data=predict(model,6) pred_index=np.array(predict_rs['month_year']) pred_data=np.array(predict_rs['total']) #Test model test_rs= test(pred_data[0], fitted_data) return render_template("total_month.html",month_index=month_index, month_data=month_data, stationary=check_stationary(), model=model, pred_index=pred_index, pred_data=pred_data, test_rs=test_rs, num=num) def check_stationary(): total_month=totalMonth() data1=total_month[['month_year','total']] data1.set_index('month_year', inplace=True) result=stationary(data1) return result def fit_model(): total_month=totalMonth() data1=total_month[['month_year','total']] data1.set_index('month_year', inplace=True) data=data1['total'] 
stationary=check_stationary() p=stationary[1] if (p<0.05): result1 = fit_model_stationary(data) else: result1 = fit_model_non_stationary(data) return result1 def predict(model,num_predict): if num_predict==0: num_predict=6 fitted_month, confint_month = model.predict(n_periods=num_predict, return_conf_int=True) df2=df[['total', 'date']] total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True) data=total_day[['date','total']] data.set_index('date', inplace=True) date = pd.date_range(data.index[-1], periods=num_predict, freq='MS') fitted_seri_month = pd.Series(fitted_month, index=date) dff=pd.DataFrame(fitted_seri_month) dff=dff.reset_index() dff.columns=['date','total'] dff['month_year'] = pd.to_datetime(dff['date']).dt.to_period('M') pred=dff[['month_year','total']] return pred, fitted_month def test(y, yhat): e = y-yhat mse=np.mean(e**2) rmse=np.sqrt(mse) mae=np.mean(np.abs(e)) mape=np.mean(abs(e/y)) # print('Sai số bình phương trung bình MSE: {}'.format(mse)) # print('Root Mean Square Error: {}'.format(rmse)) # print('Mean Absolute Error: {}'.format(mae)) # print('Mean Absolute Percentage Error: {}'.format(mape)) return mse, rmse, mae, mape @app.route('/totalmonth', methods=["POST"]) def total_month_num(): total_month=totalMonth() file1=pd.read_json('total_month.json',orient='index') month_index=np.array(file1['month_year']) month_data=np.array(file1['total']) #Get data if request.method == "POST": num = int(request.form.get("num_month")) predict_rs, fitted_data=predict(data.model_month,num) pred_index=np.array(predict_rs['month_year']) pred_data=np.array(predict_rs['total']) #Test model test_rs= test(pred_data[0], fitted_data) return render_template("total_month.html",month_index=month_index, month_data=month_data, stationary=check_stationary(), model=data.model_month, pred_index=pred_index, pred_data=pred_data, test_rs=test_rs, num=num) def check_stationary(): total_month=totalMonth() data1=total_month[['month_year','total']] data1.set_index('month_year', inplace=True) result=stationary(data1) return result def predict(model,num_predict): if num_predict==0: num_predict=6 fitted_month, confint_month = model.predict(n_periods=num_predict, return_conf_int=True) df2=df[['total', 'date']] total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True) data2=total_day[['date','total']] data2.set_index('date', inplace=True) date = pd.date_range(data2.index[-1], periods=num_predict, freq='MS') fitted_seri_month = pd.Series(fitted_month, index=date) dff=pd.DataFrame(fitted_seri_month) dff=dff.reset_index() dff.columns=['date','total'] dff['month_year'] = pd.to_datetime(dff['date']).dt.to_period('M') pred=dff[['month_year','total']] return pred, fitted_month def test(y, yhat): e = y-yhat mse=np.mean(e**2) rmse=np.sqrt(mse) mae=np.mean(np.abs(e)) mape=np.mean(abs(e/y)) # print('Sai số bình phương trung bình MSE: {}'.format(mse)) # print('Root Mean Square Error: {}'.format(rmse)) # print('Mean Absolute Error: {}'.format(mae)) # print('Mean Absolute Percentage Error: {}'.format(mape)) return mse, rmse, mae, mape @app.route('/totaldate', methods=["GET"]) def total_date(): total_date=totalDate() date_index=np.array(total_date['date']) date_data=np.array(total_date['total']) num=30 # Fit model model_date=fit_model_date() data.model=model_date predict_rs_date, fitted_data_date=predict_date(model_date,30) pred_index_date=np.array(predict_rs_date['date']) pred_data_date=np.array(predict_rs_date['total']) #Test model test_rs= 
test_date(pred_data_date[0], fitted_data_date) return render_template("total_date.html",date_index=date_index, date_data=date_data, stationary=check_stationary_date(), model_date=model_date, pred_index=pred_index_date, pred_data=pred_data_date, test_rs=test_rs, num=num) def check_stationary_date(): total_date=totalDate() data1=total_date[['date','total']] data1.set_index('date', inplace=True) result=stationary_trend(data1) return result def fit_model_date(): total_date=totalDate() data1=total_date[['date','total']] data1.set_index('date', inplace=True) data=data1['total'] result1 = fit_model_fast(data) return result1 def predict_date(model_date, num_predict): if num_predict==0: num_predict=30 fitted_date, confint_date = model_date.predict(n_periods=num_predict, return_conf_int=True) df2=df[['total', 'date']] total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True) data=total_day[['date','total']] data.set_index('date', inplace=True) date = pd.date_range(data.index[-1], periods=num_predict) fitted_seri_date = pd.Series(fitted_date, index=date) dff=pd.DataFrame(fitted_seri_date) dff=dff.reset_index() dff.columns=['date','total'] dff['date'] = pd.to_datetime(dff['date']).dt.to_period('D') pred=dff[['date','total']] return pred, fitted_date def test_date(y, yhat): e = y-yhat mse=np.mean(e**2) rmse=np.sqrt(mse) mae=np.mean(np.abs(e)) mape=np.mean(abs(e/y)) # print('Sai số bình phương trung bình MSE: {}'.format(mse)) # print('Root Mean Square Error: {}'.format(rmse)) # print('Mean Absolute Error: {}'.format(mae)) # print('Mean Absolute Percentage Error: {}'.format(mape)) return mse, rmse, mae, mape @app.route('/totaldate', methods=["POST"]) def total_date_num(): total_date=totalDate() date_index=np.array(total_date['date']) date_data=np.array(total_date['total']) #Get data if request.method == "POST": num = int(request.form.get("num_date")) predict_rs_date, fitted_data_date=predict_date(data.model,num) pred_index_date=np.array(predict_rs_date['date']) pred_data_date=np.array(predict_rs_date['total']) test_rs= test_date(pred_data_date[0], fitted_data_date) return render_template("total_date.html",date_index=date_index, date_data=date_data, stationary=check_stationary_date(), model_date=data.model, pred_index=pred_index_date, pred_data=pred_data_date, test_rs=test_rs, num=num) def check_stationary_date(): total_date=totalDate() data1=total_date[['date','total']] data1.set_index('date', inplace=True) result=stationary_trend(data1) return result def predict_date(model_date, num_predict): if num_predict==0: num_predict=6 fitted_date, confint_date = model_date.predict(n_periods=num_predict, return_conf_int=True) df2=df[['total', 'date']] total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True) data2=total_day[['date','total']] data2.set_index('date', inplace=True) date = pd.date_range(data2.index[-1], periods=num_predict) fitted_seri_date = pd.Series(fitted_date, index=date) dff=pd.DataFrame(fitted_seri_date) dff=dff.reset_index() dff.columns=['date','total'] dff['date'] = pd.to_datetime(dff['date']).dt.to_period('D') pred=dff[['date','total']] return pred, fitted_date def test_date(y, yhat): e = y-yhat mse=np.mean(e**2) rmse=np.sqrt(mse) mae=np.mean(np.abs(e)) mape=np.mean(abs(e/y)) return mse, rmse, mae, mape @app.route('/revenueyear', methods=["GET"]) def revenue_year(): sale_year=saleYear() year_index=np.array(sale_year['year']) year_data=np.array(sale_year['quantity']) return 
render_template("revenue_year.html",year_index=year_index, year_data=year_data) @app.route('/revenuemonth', methods=["GET"]) def revenue_month(): total_month=saleMonth() file1=pd.read_json('sale_month.json',orient='index') month_index=np.array(file1['month_year']) month_data=np.array(file1['quantity']) num_sale=6 # Fit model model=fit_model() data.model_month=model predict_rs, fitted_data=predict(model,6) pred_index=np.array(predict_rs['month_year']) pred_data=np.array(predict_rs['quantity']) #Test model test_rs= test(pred_data[0], fitted_data) return render_template("revenue_month.html",month_index=month_index, month_data=month_data, stationary=check_stationary(), model=model, pred_index=pred_index, pred_data=pred_data, test_rs=test_rs, num_sale=num_sale) def check_stationary(): total_month=saleMonth() data1=total_month[['month_year','quantity']] data1.set_index('month_year', inplace=True) result=stationary(data1) return result def fit_model(): total_month=saleMonth() data1=total_month[['month_year','quantity']] data1.set_index('month_year', inplace=True) data=data1['quantity'] stationary=check_stationary() p=stationary[1] if (p<0.05): result1 = fit_model_stationary(data) else: result1 = fit_model_non_stationary(data) return result1 def predict(model,num_predict): if num_predict==0: num_predict=6 fitted_month, confint_month = model.predict(n_periods=num_predict, return_conf_int=True) df2=df[['quantity', 'date']] total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True) data=total_day[['date','quantity']] data.set_index('date', inplace=True) date = pd.date_range(data.index[-1], periods=num_predict, freq='MS') fitted_seri_month = pd.Series(fitted_month, index=date) dff=pd.DataFrame(fitted_seri_month) dff=dff.reset_index() dff.columns=['date','quantity'] dff['month_year'] = pd.to_datetime(dff['date']).dt.to_period('M') pred=dff[['month_year','quantity']] return pred, fitted_month def test(y, yhat): e = y-yhat mse=np.mean(e**2) rmse=np.sqrt(mse) mae=np.mean(np.abs(e)) mape=np.mean(abs(e/y)) # print('Sai số bình phương trung bình MSE: {}'.format(mse)) # print('Root Mean Square Error: {}'.format(rmse)) # print('Mean Absolute Error: {}'.format(mae)) # print('Mean Absolute Percentage Error: {}'.format(mape)) return mse, rmse, mae, mape @app.route('/revenuemonth', methods=["POST"]) def revenue_month_num(): total_month=saleMonth() file1=
pd.read_json('sale_month.json',orient='index')
pandas.read_json
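# A minimal, hypothetical round trip showing the pd.read_json(..., orient='index')
# call completed above; 'example_month.json' and the totals are invented.
import pandas as pd

totals = pd.DataFrame({'month_year': ['2021-01', '2021-02'], 'total': [10, 12]})
totals.to_json('example_month.json', orient='index')

file1 = pd.read_json('example_month.json', orient='index')
print(file1['month_year'].values, file1['total'].values)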
#!/usr/bin/env python # -- coding: utf-8 -- # PAQUETES PARA CORRER OP. import netCDF4 import pandas as pd import numpy as np import datetime as dt import json import wmf.wmf as wmf import hydroeval import glob import MySQLdb #modulo pa correr modelo import hidrologia from sklearn.linear_model import LinearRegression import math import os #spatial import cartopy.crs as crs import geopandas as gpd import pyproj from pyproj import transform from cartopy.feature import ShapelyFeature import cartopy.crs as ccrs from cartopy.io.shapereader import Reader from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER import seaborn as sns sns.set(style="whitegrid") sns.set_context('notebook', font_scale=1.13) #FORMATO # fuente import matplotlib matplotlib.use('Agg') import pylab as pl #avoid warnings import warnings warnings.filterwarnings('ignore') #--------------- #Funciones base. #--------------- def get_rutesList(rutas): ''' Abre el archivo de texto en la ruta: rutas, devuelve una lista de las lineas de ese archivo. Funcion base. #Argumentos rutas: string, path indicado. ''' f = open(rutas,'r') L = f.readlines() f.close() return L def set_modelsettings(ConfigList): ruta_modelset = get_ruta(ConfigList,'ruta_proj')+get_ruta(ConfigList,'ruta_modelset') # model settings Json with open(ruta_modelset, 'r') as f: model_set = json.load(f) # Model set wmf.models.max_aquifer = wmf.models.max_gravita * 10 wmf.models.retorno = model_set['retorno'] wmf.models.show_storage = model_set['show_storage'] wmf.models.separate_fluxes = model_set['separate_fluxes'] wmf.models.dt = model_set['dt'] def round_time(date = dt.datetime.now(),round_mins=5): ''' Rounds datetime object to nearest 'round_time' minutes. If 'dif' is < 'round_time'/2 takes minute behind, else takesminute ahead. Parameters ---------- date : date to round round_mins : round to this nearest minutes interval Returns ---------- datetime object rounded, datetime object ''' dif = date.minute % round_mins if dif <= round_mins/2: return dt.datetime(date.year, date.month, date.day, date.hour, date.minute - (date.minute % round_mins)) else: return dt.datetime(date.year, date.month, date.day, date.hour, date.minute - (date.minute % round_mins)) + dt.timedelta(minutes=round_mins) def get_credentials(ruta_credenciales): credentials = json.load(open(ruta_credenciales)) #creds para consultas mysqlServer = credentials['MySql_Siata'] for key in np.sort(list(credentials['MySql_Siata'].keys()))[::-1]: #1:hal, 2:sal try: connection = MySQLdb.connect(host=mysqlServer[key]['host'], user=mysqlServer[key]['user'], password=mysqlServer[key]['password'], db=mysqlServer[key]['db']) print('SERVER_CON: Succesful connection to %s'%(key)) host=mysqlServer[key]['host'] user=mysqlServer[key]['user'] password=mysqlServer[key]['password'] db=mysqlServer[key]['db'] break #si conecta bien a SAL para. 
except: print('SERVER_CON: No connection to %s'%(key)) pass #creds para copiar a var user2copy2var = credentials['cred_2copy2var']['user']; host2copy2var = credentials['cred_2copy2var']['host'] return host,user,password,db,user2copy2var,host2copy2var def coord2hillID(ruta_nc, df_coordxy): #lee simubasin pa asociar tramos, saca topologia basica cu = wmf.SimuBasin(rute= ruta_nc) cu.GetGeo_Cell_Basics() cu.GetGeo_Parameters() #saca coordenadas de todo el simubasin y las distancias entre ellas coordsX = wmf.cu.basin_coordxy(cu.structure,cu.ncells)[0] coordsY = wmf.cu.basin_coordxy(cu.structure,cu.ncells)[1] disty = np.unique(np.diff(np.unique(np.sort(coordsY)))) distx = np.unique(np.diff(np.unique(np.sort(coordsX)))) df_ids = pd.DataFrame(index = df_coordxy.index,columns=['id']) #identifica el id de la ladera donde caen los ptos for index in df_coordxy.index: df_ids.loc[index]=cu.hills_own[np.where((coordsY+disty[0]/2>df_coordxy.loc[index].values[1]) & (coordsY-disty[0]/2<df_coordxy.loc[index].values[1]) & (coordsX+distx[0]/2>df_coordxy.loc[index].values[0]) & (coordsX-distx[0]/2<df_coordxy.loc[index].values[0]))[0]].data return df_ids #----------------------------------- #----------------------------------- #Funciones de lectura del configfile #----------------------------------- #----------------------------------- def get_ruta(RutesList, key): ''' Busca en una lista 'RutesList' la linea que empieza con el key indicado, entrega rutas. Funcion base. #Argumentos RutesList: Lista que devuelve la funcion en este script get_rutesList() key: string, key indicado para buscar que linea en la lista empieza con el. ''' if any(i.startswith('- **'+key+'**') for i in RutesList): for i in RutesList: if i.startswith('- **'+key+'**'): return i.split(' ')[-1][:-1] else: return 'Aviso: no existe linea con el key especificado' def get_line(RutesList, key): ''' Busca en una lista 'RutesList' la linea que empieza con el key indicado, entrega lineas. Funcion base. #Argumentos RutesList: Lista que devuelve la funcion en este script get_rutesList() key: string, key indicado para buscar que linea en la lista empieza con el. ''' if any(i.startswith('- **'+key+'**') for i in RutesList): for i in RutesList: if i.startswith('- **'+key+'**'): return i[:-1].split(' ')[2:] else: return 'Aviso: no existe linea con el key especificado' def get_modelPlot(RutesList, PlotType = 'Qsim_map'): ''' #Devuelve un diccionario con la informacion de la tabla Plot en el configfile. #Funcion operacional. #Argumentos: - RutesList= lista, es el resultado de leer el configfile con al.get_ruteslist. - PlotType= boolean, tipo del plot? . Default= 'Qsim_map'. ''' for l in RutesList: key = l.split('|')[1].rstrip().lstrip() if key[3:] == PlotType: EjecsList = [i.rstrip().lstrip() for i in l.split('|')[2].split(',')] return EjecsList return key def get_modelPars(RutesList): ''' #Devuelve un diccionario con la informacion de la tabla Calib en el configfile. #Funcion operacional. #Argumentos: - RutesList= lista, es el resultado de leer el configfile con al.get_ruteslist. ''' DCalib = {} for l in RutesList: c = [float(i) for i in l.split('|')[3:-1]] name = l.split('|')[2] DCalib.update({name.rstrip().lstrip(): c}) return DCalib def get_modelPaths(List): ''' #Devuelve un diccionario con la informacion de la tabla Calib en el configfile. #Funcion operacional. #Argumentos: - RutesList= lista, es el resultado de leer el configfile con al.get_ruteslist. 
''' DCalib = {} for l in List: c = [i for i in l.split('|')[3:-1]] name = l.split('|')[2] DCalib.update({name.rstrip().lstrip(): c[0]}) return DCalib def get_modelStore(RutesList): ''' #Devuelve un diccionario con la informacion de la tabla Store en el configfile. #Funcion operacional. #Argumentos: - RutesList= lista, es el resultado de leer el configfile con al.get_ruteslist. ''' DStore = {} for l in RutesList: l = l.split('|') DStore.update({l[1].rstrip().lstrip(): {'Nombre': l[2].rstrip().lstrip(), 'Actualizar': l[3].rstrip().lstrip(), 'Tiempo': float(l[4].rstrip().lstrip()), 'Condition': l[5].rstrip().lstrip(), 'Calib': l[6].rstrip().lstrip(), 'BackSto': l[7].rstrip().lstrip(), 'Slides': l[8].rstrip().lstrip()}}) return DStore def get_modelStoreLastUpdate(RutesList): ''' #Devuelve un diccionario con la informacion de la tabla Update en el configfile. #Funcion operacional. #Argumentos: - RutesList= lista, es el resultado de leer el configfile con al.get_ruteslist. ''' DStoreUpdate = {} for l in RutesList: l = l.split('|') DStoreUpdate.update({l[1].rstrip().lstrip(): {'Nombre': l[2].rstrip().lstrip(), 'LastUpdate': l[3].rstrip().lstrip()}}) return DStoreUpdate def get_ConfigLines(RutesList, key, keyTable = None, PlotType = None): ''' #Devuelve un diccionario con la informacion de las tablas en el configfile: Calib, Store, Update, Plot. #Funcion operacional. #Argumentos: - RutesList= lista, es el resultado de leer el configfile con al.get_ruteslist. - key= string, palabra clave de la tabla que se quiere leer. Puede ser: -s,-t. - Calib_Storage= string, palabra clave de la tabla que se quiere leer. Puede ser: Calib, Store, Update, Plot. - PlotType= boolean, tipo del plot? . Default= None. ''' List = [] for i in RutesList: if i.startswith('|'+key) or i.startswith('| '+key): List.append(i) if len(List)>0: if keyTable == 'Pars': return get_modelPars(List) if keyTable == 'Paths': return get_modelPaths(List) if keyTable == 'Store': return get_modelStore(List) if keyTable == 'Update': return get_modelStoreLastUpdate(List) if keyTable == 'Plot': return get_modelPlot(List, PlotType=PlotType) return List else: return 'Aviso: no se encuentran lineas con el key de inicio especificado.' 
#----------------------------------- #----------------------------------- #Funciones generacion de radar #----------------------------------- #----------------------------------- def file_format(start,end): ''' Returns the file format customized for siata for elements containing starting and ending point Parameters ---------- start : initial date end : final date Returns ---------- file format with datetimes like %Y%m%d%H%M Example ---------- ''' start,end = pd.to_datetime(start),pd.to_datetime(end) format = '%Y%m%d%H%M' return '%s-%s'%(start.strftime(format),end.strftime(format)) def hdr_to_series(path): ''' Reads hdr rain files and converts it into pandas Series Parameters ---------- path : path to .hdr file Returns ---------- pandas time Series with mean radar rain ''' s = pd.read_csv(path,skiprows=5,usecols=[2,3]).set_index(' Fecha ')[' Lluvia'] s.index = pd.to_datetime(list(map(lambda x:x.strip()[:10]+' '+x.strip()[11:],s.index))) return s def hdr_to_df(path): ''' Reads hdr rain files and converts it into pandas DataFrame Parameters ---------- path : path to .hdr file Returns ---------- pandas DataFrame with mean radar rain ''' if path.endswith('.hdr') != True: path = path+'.hdr' df = pd.read_csv(path,skiprows=5).set_index(' Fecha ') df.index = pd.to_datetime(list(map(lambda x:x.strip()[:10]+' '+x.strip()[11:],df.index))) df = df.drop('IDfecha',axis=1) df.columns = ['record','mean_rain'] return df def bin_to_df(path,ncells,start=None,end=None,**kwargs): ''' Reads rain fields (.bin) and converts it into pandas DataFrame Parameters ---------- path : path to .hdr and .bin file start : initial date end : final date Returns ---------- pandas DataFrame with mean radar rain Note ---------- path without extension, ejm folder_path/file not folder_path/file.bin, if start and end is None, the program process all the data ''' start,end = pd.to_datetime(start),pd.to_datetime(end) records = df['record'].values rain_field = [] for count,record in enumerate(records): if record != 1: rain_field.append(wmf.models.read_int_basin('%s.bin'%path,record,ncells)[0]/1000.0) count = count+1 # format = (count*100.0/len(records),count,len(records)) else: rain_field.append(np.zeros(ncells)) return pd.DataFrame(np.matrix(rain_field),index=df.index) def get_radar_rain(start,end,Dt,cuenca,codigos,rutaNC,accum=False,path_tif=None,all_radextent=False, mask=None,meanrain_ALL=True,path_masks_csv=None,complete_naninaccum=False,save_bin=False, save_class = False,path_res=None,umbral=0.005, verbose=True, zero_fill = None): start,end = pd.to_datetime(start),pd.to_datetime(end) #hora UTC startUTC,endUTC = start + pd.Timedelta('5 hours'), end + pd.Timedelta('5 hours') fechaI,fechaF,hora_1,hora_2 = startUTC.strftime('%Y-%m-%d'), endUTC.strftime('%Y-%m-%d'),startUTC.strftime('%H:%M'),endUTC.strftime('%H:%M') #Obtiene las fechas por dias para listar archivos por dia datesDias =
pd.date_range(fechaI, fechaF,freq='D')
pandas.date_range
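# A small, hypothetical sketch of the pd.date_range call above: listing the days
# between two dates so files can be grouped per day. The dates are invented.
import pandas as pd

fechaI, fechaF = '2021-03-01', '2021-03-04'
datesDias = pd.date_range(fechaI, fechaF, freq='D')
for day in datesDias:
    print(day.strftime('%Y%m%d'))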
import pandas as pd
from pandas import DataFrame
import sys


#--------
# Imports medi dataset with icd9 and rxcui descriptions to .csv file
# PARAMETERS:
# medi = medi spreadsheet
# icd9_desc = contains icd9 codes and their descriptions
# rxcui_desc = contains rxcui codes and their descriptions
def add_info_to_medi(medi, icd9_desc, rxcui_desc):
    # adding in icd9 descriptions
    df_icd9_desc = pd.read_table(icd9_desc, sep=' ', header=None, usecols=[0, 1])
    df_icd9_desc.columns = ['ICD9', 'ICD9_DESC']

    # adding in rxcui descriptions into the medi spreadsheet
    df_rxcui_desc = pd.read_csv(rxcui_desc, encoding='latin-1').drop_duplicates().groupby('RXCUI_IN')['STR'].apply('; '.join)
    rxcui_desc = pd.DataFrame({'RXCUI_IN': df_rxcui_desc.index, 'STR': df_rxcui_desc.values})

    df_medi = pd.read_csv(medi)
    df_medi_desc = pd.merge(df_medi, rxcui_desc, how='left', on='RXCUI_IN')
    df_rxcui_icd9 = pd.merge(df_medi_desc, df_icd9_desc, how='left', on='ICD9')
    df_rxcui_icd9 = df_rxcui_icd9[['RXCUI_IN', 'STR', 'DRUG_DESC', 'ICD9', 'ICD9_DESC',
                                   'INDICATION_DESCRIPTION', 'MENTIONEDBYRESOURCES',
                                   'HIGHPRECISIONSUBSET', 'POSSIBLE_LABEL_USE']]
    df_rxcui_icd9.to_csv('medi_with_icd9_rxcui.csv', index=False)


#--------
# Imports medi_rxcui_icd9 dataset with icd9-phecode mappings to .csv file
# Maps drug (rxcui codes) with clinical phenotype (phecode) through icd9 codes
# PARAMETERS:
# medi_rxcui_icd9 = medi spreadsheet (created from add_info_to_medi function above) with rxcui + icd9 descriptions
# phecode_icd9_mapping = maps phecodes to icd9 codes
def drug_phenotype(phecode_icd9_mapping, medi_rxcui_icd9):
    df_rxcui_icd9 = pd.read_csv(medi_rxcui_icd9)
    df_phecode_icd9 = pd.read_csv(phecode_icd9_mapping, usecols=['ICD9', 'PheCode'])

    result = pd.merge(df_rxcui_icd9, df_phecode_icd9, how='left', on='ICD9').drop_duplicates().sort_values('RXCUI_IN')
    result.to_csv('drug_phenotype.csv', index=False)
    #print (result)


#--------
# Imports medi_rxcui_icd9 dataset with drug-targeted gene mappings to .csv file
# Maps drugs (rxcui codes) with corresponding targeted genes (HuGOIDs) through unii codes and DrugBank drug IDs
# PARAMETERS:
# unii_rxcui = contains mapping of unii codes to rxcui codes
# unii_drug = contains mapping of unii codes to HuGOIDs (DrugBank), needs to be .txt file
# medi_rxcui_icd9 = medi spreadsheet (created from add_info_to_medi function above) with rxcui + icd9 descriptions
# drug_gene = for each gene, contains list of drugs that target said gene
def drug_gene(unii_rxcui, unii_drug, drug_gene, medi_rxcui_icd9):
    df_unii_rxcui = pd.read_csv(unii_rxcui)
    df_unii_drug = pd.read_table(unii_drug, header=0, sep=':', usecols=['unii', 'drug_id'])
    df_rxcui_icd9 = pd.read_csv(medi_rxcui_icd9)

    # drugbank id and rxcui mapping
    data1 = pd.merge(df_unii_drug, df_unii_rxcui, how='left', on='unii').drop('unii', axis=1).drop_duplicates()

    # splits drugs for each gene in individual cell
    data2 = pd.read_csv(drug_gene, usecols=['Drug IDs', 'Gene Name'])
    df_drugbank_gene = DataFrame(data2['Drug IDs'].str.split('; ').tolist(),
                                 index=data2['Gene Name']).stack().reset_index()[[0, 'Gene Name']]
    # var1 variable is currently labeled 0
    df_drugbank_gene.columns = ['drug_id', 'Gene Name']
    df_drugbank_gene = df_drugbank_gene.dropna(how='any', axis=0)

    # for each drug combines all targeted genes into one cell
    data3 = df_drugbank_gene.drop_duplicates().groupby('drug_id')['Gene Name'].apply('; '.join)
    data4 = pd.DataFrame({'drug_id': data3.index, 'Gene Name': data3.values})

    drug_rxcui = pd.merge(data1, data4, how='left', on='drug_id').drop_duplicates()
    result = pd.merge(df_rxcui_icd9, drug_rxcui, how='left', on='RXCUI_IN')
    result.to_csv('drug_gene.csv', index=False)
    #print (result)


#--------
# Imports dataset with mappings between gwas phenotype and clinical phenotype through snp
# Merges gwas phenotype with phewas phenotype (phecode) by SNP
'''
** was not used when creating all-drugs-gwas-data.csv, mapping by SNP was deemed
to be not as accurate as using gwas_catalog-phewas.csv, which already had gwas
phenotype (disease column) mapped to phecode
'''
# PARAMETERS:
# gwas = contains gwas/inrich associations between drug-phenotype pairs
# phewas = contains mapping between phecodes and gwas phenotypes (by name)
def gene_phenotype(gwas, phewas):
    df_gwas = pd.read_table(gwas, header=0, sep=' ')
    df_phewas =
pd.read_csv(phewas, usecols=['snp', 'jd_code', 'jd_string'])
pandas.read_csv
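# A minimal, hypothetical sketch of the pd.read_csv(..., usecols=...) call above:
# loading only the SNP/phecode columns needed for the merge. The file name
# 'gwas_catalog-phewas.csv' is taken from the snippet's comments; the data itself
# is assumed to exist.
import pandas as pd

df_phewas = pd.read_csv('gwas_catalog-phewas.csv', usecols=['snp', 'jd_code', 'jd_string'])
print(df_phewas.head())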
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 25 13:55:59 2021

@author: tatia
"""
from dataproc.cohort import query_esbl_pts, remove_dups, observation_window
from dataproc.sampling import generate_samples
from dataproc.sampling import stratify_set
from dataproc.roc_auc_curves import plt_roc_auc_curve, plt_precision_recall_curve
from dataproc.create_dataset import dataset_creation
from dataproc.create_dataset import prescriptions
from dataproc.create_dataset import previous_admissions
from dataproc.create_dataset import open_wounds_diags, intubation_cpt, noteevents
from dataproc.embeddings import loinc_values
from hyper_params import HyperParams
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer
import re

# load hyperparams instance
params = HyperParams()


def cohort_creation(observation_window_hours):
    # Select esbl microbiology test
    esbl_admits = query_esbl_pts()
    # Remove dups
    esbl_admits = remove_dups(esbl_admits)
    # Create observation window
    esbl_admits_window = observation_window(esbl_admits, window_size=observation_window_hours)
    # Subset columns
    pts_labels = esbl_admits_window[['hadm_id', 'index_date', 'RESISTANT_YN']]
    return pts_labels


def loinc_values_proc(loinc_codes):
    loinc_vals = loinc_values(loinc_codes)
    loinc_vals.dropna(subset=['value'], inplace=True)
    loinc_vals = loinc_vals.astype({'value': 'string', 'loinc_code': 'category'})
    loinc_vals['value'] = loinc_vals['value'].map(lambda x: x.lstrip('LESS THAN '))
    loinc_vals['value'] = loinc_vals['value'].map(lambda x: x.lstrip('GREATER THAN '))
    loinc_vals['value'] = loinc_vals['value'].map(lambda x: x.lstrip('>GREATER THAN '))
    loinc_vals['value'] = loinc_vals['value'].map(lambda x: x.lstrip('<LESS THAN '))
    loinc_vals['value'] = loinc_vals['value'].map(lambda x: x.rstrip(' NG/ML'))
    loinc_vals['value'] = loinc_vals['value'].map(lambda x: x.lstrip('<>'))
    loinc_vals['value'] = loinc_vals['value'].map(lambda x: x.replace(',', '.'))
    loinc_vals.drop(list(loinc_vals.loc[loinc_vals['value'] == 'UNABLE TO ANALYZE'].index), inplace=True)
    loinc_vals.drop(list(loinc_vals.loc[loinc_vals['value'] == 'MOLYSIS FALSELY DECREASES THIS RESULT'].index), inplace=True)
    loinc_vals.drop(list(loinc_vals.loc[loinc_vals['value'] == 'COMPUTER NETWORK FAILURE. TEST NOT RESULTED.'].index), inplace=True)
    loinc_vals.drop(list(loinc_vals.loc[loinc_vals['value'] == 'UNABLE TO DETERMINE'].index), inplace=True)
    loinc_vals.drop(list(loinc_vals.loc[loinc_vals['value'] == ':UNABLE TO DETERMINE'].index), inplace=True)
    loinc_vals.drop(list(loinc_vals.loc[loinc_vals['value'] == 'UNABLE TO QUANTITATE'].index), inplace=True)
    loinc_vals.drop(list(loinc_vals.loc[loinc_vals['value'] == 'UNABLE TO REPORT'].index), inplace=True)

    return loinc_vals


def lab_records_categories(loinc_vals):
    numeric = []
    categorical = []
    weird = []

    for code in loinc_codes:
        size = len(loinc_vals.loc[loinc_vals['loinc_code'] == str(code), 'value'])
        size_unique = len(loinc_vals.loc[loinc_vals['loinc_code'] == str(code), 'value'].unique())
        sum_na = pd.to_numeric(loinc_vals.loc[loinc_vals['loinc_code'] == str(code), 'value'], errors='coerce').isna().sum()
        if sum_na / size < 0.05:
            numeric.append(code)
        elif sum_na / size > 0.05 and size_unique < 100:
            categorical.append(code)
        else:
            weird.append(code)

    # Remove columns that are not useful:
    # remove lab column that contains only 'inf' and 'Nan'
    numeric.remove('26498-6')
    # remove lab column that only contains phrase 'See comments'
    categorical.remove('33914-3')
    # remove lab column that contains phrase 'Random'
    categorical.remove('13362-9')

    return numeric, categorical, weird


def sum_stats_numeric_labs(loinc_vals, numeric):
    numeric_stats = []
    for code in numeric:
        a = pd.to_numeric(loinc_vals.loc[loinc_vals['loinc_code'] == str(code), 'value'], errors='coerce').describe()
        numeric_stats.append(a)
    numeric_stats_df =
pd.concat(numeric_stats, axis=1, keys=numeric)
pandas.concat
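# A minimal, hypothetical sketch of pd.concat(..., axis=1, keys=...) as completed
# above: placing per-code describe() summaries side by side, one column per lab
# code. The codes and values here are invented.
import pandas as pd

numeric = ['718-7', '2345-7']
numeric_stats = [pd.Series([1.0, 2.0, 3.0]).describe(),
                 pd.Series([10.0, 20.0]).describe()]
numeric_stats_df = pd.concat(numeric_stats, axis=1, keys=numeric)
print(numeric_stats_df)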
# util.py from __future__ import print_function from collections import Mapping, OrderedDict import datetime import itertools import random import warnings import pandas as pd np = pd.np from scipy import integrate from matplotlib import pyplot as plt import seaborn from scipy.optimize import minimize from scipy.signal import correlate from titlecase import titlecase from pug.nlp.util import listify, fuzzy_get, make_timestamp def dropna(x): """Delete all NaNs and and infinities in a sequence of real values Returns: list: Array of all values in x that are between -inf and +inf, exclusive """ return [x_i for x_i in listify(x) if float('-inf') < x_i < float('inf')] def rms(x): """"Root Mean Square" Arguments: x (seq of float): A sequence of numerical values Returns: The square root of the average of the squares of the values math.sqrt(sum(x_i**2 for x_i in x) / len(x)) or return (np.array(x) ** 2).mean() ** 0.5 >>> rms([0, 2, 4, 4]) 3.0 """ try: return (np.array(x) ** 2).mean() ** 0.5 except: x = np.array(dropna(x)) invN = 1.0 / len(x) return (sum(invN * (x_i ** 2) for x_i in x)) ** .5 def rmse(target, prediction, relative=False, percent=False): """Root Mean Square Error This seems like a simple formula that you'd never need to create a function for. But my mistakes on coding challenges have convinced me that I do need it, as a reminder of important tweaks, if nothing else. >>> rmse([0, 1, 4, 3], [2, 1, 0, -1]) 3.0 >>> rmse([0, 1, 4, 3], [2, 1, 0, -1], relative=True) # doctest: +ELLIPSIS 1.2247... >>> rmse([0, 1, 4, 3], [2, 1, 0, -1], percent=True) # doctest: +ELLIPSIS 122.47... """ relative = relative or percent prediction = pd.np.array(prediction) target = np.array(target) err = prediction - target if relative: denom = target # Avoid ZeroDivisionError: divide by prediction rather than target where target==0 denom[denom == 0] = prediction[denom == 0] # If the prediction and target are both 0, then the error is 0 and should be included in the RMSE # Otherwise, the np.isinf() below would remove all these zero-error predictions from the array. 
denom[(denom == 0) & (target == 0)] = 1 err = (err / denom) err = err[(~ np.isnan(err)) & (~ np.isinf(err))] return 100 * rms(err) if percent else rms(err) def blended_rolling_apply(series, window=2, fun=pd.np.mean): new_series = pd.Series(np.fromiter((fun(series[:i + 1]) for i in range(window - 1)), type(series.values[0])), index=series.index[:window - 1]).append( pd.rolling_apply(series.copy(), window, fun)[window - 1:]) assert len(series) == len(new_series), ( "blended_rolling_apply should always return a series of the same length!\n" " len(series) = {0} != {1} = len(new_series".format(len(series), len(new_series))) assert not any(np.isnan(val) or val is None for val in new_series) return new_series def rolling_latch(series, period=31, decay=1.0): # FIXME: implement recursive exponential decay filter rather than the nonrecursive, deratring done here return blended_rolling_apply(series, period, lambda val: decay * pd.np.max(val)) def clean_dataframe(df): """Fill NaNs with the previous value, the next value or if all are NaN then 1.0""" df = df.fillna(method='ffill') df = df.fillna(0.0) return df def clean_dataframes(dfs): """Fill NaNs with the previous value, the next value or if all are NaN then 1.0 TODO: Linear interpolation and extrapolation Arguments: dfs (list of dataframes): list of dataframes that contain NaNs to be removed Returns: list of dataframes: list of dataframes with NaNs replaced by interpolated values """ if isinstance(dfs, (list)): for df in dfs: df = clean_dataframe(df) return dfs else: return [clean_dataframe(dfs)] def get_symbols_from_list(list_name): """Retrieve a named (symbol list name) list of strings (symbols) If you've installed the QSTK Quantitative analysis toolkit `get_symbols_from_list('sp5002012')` will produce a list of the symbols that were members of the S&P 500 in 2012. Otherwise an import error exception will be raised. If the symbol list cannot be found you'll get an empty list returned Example: >> len(get_symbols_from_list('sp5002012')) in (0, 501) True """ try: # quant software toolkit has a method for retrieving lists of symbols like S&P500 for 2012 with 'sp5002012' import QSTK.qstkutil.DataAccess as da dataobj = da.DataAccess('Yahoo') except ImportError: raise except: return [] try: return dataobj.get_symbols_from_list(list_name) except: raise def make_symbols(symbols, *args): """Return a list of uppercase strings like "GOOG", "$SPX, "XOM"... 
Arguments: symbols (str or list of str): list of market ticker symbols to normalize If `symbols` is a str a get_symbols_from_list() call is used to retrieve the list of symbols Returns: list of str: list of cananical ticker symbol strings (typically after .upper().strip()) See Also: pug.dj.db.normalize_names Examples: >>> make_symbols("Goog") ['GOOG'] >>> make_symbols(" $SPX ", " aaPL ") ['$SPX', 'AAPL'] >>> make_symbols(["$SPX", ["GOOG", "AAPL"]]) ['GOOG', 'AAPL', '$SPX'] >>> make_symbols(" $Spy, Goog, aAPL ") ['$SPY', 'GOOG', 'AAPL'] """ if (hasattr(symbols, '__iter__') and not any(symbols)) \ or (isinstance(symbols, (list, tuple, Mapping)) and not symbols): return [] if isinstance(symbols, basestring): # # FIXME: find a direct API for listing all possible symbols # try: # return list(set(dataobj.get_symbols_from_list(symbols))) # except: return [s.upper().strip() for s in (symbols.split(',') + list(str(a) for a in args))] else: ans = [] for sym in (list(symbols) + list(args)): tmp = make_symbols(sym) ans = ans + tmp return list(set(ans)) def make_time_series(x, t=pd.Timestamp(datetime.datetime(1970, 1, 1)), freq=None): """Convert a 2-D array of time/value pairs (or pair of time/value vectors) into a pd.Series time-series >>> make_time_series(range(3), freq='15min') # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS 1970-01-01 00:00:00 NaN 1970-01-01 00:15:00 NaN 1970-01-01 00:30:00 NaN dtype: float64 """ if isinstance(x, pd.DataFrame): x = pd.Series(x[x.columns[0]]) elif not isinstance(x, pd.Series) and (not isinstance(t, (pd.Series, pd.Index, list, tuple)) or not len(t)): #warnings.warn("Coercing a non-Series") if len(x) == 2: t, x = listify(x[0]), listify(x[1]) elif len(x) >= 2: try: t, x = zip(*x) except (ValueError, IndexError, TypeError): pass x = pd.Series(x) else: if isinstance(t, (datetime.datetime, pd.Timestamp)): t = pd.Timestamp(t) else: x = pd.Series(listify(x), index=listify(t)) if not isinstance(x, pd.Series): raise TypeError("`pug.invest.util.make_time_series(x, t)` expects x to be a type that" " can be coerced to a Series object, but it's type is: {0}" .format(type(x))) # By this point x must be a Series, only question is whether its index needs to be converted to a DatetimeIndex if x.index[0] != 0 and isinstance(x.index[0], (datetime.date, datetime.datetime, pd.Timestamp, basestring, float, np.int64, int)): t = x.index elif isinstance(t, (datetime.date, datetime.datetime, pd.Timestamp, basestring, float, np.int64, int)): if not freq: freq = '15min' warnings.warn('Assumed time series freq to be {0} though no freq argument was provided!' .format(freq), RuntimeWarning) t = pd.date_range(t, periods=len(x), freq=freq) x = pd.Series(x, index=t) if isinstance(x, pd.Series): x.index = pd.DatetimeIndex(x.index.values) return x def pandas_mesh(df): """Create numpy 2-D "meshgrid" from 3+ columns in a Pandas DataFrame Arguments: df (DataFrame): Must have 3 or 4 columns of numerical data Returns: OrderedDict: column labels from the data frame are the keys, values are 2-D matrices All matrices have shape NxM, where N = len(set(df.iloc[:,0])) and M = len(set(df.iloc[:,1])) >>> pandas_mesh(pd.DataFrame(np.arange(18).reshape(3,6), ... 
columns=list('ABCDEF'))).values() # doctest: +NORMALIZE_WHITESPACE [array([[ 0, 6, 12], [ 0, 6, 12], [ 0, 6, 12]]), array([[ 1, 1, 1], [ 7, 7, 7], [13, 13, 13]]), array([[ 2., nan, nan], [ nan, 8., nan], [ nan, nan, 14.]]), array([[ 3., nan, nan], [ nan, 9., nan], [ nan, nan, 15.]]), array([[ 4., nan, nan], [ nan, 10., nan], [ nan, nan, 16.]]), array([[ 5., nan, nan], [ nan, 11., nan], [ nan, nan, 17.]])] """ xyz = [df[c].values for c in df.columns] index = pd.MultiIndex.from_tuples(zip(xyz[0], xyz[1]), names=['x', 'y']) # print(index) series = [pd.Series(values, index=index) for values in xyz[2:]] # print(series) X, Y = np.meshgrid(sorted(list(set(xyz[0]))), sorted(list(set(xyz[1])))) N, M = X.shape Zs = [] # print(Zs) for k, s in enumerate(series): Z = np.empty(X.shape) Z[:] = np.nan for i, j in itertools.product(range(N), range(M)): Z[i, j] = s.get((X[i, j], Y[i, j]), np.NAN) Zs += [Z] return OrderedDict((df.columns[i], m) for i, m in enumerate([X, Y] + Zs)) def integrated_change(ts, integrator=integrate.trapz, clip_floor=None, clip_ceil=float('inf')): """Total value * time above the starting value within a TimeSeries""" integrator = get_integrator(integrator) if clip_floor is None: clip_floor = ts[0] if clip_ceil < clip_floor: polarity = -1 offset, clip_floor, clip_ceil, = clip_ceil, clip_ceil, clip_floor else: polarity, offset = 1, clip_floor clipped_values = np.clip(ts.values - offset, clip_floor, clip_ceil) print(polarity, offset, clip_floor, clip_ceil) print(clipped_values) integrator_types = set(['trapz', 'cumtrapz', 'simps', 'romb']) if integrator in integrator_types: integrator = getattr(integrate, integrator) integrator = integrator or integrate.trapz # datetime units converted to seconds (since 1/1/1970) return integrator(clipped_values, ts.index.astype(np.int64) / 10 ** 9) def insert_crossings(ts, thresh): """Insert/append threshold crossing points (time and value) into a timeseries (pd.Series) Arguments: ts (pandas.Series): Time series of values to be interpolated at `thresh` crossings thresh (float or np.float64): """ # import time # tic0 = time.clock(); tic = tic0 # int64 for fast processing, pandas.DatetimeIndex is 5-10x slower, 0.3 ms index = ts.index index_type = type(index) ts.index = ts.index.astype(np.int64) # toc = time.clock(); # print((toc-tic)*1000); tic = time.clock() # value immediately before an upward thresh crossing, 6 ms preup = ts[(ts < thresh) & (ts.shift(-1) > thresh)] # toc = time.clock(); # print((toc-tic)*1000); tic = time.clock() # values immediately after an upward thresh crossing, 4 ms\ postup = ts[(ts.shift(1) < thresh) & (ts > thresh)] # toc = time.clock(); # print((toc-tic)*1000); tic = time.clock() # value immediately after a downward thresh crossing, 1.8 ms postdown = ts[(ts < thresh) & (ts.shift(1) > thresh)] # toc = time.clock(); # print((toc-tic)*1000); tic = time.clock() # value immediately before an upward thresh crossing, 1.9 ms predown = ts[(ts.shift(-1) < thresh) & (ts > thresh)] # toc = time.clock(); # print((toc-tic)*1000); tic = time.clock() # upward slope (always positive) between preup and postup in units of # "value" per nanosecond (timestamps convert to floats as nanoseconds), 0.04 ms slopeup = (postup.values - preup.values) / (postup.index.values - preup.index.values).astype(np.float64) # toc = time.clock(); # print((toc-tic)*1000); tic = time.clock() # upward crossing point index/time, 0.04 ms tup = preup.index.values + ((thresh - preup.values) / slopeup).astype(np.int64) # toc = time.clock(); # print((toc-tic)*1000); tic = 
time.clock() # downward slope (always negative) between predown and postdown in units of # "value" per nanosecond (timestamps convert to floats as nanoseconds), 0.03 ms slopedown = (postdown.values - predown.values) / \ (postdown.index.values - predown.index.values).astype(np.float64) # toc = time.clock(); # print((toc-tic)*1000); tic = time.clock() # upward crossing point index/time, 0.02 ms tdown = predown.index.values + ((thresh - predown.values) / slopedown).astype(np.int64) # toc = time.clock(); # print((toc-tic)*1000); tic = time.clock() # insert crossing points into time-series (if it had a regular sample period before, it won't now!), 2.0 ms ts.index = index # pd.DatetimeIndex(ts.index) # toc = time.clock(); # print((toc-tic)*1000); tic = time.clock() # insert crossing points into time-series (if it had a regular sample period before, it won't now!), 2.0 ms ts = ts.append(pd.Series(thresh * np.ones(len(tup)), index=index_type(tup.astype(np.int64)))) # toc = time.clock(); # print((toc-tic)*1000); tic = time.clock() # insert crossing points into time-series (if it had a regular sample period before, it won't now!), 1.9 ms ts = ts.append(pd.Series(thresh * np.ones(len(tdown)), index=index_type(tdown.astype(np.int64)))) # toc = time.clock(); # print((toc-tic)*1000); tic = time.clock() # if you don't `sort_index()`, numerical integrators in `scipy.integrate` will give the wrong answer, 0.1 ms ts = ts.sort_index() # toc = time.clock(); # if you don't `sort_index()`, numerical integrators in `scipy.integrate` will give the wrong answer # print((toc-tic)*1000); tic = time.clock() # print((toc-tic0)*1000); return ts def get_integrator(integrator): """Return the scipy.integrator indicated by an index, name, or integrator_function >> get_integrator(0) """ integrator_types = set(['trapz', 'cumtrapz', 'simps', 'romb']) integrator_funcs = [integrate.trapz, integrate.cumtrapz, integrate.simps, integrate.romb] if isinstance(integrator, int) and 0 <= integrator < len(integrator_types): integrator = integrator_types[integrator] if isinstance(integrator, basestring) and integrator in integrator_types: return getattr(integrate, integrator) elif integrator in integrator_funcs: return integrator else: print('Unsupported integration rule: {0}'.format(integrator)) print('Expecting one of these sample-based integration rules: %s' % (str(list(integrator_types)))) raise AttributeError return integrator def clipped_area(ts, thresh=0, integrator=integrate.trapz): """Total value * time above the starting value within a TimeSeries Arguments: ts (pandas.Series): Time series to be integrated. thresh (float): Value to clip the tops off at (crossings will be interpolated) References: http://nbviewer.ipython.org/gist/kermit666/5720498 >>> t = ['2014-12-09T00:00', '2014-12-09T00:15', '2014-12-09T00:30', '2014-12-09T00:45', ... '2014-12-09T01:00', '2014-12-09T01:15', '2014-12-09T01:30', '2014-12-09T01:45'] >>> import pandas as pd >>> ts = pd.Series([217, 234, 235, 231, 219, 219, 231, 232], index=pd.to_datetime(t)) >>> clipped_area(ts, thresh=230) # doctest: +ELLIPSIS 8598.52941... >>> clipped_area(ts, thresh=234) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE 562.5 >>> clipped_area(pd.Series(ts.values, index=ts.index.values.astype(pd.np.int64)), ... 
thresh=234) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE 562.5 """ integrator = get_integrator(integrator or 0) ts = insert_crossings(ts, thresh) - thresh ts = ts[ts >= 0] # timestamp is in nanoseconds (since 1/1/1970) but this converts it to seconds (SI units) return integrator(ts, ts.index.astype(np.int64)) / 1.0e9 def clipping_params(ts, capacity=100, rate_limit=float('inf'), method=None, max_attempts=100): """Start, end, and threshold that clips the value of a time series the most, given a limitted "capacity" and "rate" Assumes that signal can be linearly interpolated between points (trapezoidal integration) Arguments: ts (TimeSeries): Time series to attempt to clip to as low a max value as possible capacity (float): Total "funds" or "energy" available for clipping (integrated area under time series) method (str): scipy optimization algorithm name, one of: 'L-BFGS-B': Byrd, 1995, "A Limited Memory Algorithm for Bound Constrained Optimization" 'TNC': Truncated Newton in C, or Newton Conjugate-Gradient, each variable may be constrained with upper and lower bounds 'COBYLA': Constrained Optimization by Linear Approximation. Fortran implementation. 'SLSQP': Kraft, 1988, Sequential Least Squares Programming or Quadratic Programming, infinite bounds converted to large floats TODO: Bisection search for the optimal threshold. Returns: 2-tuple: Timestamp of the start and end of the period of the maximum clipped integrated increase >>> t = ['2014-12-09T00:00', '2014-12-09T00:15', '2014-12-09T00:30', '2014-12-09T00:45', '2014-12-09T01:00', '2014-12-09T01:15', '2014-12-09T01:30', '2014-12-09T01:45'] >>> import pandas as pd >>> ts = pd.Series([217, 234, 235, 231, 219, 219, 231, 232], index=pd.to_datetime(t)) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE >>> clipping_params(ts, capacity=60000)['threshold'] # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE 218.13... >>> clipping_params(ts, capacity=30000)['threshold'] # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE 224.15358... """ VALID_METHODS = ['L-BFGS-B', 'TNC', 'SLSQP', 'COBYLA'] # print('in clipping params for ts.index={0} and method={1}'.format(ts.index[0], method)) ts.index = ts.index.astype(np.int64) costs = [] def cost_fun(x, *args): thresh = x[0] ts, capacity, bounds = args integral = clipped_area(ts, thresh=thresh) terms = np.array([(10. * (integral - capacity) / capacity) ** 2, 2. / 0.1**((bounds[0] - thresh) * capacity / bounds[0]), 2. 
/ 0.1**((thresh - bounds[1]) * capacity / bounds[1]), 1.2 ** (integral / capacity)]) return sum(terms) bounds = (ts.min(), ts.max()) done, attempts = 0, 0 thresh0 = bounds[0] + 0.5 * (bounds[1] - bounds[0]) if not method or not method in VALID_METHODS: while attempts < max_attempts and not done: for optimizer_method in VALID_METHODS: optimum = minimize(fun=cost_fun, x0=[thresh0], bounds=[bounds], args=(ts, capacity, bounds), method=optimizer_method) if optimum.success: done = True break if done: break attempts += 1 thresh0 = bounds[0] + random.random() * (bounds[1] - bounds[0]) else: optimum = minimize(fun=cost_fun, x0=[thresh0], bounds=[bounds], args=(ts, capacity, bounds), method=method) thresh = optimum.x[0] integral = clipped_area(ts, thresh=thresh) params = dict(optimum) params.update({'costs': costs, 'threshold': thresh, 'initial_guess': thresh0, 'attempts': attempts, 'integral': integral, 'method': method}) return params # if integral - capacity > capacity: # return {'t0': None, 't1': None, 'threshold': 0.96*thresh + 0.06*bounds[0][1], 'integral': integral} def discrete_clipping_params(ts, capacity=100, rate_limit=float('inf')): """Start, end, and threshold that clips the value of a time series the most, given a limitted "capacity" and "rate" Assumes that the integrated maximum includes the peak (instantaneous maximum). Assumes that the threshold can only set to one of the values of the Series. Arguments: ts (TimeSeries): Time series to attempt to clip to as low a max value as possible capacity (float): Total "funds" or "energy" available for clipping (integrated area under time series) TODO: Bisection search for the optimal threshold. Returns: 2-tuple: Timestamp of the start and end of the period of the maximum clipped integrated increase >> t = ['2014-12-09T00:00', '2014-12-09T00:15', '2014-12-09T00:30', '2014-12-09T00:45', .. '2014-12-09T01:00', '2014-12-09T01:15', '2014-12-09T01:30', '2014-12-09T01:45'] >> ts = pd.Series([217, 234, 235, 231, 219, 219, 231, 232], index=pd.to_datetime(t)) >> (discrete_clipping_params(ts, capacity=60000) == .. {'integral': 54555.882352942499, 't0': pd.Timestamp('2014-12-09 00:15:00'), .. 't1': pd.Timestamp('2014-12-09 01:45:00'), .. 'threshold': 219}) True >> (discrete_clipping_params(ts, capacity=30000) == .. {'integral': 5638.2352941179997, 't0': pd.Timestamp('2014-12-09 00:15:00'), .. 't1': pd.Timestamp('2014-12-09 01:45:00'), .. 'threshold': 231}) True """ raise NotImplementedError("Doesn't work. 
Returns incorrect, overly conservative threshold values.") #index_type = ts.index.dtype #ts2 = ts.copy() ts.index = ts.index.astype(np.int64) ts_sorted = ts.order(ascending=False) # default is to clip right at the peak (no clipping at all) i, t0, t1, integral, thresh = 1, ts_sorted.index[0], ts_sorted.index[0], 0, ts_sorted.iloc[0] params = {'t0': t0, 't1': t1, 'integral': 0, 'threshold': thresh} while i < len(ts_sorted) and integral <= capacity and (ts_sorted.iloc[0] - ts_sorted.iloc[i]) < rate_limit: params = {'t0': pd.Timestamp(t0), 't1': pd.Timestamp(t1), 'threshold': thresh, 'integral': integral} i += 1 times = ts_sorted.index[:i] # print(times) t0 = times.min() t1 = times.max() # print(ts_sorted.index[:3]) thresh = min(ts_sorted.iloc[:i]) integral = clipped_area(ts, thresh=thresh) if integral <= capacity: return {'t0': pd.Timestamp(t0), 't1': pd.Timestamp(t1), 'threshold': thresh, 'integral': integral} return params def square_off(series, time_delta=None, transition_seconds=1): """Insert samples in regularly sampled data to produce stairsteps from ramps when plotted. New samples are 1 second (1e9 ns) before each existing samples, to facilitate plotting and sorting >>> square_off(pd.Series(range(3), index=pd.date_range('2014-01-01', periods=3, freq='15m')), ... time_delta=5.5) # doctest: +NORMALIZE_WHITESPACE 2014-01-31 00:00:00 0 2014-01-31 00:00:05.500000 0 2015-04-30 00:00:00 1 2015-04-30 00:00:05.500000 1 2016-07-31 00:00:00 2 2016-07-31 00:00:05.500000 2 dtype: int64 >>> square_off(pd.Series(range(2), index=pd.date_range('2014-01-01', periods=2, freq='15min')), ... transition_seconds=2.5) # doctest: +NORMALIZE_WHITESPACE 2014-01-01 00:00:00 0 2014-01-01 00:14:57.500000 0 2014-01-01 00:15:00 1 2014-01-01 00:29:57.500000 1 dtype: int64 """ if time_delta: # int, float means delta is in seconds (not years!) if isinstance(time_delta, (int, float)): time_delta = datetime.timedelta(0, time_delta) new_times = series.index + time_delta else: diff = np.diff(series.index) time_delta = np.append(diff, [diff[-1]]) new_times = series.index + time_delta new_times = pd.DatetimeIndex(new_times) - datetime.timedelta(0, transition_seconds) return pd.concat([series, pd.Series(series.values, index=new_times)]).sort_index() def clipping_threshold(ts, capacity=100, rate_limit=10): """Start and end index (datetime) that clips the price/value of a time series the most Assumes that the integrated maximum includes the peak (instantaneous maximum). Arguments: ts (TimeSeries): Time series of prices or power readings to be "clipped" as much as possible. capacity (float): Total "funds" or "energy" available for clipping (in $ or Joules) The maximum allowed integrated area under time series and above the clipping threshold. rate_limit: Maximum rate at which funds or energy can be expended (in $/s or Watts) The clipping threshold is limitted to no less than the peak power (price rate) minus this rate_limit Returns: dict: Timestamp of the start and end of the period of the maximum clipped integrated increase >>> t = ['2014-12-09T00:00', '2014-12-09T00:15', '2014-12-09T00:30', '2014-12-09T00:45', ... '2014-12-09T01:00', '2014-12-09T01:15', '2014-12-09T01:30', '2014-12-09T01:45'] >>> import pandas as pd >>> ts = pd.Series([217, 234, 235, 231, 219, 219, 231, 232], index=pd.to_datetime(t)) >>> clipping_threshold(ts, capacity=60000) # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS 218.13... >>> clipping_threshold(ts, capacity=30000) # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS 224.15... 
""" params = clipping_params(ts, capacity=capacity, rate_limit=rate_limit) if params: return params['threshold'] return None def join_time_series(serieses, ignore_year=False, T_s=None, aggregator='mean'): """Combine a dict of pd.Series objects into a single pd.DataFrame with optional downsampling FIXME: For ignore_year and multi-year data, the index (in seconds) is computed assuming 366 days per year (leap year). So 3 out of 4 years will have a 1-day (86400 s) gap Arguments: series (dict of Series): dictionary of named timestamp-indexed Series objects ignore_year (bool): ignore the calendar year, but not the season (day of year) If True, the DataFrame index will be seconds since the beginning of the year in each Series index, i.e. midnight Jan 1, 2014 will have index=0 as will Jan 1, 2010 if two Series start on those two dates. T_s (float): sample period in seconds (for downsampling) aggregator (str or func): e.g. 'mean', 'sum', np.std """ if ignore_year: df = pd.DataFrame() for name, ts in serieses.iteritems(): # FIXME: deal with leap years sod = np.array(map(lambda x: (x.hour * 3600 + x.minute * 60 + x.second), ts.index.time)) # Coerce soy to an integer so that merge/join operations identify same values # (floats don't equal!?) soy = (ts.index.dayofyear + 366 * (ts.index.year - ts.index.year[0])) * 3600 * 24 + sod ts2 = pd.Series(ts.values, index=soy) ts2 = ts2.dropna() ts2 = ts2.sort_index() df2 = pd.DataFrame({name: ts2.values}, index=soy) df = df.join(df2, how='outer') if T_s and aggregator: df = df.groupby(lambda x: int(x / float(T_s))).aggregate(dict((name, aggregator) for name in df.columns)) else: df = pd.DataFrame(serieses) if T_s and aggregator: x0 = df.index[0] df = df.groupby(lambda x: int((x - x0).total_seconds() / float(T_s))).aggregate(dict((name, aggregator) for name in df.columns)) # FIXME: convert seconds since begninning of first year back into Timestamp instances return df def simulate(t=1000, poly=(0.,), sinusoids=None, sigma=0, rw=0, irw=0, rrw=0): """Simulate a random signal with seasonal (sinusoids), linear and quadratic trend, RW, IRW, and RRW Arguments: t (int or list of float): number of samples or time vector, default = 1000 poly (list of float): polynomial coefficients (in decreasing "order") passed to `numpy.polyval` i.e. poly[0]*x**(N-1) + ... + poly[N-1] sinusoids (list of list): [[period], [amplitude, period], or [ampl., period, phase]] >>> len(simulate(poly=(0,),rrw=1)) 1000 >>> simulate(t=range(3), poly=(1,2)) # doctest: +NORMALIZE_WHITESPACE 0 2 1 3 2 4 dtype: float64 >>> all(simulate(t=50, sinusoids=((1,2,3),)) == simulate(t=range(50), sinusoids=((1,2,3),))) True >>> any(simulate(t=100)) False >>> abs(simulate(sinusoids=42.42).values[1] + simulate(sinusoids=42.42).values[-1]) < 1e-10 True >>> simulate(t=17,sinusoids=[42, 16]).min() -42.0 >>> all((simulate(t=range(10), sinusoids=(1, 9, 4.5))+simulate(t=10, sinusoids=(1,9))).abs() < 1e-10) True """ if t and isinstance(t, int): t = np.arange(t, dtype=np.float64) else: t = np.array(t, dtype=np.float64) N = len(t) poly = poly or (0.,) poly = listify(poly) y = np.polyval(poly, t) sinusoids = listify(sinusoids or []) if any(isinstance(ATP, (int, float)) for ATP in sinusoids): sinusoids = [sinusoids] for ATP in sinusoids: # default period is 1 more than the length of the simulated series (no values of the cycle are repeated) T = (t[-1] - t[0]) * N / (N - 1.) 
# default amplitude is 1 and phase is 0 A, P = 1., 0 try: A, T, P = ATP except (TypeError, ValueError): try: A, T = ATP except (TypeError, ValueError): # default period is 1 more than the length of the simulated series # (no values of the cycle are repeated) A = ATP[0] # print(A, T, P) # print(t[1] - t[0]) y += A * np.sin(2 * np.pi * (t - P) / T) if sigma: y += np.random.normal(0.0, float(sigma), N) if rw: y += np.random.normal(0.0, float(rw), N).cumsum() if irw: y += np.random.normal(0.0, float(irw), N).cumsum().cumsum() if rrw: y += np.random.normal(0.0, float(rrw), N).cumsum().cumsum().cumsum() return pd.Series(y, index=t) def normalize_symbols(symbols, *args, **kwargs): """Coerce into a list of uppercase strings like "GOOG", "$SPX, "XOM" Flattens nested lists in `symbols` and converts all list elements to strings Arguments: symbols (str or list of str): list of market ticker symbols to normalize If `symbols` is a str a get_symbols_from_list() call is used to retrieve the list of symbols postrprocess (func): function to apply to strings after they've been stripped default = str.upper FIXME: - list(set(list(symbols))) and `args` separately so symbols may be duplicated in symbols and args - `postprocess` should be a method to facilitate monkey-patching Returns: list of str: list of cananical ticker symbol strings (typically after .upper().strip()) Examples: >> normalize_symbols("Goog,AAPL") ['GOOG', 'AAPL'] >> normalize_symbols(" $SPX ", " aaPL ") ['$SPX', 'AAPL'] >> normalize_symbols(" $SPX ", " aaPL ", postprocess=str) ['$SPX', 'aaPL'] >> normalize_symbols(["$SPX", ["GOOG", "AAPL"]]) ['GOOG', 'AAPL', '$SPX'] >> normalize_symbols("$spy", ["GOOGL", "Apple"], postprocess=str) ['$spy', 'GOOGL', 'Apple'] """ postprocess = kwargs.get('postprocess', None) or str.upper if ( (hasattr(symbols, '__iter__') and not any(symbols)) or (isinstance(symbols, (list, tuple, Mapping)) and (not symbols or not any(symbols)))): return [] args = normalize_symbols(args, postprocess=postprocess) if isinstance(symbols, basestring): try: return list(set(get_symbols_from_list(symbols))) + args except: return [postprocess(s.strip()) for s in symbols.split(',')] + args else: ans = [] for sym in list(symbols): ans += normalize_symbols(sym, postprocess=postprocess) return list(set(ans)) def series_bollinger(series, window=20, sigma=1., plot=False): mean = pd.rolling_mean(series, window=window) std = pd.rolling_std(series, window=window) df = pd.DataFrame({'value': series, 'mean': mean, 'upper': mean + sigma * std, 'lower': mean - sigma * std}) bollinger_values = (series - pd.rolling_mean(series, window=window)) / (pd.rolling_std(series, window=window)) if plot: df.plot() pd.DataFrame({'bollinger': bollinger_values}).plot() plt.show() return bollinger_values def frame_bollinger(df, window=20, sigma=1., plot=False): bol = pd.DataFrame() for col in df.columns: bol[col] = series_bollinger(df[col], plot=False) return bol def double_sinc(T_0=120, T_N=240, T_s=0.01, A=[1, .9], sigma=0.01, T_cyc=10, N_cyc=[3, 2], verbosity=0): # T0, TN, A, sigma = np.array(T0), np.array(TN), np.array(A), np.array(sigma) N = int(T_N / T_s) t = np.arange(0, T_N, T_s) # t_mid = 0.5 * (t[-1] + t[0]) e = sigma * np.random.randn(N) x = A[0] * np.sinc(((t - T_0) * N_cyc[0] * 2 / T_cyc) % T_cyc) * np.sinc((t - T_0) * N_cyc[1] * 2 / t[-1]) y = x + e df = pd.DataFrame({'x': x, 'y': y}, index=t) if verbosity > 0: df.plot() plt.show(block=False) return df def sinc_signals(T0=[60, 120], TN=[240, 160], A=[1, .9], sigma=[.03, .02], T_cyc=10, Ts=0.01): T0, TN, 
A, sigma = np.array(T0), np.array(TN), np.array(A), np.array(sigma) N1 = int(TN[0] / Ts) N2 = int(TN[1] / Ts) i1 = np.arange(0, N1) i2 = np.arange(0, N2) t1 = T0[0] + i1 * Ts t2 = t1[i2 + int((T0[1] - T0[0]) / Ts)] e1 = sigma[0] * np.random.randn(N1) e2 = sigma[1] * np.random.randn(N2) signal = A[0] * np.sinc((t1[i1] * 5. / T_cyc) % T_cyc) * np.sinc((t1[i1]) * 4 / t1[-1]) x1 = signal + e1 x2 = signal[i2 + int((T0[1] - T0[0]) / Ts)] + e2 df = pd.DataFrame({'signal 1': pd.Series(x1, index=t1), 'signal 2': pd.Series(x2, index=t2)}) df.plot() plt.show(block=False) return df def smooth(x, window_len=11, window='hanning', fill='reflect'): """smooth the data using a window with requested size. Convolve a normalized window with the signal. input: x: signal to be smoothed window_len: the width of the smoothing window window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman' flat window will produce a moving average smoothing. fill: 'reflect' means that the signal is reflected onto both ends before filtering output: the smoothed signal example: t = linspace(-2, 2, 0.1) x = sin(t) + 0.1 * randn(len(t)) y = smooth(x) import seaborn pd.DataFrame({'x': x, 'y': y}, index=t).plot() SEE ALSO: numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman numpy.convolve scipy.signal.lfilter TODO: the window parameter could be the window itself if an array instead of a string NOTE: length(output) != length(input), to correct this: instead of just y. References: http://wiki.scipy.org/Cookbook/SignalSmooth """ # force window_len to be an odd integer so it can be symmetrically applied window_len = int(window_len) window_len += int(not (window_len % 2)) half_len = (window_len - 1) / 2 if x.ndim != 1: raise ValueError("smooth only accepts 1 dimension arrays.") if x.size < window_len: raise ValueError("Input vector needs to be bigger than window size.") if window_len < 3: return x if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']: raise ValueError("The window arg ({}) should be 'flat', 'hanning', 'hamming', 'bartlett', or 'blackman'" .format(window)) s = np.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]] window = window.strip().lower() if window is None or window == 'flat': w = np.ones(window_len, 'd') else: w = getattr(np, window)(window_len) y = np.convolve(w / w.sum(), s, mode='valid') return y[half_len + 1:-half_len] def estimate_shift(x, y, smoother=None, w=None, index_and_value=False, ignore_edge=1/3., method='valid'): """Estimate the time shift between two signals based on their cross correlation Arguements: smoother: Smoothing function applied to correlation values before finding peak w: Window. Sequence of values between 0 and 1 for wind centered on 0-shift to weight correlation by before finding peak. Zero-padded to match width of larger of x and y. 
Default = hanning(max(len(x, y))) Returns: int: number to subtract from an x index to compute a corresponding y index >>> x, y = np.asarray(np.matrix([[0.5, 0.01], [0.01, 1.0]]) * np.random.randn(50,2).T) >>> x[:30-8] = y[8:30] >> estimate_shift(x, y, 'full') -8 >> estimate_shift(x, y, 'valid') -8 >> estimate_shift(y, x, 'full') in [8, 9] True >> estimate_shift(y, x, 'full') in [8, 9] True >> estimate_shift(y, x, 'full') in [8, 9] True """ return NotImplementedError("On Line 965, FIXME: TypeError: object of type 'NoneType' has no len()") method = method or 'valid' try: x = x.dropna() x = x.values except: pass try: y = y.dropna() y = y.values except: pass if len(x) < len(y): swap, x, y = -1, y, x else: swap = +1 Nx, Ny = len(x), len(y) if ignore_edge > 0: yi0 = int(max(Ny * ignore_edge, 1)) yi1 = max(Ny - yi0 - 1, 0) # ignore a large portion of the data in the shorter vector y = y[yi0:yi1] x, y = x - x.mean(), y - y.mean() x, y = x / x.std(), y / y.std() c = np.correlate(x, y, mode=method) print(len(x)) print(len(y)) print(len(w)) print(len(c)) if w is not None: wc = int(np.ceil(len(w) / 2.)) - 1 cc = int(np.ceil(len(c) / 2.)) - 1 w0 = cc - wc print(w0) if w0 > 0: c[:w0], c[-w0:] = 0, 0 c[w0:-w0] = w[:len(c[w0:-w0])] * c[w0:-w0] elif w0 == 0: if len(w) < len(c): w = np.append(w, 0) c = c * w[:len(c)] elif w0 < 0: w0 = abs(w0) w = w[w0:-w0] c[w0:-w0] = w[:len(c[w0:-w0])] * c[w0:-w0] try: c = smoother(c) except: pass offset = imax = c.argmax() offset = offset - yi0 if method == 'full': offset = imax - Nx + 1 # elif method == 'valid': # offset = imax - yi0 elif method == 'same': raise NotImplementedError("Unsure what index value to report for a correlation maximum at i = {}" .format(imax)) offset *= swap if index_and_value: return offset, c[imax] else: return offset estimate_offset = estimate_shift def fuzzy_index_match(possiblities, label, **kwargs): """Find the closest matching column label, key, or integer indexed value Returns: type(label): sequence of immutable objects corresponding to best matches to each object in label if label is an int returns the object (value) in the list of possibilities at that index if label is a str returns the closest str match in possibilities >>> from collections import OrderedDict as odict >>> fuzzy_index_match(pd.DataFrame(pd.np.random.randn(9,4), columns=list('ABCD'), index=range(9)), 'b') 'B' >>> fuzzy_index_match(odict(zip('12345','ABCDE')), 'r2d2') '2' >>> fuzzy_index_match(odict(zip('12345','ABCDE')), 1) '2' >>> fuzzy_index_match(odict(zip('12345','ABCDE')), -1) '5' >>> fuzzy_index_match(odict(zip(range(4),'FOUR')), -4) 0 """ possibilities = list(possiblities) if isinstance(label, basestring): return fuzzy_get(possibilities, label, **kwargs) if isinstance(label, int): return possibilities[label] if isinstance(label, list): return [fuzzy_get(possibilities, lbl) for lbl in label] def get_column_labels(obj): """Retrieve the column labels/keys from any DataFrame or QuerySet-like table object >>> from collections import OrderedDict >>> get_column_labels(OrderedDict(zip('ABC', pd.np.arange(12).reshape((3,4))))) ['A', 'B', 'C'] """ if not isinstance(obj, (list, tuple, pd.np.ndarray)): try: labels = [f.name for f in obj.model._meta.fields] except: try: labels = obj.keys() except: try: labels = dir(obj) except: labels = None elif all(isinstance(heading, basestring) for heading in obj[0]): labels = list(obj[0]) # if obj isn't a reference to a mutable (dict, DataFrame, list, etc), this won't work del obj[0] return labels def make_dataframe(obj, columns=None, 
exclude=None, limit=1e8): """Coerce an iterable, queryset, list or rows, dict of columns, etc into a Pandas DataFrame""" try: obj = obj.objects.all()[:limit] except: pass if isinstance(obj, (pd.Series, list, tuple)): return make_dataframe(pd.DataFrame(obj), columns, exclude, limit) # if the obj is a named tuple, DataFrame, dict of columns, django QuerySet, sql alchemy query result # retrieve the "include"d field/column names from its keys/fields/attributes if columns is None: columns = get_column_labels(obj) if exclude is not None and columns is not None and columns and exclude: columns = [i for i in columns if i not in exclude] try: return pd.DataFrame(list(obj.values(*columns)[:limit])) except: pass try: return pd.DataFrame(obj)[fuzzy_get(obj, columns)] except: pass return pd.DataFrame(obj) def hist(table, field=-1, class_column=None, title='', verbosity=2, **kwargs): """Plot discrete PDFs >>> df = pd.DataFrame(pd.np.random.randn(99,3), columns=list('ABC')) >>> df['Class'] = pd.np.array((pd.np.matrix([1,1,1])*pd.np.matrix(df).T).T > 0) >>> len(hist(df, verbosity=0, class_column='Class')) 3 """ field = fuzzy_index_match(table, field) if not isinstance(table, (pd.DataFrame, basestring)): try: table = make_dataframe(table.objects.filter(**{field + '__isnull': False})) except: table = table # labels = get_column_labels(table) try: table = table[pd.notnull(table[field])] except: pass series_labels = [] if class_column is not None: series_labels = sorted(set(table[class_column])) labels = [str(c) for c in series_labels] + ['all'] default_kwargs = { 'normed': False, 'histtype': 'bar', 'color': seaborn.color_palette(), 'label': labels, 'log': True, 'bins': 10, } default_kwargs.update(kwargs) num_colors = len(default_kwargs['color']) num_labels = len(default_kwargs['label']) default_kwargs['color'] = [default_kwargs['color'][i % num_colors] for i in range(num_labels)] if not title: title = '{} vs. {}'.format(titlecase(str(field).replace('_', ' ')), titlecase(str(class_column).replace('_', ' '))) if verbosity > 0: print('Plotting histogram titled: {}'.format(title)) if verbosity > 1: print('histogram configuration: {}'.format(default_kwargs)) x = [table[(table[class_column].isnull() if pd.isnull(c) else table[class_column] == c)] [field].values for c in series_labels] x += [table[field].values] if not default_kwargs['normed']: default_kwargs['weights'] = [pd.np.ones_like(x_c) / float(len(x_c)) for x_c in x] elif isinstance(default_kwargs['normed'], int) and default_kwargs['normed'] < 0: default_kwargs['normed'] = 0 bins = default_kwargs['bins'] # FIXME: x log scaling doesn't work if False and default_kwargs['log'] and isinstance(bins, int): max_x = max(
pd.np.max(x_c)
pandas.np.max
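The completion above resolves to pandas.np.max, an alias that modern pandas no longer provides; calling numpy directly is the equivalent. Below is a minimal sketch, under the assumption (suggested by the surrounding "x log scaling doesn't work" FIXME) that the truncated branch was computing log-spaced histogram bins; the arrays in `x` and the `bins` count are made-up stand-ins, not the `hist()` helper's real inputs.

import numpy as np

# Illustrative stand-ins for the per-class value arrays that hist() assembles;
# the original code reaches this point only when log-scaled bins are requested.
x = [np.random.lognormal(size=100), np.random.lognormal(size=50)]
bins = 10

max_x = max(np.max(x_c) for x_c in x)              # global maximum across all classes
min_x = min(np.min(x_c[x_c > 0]) for x_c in x)     # smallest positive value, needed for log bins
log_bins = np.logspace(np.log10(min_x), np.log10(max_x), bins + 1)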
'''
Imputation: Helps in filling up the null values
Method1: Removal of null rows
Method2: Filling null values with specified values
Method3: Filling null values with average values
'''
import pandas as pd
import numpy as np


class Imputer:
    def __init__(self, df):
        self.df = df

    def colm_rem(self, colm):
        # Removes the rows that have a null value in the given column
        print("Removing the null value rows of " + colm)
        temp_df = self.df[pd.notnull(self.df[colm])]
        print(temp_df.describe())
        print("\n Do you want to keep the changes[0/1]\n")
        colm_rem_inp = int(input())
        if colm_rem_inp == 1:
            print("updating column")
            self.df = temp_df
        return

    def colm_fill(self, colm, colm_type):
        # Fills the null values of the column with a user-specified value
        print("You can fill the column with element of your choice")
        if colm_type == "obj":
            fill_with = input("Enter the value to fill with")
        else:
            fill_with = int(input("Enter the value to fill with"))
        self.df[colm] = self.df[colm].fillna(fill_with)
        return

    def colm_avg(self, colm):
        # Fills the null values of the column with the column mean
        print("Filling the nan values with the average of the column\n")
        self.df[colm] = self.df[colm].fillna(self.df[colm].mean())
        return

    def colm_median(self, colm):
        # Fills the null values of the column with the column median
        print("Filling the nan values with the median of the column\n")
        self.df[colm] = self.df[colm].fillna(self.df[colm].median())
        return

    def colm_mode(self, colm):
        # Fills the null values of the column with the column mode
        print("Filling the nan values with the mode of the column\n")
        self.df[colm] = self.df[colm].fillna(self.df[colm].mode()[0])
        return

    def suggest_imp(self, colm_names):
        # Loops through all the columns and asks for imputation where needed
        for colm in colm_names:
            colm_null = sum(
pd.isnull(self.df[colm])
pandas.isnull
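As a quick usage illustration of the pd.isnull(...).sum() pattern that suggest_imp is building here, the sketch below runs the same per-column null count on a throwaway demo frame; the column names and values are invented for the example and are not part of the original project.

import numpy as np
import pandas as pd

# Demo data only; the Imputer class above would receive a user-supplied DataFrame.
df = pd.DataFrame({"age": [25, np.nan, 31], "city": ["NY", "LA", None]})

for colm in df.columns:
    colm_null = pd.isnull(df[colm]).sum()   # count of missing entries in this column
    if colm_null:
        print(f"{colm}: {colm_null} null value(s) -> candidate for imputation")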
""" `snps` tools for reading, writing, merging, and remapping SNPs """ """ BSD 3-Clause License Copyright (c) 2019, <NAME> All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ from itertools import groupby, count import os import re import numpy as np import pandas as pd from pandas.api.types import CategoricalDtype from snps.ensembl import EnsemblRestClient from snps.resources import Resources from snps.io import Reader, Writer from snps.utils import save_df_as_csv, Parallelizer, clean_str # set version string with Versioneer from snps._version import get_versions import logging logger = logging.getLogger(__name__) __version__ = get_versions()["version"] del get_versions class SNPs: def __init__( self, file="", only_detect_source=False, assign_par_snps=True, output_dir="output", resources_dir="resources", deduplicate=True, deduplicate_XY_chrom=True, parallelize=False, processes=os.cpu_count(), rsids=(), ): """ Object used to read and parse genotype / raw data files. 
Parameters ---------- file : str or bytes path to file to load or bytes to load only_detect_source : bool only detect the source of the data assign_par_snps : bool assign PAR SNPs to the X and Y chromosomes output_dir : str path to output directory resources_dir : str name / path of resources directory deduplicate : bool deduplicate RSIDs and make SNPs available as `duplicate_snps` deduplicate_XY_chrom : bool deduplicate alleles in the non-PAR regions of X and Y for males; see `discrepant_XY_snps` parallelize : bool utilize multiprocessing to speedup calculations processes : int processes to launch if multiprocessing rsids : tuple, optional rsids to extract if loading a VCF file """ self._file = file self._only_detect_source = only_detect_source self._snps = pd.DataFrame() self._duplicate_snps = pd.DataFrame() self._discrepant_XY_snps = pd.DataFrame() self._source = "" self._phased = False self._build = 0 self._build_detected = False self._output_dir = output_dir self._resources = Resources(resources_dir=resources_dir) self._parallelizer = Parallelizer(parallelize=parallelize, processes=processes) if file: d = self._read_raw_data(file, only_detect_source, rsids) self._snps = d["snps"] self._source = d["source"] self._phased = d["phased"] if not self._snps.empty: self.sort_snps() if deduplicate: self._deduplicate_rsids() self._build = self.detect_build() if not self._build: self._build = 37 # assume Build 37 / GRCh37 if not detected else: self._build_detected = True if deduplicate_XY_chrom: if self.determine_sex() == "Male": self._deduplicate_XY_chrom() if assign_par_snps: self._assign_par_snps() def __repr__(self): return "SNPs({!r})".format(self._file[0:50]) @property def source(self): """ Summary of the SNP data source for ``SNPs``. Returns ------- str """ return self._source @property def snps(self): """ Get a copy of SNPs. Returns ------- pandas.DataFrame """ return self._snps @property def duplicate_snps(self): """ Get any duplicate SNPs. A duplicate SNP has the same RSID as another SNP. The first occurrence of the RSID is not considered a duplicate SNP. Returns ------- pandas.DataFrame """ return self._duplicate_snps @property def discrepant_XY_snps(self): """ Get any discrepant XY SNPs. A discrepant XY SNP is a heterozygous SNP in the non-PAR region of the X or Y chromosome found during deduplication for a detected male genotype. Returns ------- pandas.DataFrame """ return self._discrepant_XY_snps @property def build(self): """ Get the build of ``SNPs``. Returns ------- int """ return self._build @property def build_detected(self): """ Get status indicating if build of ``SNPs`` was detected. Returns ------- bool """ return self._build_detected @property def assembly(self): """ Get the assembly of ``SNPs``. Returns ------- str """ return self.get_assembly() @property def snp_count(self): """ Count of SNPs. Returns ------- int """ return self.get_snp_count() @property def chromosomes(self): """ Chromosomes of ``SNPs``. Returns ------- list list of str chromosomes (e.g., ['1', '2', '3', 'MT'], empty list if no chromosomes """ return self.get_chromosomes() @property def chromosomes_summary(self): """ Summary of the chromosomes of ``SNPs``. Returns ------- str human-readable listing of chromosomes (e.g., '1-3, MT'), empty str if no chromosomes """ return self.get_chromosomes_summary() @property def sex(self): """ Sex derived from ``SNPs``. 
Returns ------- str 'Male' or 'Female' if detected, else empty str """ sex = self.determine_sex(chrom="X") if not sex: sex = self.determine_sex(chrom="Y") return sex @property def unannotated_vcf(self): """ Indicates if VCF file is unannotated. Returns ------- bool """ if self.snp_count == 0 and self.source == "vcf": return True return False @property def phased(self): """ Indicates if genotype is phased. Returns ------- bool """ return self._phased def heterozygous_snps(self, chrom=""): """ Get heterozygous SNPs. Parameters ---------- chrom : str, optional chromosome (e.g., "1", "X", "MT") Returns ------- pandas.DataFrame """ if chrom: return self._snps.loc[ (self._snps.chrom == chrom) & (self._snps.genotype.notnull()) & (self._snps.genotype.str.len() == 2) & (self._snps.genotype.str[0] != self._snps.genotype.str[1]) ] else: return self._snps.loc[ (self._snps.genotype.notnull()) & (self._snps.genotype.str.len() == 2) & (self._snps.genotype.str[0] != self._snps.genotype.str[1]) ] def not_null_snps(self, chrom=""): """ Get not null SNPs. Parameters ---------- chrom : str, optional chromosome (e.g., "1", "X", "MT") Returns ------- pandas.DataFrame """ if chrom: return self._snps.loc[ (self._snps.chrom == chrom) & (self._snps.genotype.notnull()) ] else: return self._snps.loc[self._snps.genotype.notnull()] def get_summary(self): """ Get summary of ``SNPs``. Returns ------- dict summary info if ``SNPs`` is valid, else {} """ if not self.is_valid(): return {} else: return { "source": self.source, "assembly": self.assembly, "build": self.build, "build_detected": self.build_detected, "snp_count": self.snp_count, "chromosomes": self.chromosomes_summary, "sex": self.sex, } def is_valid(self): """ Determine if ``SNPs`` is valid. ``SNPs`` is valid when the input file has been successfully parsed. Returns ------- bool True if ``SNPs`` is valid """ if self._snps.empty: return False else: return True def save_snps(self, filename="", vcf=False, atomic=True, **kwargs): """ Save SNPs to file. Parameters ---------- filename : str or buffer filename for file to save or buffer to write to vcf : bool flag to save file as VCF atomic : bool atomically write output to a file on local filesystem **kwargs additional parameters to `pandas.DataFrame.to_csv` Returns ------- str path to file in output directory if SNPs were saved, else empty str """ return Writer.write_file( snps=self, filename=filename, vcf=vcf, atomic=atomic, **kwargs ) def _read_raw_data(self, file, only_detect_source, rsids): return Reader.read_file(file, only_detect_source, self._resources, rsids) def _assign_par_snps(self): """ Assign PAR SNPs to the X or Y chromosome using SNP position. References ----- 1. National Center for Biotechnology Information, Variation Services, RefSNP, https://api.ncbi.nlm.nih.gov/variation/v0/ 2. Yates et. al. (doi:10.1093/bioinformatics/btu613), `<http://europepmc.org/search/?query=DOI:10.1093/bioinformatics/btu613>`_ 3. Zerbino et. al. (doi.org/10.1093/nar/gkx1098), https://doi.org/10.1093/nar/gkx1098 4. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. dbSNP: the NCBI database of genetic variation. Nucleic Acids Res. 2001 Jan 1; 29(1):308-11. 5. Database of Single Nucleotide Polymorphisms (dbSNP). Bethesda (MD): National Center for Biotechnology Information, National Library of Medicine. dbSNP accession: rs28736870, rs113313554, and rs758419898 (dbSNP Build ID: 151). 
Available from: http://www.ncbi.nlm.nih.gov/SNP/ """ rest_client = EnsemblRestClient( server="https://api.ncbi.nlm.nih.gov", reqs_per_sec=1 ) for rsid in self._snps.loc[self._snps["chrom"] == "PAR"].index.values: if "rs" in rsid: id = rsid.split("rs")[1] response = rest_client.perform_rest_action("/variation/v0/refsnp/" + id) if response is not None: for item in response["primary_snapshot_data"][ "placements_with_allele" ]: if "NC_000023" in item["seq_id"]: assigned = self._assign_snp(rsid, item["alleles"], "X") elif "NC_000024" in item["seq_id"]: assigned = self._assign_snp(rsid, item["alleles"], "Y") else: assigned = False if assigned: if not self._build_detected: self._build = self._extract_build(item) self._build_detected = True break def _assign_snp(self, rsid, alleles, chrom): # only assign SNP if positions match (i.e., same build) for allele in alleles: allele_pos = allele["allele"]["spdi"]["position"] # ref SNP positions seem to be 0-based... if allele_pos == self._snps.loc[rsid].pos - 1: self._snps.loc[rsid, "chrom"] = chrom return True return False def _extract_build(self, item): assembly_name = item["placement_annot"]["seq_id_traits_by_assembly"][0][ "assembly_name" ] assembly_name = assembly_name.split(".")[0] return int(assembly_name[-2:]) def detect_build(self): """ Detect build of SNPs. Use the coordinates of common SNPs to identify the build / assembly of a genotype file that is being loaded. Notes ----- rs3094315 : plus strand in 36, 37, and 38 rs11928389 : plus strand in 36, minus strand in 37 and 38 rs2500347 : plus strand in 36 and 37, minus strand in 38 rs964481 : plus strand in 36, 37, and 38 rs2341354 : plus strand in 36, 37, and 38 Returns ------- int detected build of SNPs, else 0 References ---------- 1. Yates et. al. (doi:10.1093/bioinformatics/btu613), `<http://europepmc.org/search/?query=DOI:10.1093/bioinformatics/btu613>`_ 2. Zerbino et. al. (doi.org/10.1093/nar/gkx1098), https://doi.org/10.1093/nar/gkx1098 3. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. dbSNP: the NCBI database of genetic variation. Nucleic Acids Res. 2001 Jan 1;29(1):308-11. 4. Database of Single Nucleotide Polymorphisms (dbSNP). Bethesda (MD): National Center for Biotechnology Information, National Library of Medicine. dbSNP accession: rs3094315, rs11928389, rs2500347, rs964481, and rs2341354 (dbSNP Build ID: 151). Available from: http://www.ncbi.nlm.nih.gov/SNP/ """ def lookup_build_with_snp_pos(pos, s): try: return s.loc[s == pos].index[0] except: return 0 build = 0 rsids = ["rs3094315", "rs11928389", "rs2500347", "rs964481", "rs2341354"] df = pd.DataFrame( { 36: [742429, 50908372, 143649677, 27566744, 908436], 37: [752566, 50927009, 144938320, 27656823, 918573], 38: [817186, 50889578, 148946169, 27638706, 983193], }, index=rsids, ) for rsid in rsids: if rsid in self._snps.index: build = lookup_build_with_snp_pos( self._snps.loc[rsid].pos, df.loc[rsid] ) if build: break return build def get_assembly(self): """ Get the assembly of a build. Returns ------- str """ if self._build == 37: return "GRCh37" elif self._build == 36: return "NCBI36" elif self._build == 38: return "GRCh38" else: return "" def get_snp_count(self, chrom=""): """ Count of SNPs. Parameters ---------- chrom : str, optional chromosome (e.g., "1", "X", "MT") Returns ------- int """ if chrom: return len(self._snps.loc[(self._snps.chrom == chrom)]) else: return len(self._snps) def get_chromosomes(self): """ Get the chromosomes of SNPs. 
Returns ------- list list of str chromosomes (e.g., ['1', '2', '3', 'MT'], empty list if no chromosomes """ if not self._snps.empty: return list(pd.unique(self._snps["chrom"])) else: return [] def get_chromosomes_summary(self): """ Summary of the chromosomes of SNPs. Returns ------- str human-readable listing of chromosomes (e.g., '1-3, MT'), empty str if no chromosomes """ if not self._snps.empty: chroms = list(pd.unique(self._snps["chrom"])) int_chroms = [int(chrom) for chrom in chroms if chrom.isdigit()] str_chroms = [chrom for chrom in chroms if not chrom.isdigit()] # https://codereview.stackexchange.com/a/5202 def as_range(iterable): l = list(iterable) if len(l) > 1: return "{0}-{1}".format(l[0], l[-1]) else: return "{0}".format(l[0]) # create str representations int_chroms = ", ".join( as_range(g) for _, g in groupby(int_chroms, key=lambda n, c=count(): n - next(c)) ) str_chroms = ", ".join(str_chroms) if int_chroms != "" and str_chroms != "": int_chroms += ", " return int_chroms + str_chroms else: return "" def determine_sex( self, heterozygous_x_snps_threshold=0.03, y_snps_not_null_threshold=0.3, chrom="X", ): """ Determine sex from SNPs using thresholds. Parameters ---------- heterozygous_x_snps_threshold : float percentage heterozygous X SNPs; above this threshold, Female is determined y_snps_not_null_threshold : float percentage Y SNPs that are not null; above this threshold, Male is determined chrom : {"X", "Y"} use X or Y chromosome SNPs to determine sex Returns ------- str 'Male' or 'Female' if detected, else empty str """ if not self._snps.empty: if chrom == "X": return self._determine_sex_X(heterozygous_x_snps_threshold) elif chrom == "Y": return self._determine_sex_Y(y_snps_not_null_threshold) return "" def _determine_sex_X(self, threshold): x_snps = self.get_snp_count("X") if x_snps > 0: if len(self.heterozygous_snps("X")) / x_snps > threshold: return "Female" else: return "Male" else: return "" def _determine_sex_Y(self, threshold): y_snps = self.get_snp_count("Y") if y_snps > 0: if len(self.not_null_snps("Y")) / y_snps > threshold: return "Male" else: return "Female" else: return "" def _get_non_par_start_stop(self, chrom): # get non-PAR start / stop positions for chrom pr = self.get_par_regions(self.build) np_start = pr.loc[(pr.chrom == chrom) & (pr.region == "PAR1")].stop.values[0] np_stop = pr.loc[(pr.chrom == chrom) & (pr.region == "PAR2")].start.values[0] return np_start, np_stop def _get_non_par_snps(self, chrom, heterozygous=True): np_start, np_stop = self._get_non_par_start_stop(chrom) if heterozygous: # get heterozygous SNPs in the non-PAR region (i.e., discrepant XY SNPs) return self._snps.loc[ (self._snps.chrom == chrom) & (self._snps.genotype.notnull()) & (self._snps.genotype.str.len() == 2) & (self._snps.genotype.str[0] != self._snps.genotype.str[1]) & (self._snps.pos > np_start) & (self._snps.pos < np_stop) ].index else: # get homozygous SNPs in the non-PAR region return self._snps.loc[ (self._snps.chrom == chrom) & (self._snps.genotype.notnull()) & (self._snps.genotype.str.len() == 2) & (self._snps.genotype.str[0] == self._snps.genotype.str[1]) & (self._snps.pos > np_start) & (self._snps.pos < np_stop) ].index def _deduplicate_rsids(self): # Keep first duplicate rsid. 
duplicate_rsids = self._snps.index.duplicated(keep="first") # save duplicate SNPs self._duplicate_snps = self._duplicate_snps.append( self._snps.loc[duplicate_rsids] ) # deduplicate self._snps = self._snps.loc[~duplicate_rsids] def _deduplicate_chrom(self, chrom): """ Deduplicate a chromosome in the non-PAR region. """ discrepant_XY_snps = self._get_non_par_snps(chrom) # save discrepant XY SNPs self._discrepant_XY_snps = self._discrepant_XY_snps.append( self._snps.loc[discrepant_XY_snps] ) # drop discrepant XY SNPs since it's ambiguous for which allele to deduplicate self._snps.drop(discrepant_XY_snps, inplace=True) # get remaining non-PAR SNPs with two alleles non_par_snps = self._get_non_par_snps(chrom, heterozygous=False) # remove duplicate allele self._snps.loc[non_par_snps, "genotype"] = self._snps.loc[ non_par_snps, "genotype" ].apply(lambda x: x[0]) def _deduplicate_XY_chrom(self): """ Fix chromosome issue where some data providers duplicate male X and Y chromosomes""" self._deduplicate_chrom("X") self._deduplicate_chrom("Y") @staticmethod def get_par_regions(build): """ Get PAR regions for the X and Y chromosomes. Parameters ---------- build : int build of SNPs Returns ------- pandas.DataFrame PAR regions for the given build References ---------- 1. Genome Reference Consortium, https://www.ncbi.nlm.nih.gov/grc/human 2. Yates et. al. (doi:10.1093/bioinformatics/btu613), `<http://europepmc.org/search/?query=DOI:10.1093/bioinformatics/btu613>`_ 3. Zerbino et. al. (doi.org/10.1093/nar/gkx1098), https://doi.org/10.1093/nar/gkx1098 """ if build == 37: return pd.DataFrame( { "region": ["PAR1", "PAR2", "PAR1", "PAR2"], "chrom": ["X", "X", "Y", "Y"], "start": [60001, 154931044, 10001, 59034050], "stop": [2699520, 155260560, 2649520, 59363566], }, columns=["region", "chrom", "start", "stop"], ) elif build == 38: return pd.DataFrame( { "region": ["PAR1", "PAR2", "PAR1", "PAR2"], "chrom": ["X", "X", "Y", "Y"], "start": [10001, 155701383, 10001, 56887903], "stop": [2781479, 156030895, 2781479, 57217415], }, columns=["region", "chrom", "start", "stop"], ) elif build == 36: return pd.DataFrame( { "region": ["PAR1", "PAR2", "PAR1", "PAR2"], "chrom": ["X", "X", "Y", "Y"], "start": [1, 154584238, 1, 57443438], "stop": [2709520, 154913754, 2709520, 57772954], }, columns=["region", "chrom", "start", "stop"], ) else: return pd.DataFrame() def sort_snps(self): """ Sort SNPs based on ordered chromosome list and position. """ sorted_list = sorted(self._snps["chrom"].unique(), key=self._natural_sort_key) # move PAR and MT to the end of the dataframe if "PAR" in sorted_list: sorted_list.remove("PAR") sorted_list.append("PAR") if "MT" in sorted_list: sorted_list.remove("MT") sorted_list.append("MT") # convert chrom column to category for sorting # https://stackoverflow.com/a/26707444 self._snps["chrom"] = self._snps["chrom"].astype( CategoricalDtype(categories=sorted_list, ordered=True) ) # sort based on ordered chromosome list and position snps = self._snps.sort_values(["chrom", "pos"]) # convert chromosome back to object snps["chrom"] = snps["chrom"].astype(object) self._snps = snps def remap_snps(self, target_assembly, complement_bases=True): """ Remap SNP coordinates from one assembly to another. This method uses the assembly map endpoint of the Ensembl REST API service (via ``Resources``'s ``EnsemblRestClient``) to convert SNP coordinates / positions from one assembly to another. After remapping, the coordinates / positions for the SNPs will be that of the target assembly. 
If the SNPs are already mapped relative to the target assembly, remapping will not be performed. Parameters ---------- target_assembly : {'NCBI36', 'GRCh37', 'GRCh38', 36, 37, 38} assembly to remap to complement_bases : bool complement bases when remapping SNPs to the minus strand Returns ------- chromosomes_remapped : list of str chromosomes remapped chromosomes_not_remapped : list of str chromosomes not remapped Notes ----- An assembly is also know as a "build." For example: Assembly NCBI36 = Build 36 Assembly GRCh37 = Build 37 Assembly GRCh38 = Build 38 See https://www.ncbi.nlm.nih.gov/assembly for more information about assemblies and remapping. References ---------- 1. Ensembl, Assembly Map Endpoint, http://rest.ensembl.org/documentation/info/assembly_map """ chromosomes_remapped = [] chromosomes_not_remapped = [] snps = self.snps if snps.empty: logger.warning("No SNPs to remap") return chromosomes_remapped, chromosomes_not_remapped else: chromosomes = snps["chrom"].unique() chromosomes_not_remapped = list(chromosomes) valid_assemblies = ["NCBI36", "GRCh37", "GRCh38", 36, 37, 38] if target_assembly not in valid_assemblies: logger.warning("Invalid target assembly") return chromosomes_remapped, chromosomes_not_remapped if isinstance(target_assembly, int): if target_assembly == 36: target_assembly = "NCBI36" else: target_assembly = "GRCh" + str(target_assembly) if self.build == 36: source_assembly = "NCBI36" else: source_assembly = "GRCh" + str(self.build) if source_assembly == target_assembly: return chromosomes_remapped, chromosomes_not_remapped assembly_mapping_data = self._resources.get_assembly_mapping_data( source_assembly, target_assembly ) if not assembly_mapping_data: return chromosomes_remapped, chromosomes_not_remapped tasks = [] for chrom in chromosomes: if chrom in assembly_mapping_data: chromosomes_remapped.append(chrom) chromosomes_not_remapped.remove(chrom) mappings = assembly_mapping_data[chrom] tasks.append( { "snps": snps.loc[snps["chrom"] == chrom], "mappings": mappings, "complement_bases": complement_bases, } ) else: logger.warning( "Chromosome {} not remapped; " "removing chromosome from SNPs for consistency".format(chrom) ) snps = snps.drop(snps.loc[snps["chrom"] == chrom].index) # remap SNPs remapped_snps = self._parallelizer(self._remapper, tasks) remapped_snps = pd.concat(remapped_snps) # update SNP positions and genotypes snps.loc[remapped_snps.index, "pos"] = remapped_snps["pos"] snps.loc[remapped_snps.index, "genotype"] = remapped_snps["genotype"] self._snps = snps self.sort_snps() self._build = int(target_assembly[-2:]) return chromosomes_remapped, chromosomes_not_remapped def _remapper(self, task): """ Remap SNPs for a chromosome. 
Parameters ---------- task : dict dict with `snps` to remap per `mappings`, optionally `complement_bases` Returns ------- pandas.DataFrame remapped SNPs """ temp = task["snps"].copy() mappings = task["mappings"] complement_bases = task["complement_bases"] temp["remapped"] = False pos_start = int(temp["pos"].describe()["min"]) pos_end = int(temp["pos"].describe()["max"]) for mapping in mappings["mappings"]: # skip if mapping is outside of range of SNP positions if ( mapping["original"]["end"] <= pos_start or mapping["original"]["start"] >= pos_end ): continue orig_range_len = mapping["original"]["end"] - mapping["original"]["start"] mapped_range_len = mapping["mapped"]["end"] - mapping["mapped"]["start"] orig_region = mapping["original"]["seq_region_name"] mapped_region = mapping["mapped"]["seq_region_name"] if orig_region != mapped_region: logger.warning("discrepant chroms") continue if orig_range_len != mapped_range_len: logger.warning( "discrepant coords" ) # observed when mapping NCBI36 -> GRCh38 continue # find the SNPs that are being remapped for this mapping snp_indices = temp.loc[ ~temp["remapped"] & (temp["pos"] >= mapping["original"]["start"]) & (temp["pos"] <= mapping["original"]["end"]) ].index if len(snp_indices) > 0: # remap the SNPs if mapping["mapped"]["strand"] == -1: # flip and (optionally) complement since we're mapping to minus strand diff_from_start = ( temp.loc[snp_indices, "pos"] - mapping["original"]["start"] ) temp.loc[snp_indices, "pos"] = ( mapping["mapped"]["end"] - diff_from_start ) if complement_bases: temp.loc[snp_indices, "genotype"] = temp.loc[ snp_indices, "genotype" ].apply(self._complement_bases) else: # mapping is on same (plus) strand, so just remap based on offset offset = mapping["mapped"]["start"] - mapping["original"]["start"] temp.loc[snp_indices, "pos"] = temp["pos"] + offset # mark these SNPs as remapped temp.loc[snp_indices, "remapped"] = True return temp def _complement_bases(self, genotype): if pd.isnull(genotype): return np.nan complement = "" for base in list(genotype): if base == "A": complement += "T" elif base == "G": complement += "C" elif base == "C": complement += "G" elif base == "T": complement += "A" else: complement += base return complement # https://stackoverflow.com/a/16090640 @staticmethod def _natural_sort_key(s, natural_sort_re=re.compile("([0-9]+)")): return [ int(text) if text.isdigit() else text.lower() for text in re.split(natural_sort_re, s) ] class SNPsCollection(SNPs): def __init__(self, raw_data=None, output_dir="output", name="", **kwargs): """ Parameters ---------- raw_data : list or str path(s) to file(s) with raw genotype data output_dir : str path to output directory name : str name for this ``SNPsCollection`` """ super().__init__(file="", output_dir=output_dir, **kwargs) self._source = [] self._discrepant_positions_file_count = 0 self._discrepant_genotypes_file_count = 0 self._discrepant_positions = pd.DataFrame() self._discrepant_genotypes =
pd.DataFrame()
pandas.DataFrame
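The constructor above initialises several attributes as empty frames so that later code can test .empty and accumulate rows without special-casing None. A minimal sketch of that sentinel pattern follows, with made-up rsid data; note that DataFrame.append, used elsewhere in this class, has been removed in pandas 2.0, so the sketch uses pd.concat instead.

import pandas as pd

# Empty-DataFrame sentinel: start with no rows, accumulate later.
discrepant = pd.DataFrame()
print(discrepant.empty)          # True until rows are accumulated

# Illustrative rows, not real snps output.
new_rows = pd.DataFrame({"rsid": ["rs123"], "genotype": ["AT"]}).set_index("rsid")
discrepant = new_rows if discrepant.empty else pd.concat([discrepant, new_rows])
print(discrepant.empty)          # False once rows have been added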
import pandas as pd import numpy as np from sklearn.metrics import accuracy_score import warnings from sklearn.metrics import fbeta_score, precision_score, recall_score, confusion_matrix,f1_score import itertools import pickle from matplotlib import pyplot as plt,style from multiprocessing import Pool import json import os import sys warnings.simplefilter("ignore", category=DeprecationWarning) style.use('ggplot') np.random.seed(42) label_file = sys.argv[5] labels = [] with open(label_file) as ff: for line in ff.readlines(): line = line.strip() if line.startswith('#') or line == '': continue labels.append(line) # TODO: Do not hardcode dictionary. Labels need to be taken from the device. di ={} reverse_di = {} for i in range(len(labels)): di.update({labels[i]:i}) reverse_di.update({i:labels[i]}) di.update({'anomaly':len(labels)}) class NumpyEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.ndarray): return obj.tolist() return json.JSONEncoder.default(self, obj) def plot_confusion_matrix(cm, classes, recall,precision,f2,f1, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): plt.figure() plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=0) plt.yticks(tick_marks, classes) thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, cm[i, j], horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') plt.xticks(rotation=90) plt.text(12,0, f" Recall:{recall},\n Precision:{precision},\n F2 Score:{f2},\n F1 Score:{f1}", fontsize=12) return plt #plt.show() def load_data(path): anomaly_data = pd.read_csv(path) # anomaly_data = anomaly_data.drop(anomaly_data.columns[0], axis=1) return anomaly_data def filter_anomaly(ss,anomaly_data,multivariate_model_dict,model_path): mv_model = multivariate_model_dict['mvmodel'] treshold = multivariate_model_dict['treshold'] y_test = anomaly_data['state'].apply(lambda x: 1 if x == 'anomaly' else 0) y_predict = (mv_model.logpdf(anomaly_data.drop(['state'], axis=1).values) < treshold).astype(int) recall = recall_score(y_pred=y_predict, y_true=y_test, average='weighted') precision = precision_score(y_pred=y_predict, y_true=y_test, average='weighted') f2 = fbeta_score(y_pred=y_predict, y_true=y_test, average='weighted', beta=2) f1 = f1_score(y_pred=y_predict, y_true=y_test, average='weighted') _acc_score = accuracy_score(y_test, y_predict) cm = confusion_matrix(y_test, y_predict) plt = plot_confusion_matrix(cm, classes=['Normal', 'Anomalous'], recall=recall, precision=precision, f2=f2, f1=f1, title='Confusion matrix') if not os.path.exists(model_path+'/plots'): os.makedirs(model_path+'/plots') plt.savefig(model_path+'/plots/anomalous_cm.png',bbox_inches='tight') anomaly_data['anomalous'] = y_predict normal_data = anomaly_data[anomaly_data['anomalous'] == 0] anomalous_data = anomaly_data[anomaly_data['anomalous'] == 1] output_dict = {'predictions': y_predict, 'recall': recall, 'precision': precision, 'f1': f1, 'f2': f2} if not os.path.exists(model_path+'/results'): os.makedirs(model_path+'/results') with open(model_path+'/results/anomaly_output.txt','w') as file: file.write(json.dumps(output_dict,cls=NumpyEncoder)) return normal_data,anomalous_data def action_classification_model(normal_data,action_class_dict): ss = action_class_dict['standard_scaler'] pca = action_class_dict['pca'] 
trained_model = action_class_dict['trained_model']
transformed_data = ss.transform(normal_data.drop(['state','anomalous'], axis=1))
# TODO: Fix nan value results from transformations
transformed_data = pca.transform(transformed_data)
transformed_data =
pd.DataFrame(transformed_data)
pandas.DataFrame
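The completion above supplies the pd.DataFrame call that finishes the prompt's trailing `transformed_data =` assignment. A minimal sketch of that pattern follows, with a stand-in array in place of the scaler/PCA output (the array shape is an assumption made only for illustration):

import numpy as np
import pandas as pd

transformed_data = np.random.rand(5, 3)   # stand-in for ss.transform(...) followed by pca.transform(...)
frame = pd.DataFrame(transformed_data)    # default integer column labels 0, 1, 2, as in the completion
print(frame.shape)
print(frame.columns.tolist())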
import os import unittest import random import sys import site # so that ai4water directory is in path ai4_dir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))) site.addsitedir(ai4_dir) import scipy import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from ai4water import Model from ai4water.preprocessing import DataHandler, SiteDistributedDataHandler from ai4water.preprocessing.datahandler import MultiLocDataHandler from ai4water.datasets import load_u1, arg_beach os.environ['PYTHONHASHSEED'] = '313' random.seed(313) np.random.seed(313) # todo, check last dimension of x,y # todo test with 3d y def _check_xy_equal_len(x, prev_y, y, lookback, num_ins, num_outs, num_examples, data_type='training'): feat_dim = 1 if lookback > 1: assert x.shape[1] == lookback feat_dim = 2 assert x.shape[ feat_dim] == num_ins, f"for {data_type} x's shape is {x.shape} while num_ins of dataloader are {num_ins}" if y is not None: assert y.shape[1] == num_outs, f"for {data_type} y's shape is {y.shape} while num_outs of dataloader are {num_outs}" else: assert num_outs == 0 y = x # just for next statement to run if prev_y is None: prev_y = x # just for next statement to run assert x.shape[0] == y.shape[0] == prev_y.shape[ 0], f"for {data_type} xshape: {x.shape}, yshape: {y.shape}, prevyshape: {prev_y.shape}" if num_examples: assert x.shape[ 0] == num_examples, f'for {data_type} x contains {x.shape[0]} samples while expected samples are {num_examples}' return def assert_xy_equal_len(x, prev_y, y, data_loader, num_examples=None, data_type='training'): if isinstance(x, np.ndarray): _check_xy_equal_len(x, prev_y, y, data_loader.lookback, data_loader.num_ins, data_loader.num_outs, num_examples, data_type=data_type) elif isinstance(x, list): while len(y)<len(x): y.append(None) for idx, i in enumerate(x): _check_xy_equal_len(i, prev_y[idx], y[idx], data_loader.lookback[idx], data_loader.num_ins[idx], data_loader.num_outs[idx], num_examples, data_type=data_type ) elif isinstance(x, dict): for key, i in x.items(): _check_xy_equal_len(i, prev_y.get(key, None), y.get(key, None), data_loader.lookback[key], data_loader.num_ins[key], data_loader.num_outs[key], num_examples, data_type=data_type ) elif x is None: # all should be None assert all(v is None for v in [x, prev_y, y]) else: raise ValueError def _check_num_examples(train_x, val_x, test_x, val_ex, test_ex, tot_obs): val_examples = 0 if val_ex: val_examples = val_x.shape[0] test_examples = 0 if test_ex: test_examples = test_x.shape[0] xyz_samples = train_x.shape[0] + val_examples + test_examples # todo, whould be equal assert xyz_samples == tot_obs, f""" data_loader has {tot_obs} examples while sum of train/val/test examples are {xyz_samples}.""" def check_num_examples(train_x, val_x, test_x, val_ex, test_ex, data_loader): if isinstance(train_x, np.ndarray): _check_num_examples(train_x, val_x, test_x, val_ex, test_ex, data_loader.tot_obs_for_one_df()) elif isinstance(train_x, list): for idx in range(len(train_x)): _check_num_examples(train_x[idx], val_x[idx], test_x[idx], val_ex, test_ex, data_loader.tot_obs_for_one_df()[idx]) return def check_inverse_transformation(data, data_loader, y, cols, key): if cols is None: # not output columns, so not checking return # check that after inverse transformation, we get correct y. 
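# The three branches below mirror DataHandler's possible source layouts
# (single DataFrame, list of sources, dict of sources): y is inverse-transformed,
# de-indexed via deindexify, and compared value-by-value against the raw data.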
if data_loader.source_is_df: train_y_ = data_loader.inverse_transform(data=pd.DataFrame(y.reshape(-1, len(cols)), columns=cols), key=key) train_y_, index = data_loader.deindexify(train_y_, key=key) compare_individual_item(data, key, cols, train_y_, data_loader) elif data_loader.source_is_list: #for idx in range(data_loader.num_sources): # y_ = y[idx].reshape(-1, len(cols[idx])) train_y_ = data_loader.inverse_transform(data=y, key=key) train_y_, _ = data_loader.deindexify(train_y_, key=key) for idx, y in enumerate(train_y_): compare_individual_item(data[idx], f'{key}_{idx}', cols[idx], y, data_loader) elif data_loader.source_is_dict: train_y_ = data_loader.inverse_transform(data=y, key=key) train_y_, _ = data_loader.deindexify(train_y_, key=key) for src_name, val in train_y_.items(): compare_individual_item(data[src_name], f'{key}_{src_name}', cols[src_name], val, data_loader) def compare_individual_item(data, key, cols, y, data_loader): if y is None: return train_index = data_loader.indexes[key] if y.__class__.__name__ in ['DataFrame']: y = y.values for i, v in zip(train_index, y): if len(cols) == 1: if isinstance(train_index, pd.DatetimeIndex): # if true value in data is None, y's value should also be None if np.isnan(data[cols].loc[i]).item(): assert np.isnan(v).item() else: _t = round(data[cols].loc[i].item(), 0) _p = round(v.item(), 0) if not np.allclose(data[cols].loc[i].item(), v.item()): print(f'true: {_t}, : pred: {_p}, index: {i}, col: {cols}') else: if isinstance(v, np.ndarray): v = round(v.item(), 3) _true = round(data[cols].loc[i], 3).item() _p = round(v, 3) if _true != _p: print(f'true: {_true}, : pred: {_p}, index: {i}, col: {cols}') else: if isinstance(train_index, pd.DatetimeIndex): assert abs(data[cols].loc[i].sum() - np.nansum(v)) <= 0.00001, f'{data[cols].loc[i].sum()},: {v}' else: assert abs(data[cols].iloc[i].sum() - v.sum()) <= 0.00001 def check_kfold_splits(data_handler): if data_handler.source_is_df: splits = data_handler.KFold_splits() for (train_x, train_y), (test_x, test_y) in splits: ... 
# print(train_x.shape, train_y.shape, test_x.shape, test_y.shape) return def assert_uniquenes(train_y, val_y, test_y, out_cols, data_loader): if isinstance(train_y, list): assert isinstance(val_y, list) assert isinstance(test_y, list) train_y = train_y[0] val_y = val_y[0] test_y = test_y[0] if isinstance(train_y, dict): train_y = list(train_y.values())[0] assert isinstance(val_y, dict) isinstance(test_y, dict) val_y = list(val_y.values())[0] test_y = list(test_y.values())[0] if out_cols is not None: b = train_y.reshape(-1, ) if val_y is None: a = test_y.reshape(-1, ) else: a = val_y.reshape(-1, ) if not len(np.intersect1d(a, b)) == 0: raise ValueError(f'train and val have overlapping values') if data_loader.val_data != 'same' and out_cols is not None and val_y is not None and test_y is not None: a = test_y.reshape(-1,) b = val_y.reshape(-1,) assert len(np.intersect1d(a, b)) == 0, 'test and val have overlapping values' return def build_and_test_loader(data, config, out_cols, train_ex=None, val_ex=None, test_ex=None, save=True, assert_uniqueness=True, check_examples=True, true_train_y=None, true_val_y=None, true_test_y=None): config['teacher_forcing'] = True # todo if 'val_fraction' not in config: config['val_fraction'] = 0.3 if 'test_fraction' not in config: config['test_fraction'] = 0.3 data_loader = DataHandler(data=data, save=save, verbosity=0, **config) #dl = DataLoader.from_h5('data.h5') train_x, prev_y, train_y = data_loader.training_data(key='train') assert_xy_equal_len(train_x, prev_y, train_y, data_loader, train_ex) val_x, prev_y, val_y = data_loader.validation_data(key='val') assert_xy_equal_len(val_x, prev_y, val_y, data_loader, val_ex, data_type='validation') test_x, prev_y, test_y = data_loader.test_data(key='test') assert_xy_equal_len(test_x, prev_y, test_y, data_loader, test_ex, data_type='test') if check_examples: check_num_examples(train_x, val_x, test_x, val_ex, test_ex, data_loader) if isinstance(data, str): data = data_loader.data check_inverse_transformation(data, data_loader, train_y, out_cols, 'train') if val_ex: check_inverse_transformation(data, data_loader, val_y, out_cols, 'val') if test_ex: check_inverse_transformation(data, data_loader, test_y, out_cols, 'test') check_kfold_splits(data_loader) if assert_uniqueness: assert_uniquenes(train_y, val_y, test_y, out_cols, data_loader) if true_train_y is not None: assert np.allclose(train_y, true_train_y) if true_val_y is not None: assert np.allclose(val_y, true_val_y) if true_test_y is not None: assert np.allclose(test_y, true_test_y) return data_loader class TestAllCases(object): def __init__(self, input_features, output_features, lookback=3, allow_nan_labels=0, save=True): self.input_features = input_features self.output_features = output_features self.lookback = lookback self.allow_nan_labels=allow_nan_labels self.save=save self.run_all() def run_all(self): all_methods = [m for m in dir(self) if callable(getattr(self, m)) and not m.startswith('_') and m not in ['run_all']] for m in all_methods: getattr(self, m)() return def test_basic(self): examples = 100 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c']) config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback} tr_examples = 49 - (self.lookback - 2) if self.lookback>1 else 49 val_examples = 22 - (self.lookback - 2) if self.lookback>1 else 22 test_examples = 30 - (self.lookback - 2) if self.lookback>1 else 30 if 
self.output_features == ['c']: tty = np.arange(202, 250).reshape(-1, 1, 1) tvy = np.arange(250, 271).reshape(-1, 1, 1) ttesty = np.arange(271, 300).reshape(-1, 1, 1) else: tty, tvy, ttesty = None, None, None loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, test_examples, save=self.save, true_train_y=tty, true_val_y=tvy, true_test_y=ttesty, check_examples=True, ) assert loader.source_is_df return def test_with_random(self): examples = 100 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c']) config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'train_data': 'random'} tr_examples = 49 - (self.lookback - 2) if self.lookback>1 else 49 loader = build_and_test_loader(data, config, self.output_features, tr_examples, 20, 30, save=self.save, ) assert loader.source_is_df return def test_drop_remainder(self): examples = 100 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c']) config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'batch_size': 8, 'drop_remainder': True, 'train_data': 'random'} loader = build_and_test_loader(data, config, self.output_features, 48, 16, 24, check_examples=False, save=self.save, ) assert loader.source_is_df return def test_with_same_val_data(self): # val_data is "same" as and train_data is make based upon fractions. examples = 100 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c']) config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'val_data': 'same'} if self.output_features == ['c']: tty = np.arange(202, 271).reshape(-1, 1, 1) tvy = np.arange(271, 300).reshape(-1, 1, 1) ttesty = np.arange(271, 300).reshape(-1, 1, 1) else: tty, tvy, ttesty = None, None, None tr_examples = 71 - (self.lookback - 1) if self.lookback > 1 else 71 loader = build_and_test_loader(data, config, self.output_features, tr_examples, 29, 29, true_train_y=tty, true_val_y=tvy, true_test_y=ttesty, save=self.save, check_examples=False ) assert loader.source_is_df return def test_with_same_val_data_and_random(self): examples = 100 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c']) config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'train_data': 'random', 'val_data': 'same'} tr_examples = 70 - (self.lookback - 1) if self.lookback > 1 else 70 loader = build_and_test_loader(data, config, self.output_features, tr_examples, 30, 30, check_examples=False, save=self.save ) assert loader.source_is_df return def test_with_no_val_data(self): # we dont' want to have any validation_data examples = 100 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c']) config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'val_fraction': 0.0} if self.output_features == ['c']: tty = np.arange(202, 271).reshape(-1, 1, 1) ttesty = np.arange(271, 300).reshape(-1, 1, 1) else: tty, tvy, ttesty = None, None, None tr_examples = 71 - (self.lookback - 1) if self.lookback > 1 
else 71 loader = build_and_test_loader(data, config, self.output_features, tr_examples, 0, 29, true_train_y=tty, true_test_y=ttesty, save=self.save) assert loader.source_is_df return def test_with_no_val_data_with_random(self): # we dont' want to have any validation_data examples = 100 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c']) config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'train_data': 'random', 'val_fraction': 0.0} tr_examples = 70 - (self.lookback - 1) if self.lookback > 1 else 70 loader = build_and_test_loader(data, config, self.output_features, tr_examples, 0, 30, save=self.save ) assert loader.source_is_df return def test_with_no_test_data(self): # we don't want any test_data examples = 100 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c']) config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'test_fraction': 0.0} if self.output_features == ['c']: tty = np.arange(202, 271).reshape(-1, 1, 1) tvy = np.arange(271, 300).reshape(-1, 1, 1) else: tty, tvy, ttesty = None, None, None tr_examples = 71 - (self.lookback - 1) if self.lookback > 1 else 71 loader = build_and_test_loader(data, config, self.output_features, tr_examples, 29, 0, true_train_y=tty, true_val_y=tvy, save=self.save ) assert loader.source_is_df return def test_with_no_test_data_with_random(self): # we don't want any test_data examples = 20 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c']) config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'train_data': 'random', 'test_fraction': 0.0, 'transformation': 'minmax'} tr_examples = 15- (self.lookback - 1) if self.lookback > 1 else 15 loader = build_and_test_loader(data, config, self.output_features, tr_examples, 5, 0, save=self.save) assert loader.source_is_df return def test_with_dt_index(self): # we don't want any test_data #print('testing test_with_dt_index', self.lookback) examples = 20 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c'], index=pd.date_range('20110101', periods=20, freq='D')) config = {'input_features': self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'train_data': 'random', 'test_fraction': 0.0, 'transformation': 'minmax'} tr_examples = 15 - (self.lookback - 1) if self.lookback > 1 else 15 loader = build_and_test_loader(data, config, self.output_features, tr_examples, 5, 0, save=self.save) assert loader.source_is_df return def test_with_intervals(self): #print('testing test_with_intervals', self.lookback) examples = 35 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c'], index=pd.date_range('20110101', periods=35, freq='D')) config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'train_data': 'random', 'transformation': 'minmax', 'intervals': [(0, 10), (20, 35)] } tr_examples = 12 - (self.lookback - 1) if self.lookback > 1 else 12 loader = build_and_test_loader(data, config, self.output_features, tr_examples, 4, 7, save=self.save ) assert 
loader.source_is_df return def test_with_dt_intervals(self): # check whether indices of intervals can be datetime? examples = 35 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c'], index=pd.date_range('20110101', periods=35, freq='D')) config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'train_data': 'random', 'transformation': 'minmax', 'intervals': [('20110101', '20110110'), ('20110121', '20110204')] } tr_examples = 12 - (self.lookback - 1) if self.lookback > 1 else 12 val_examples = 7 - (self.lookback - 2) if self.lookback > 1 else 7 test_examples = 7 - (self.lookback - 2) if self.lookback > 1 else 7 loader = build_and_test_loader(data, config, self.output_features, tr_examples, 4, 7, save=self.save) assert loader.source_is_df return def test_with_custom_train_indices(self): #print('testing test_with_custom_train_indices') examples = 20 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c'], index=pd.date_range('20110101', periods=20, freq='D')) config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'train_data': [1,2,3,4,5,6,7,8,9,10,11,12], 'transformation': 'minmax', } tr_examples = 9 - (self.lookback - 2) if self.lookback > 1 else 9 val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6 test_examples = 8 - (self.lookback - 1) if self.lookback > 1 else 8 loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, test_examples, save=self.save) assert loader.source_is_df return def test_with_custom_train_indices_no_val_data(self): examples = 20 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c'], index=pd.date_range('20110101', periods=20, freq='D')) config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'train_data': [1,2,3,4,5,6,7,8,9,10,11,12], 'transformation': 'minmax', 'val_fraction': 0.0, } test_examples = 8 - (self.lookback - 1) if self.lookback > 1 else 8 loader = build_and_test_loader(data, config, self.output_features, 12, 0, test_examples, save=self.save) assert loader.source_is_df return def test_with_custom_train_indices_same_val_data(self): #print('testing test_with_custom_train_indices_same_val_data') examples = 20 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c'], index=pd.date_range('20110101', periods=20, freq='D')) config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'train_data': [1,2,3,4,5,6,7,8,9,10,11,12], 'transformation': 'minmax', 'val_data': 'same', } test_examples = 8 - (self.lookback - 1) if self.lookback > 1 else 8 loader = build_and_test_loader(data, config, self.output_features, 12, 0, test_examples, save=self.save) assert loader.source_is_df return def test_with_custom_train_and_val_indices(self): examples = 20 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c'], index=pd.date_range('20110101', periods=20, freq='D')) config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'train_data': 
[1,2,3,4,5,6,7,8,9,10,11,12], 'transformation': 'minmax', 'val_data': [0, 12, 14, 16, 5], 'val_fraction': 0.0, } test_examples = 8 - (self.lookback - 1) if self.lookback > 1 else 8 loader = build_and_test_loader(data, config, self.output_features, 12, 5, test_examples, assert_uniqueness=False, save=self.save, check_examples=False ) assert loader.source_is_df return # def test_with_train_and_val_and_test_indices(self): # # todo, does it make sense to define test_data by indices # return def test_with_custom_train_indices_and_intervals(self): #print('testing test_with_custom_train_indices_and_intervals', self.lookback) examples = 30 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c'], index=pd.date_range('20110101', periods=30, freq='D')) config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'train_data': [1,2,3,4,5,6,7,8,9,10,11,12], #'transformation': 'minmax', 'intervals': [(0, 10), (20, 30)] } if self.output_features == ['c']: tty = np.array([63., 64., 65., 66., 67., 68., 69., 82.]).reshape(-1, 1, 1) tvy = np.arange(83, 87).reshape(-1, 1, 1) ttesty = np.array([62., 87., 88., 89.]).reshape(-1, 1, 1) else: tty, tvy, ttesty = None, None, None tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10 val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6 test_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6 loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, test_examples, true_train_y=tty, true_val_y=tvy, true_test_y=ttesty, save=self.save) assert loader.source_is_df return def test_with_one_feature_transformation(self): examples = 20 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c']) config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'transformation': [{'method': 'minmax', 'features': ['a']}], } if self.output_features == ['c']: tty = np.arange(42, 51).reshape(-1, 1, 1) tvy = np.arange(51, 55).reshape(-1, 1, 1) ttesty = np.arange(55, 60).reshape(-1, 1, 1) else: tty, tvy, ttesty = None, None, None tr_examples = 11 - (self.lookback - 1) if self.lookback > 1 else 11 val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6 loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, 5, true_train_y=tty, true_val_y=tvy, true_test_y=ttesty, save=self.save) assert loader.source_is_df return def test_with_one_feature_multi_transformation(self): examples = 20 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c']) config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'transformation': [{'method': 'minmax', 'features': ['a']}, {'method': 'zscore', 'features': ['a']}], } if self.output_features == ['c']: tty = np.arange(42, 51).reshape(-1, 1, 1) tvy = np.arange(51, 55).reshape(-1, 1, 1) ttesty = np.arange(55, 60).reshape(-1, 1, 1) else: tty, tvy, ttesty = None, None, None tr_examples = 11 - (self.lookback - 1) if self.lookback > 1 else 11 val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6 loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, 5, true_train_y=tty, true_val_y=tvy, 
true_test_y=ttesty, save=self.save) assert loader.source_is_df return def test_with_one_feature_multi_transformation_on_diff_features(self): examples = 20 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c']) config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'transformation': [{'method': 'minmax', 'features': ['a', 'b', 'c']}, {'method': 'zscore', 'features': ['c']}], } tr_examples = 11 - (self.lookback - 1) if self.lookback > 1 else 11 val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6 loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, 5, save=self.save) assert loader.source_is_df return def test_with_input_transformation(self): examples = 20 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c']) config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'transformation': [{'method': 'minmax', 'features': ['a', 'b']}], } if self.output_features == ['c']: tty = np.arange(42, 51).reshape(-1, 1, 1) tvy = np.arange(51, 55).reshape(-1, 1, 1) ttesty = np.arange(55, 60).reshape(-1, 1, 1) else: tty, tvy, ttesty = None, None, None tr_examples = 11 - (self.lookback - 1) if self.lookback > 1 else 11 val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6 loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, 5, true_train_y=tty, true_val_y=tvy, true_test_y=ttesty, save=self.save) assert loader.source_is_df return def test_with_input_transformation_as_dict(self): examples = 20 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c']) config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'transformation': {'method': 'minmax', 'features': ['a', 'b']}, } if self.output_features == ['c']: tty = np.arange(42, 51).reshape(-1, 1, 1) tvy = np.arange(51, 55).reshape(-1, 1, 1) ttesty = np.arange(55, 60).reshape(-1, 1, 1) else: tty, tvy, ttesty = None, None, None tr_examples = 11 - (self.lookback - 1) if self.lookback > 1 else 11 val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6 loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, 5, true_train_y=tty, true_val_y=tvy, true_test_y=ttesty, save=self.save) assert loader.source_is_df return def test_with_output_transformation(self): examples = 20 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c']) config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'transformation': {'method': 'minmax', 'features': ['c']}, } tr_examples = 11 - (self.lookback - 1) if self.lookback > 1 else 11 val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6 loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, 5, save=self.save) assert loader.source_is_df return def test_with_indices_and_intervals(self): examples = 30 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c'], index=pd.date_range('20110101', periods=30, freq='D')) config 
= {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'train_data': 'random', 'transformation': 'minmax', 'intervals': [(0, 10), (20, 30)] } tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10 val_examples = 5 - (self.lookback - 1) if self.lookback > 1 else 5 loader = build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, 5, save=self.save) assert loader.source_is_df return def test_with_indices_and_intervals_same_val_data(self): examples = 30 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c'], index=pd.date_range('20110101', periods=30, freq='D')) config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'train_data': 'random', 'val_data': 'same', 'transformation': 'minmax', 'intervals': [(0, 10), (20, 30)] } tr_examples = 13 - (self.lookback - 1) if self.lookback > 1 else 13 loader = build_and_test_loader(data, config, self.output_features, tr_examples, 5, 5, check_examples=False, save=self.save) assert loader.source_is_df return def test_with_indices_and_intervals_no_val_data(self): examples = 30 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c'], index=pd.date_range('20110101', periods=30, freq='D')) config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'train_data': 'random', 'val_fraction': 0.0, 'transformation': 'minmax', 'intervals': [(0, 10), (20, 30)] } tr_examples = 13 - (self.lookback - 1) if self.lookback > 1 else 13 loader = build_and_test_loader(data, config, self.output_features, tr_examples, 0, 5, save=self.save) assert loader.source_is_df return def test_with_indices_and_nans(self): examples = 30 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c'], index=pd.date_range('20110101', periods=30, freq='D')) if self.output_features is not None: data['c'].iloc[10:20] = np.nan config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'train_data': 'random', } tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10 loader = build_and_test_loader(data, config, self.output_features, tr_examples, 4, 6, save=self.save) assert loader.source_is_df config['allow_nan_labels'] = 2 if len(self.output_features) == 1 else 1 tr_examples = 15 - (self.lookback - 1) if self.lookback > 1 else 15 loader = build_and_test_loader(data, config, self.output_features, tr_examples, 6, 9, save=self.save) assert loader.source_is_df return def test_with_indices_and_nans_interpolate(self): examples = 30 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c'], index=pd.date_range('20110101', periods=30, freq='D')) if self.output_features is not None: data['b'].iloc[10:20] = np.nan config = {'input_features': self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'nan_filler': {'method': 'KNNImputer', 'features': self.input_features}, 'train_data': 'random', } if self.input_features == ['a']: tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10 val_examples = 6 - (self.lookback - 1) if self.lookback > 1 else 6 test_examples = 6 else: tr_examples = 15 
- (self.lookback - 1) if self.lookback > 1 else 15 val_examples = 6 test_examples = 9 build_and_test_loader(data, config, self.output_features, tr_examples, val_examples, test_examples, save=self.save) data['c'].iloc[10:20] = np.nan if 'b' not in self.output_features: config = {'input_features': self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'nan_filler': {'method': 'KNNImputer', 'features': ['b']}, 'train_data': 'random', } tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10 build_and_test_loader(data, config, self.output_features, tr_examples, 4, 6, save=self.save) config = {'input_features': self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'nan_filler': {'method': 'KNNImputer', 'features': ['b'], 'imputer_args': {'n_neighbors': 4}}, 'train_data': 'random', } tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10 build_and_test_loader(data, config, self.output_features, tr_examples, 4, 6, save=self.save) return def test_with_indices_and_nans_at_irregular_intervals(self): if self.output_features is not None and len(self.output_features)>1: examples = 40 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c'], index=pd.date_range('20110101', periods=40, freq='D')) data['b'].iloc[20:30] = np.nan data['c'].iloc[10:20] = np.nan config = {'input_features':self.input_features, 'output_features': self.output_features, 'lookback': self.lookback, 'train_data': 'random', } tr_examples = 10 - (self.lookback - 1) if self.lookback > 1 else 10 loader = build_and_test_loader(data, config, self.output_features, tr_examples, 4, 6, save=self.save) assert loader.source_is_df config['allow_nan_labels'] = self.allow_nan_labels loader = build_and_test_loader(data, config, self.output_features, 18, 8, 12, save=self.save) assert loader.source_is_df return def test_with_intervals_and_nans(self): # if data contains nans and we also have intervals if self.output_features is not None: examples = 40 data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose() data = pd.DataFrame(data, columns=['a', 'b', 'c'], index=
pd.date_range('20110101', periods=40, freq='D')
pandas.date_range
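Here the completion is the pd.date_range call used for the fixture's DatetimeIndex. A self-contained version of the frame the prompt builds around it, reconstructed from the prompt's own pattern and meant only as a sketch:

import numpy as np
import pandas as pd

examples = 40
data = np.arange(int(examples * 3), dtype=np.int32).reshape(-1, examples).transpose()
df = pd.DataFrame(data, columns=['a', 'b', 'c'],
                  index=pd.date_range('20110101', periods=examples, freq='D'))
print(df.shape)                    # (40, 3)
print(df.index[0], df.index[-1])   # daily index starting 2011-01-01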
#!/usr/bin/env python3 from os.path import join as ospath_join from time import asctime from time import sleep import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import pandas as pd #import seaborn as sns import statsmodels.api as sm # From Helpyr from helpyr import helpyr_misc as hm from helpyr import logger from helpyr import data_loading from omnipickle_manager import OmnipickleManager import global_settings as settings import tokens # This file is intentionally very repetitive. Each type of plot gets its own # set of functions due to the potential for required very specialized code to # format the image just right. Though there are opportunities to generalize # some... # To do: # - Remove outliers # For reference Qs_column_names = [ # Timing and meta data #'elapsed-time sec', <- Calculate this column later 'timestamp', 'missing ratio', 'vel', 'sd vel', 'number vel', 'exp_time', # Bedload transport masses (g) 'Bedload all', 'Bedload 0.5', 'Bedload 0.71', 'Bedload 1', 'Bedload 1.4', 'Bedload 2', 'Bedload 2.8', 'Bedload 4', 'Bedload 5.6', 'Bedload 8', 'Bedload 11.2', 'Bedload 16', 'Bedload 22', 'Bedload 32', 'Bedload 45', # Grain counts 'Count all', 'Count 0.5', 'Count 0.71', 'Count 1', 'Count 1.4', 'Count 2', 'Count 2.8', 'Count 4', 'Count 5.6', 'Count 8', 'Count 11.2', 'Count 16', 'Count 22', 'Count 32', 'Count 45', # Statistics 'D10', 'D16', 'D25', 'D50', 'D75', 'D84', 'D90', 'D95', 'Dmax' ] gsd_column_names = [ # Stats 'Sigmag', 'Dg', 'La', 'D90', 'D50', 'D10', 'Fsx', # Grain Size Fractions (counts) '0.5', '0.71', '1', '1.4', '2', '2.8', '4', '5.6', '8', '11.3', '16', '22.6', '32', # Scan name (ex: 3B-f75L-t60-8m ) 'scan_name', ] class UniversalGrapher: # Based on the Qs_grapher, but extended to graph other data as well. 
# Generic functions def __init__(self, fig_debug=False): self.fig_debug = fig_debug # File locations self.log_filepath = f"{settings.log_dir}/universal_grapher.txt" # Start up logger self.logger = logger.Logger(self.log_filepath, default_verbose=True) self.logger.begin_output("Universal Grapher") # omnimanager self.omnimanager = OmnipickleManager(self.logger) self.omnimanager.restore() self.figure_destination = settings.figure_destination self.figure_subdir = '' self.figure_subdir_dict = { 'dem_stats' : 'dem_stats', 'dem_subplots' : 'dem_subplots', 'dem_variograms' : 'dem_variograms', 'depth' : 'depth-based', 'lighttable' : 'lighttable', 'trap' : 'trap', 'feed_sieve' : 'feed_sieve', 'gsd' : 'gsd', 'synthesis' : 'synthesis', 'mass_balance' : 'mass_balance', } hm.ensure_dir_exists(self.figure_destination) self.export_destination = settings.export_destination self.export_subdir = '' hm.ensure_dir_exists(self.export_destination) self.data_loader = data_loading.DataLoader( self.export_destination, logger=self.logger) self.ignore_steps = [] self.for_paper = True self.skip_2B = False # General use functions def generic_make(self, name, load_fu, plot_fu, load_fu_kwargs=None, plot_fu_kwargs=None, fig_subdir=''): self.logger.write([f"Making {name} plots..."]) if fig_subdir in self.figure_subdir_dict: self.figure_subdir = self.figure_subdir_dict[fig_subdir] indent_function = self.logger.run_indented_function indent_function(load_fu, kwargs=load_fu_kwargs, before_msg="Loading data", after_msg="Data Loaded!") indent_function(plot_fu, kwargs=plot_fu_kwargs, before_msg=f"Plotting {name}", after_msg="Finished Plotting!") self.logger.end_output() def create_experiment_subplots(self, rows=4, cols=2): # So that I can create a standardized grid for the 8 experiments size = (12, 7.5) if self.for_paper else (16, 10) fig, axs = plt.subplots(rows, cols, sharey=True, sharex=True, figsize=size) return fig, axs def generate_rb_colors(self, n_colors): # Generate color red to blue color sequence n_colors = 8 half_n = n_colors//2 colors = np.ones((n_colors, 3)) s = np.linspace(0,1,num=half_n) colors[-half_n : , 0] = 1 - s # r colors[ : , 1] = 0 # g colors[ : half_n , 2] = s # b return colors def generate_rb_color_fu(self, max_n): half_n = max_n // 2 def rgb_fu(n): # red high, blue ramps up then blue high, red ramps down r = 1 if n <= half_n else (max_n - n) / half_n g = 0 b = 1 if n > half_n else n / half_n return (r, g, b) return rgb_fu def roll_data(self, data, roll_kwargs={}): # Roll the data. Returns the generic rolled object for flexibility of # what functions to call on the roll. 
# data is a dataframe with columns for x and y values roll_kwargs = roll_kwargs.copy() if isinstance(data, pd.Series): [roll_kwargs.pop(key) for key in ['x', 'y'] if key in roll_kwargs] series = data else: # Get the function parameters x_var = roll_kwargs.pop('x') y_var = roll_kwargs.pop('y') # Get the data x = data.loc[:, x_var] y = data.loc[:, y_var] # Convert to a series series = pd.Series(data=y.values, index=x) # Roll it rolled_data = series.rolling(**roll_kwargs) return rolled_data def _calc_retrended_slope(self, data, flume_elevations=None, intercept=None): # Assumes data values have same units as data column names (which are # positions) # Return ols results and flume elevation (for reuse) if flume_elevations is None: # Calculate new flume elevations positions = data.columns.values flume_elevations = positions * settings.flume_slope # Calculate the slope trended_profile = data + flume_elevations ols_out = trended_profile.apply(self._ols, axis=1, intercept=intercept) return ols_out, flume_elevations def _ols(self, series, intercept=None): # Do an OLS linear regression to calculate average slope and intercept. # series is a Panda Series where: # series index is the independent var # series values are the dependent var # if intercept is None, then ols will calculate an intercept # if intercept is not None, then ols will calculate line through # provided intercept. # # output is a pd.Series: # slope intercept r-sqr # series name # # # # # Intended for use with DataFrame.apply() function profile_name = series.name positions = series.index.values if pd.isnull(series).all(): return pd.Series({'r-sqr' : None, 'slope' : None, 'intercept' : None, }, name=profile_name) if intercept is None: # No fixed intercept provided, allow freedom independent = sm.add_constant(positions.astype(np.float64)) else: # sm.OLS will force fit through zero by default # offset the data by the intercept to force fit through intercept series = series - intercept independent = positions.astype(np.float64) dependent = series.values.astype(np.float64) results = sm.OLS(dependent, independent, missing='drop').fit() p = results.params out = {'r-sqr' : results.rsquared} if intercept is None: out['slope'] = p[1] out['intercept'] = p[0] else: out['slope'] = p[0] out['intercept'] = intercept return pd.Series(out, name=profile_name) def save_figure(self, figure_name): filepath = ospath_join(self.figure_destination, self.figure_subdir, figure_name) if self.fig_debug: self.logger.write(f"DEBUG: NOT saving figure to {filepath}") else: self.logger.write(f"Saving figure to {filepath}") plt.savefig(filepath, orientation='landscape') def export_data(self, data, filename): dirpath = ospath_join( self.export_destination, self.export_subdir) hm.ensure_dir_exists(dirpath) filepath = ospath_join( dirpath, filename) self.logger.write(f"Exporting data to {filepath}") self.data_loader.save_txt(data, filepath, kwargs={'sep':',', 'index':True}, is_path=True) def plot_group(self, group, plot_kwargs): # Plot each group as a line # Really, this would be more appropriate named as plot_dataframe as it # is most often used to plot a dataframe. However, it was originally # written for groupby objects, which I am having trouble using # properly. 
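# Accepted inputs: a DataFrame, a Series, a DataFrameGroupBy, or a (name, group)
# tuple from iterating a groupby; each case is normalized to a list of DataFrames
# before the per-group plotting below.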
groups = [] if isinstance(group, pd.core.frame.DataFrame): groups = [group] elif isinstance(group, pd.core.series.Series): groups = [group.to_frame()] elif isinstance(group, pd.core.groupby.DataFrameGroupBy): # Set of groups in groupby object names, groups = zip(*[iter_val for iter_val in group]) try: for name in names: if name not in self.plot_labels: self.plot_labels.append(name) except AttributeError: pass elif isinstance(group, tuple) and len(group) == 2: assert isinstance(group[1], pd.core.groupby.DataFrame) # Came from a grouby object self.plot_labels.append(group[0]) groups = [group[1]] else: print("Unknown argument") print(type(group)) print(group) assert(False) for group_data in groups: self._time_plot_prep(group_data, plot_kwargs) plot_kwargs['ax'].set_ylabel('') def _time_plot_prep(self, data, plot_kwargs, auto_plot=True): # For plotting data where 'exp_time' is in the index as minutes df = data.reset_index() df.sort_values(plot_kwargs['x'], inplace=True) if 'exp_time' in df.columns: df['exp_time'] = df['exp_time'] / 60 if auto_plot: # Plot using some default settings self._generic_df_plot(df, plot_kwargs) else: # Return the time formatted df for special plotting return df def _generic_df_plot(self, df, plot_kwargs): # Plot dataframe with some default formatting try: df.plot(**plot_kwargs) except TypeError: # No values to plot, skip it pass plot_kwargs['ax'].get_xaxis().get_label().set_visible(False) plot_kwargs['ax'].tick_params( bottom=True, top=True, left=True, right=True) def generic_plot_experiments(self, plot_fu, post_plot_fu, data, plot_kwargs, figure_name, subplot_shape=(4,2), save=True): # Generic framework for plotting for the 8 experiments # Data is recommended to be a dict of experiments, but doesn't have to # be # Get subplots fig, axs = self.create_experiment_subplots(*subplot_shape) # Make one plot per experiment exp_codes = self.omnimanager.get_exp_codes() legend_ax = None legend_exp_code = plot_kwargs.pop('legend_exp_code') \ if 'legend_exp_code' in plot_kwargs else '2B' for exp_code, ax in zip(exp_codes, axs.flatten()): if exp_code == legend_exp_code: legend_ax = ax if self.for_paper and exp_code == '2B' and self.skip_2B: self.logger.write(f">>Skipping<< experiment {exp_code}") ax.set_axis_off() continue self.logger.write(f"Plotting experiment {exp_code}") plot_kwargs['ax'] = ax experiment = self.omnimanager.experiments[exp_code] ax.set_title(f"{exp_code} {experiment.name}") plot_fu(exp_code, data, plot_kwargs) if not self.for_paper: self.plot_2B_X(exp_code, ax) if self.for_paper: self.add_common_label(axs[0,0], legend_ax, has_quartiles=False) post_plot_fu(fig, axs, plot_kwargs) # Generate a figure name and save the figure if save: self.save_figure(figure_name) plt.show() def format_generic_figure(self, fig, axs, plot_kwargs, fig_kwargs): # Must add xlabel, ylabel, and title to fig_kwargs xlabel = fig_kwargs['xlabel'] # Common xlabel ylabel = fig_kwargs['ylabel'] # Common ylabel title_str = fig_kwargs['title'] # Common title plot_labels = [] if 'legend_labels' not in fig_kwargs \ else fig_kwargs['legend_labels'] # Common legend labels # Set the spacing and area of the subplots fig.tight_layout() if self.for_paper: # Minimize whitespace # leave out title and legend # Larger font so it is readable in pdf fig.subplots_adjust(top=0.95, left=0.075, bottom=0.075, right=0.99) fontsize = 25 # Set common x label fig.text(0.5, 0.01, xlabel, ha='center', usetex=True, fontsize=fontsize) # Set common y label rotation = fig_kwargs['ylabel_rotation'] if 'ylabel_rotation' in 
fig_kwargs else 'vertical' fig.text(0.0005, 0.5, ylabel, va='center', usetex=True, fontsize=fontsize, rotation=rotation) else: fig.subplots_adjust(top=0.9, left=0.10, bottom=0.075, right=0.90) fontsize = 16 if len(plot_labels) > 1: # Format the common legend if there is more than one y ax0_lines = axs[0,0].get_lines() fig.legend(handles=ax0_lines, labels=plot_labels, loc='center right') # Set common x label fig.text(0.5, 0.01, xlabel, ha='center', usetex=True, fontsize=fontsize) # Set common y label rotation = fig_kwargs['ylabel_rotation'] if 'ylabel_rotation' in fig_kwargs else 'vertical' fig.text(0.01, 0.5, ylabel, va='center', usetex=True, fontsize=fontsize, rotation=rotation) # Make a title plt.suptitle(title_str, fontsize=fontsize, usetex=True) def generic_set_grid(self, ax, **kwargs): # Use a consistent format for the grids # This makes the major grid a light grey and only shows the minor ticks # on the x axis if 'xticks_minor' in kwargs and kwargs['xticks_minor']: minor_locator = mpl.ticker.AutoMinorLocator(2) ax.xaxis.set_minor_locator(minor_locator) ax.tick_params(axis='x', which='minor', top=True, bottom=True) if 'yticks_minor' in kwargs and kwargs['yticks_minor']: minor_locator = mpl.ticker.AutoMinorLocator(2) ax.yaxis.set_minor_locator(minor_locator) ax.tick_params(axis='y', which='minor', left=True, right=True) ax.grid(True, which='major', color='#d6d6d6') #plot_kwargs['ax'].grid(True, which='minor', color='#f2f2f2', axis='x') ax.set_axisbelow(True) def draw_feed_Di(self, ax, Di, zorder=1, multiple=1.0, text=None): # Di like 'D50' # multiple is meant for multiples of the armor ratio # text is the in-plot line label assert(Di in settings.sum_feed_Di) kwargs = {'c' : 'k', 'linestyle' : '--', 'label' : self.get_feed_Di_label(Di), # -> Feed D_50 } if zorder is not None: kwargs['zorder'] = zorder feed_Di = settings.sum_feed_Di[Di] * multiple ax.axhline(feed_Di, **kwargs) if text is not None: ax.annotate(text, xy=(1.25, feed_Di*1.01), xycoords='data') def get_feed_Di_label(self, Di): return rf"Feed $D_{{{Di[1:]}}}$" def plot_distribution(self, distribution, is_frac=True, ax=None, **kwargs): if not is_frac: distribution = distribution.cumsum() / distribution.sum() if ax is None: fig = plt.figure() ax = plt.gca() distribution.plot(ax=ax, logx=True) index = distribution.index.values if 'hlines' in kwargs: [ax.axhline(y) for y in kwargs['hlines']] ax.xaxis.set_major_formatter(mpl.ticker.FixedFormatter(index)) ax.xaxis.set_major_locator(mpl.ticker.FixedLocator(index)) ax.yaxis.set_major_formatter(mpl.ticker.FixedFormatter(np.arange(11) / 10)) ax.yaxis.set_major_locator(mpl.ticker.FixedLocator(np.arange(11) / 10)) ax.minorticks_off() ax.set_ylim((0,1.01)) ax.set_ylabel(f"Fraction less than") self.generic_set_grid(ax) if 'title' in kwargs: ax.set_title(kwargs['title']) def plot_2B_X(self, exp_code, ax): # Puts a big X over the 2B plot so people aren't confused if exp_code != '2B': return for i in [0,1]: #line = mpl.lines.Line2D([0, 1], [i, abs(i-1)], # lw=2, color='k', alpha=0.75) #line.set_clip_on(False) #ax.add_line(line) ax.plot([0, 1], [i, 1-i], transform=ax.transAxes, lw=2, color='k') # Plotting functions that cross genres def make_pseudo_hysteresis_plots(self, y_name='D50', plot_2m=True): #reload_kwargs = { # 'check_ignored_fu' : \ # lambda period_data: period_data.step == 'rising-50L', #} if y_name in ['Bedload all', 'D50', 'D84']: reload_fu = self.omnimanager.reload_Qs_data fig_subdir = 'lighttable' elif y_name in ['depth', 'slope']: reload_fu = self.omnimanager.reload_depth_data 
fig_subdir = 'depth' elif y_name in ['bed-D50', 'bed-D84']: reload_fu = self.omnimanager.reload_gsd_data fig_subdir = 'gsd' else: raise NotImplementedError self.generic_make(f"pseudo hysteresis", reload_fu, self.plot_pseudo_hysteresis, #load_fu_kwargs=reload_kwargs, plot_fu_kwargs={'y_name':y_name, 'plot_2m':plot_2m}, fig_subdir=fig_subdir) def plot_pseudo_hysteresis(self, y_name='D50', plot_2m=True): # Do stuff before plot loop x_name = 'pseudo discharge' roll_window = 10 #minutes # # note: plot_2m Only works for data with stationing (eg. depth data) plot_kwargs = { 'x' : x_name, 'y' : y_name, 'kind' : 'line', #'legend' : True, 'legend' : False, #'xlim' : (-0.25, 8.25), #'ylim' : (0, settings.lighttable_bedload_cutoff), # for use without logy #'logy' : True, #'ylim' : (0.001, settings.lighttable_bedload_cutoff), # for use with logy } rolling_kwargs = { 'x' : 'exp_time_hrs', 'y' : y_name, 'window' : roll_window*60, # seconds 'min_periods' : 20, 'center' : True, #'on' : plot_kwargs['x'], } # Add to plot_kwargs as a hacky way to get info to _plot function plot_kwargs['rolling_kwargs'] = rolling_kwargs plot_kwargs['plot_2m'] = plot_2m if y_name in ['Bedload all', 'D50', 'D84']: gather_kwargs = { #'ignore_steps' : ['rising-50L'] } all_data = self.gather_Qs_data(gather_kwargs) if y_name in ['D50', 'D84']: # Gather the sieve data Di for plotting all_sieve_data = self.gather_sieve_data({}) plot_kwargs['sieve_data'] = all_sieve_data elif y_name in ['depth', 'slope']: gather_kwargs = { 'new_index' : ['exp_time', 'location'], } all_data = self.gather_depth_data(gather_kwargs) elif y_name in ['bed-D50', 'bed-D84']: gather_kwargs = { 'columns' : [y_name[-3:]], 'new_index' : ['exp_time', 'sta_str'], } all_data = self.gather_gsd_data(gather_kwargs) else: raise NotImplementedError filename_y_col = y_name.replace(' ', '-').lower() logy_str = '_logy' if 'logy' in plot_kwargs and plot_kwargs['logy'] else '' plot_2m_str = '_2m' if plot_2m else '' figure_name = '_'.join([ f"pseudo_hysteresis{plot_2m_str}", f"{filename_y_col}", f"roll-{roll_window}min{logy_str}.png", ]) # Start plot loop self.generic_plot_experiments( self._plot_pseudo_hysteresis, self._format_pseudo_hysteresis, all_data, plot_kwargs, figure_name, subplot_shape=(2,4)) def _plot_pseudo_hysteresis(self, exp_code, all_data, plot_kwargs): # Pull out some kwargs variables y_name = plot_kwargs['y'] x_name = plot_kwargs['x'] ax = plot_kwargs['ax'] plot_kwargs = plot_kwargs.copy() rolling_kwargs = plot_kwargs.pop('rolling_kwargs') plot_2m = plot_kwargs.pop('plot_2m') exp_data = all_data[exp_code] if y_name in ['Bedload all', 'D50', 'D84']: # Based on Qs data data = exp_data[exp_data['discharge'] != 50] try: sieve_data = plot_kwargs.pop('sieve_data')[exp_code] except KeyError: sieve_data = None # Roll it roll_y_var = rolling_kwargs['y'] rolled = self.roll_data(data, rolling_kwargs) if y_name == 'Bedload all': data = data.assign(roll_median=rolled.median().values) plot_kwargs['y'] = 'roll_median' else: data = data.assign(roll_mean=rolled.mean().values) plot_kwargs['y'] = 'roll_mean' elif y_name in ['depth', 'slope']: # Based on depth data sta_min, sta_max = (4.5, 6.5) if plot_2m else (0, 8) sta_keep = [s for s in exp_data.columns if sta_min <= s <= sta_max] if y_name == 'depth': data = exp_data.xs(y_name, level='location').loc[:, sta_keep] avg_depths = data.mean(axis=1) data.loc[:, y_name] = avg_depths elif y_name == 'slope': # Assume water surface slope cm2m = 1/100 surface_data = exp_data.xs('surface', level='location') *cm2m keep_data = 
surface_data.loc[:, sta_keep] notnull = keep_data.notnull() keep_data[notnull.sum(axis=1) <=3] = np.nan # require > 3 pts ols_out, flume_elevations = self._calc_retrended_slope( keep_data, None, None) data = ols_out elif y_name in ['bed-D50', 'bed-D84']: # Trim stations sta_min, sta_max = (4.5, 6.5) if plot_2m else (0, 8) sta_keep = [(t,s) for t,s in exp_data.index \ if sta_min <= float(s.split('-')[1])/1000 <= sta_max] raw_bed_data = exp_data.loc[sta_keep, :] # Get geom mean log2_bed_data = np.log2(raw_bed_data) log2_mean = log2_bed_data.groupby('exp_time').mean() data = np.exp2(log2_mean) # Make a geometric mean point at 270min (4.5hrs) so lines look # cleaner index_vals = data.index.values insert_index = index_vals.searchsorted(270) before_index = index_vals[insert_index - 1] after_index = index_vals[insert_index] yn = y_name[-3:] log2_before = np.log2(data.loc[before_index, yn]) log2_after = np.log2(data.loc[after_index, yn]) data.loc[270, yn] = np.exp2((log2_before + log2_after)/2) data.sort_index(inplace=True) plot_kwargs['y'] = y_name[-3:] else: raise NotImplementedError # Fold time in half around the peak using 'exp_time_hrs' peak_time = 4.5 # hrs exp_time_name = 'exp_time' exp_time_hrs_name = 'exp_time_hrs' if exp_time_hrs_name in data.columns: pass elif exp_time_name == data.index.name: # Make new column based on index data.loc[:, exp_time_hrs_name] = data.index / 60 else: raise NotImplementedError exp_time_hrs = data[exp_time_hrs_name] #assert(exp_time_hrs.iloc[-1] <= 8.0) # Otherwise not peak_time invalid data['pseudo discharge'] = peak_time - np.fabs(exp_time_hrs - peak_time) # Split data into limbs rising = data[data[exp_time_hrs_name] <= peak_time] falling = data[data[exp_time_hrs_name] >= peak_time] # Print some mass stats if applicable if y_name == 'Bedload all': rising_sum = rising[roll_y_var].sum() / 1000 # kg falling_sum = falling[roll_y_var].sum() / 1000 # kg limb_ratio = rising_sum / falling_sum self.logger.write([f"Rising sum = {rising_sum:3.0f} kg", f"Falling sum = {falling_sum:3.0f} kg", f"Rising/falling = {limb_ratio:3.2f}"], local_indent=1) # print latex table row print(f" {exp_code} & {rising_sum:3.0f} & {falling_sum:3.0f} & " + \ f"{(rising_sum+falling_sum):3.0f} & {limb_ratio:3.2f} \\\\") # Grab the default pyplot colors prop_cycle = plt.rcParams['axes.prop_cycle'] colors = prop_cycle.by_key()['color'] rising_color, falling_color = colors[0:2] # Not actually grouped, but can still use self.plot_group plot_kwargs['label'] = 'Rising Limb' plot_kwargs['color'] = rising_color self.plot_group(rising, plot_kwargs) plot_kwargs['label'] = 'Falling Limb' plot_kwargs['color'] = falling_color self.plot_group(falling, plot_kwargs) # Draw the feed Di if applicable if y_name in ['D50', 'D84']: self.draw_feed_Di(ax, y_name) # Plot the sieve data Di if applicable sieve_data[y_name] = self.calc_Di(sieve_data, target_Di=y_name) sieve_data.reset_index(inplace=True) sieve_exp_time_hrs = sieve_data['exp_time'] / 60 assert(sieve_exp_time_hrs.iloc[-1] == 8.0) sieve_folded_hrs = peak_time - np.fabs(sieve_exp_time_hrs - peak_time) sieve_data['pseudo discharge'] = sieve_folded_hrs sieve_plot_kwargs = { 'x' : 'exp_time' if x_name == 'exp_time_hrs' else x_name, 'y' : y_name, 'label' : rf"Trap {y_name}", 'ax' : ax, 'legend' : False, 'marker' : '*', 'markersize': 7, 'linestyle' : 'None', } sieve_rising = sieve_data[sieve_exp_time_hrs <= peak_time] sieve_falling = sieve_data[sieve_exp_time_hrs > peak_time] sieve_plot_kwargs['color'] = rising_color self.plot_group(sieve_rising, 
sieve_plot_kwargs) sieve_plot_kwargs['color'] = falling_color self.plot_group(sieve_falling, sieve_plot_kwargs) elif y_name == 'bed-D84': self.draw_feed_Di(ax, y_name[-3:]) elif y_name == 'bed-D50': yn = y_name[-3:] self.draw_feed_Di(ax, yn, multiple=1.0, text='Armor 1.0') self.draw_feed_Di(ax, yn, multiple=1.5, text='Armor 1.5') # Turn on the grid self.generic_set_grid(ax, yticks_minor=True) # Set the tick marks ax.set_xlim((1, peak_time)) ticker = mpl.ticker ax.xaxis.set_major_locator(ticker.FixedLocator(range(5))) ax.tick_params(axis='x', which='major', top=True, bottom=True) ax.xaxis.set_major_formatter(ticker.NullFormatter()) # Turn off minor axis ticks ax.tick_params(axis='x', which='minor', top=False, bottom=False) #text_locations = np.concatenate([np.arange(4) + 0.5, [4.25]]) ##for xloc, label in zip(text_locations, text_labels): ## ax.text(xloc, 0, label) #text_labels = mpl.ticker.FixedFormatter([rf'${d}L/s$' for d in [50, 62, 75, 87, 100]]) #ax.xaxis.set_major_formatter(text_labels) #ax.xaxis.OFFSETTEXTPAD = 100 ##ax.tick_params(pad=15) #ax.grid(True, which='major', color='#d6d6d6') #plot_kwargs['ax'].grid(True, which='minor', color='#f2f2f2', axis='x') #ax.set_axisbelow(True) def _format_pseudo_hysteresis(self, fig, axs, plot_kwargs): # Format the figure after the plot loop fig_kwargs = { 'xlabel' : r"Discharge", 'legend_labels' : [r"Rising", "Falling"], } y_name = plot_kwargs['y'] if y_name == 'Bedload all': fig_kwargs['ylabel'] = r"Bedload transport (g/s)" fig_kwargs['title'] = r"Pseudo hysteresis of bedload transport" elif y_name in ['D50', 'D84']: Di = plot_kwargs['y'] fig_kwargs['ylabel'] = rf"$D_{{ {Di[1:]} }}$ (mm)" fig_kwargs['title'] = rf"Pseudo hysteresis of {Di}" fig_kwargs['legend_labels'].append(self.get_feed_Di_label(Di)) elif y_name == 'depth': fig_kwargs['ylabel'] = rf"Depth (cm)" fig_kwargs['title'] = rf"Pseudo hysteresis of depth" elif y_name == 'slope': fig_kwargs['ylabel'] = rf"Slope (m/m)" fig_kwargs['title'] = rf"Pseudo hysteresis of slope" elif y_name in ['bed-D50', 'bed-D84']: Di = plot_kwargs['y'][-3:] fig_kwargs['ylabel'] = rf"$D_{{ {Di[1:]} }}$ (mm)" fig_kwargs['title'] = rf"Pseudo hysteresis of bed surface {Di}" fig_kwargs['legend_labels'].append(self.get_feed_Di_label(Di)) if y_name == 'bed-D50': # Hacky hardcoded way to make sure armor ratio text fits on # graph global_ymin, global_ymax = (7, 12.5) # min range for ax in axs.flatten(): ymin, ymax = ax.get_ylim() global_ymin = min(global_ymin, ymin) global_ymax = max(global_ymax, ymax) ax.set_ylim((global_ymin, global_ymax)) else: raise NotImplementedError self.format_generic_figure(fig, axs, plot_kwargs, fig_kwargs) ticker = mpl.ticker for ax in axs[-1]: # Set the major tick length default_length = ax.xaxis.get_major_ticks()[0].get_tick_padding() ax.tick_params(axis='x', which='major', bottom=True, top=False, length=default_length * 3) # Set the minor tick locations text_locations = np.concatenate([np.arange(4) + 0.5, [4.25]]) text_locator = ticker.FixedLocator(text_locations) ax.xaxis.set_minor_locator(text_locator) # Set the minor tick labels text_formats = [rf'${d}L/s$' for d in [50, 62, 75, 87, 100]] text_formatter = ticker.FixedFormatter(text_formats) ax.xaxis.set_minor_formatter(text_formatter) # Functions to plot only dem data def make_dem_subplots(self, plot_2m=True): self.generic_make("dem subplots", self.omnimanager.reload_dem_data, self.plot_dem_subplots, plot_fu_kwargs={'plot_2m' : plot_2m}, fig_subdir='dem_subplots') def plot_dem_subplots(self, plot_2m=True): # Can't use the 
generic_plot_experiments function because of the way I # want to display and save them. plot_kwargs = { } dem_gather_kwargs = { 'wall_trim' : settings.dem_wall_trim, } if plot_2m: dem_gather_kwargs['sta_lim'] = settings.stationing_2m dem_data = self.gather_dem_data(dem_gather_kwargs) # Calculate period ranks ranking = {"rising" : 0, "falling" : 1, "50L" : 0, "62L" : 1, "75L" : 2, "87L" : 3, "100L" : 4} get_rank = lambda l, d: d + 2*l*(4-d) get_ax_index = lambda l, d: get_rank(*[ranking[k] for k in (l, d)]) # Generate a base file name length_str = '2m' if plot_2m else '8m' filename_base = f"{length_str}_dems_{{}}.png" # Start plotting; 1 plot per experiment color_min, color_max = settings.dem_color_limits exp_codes = self.omnimanager.get_exp_codes() for exp_code in exp_codes: self.logger.write(f"Creating plots for {exp_code}") self.logger.increase_global_indent() # Create the subplot for this experiment if plot_2m: # make 3x3 grid fig, axs = (plt.subplots(3, 3, sharey=True, sharex=True, figsize=(18,10))) else: # make 8x1 grid fig, axs = plt.subplots(8, 1, sharey=True, sharex=True, figsize=(10,12)) first_image=None axs = axs.flatten() # Plot the dems for this experiment for period_key, period_dem in dem_data[exp_code].items(): limb, discharge, time, exp_time = period_key self.logger.write(f"Plotting {limb} {discharge} {time}") assert(time == 't60') ax = axs[get_ax_index(limb, discharge)] title_hr = exp_time//60 title_hr_str = f"{title_hr} {'hour' if title_hr == 1 else 'hours'}" ax.set_title(f"{limb.capitalize()} {discharge} {title_hr_str}") px_y, px_x = period_dem.shape ax.set_ybound(0, px_y) img = ax.imshow(period_dem, vmin=color_min, vmax=color_max) if first_image is None: first_image = img # Convert axes from px to stationing dem_res = settings.dem_resolution ticker = mpl.ticker # Convert x tick labels dem_offset = settings.stationing_2m[0] if plot_2m else settings.dem_long_offset dem_offset_mod = dem_offset%500#settings.dem_long_offset % 1000 long_fu = lambda x, p: f"{(x * dem_res + dem_offset) / 1000:1.1f}" ax.xaxis.set_major_formatter(ticker.FuncFormatter(long_fu)) # Convert x tick locations # Make the resulting tick locations at either 0.5 or 1 m # intervals (250px or 500px). 
# Use MultipleLocator to easily generate tick locations then # correct for the dem offset auto_locator = ticker.MultipleLocator(250 if plot_2m else 500) auto_ticks = auto_locator.tick_values(0, px_x)[1:-1] offset_ticks = auto_ticks - dem_offset_mod / dem_res ax.xaxis.set_major_locator(ticker.FixedLocator(offset_ticks)) # Convert y tick labels dem_trim = settings.dem_wall_trim trav_fu = lambda x, p: f"{(x * dem_res + dem_trim):4.0f}" trav_formatter = ticker.FuncFormatter(trav_fu) ax.yaxis.set_major_formatter(trav_formatter) y_locator = ticker.MultipleLocator(200) ax.yaxis.set_major_locator(y_locator) self.logger.decrease_global_indent() # Format the figure xlabel = rf"Longitudinal Station (m)" ylabel = rf"Transverse Station (mm)" min_tick = np.amin(offset_ticks) max_tick = np.amax(offset_ticks) plt.xlim((min(0, min_tick), max(px_x, max_tick))) plt.ylim((0, px_y)) plt.tight_layout() if plot_2m: title_str = rf"{exp_code} DEM 2m subsections with wall trim" if self.for_paper: fig.subplots_adjust(top=0.9, left=0.05, bottom=0.075, right=0.95) fontsize = 16 axs[-1].set_visible(False) plt.colorbar(first_image, ax=axs[-1], use_gridspec=True) else: fig.subplots_adjust(top=0.9, left=0.05, bottom=0.075, right=0.95) fontsize = 16 else: title_str = rf"{exp_code} DEM 8m with wall trim" if self.for_paper: fig.subplots_adjust(top=0.97, left=0.1, bottom=0.05, right=0.95) fontsize = 16 plt.colorbar(first_image, ax=list(axs), use_gridspec=True, aspect=35, pad=0.01, anchor=(0.9, 0.5)) fig.subplots_adjust(right=0.9) else: fig.subplots_adjust(top=0.9, left=0.1, bottom=0.1, right=0.95) fontsize = 16 # Make a title if not self.for_paper: plt.suptitle(title_str, fontsize=fontsize, usetex=True) # Set common x label fig.text(0.5, 0.01, xlabel, ha='center', usetex=True, fontsize=fontsize) # Set common y label fig.text(0.01, 0.5, ylabel, va='center', usetex=True, fontsize=fontsize, rotation='vertical') # Save the figure filename = filename_base.format(exp_code) self.logger.write(f"Saving {filename}") self.save_figure(filename) #plt.show() #assert(False) def make_dem_semivariogram_plots(self): self.generic_make("dem semivariogram", self.omnimanager.reload_dem_data, self.plot_dem_semivariograms, figure_subdir='dem_variograms') def plot_dem_semivariograms(self): # Semivariogram are based on <NAME> 2014 # Can't use the generic_plot_experiments function because of the way I # want to display and save them. 
max_xlag = 300 #px max_ylag = 50 #px plot_kwargs = { } dem_gather_kwargs = { 'sta_lim' : settings.stationing_2m, 'wall_trim' : settings.dem_wall_trim, } dem_data = self.gather_dem_data(dem_gather_kwargs) # Calculate period ranks ranking = {"rising" : 0, "falling" : 1, "50L" : 0, "62L" : 1, "75L" : 2, "87L" : 3, "100L" : 4} get_rank = lambda l, d: d + 2*l*(4-d) get_ax_index = lambda l, d: get_rank(*[ranking[k] for k in (l, d)]) # Generate a base file name filename_base = f"dem_semivariograms_{{}}_{max_xlag}xlag-{max_ylag}ylag.png" # Start plotting; 1 plot per experiment exp_codes = self.omnimanager.get_exp_codes() for exp_code in exp_codes: self.logger.write(f"Creating semivariograms for {exp_code}") self.logger.increase_global_indent() # Make buffer room for labels in fig window btop = 0.8 bleft = 0.04 bbottom = 0.1 bright = 0.985 figsize = self._get_figsize(max_xlag, max_ylag, xbuffer=bleft + 1 - bright, ybuffer=bbottom + 1 - btop) # Create the subplots for this experiment fig, axs = plt.subplots(3, 3, sharey=True, sharex=True, figsize=figsize) axs = axs.flatten() # other parameters last_image=None levels_min, levels_max = (0,1) # contour levels min, max values xlabel = rf"l_x (px)" ylabel = rf"l_y (px)" title_str = rf"Semivariograms for experiment {exp_code} normalized by variance ($\sigma_z^2$)" fontsize = 16 # Calculate and plot the semivariograms for this experiment for period_key, period_dem in dem_data[exp_code].items(): limb, discharge, time, exp_time = period_key self.logger.write(f"Plotting {limb} {discharge} {time}") assert(time == 't60') ax = axs[get_ax_index(limb, discharge)] ax.axis('equal') ax.set_title(f"{limb} {discharge} {time}") # Calculate semivariogram x_coor, y_coor, semivariogram = self._calc_semivariogram( period_dem, max_xlag, max_ylag) ## Debug code (fast data) #x_coor, y_coor = np.meshgrid(np.arange(-max_xlag, max_xlag+1), # np.arange(-max_ylag, max_ylag+1)) ## Plot semivariogram levels = np.linspace(levels_min, levels_max, 40) last_image = ax.contourf(x_coor, y_coor, semivariogram, levels=levels, cmap='Greys_r') ## Debug code (fast data) #last_image = ax.contourf(x_coor, y_coor, # np.abs(x_coor*y_coor)/np.max(x_coor*y_coor), # levels=levels, cmap='Greys_r') self.logger.decrease_global_indent() # Format the figure plt.tight_layout() fig.subplots_adjust(top=btop, left=bleft, bottom=bbottom, right=bright) if max_xlag >= max_ylag: plt.ylim((-max_ylag, max_ylag)) plt.colorbar(last_image, orientation='horizontal', ticks=np.linspace(levels_min, levels_max, 6)) else: plt.xlim((-max_xlag, max_xlag)) plt.colorbar(last_image, orientation='vertical', ticks=np.linspace(levels_min, levels_max, 6)) # Make a title plt.suptitle(title_str, fontsize=fontsize, usetex=True) # Set common x label fig.text(0.5, 0.01, xlabel, ha='center', usetex=True, fontsize=fontsize) # Set common y label fig.text(0.01, 0.5, ylabel, va='center', usetex=True, fontsize=fontsize, rotation='vertical') axs[-1].set_visible(False) # Save the figure filename = filename_base.format(exp_code) self.logger.write(f"Saving {filename}") self.save_figure(filename) break def _calc_semivariogram(self, dem, x_pxlag_max, y_pxlag_max, normalize=True): # Create a semivariogram from the dem # x_pxlag_max and y_pxlag_max are the largest pixel lags to calculate # Will calculate from -lag_max to +lag_max in both dimensions # x is parallel to flow, y is perpendicular to flow nx = x_pxlag_max ny = y_pxlag_max x, y = np.meshgrid(np.arange(-nx, nx+1), np.arange(-ny, ny+1)) # Subset the right half (quadrants I and IV) x_half = x[:, 
nx:] y_half = y[:, nx:] half_svg = np.empty_like(x_half, dtype=np.float) iter = np.nditer([x_half, y_half, half_svg], flags = ['buffered', 'multi_index'], op_flags = [['readonly'], ['readonly'], ['writeonly', 'allocate', 'no_broadcast']]) for lx, ly, v in iter: # lx, ly are the lag coordinates (in pixels) of a dem element. # Calculate for quadrants I and IV -ny < y < ny and 0 <= x < nx # Quadrants II and III are symmetric semivariance = self._calc_semivariance(dem, lx, ly) v[...] = semivariance if normalize: variance = np.var(dem) half_svg /= variance # Rotate and trim the column on the y axis (x=0) rot_half_svg = half_svg[::-1, :0:-1] # Concatenate with the original half to create the full image semivariogram = np.concatenate((rot_half_svg, half_svg), axis=1) return x, y, semivariogram #plt.figure() #plt.hist(half_svg.flatten(), bins=40) #plt.figure() #plt.imshow(dem) #plt.figure() #levels = np.linspace(0,1,40) #plt.contourf(x, y, semivariogram, levels=levels, cmap='Greys_r') #plt.colorbar() ##plt.imshow(half_svg) #plt.show() #assert(False) def _calc_semivariance(self, dem, x_pxlag=0, y_pxlag=0): # Calculate the semivariance for the given 2D lag distances # dem should be an mxn numpy array. It should be oriented so upstream # is positive x. # x_pxlag is the pixel lag distance parallel to the flume # y_pxlag is the pixel lag distance perpendicular to the flume # returns an np array of [x_pxlag, y_pxlag, semivar] # # x_pxlag and y_pxlag can be negative, but the semivariance function is # rotationally symmetric so it might be wasted calculations # # Based on equation from <NAME> 2014 # semivar (lagx, lagy) = sum[i=1 -> N-n; j=1 -> M-m] ( # z(xi + lagx, yj + lagy) - z(xi, yj))**2 / (2(N-n)(M-m)) # # Converted to: # semivar (lagx, lagy) = sum( # (dem[lagx, lagy subset] - dem[origin subset])**2 ) / (2(N-n)(M-m)) # Get size of dem M, N = dem.shape # M = rows, N = cols nx = abs(x_pxlag) ny = abs(y_pxlag) if ny >= M or nx >= N: self.logger.write("Lag out of bounds") self.logger.write([f"# rows, # columns = {M, N}", f"y_pxlag, x_pxlag = {y_pxlag, x_pxlag}"], local_indent=1) assert(False) #return np.nan ## Get index coordinates for offset dem subsets # Can handle positive and negative lag values # # Calculate start and end offsets for subarray A s_fu = lambda l: int((abs(l) - l) / 2) e_fu = lambda l: int((abs(l) + l) / 2) # Start corner of subarray A sx = s_fu(x_pxlag) sy = s_fu(y_pxlag) # End corner of subarray A ex = e_fu(x_pxlag) ey = e_fu(y_pxlag) # Get the offset dem subsets # Remember, slicing is [rows, cols] dem_A = dem[sy : M-ey, sx : N-ex] # starts at origin if both lags >= 0 dem_B = dem[ey : M-sy, ex : N-sx] # Calculate the deviation squared. 
deviations = (dem_B - dem_A)**2 # Calculate the semivariance denominator = 2 * (N - nx) * (M - ny) semivar = np.sum(deviations) / denominator #print(f"x_lag, y_lag = {x_pxlag, y_pxlag}") #print(f"dem_A = dem[{sy} : {M - ey}, {sx} : {N - ex}]") #print(f"dem_B = dem[{ey} : {M - sy}, {ex} : {N - sx}]") #print(f"denominator = 2 * ({N - nx}) * ({M - ny}) = {denominator}") #print(f"semivar = {semivar}") #print() #assert(False) return semivar def _get_figsize(self, xmax, ymax, xbuffer=0, ybuffer=0): aspect_ratio = xmax / ymax xbuff_ratio = (1 - xbuffer) ybuff_ratio = (1 - ybuffer) # largest allowed for my screen accounting for buffer imgx = 19.0 * xbuff_ratio imgy = 10.0 * ybuff_ratio # aspect corrected size ax = imgy * aspect_ratio ay = imgx / aspect_ratio # pick the one that fits and correct back to full size figx = (imgx if ax > imgx else ax) / xbuff_ratio figy = (imgy if ay > imgy else ay) / ybuff_ratio return (figx, figy) def make_dem_stats_plots(self): self.generic_make("dem stats time", self.omnimanager.reload_dem_data, self.plot_dem_stats, figure_subdir='dem_stats') def plot_dem_stats(self): # Stats are based on <NAME> 2014 # Do stuff before plot loop x_name = 'exp_time' # y_name options: y_names = ['mean', 'stddev', 'skewness', 'kurtosis'] plot_kwargs = { 'x' : x_name, #'y' : y_name, #'kind' : 'scatter', 'legend' : False, } dem_gather_kwargs = { 'sta_lim' : settings.stationing_2m, 'wall_trim' : settings.dem_wall_trim, } dem_data = self.gather_dem_data(dem_gather_kwargs) # Calculate stats dem_stats = {} exp_codes = self.omnimanager.get_exp_codes() for exp_code in exp_codes: self.logger.write(f"Calculating stats for {exp_code}") dem_stats[exp_code] = self._calc_dem_stats(exp_code, dem_data) # Generate a base file name filename_x = x_name.replace('_', '-').lower() if 'sta_lim' in dem_gather_kwargs: sta_min, sta_max = dem_gather_kwargs['sta_lim'] subset_str = f"_sta-{sta_min}-{sta_max}" else: subset_str = '' figure_name = f"dem_{{}}_v_{filename_x}{subset_str}.png" figure_name = ospath_join("dem_stats", figure_name) # Plot the 4 different stats for y_name in y_names: self.logger.write(f"Plotting {y_name}") self.logger.increase_global_indent() plot_kwargs['y'] = y_name filename_y = y_name.replace('_', '-').lower() # Start plot loop self.generic_plot_experiments( self._plot_dem_stats, self._format_dem_stats, dem_stats, plot_kwargs, figure_name.format(filename_y)) self.logger.decrease_global_indent() def _plot_dem_stats(self, exp_code, all_stats_data, plot_kwargs): # Do stuff during plot loop # Plot an experiment # Not actually grouped, but can still use self.plot_group stats_data = all_stats_data[exp_code] self.plot_group(stats_data, plot_kwargs) def _format_dem_stats(self, fig, axs, plot_kwargs): # Format the figure after the plot loop y_name = plot_kwargs['y'] y_labels = { 'mean' : ('Mean', '(mm)'), 'stddev' : ('Standard Deviation', '(mm)'), 'skewness' : ('Skewness', ''), 'kurtosis' : ('Kurtosis', ''), } name, units = y_labels[y_name] y_label = rf"{name}" + " {units}" if units else rf"{units}" fig_kwargs = { 'xlabel' : r"Experiment time (hours)", 'ylabel' : rf"Bed Elevation {y_label}", 'title' : rf"{name} of the detrended bed surface elevations for the 2m subsection", 'legend_labels' : [rf"Elevation {name}"], } self.format_generic_figure(fig, axs, plot_kwargs, fig_kwargs) def _calc_dem_stats(self, exp_code, dem_data, kwargs={}): # Calculate the four different statistics from <NAME> 2014 # Keep ordered lists of the keys and resulting data # Will be converted later to pandas dataframe with 
multiindex key_limb = [] key_discharge = [] key_time = [] exp_times = [] dem_means = [] dem_stddevs = [] dem_skewnesses = [] dem_kurtoses = [] for period_key, period_dem in dem_data[exp_code].items(): limb, discharge, time, exp_time = period_key key_limb.append(limb) key_discharge.append(discharge) key_time.append(time) exp_times.append(exp_time) # Calculate overall mean elevation mean = np.mean(period_dem) # Calculate bed elevation variance (std dev squared) # Can I simply use overall mean?? Eq uses mean value for that # location? deviation = period_dem - mean variance = np.mean(deviation**2) stddev = np.sqrt(variance) #stddev2 = np.nanstd(period_dem) # Same as above # Calculate skewness n_points = period_dem.size skewness = np.sum(deviation**3) / (n_points * stddev**3) # Calculate kurtosis kurtosis = np.sum(deviation**4) / (n_points * stddev**4) - 3 ## Some debugging code #print(exp_code, period_key) ##print(period_dem[::50, ::100]) #print(f"Mean = {mean}") #print(f"Std dev = {stddev}") #print(f"Skewness = {skewness}") #print(f"Kurtosis = {kurtosis}") #plt.figure(10) #plt.imshow(period_dem) #plt.figure(20) #plt.hist(period_dem.flatten(), 50, normed=True) #plt.show(block=False) #sleep(0.5) #plt.close('all') # Add it to the lists dem_means.append(mean) dem_stddevs.append(stddev) dem_skewnesses.append(skewness) dem_kurtoses.append(kurtosis) # Create the dataframe mindex = pd.MultiIndex.from_arrays([key_limb, key_discharge, key_time], names=['limb', 'discharge', 'time']) stats_df = pd.DataFrame({'exp_time' : exp_times, 'mean' : dem_means, 'stddev' : dem_stddevs, 'skewness' : dem_skewnesses, 'kurtosis' : dem_kurtoses, }, index=mindex) return stats_df def make_dem_roughness_plots(self): self.logger.write([f"Making dem roughness time plots..."]) indent_function = self.logger.run_indented_function indent_function(self.omnimanager.reload_dem_data, before_msg="Loading data", after_msg="Data Loaded!") indent_function(self.plot_dem_roughness, before_msg=f"Plotting dem roughness", after_msg="Finished Plotting!") self.logger.end_output() def plot_dem_roughness(self): # Do stuff before plot loop x_name = 'exp_time' y_name = 'stddev' plot_kwargs = { 'x' : x_name, 'y' : y_name, #'kind' : 'scatter', 'legend' : False, } dem_gather_kwargs = { } dem_data = self.gather_dem_data(dem_gather_kwargs) filename_x = x_name.replace('_', '-').lower() filename_y = y_name.replace('_', '-').lower() figure_name = f"dem_{filename_y}_v_{filename_x}.png" # Start plot loop self.generic_plot_experiments( self._plot_dem_roughness, self._format_dem_roughness, dem_data, plot_kwargs, figure_name) def _plot_dem_roughness(self, exp_code, dem_data, plot_kwargs): # Do stuff during plot loop # Plot an experiment key_limb = [] key_discharge = [] key_time = [] stddev = [] exp_times = [] for period_key, period_dem in dem_data[exp_code].items(): stddev.append(np.nanstd(period_dem)) limb, discharge, time, exp_time = period_key key_limb.append(limb) key_discharge.append(discharge) key_time.append(time) exp_times.append(exp_time) mindex = pd.MultiIndex.from_arrays([key_limb, key_discharge, key_time], names=['limb', 'discharge', 'time']) stats_df = pd.DataFrame({'stddev' : stddev, 'exp_time' : exp_times}, index=mindex) # Not actually grouped, but can still use self.plot_group self.plot_group(stats_df, plot_kwargs) def _format_dem_roughness(self, fig, axs, plot_kwargs): # Format the figure after the plot loop fig_kwargs = { 'xlabel' : r"Experiment time (hours)", 'ylabel' : r"Bed Elevation Standard Deviation (mm)", 'title' : r"Standard 
deviations of the detrended bed surface elevations", 'legend_labels' : [r"Elevation Std dev"], } self.format_generic_figure(fig, axs, plot_kwargs, fig_kwargs) def gather_dem_data(self, kwargs): # Gather all the dem data into a dict of data dicts separated by # exp_code = { exp_code : {(limb, flow, time) : data}} self.dem_data_all = {} self.omnimanager.apply_to_periods(self._gather_dem_data, kwargs) #plt.show() #assert(False) return self.dem_data_all def _gather_dem_data(self, period, kwargs): # Have periods add themselves to the overall dem dict exp_code = period.exp_code data = period.dem_data if data is not None: dem_res = settings.dem_resolution # mm/px #data_copy = data.copy() if 'sta_lim' in kwargs: # Throw away data outside the stationing limits # sta_lim = stationing for a subsection of the dem dem_offset = settings.dem_long_offset index_lim = [(x - dem_offset) // dem_res for x in kwargs['sta_lim']] idx_min, idx_max = index_lim data = data[:, idx_min:idx_max] if 'wall_trim' in kwargs: # Throw away data too close to the wall trim = kwargs['wall_trim'] n_trim_rows = trim // dem_res data = data[n_trim_rows:-n_trim_rows, :] key = (period.limb, period.discharge, period.period_end, period.exp_time_end) # Debugging code #if exp_code == '1A':# and period.limb == 'rising' and period.discharge == '87L': # print(exp_code, key) # #f1 = plt.figure(1) # #plt.imshow(data_copy) # fig = plt.figure() # #plt.imshow(data) # plt.hist(data.flatten(), 50, normed=True) # plt.title(f"{exp_code} {key}") # plt.xlim((120, 220)) # #plt.show() if exp_code in self.dem_data_all: self.dem_data_all[exp_code][key] = data else: self.dem_data_all[exp_code] = {key : data} # Functions to plot only manual data def make_mobility_plots(self, t_interval='period'): # Plot mobility assert(t_interval in ['period', 'step']) plot_fu_kwargs = { 't_interval' : t_interval } def reload_fu(): self.omnimanager.reload_gsd_data() self.omnimanager.reload_sieve_data() self.generic_make("mobility", reload_fu, self.plot_mobility, plot_fu_kwargs=plot_fu_kwargs, fig_subdir='synthesis') def plot_mobility(self, t_interval = 'D50'): # Do stuff before plot loop sizes = [0.5, 0.71, 1, 1.41, 2, 2.83, 4, 5.66, 8, 11.2, 16, 22.3, 32] x_name = 'Dgm' plot_kwargs = { 'x' : x_name, #'kind' : 'scatter', 'legend' : False, } # Add to plot_kwargs as a hacky way to get info to _plot function plot_kwargs['t_interval'] = t_interval # Collect the data gsd_gather_kwargs = { 'columns' : sizes, 'new_index' : ['exp_time', 'sta_str'], } data = { 'gsd' : self.gather_gsd_data(gsd_gather_kwargs), 'sieve' : self.gather_sieve_data({'columns' : sizes}), } # Make a filename filename_x = x_name.replace('_', '-').lower() figure_name = f"mobility_{t_interval}_v_{filename_x}.png" # Start plot loop self.generic_plot_experiments( self._plot_mobility, self._format_mobility, data, plot_kwargs, figure_name) def _plot_mobility(self, exp_code, data, plot_kwargs): # Do stuff during plot loop # Plot an experiment ax = plot_kwargs['ax'] plot_kwargs = plot_kwargs.copy() t_interval = plot_kwargs.pop('t_interval') # Get the data surf_gsd_data = data['gsd'][exp_code] bedload_gsd_data = data['sieve'][exp_code] # Collapse the surf_gsd data to just time surf_gsd_data = surf_gsd_data.groupby(level='exp_time').sum() # Partially collapse time if desired dt = 60 if t_interval == 'step' else 20 bins = np.arange(60, 8*60+dt, dt) def sum_interval(data): intervals = np.digitize(data.index, bins, right=True) summed = data.groupby(intervals).sum() bins_used = [bins[i] for i in summed.index.values] 
summed.index = pd.Index(bins_used, name='exp_time') return summed surf_gsd_data = sum_interval(surf_gsd_data) bedload_gsd_data = sum_interval(bedload_gsd_data) # Make Dgm surf_col_values = surf_gsd_data.columns.values bedload_col_values = bedload_gsd_data.columns.values assert(np.all(surf_col_values == bedload_col_values)) upper = surf_col_values lower = np.roll(upper, 1) lower[0] = 0.001 Dgm = np.exp2((np.log2(upper) + np.log2(lower))/2) # surf is by count, bedload is by mass. # approximate mass per rock based on geometric mean size # assume sphere? ### How to convert them properly??? Dg_mass_np = settings.sediment_density * 4000/3 * np.pi *(Dgm/2000)**3 #[print(f"{m:10.5f} -> {d:10.2f} mm") for m, d in zip(Dg_mass, Dgm)] Dg_mass = pd.Series(Dg_mass_np, index=surf_gsd_data.columns) surf_gsd_mass = surf_gsd_data.multiply(Dg_mass) # Convert both to fractional values def make_fractional(data): return data.div(data.sum(axis=1), axis=0) surf_fractional = make_fractional(surf_gsd_mass) bedload_fractional = make_fractional(bedload_gsd_data) # Calculate mobility mobility = bedload_fractional / surf_fractional mobility[surf_fractional == 0] = np.nan # Change index and columns exp_times = mobility.index.values mobility.index = pd.Index(exp_times/60, name='exp_time_hrs') mobility.columns =
pd.Index(Dgm, name='Dgm')
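# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original module): the mobility calculation
# above reduces to the arithmetic below -- geometric-mean bin centres (Dgm),
# count-to-mass conversion assuming spherical grains, then the ratio of
# fractional bedload to fractional surface content.  The sieve sizes mirror
# the `sizes` list in plot_mobility; the counts, masses and the sediment
# density value are illustrative assumptions.
# ---------------------------------------------------------------------------
import numpy as np
import pandas as pd

sizes = np.array([0.5, 0.71, 1, 1.41, 2])              # mm, upper limits of sieve bins
lower = np.roll(sizes, 1)
lower[0] = 0.001                                       # same convention as above
Dgm = np.exp2((np.log2(sizes) + np.log2(lower)) / 2)   # geometric bin centres (mm)

surf_counts = pd.Series([120, 80, 40, 20, 5], index=sizes, dtype=float)   # hypothetical counts
bedload_mass = pd.Series([10., 25., 30., 20., 5.], index=sizes)           # hypothetical grams

sediment_density = 2650                                # kg/m^3, assumed quartz density
grain_mass = sediment_density * 4000 / 3 * np.pi * (Dgm / 2000)**3  # mirrors the mass-per-grain expression above
surf_mass = surf_counts * grain_mass                   # surface GSD converted to mass

frac = lambda s: s / s.sum()                           # fractional values (sum to 1)
mobility = frac(bedload_mass) / frac(surf_mass)        # >1 means over-represented in bedload
print(mobility)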
import urllib.request as urlreq import io,json import pandas as pd # ****************************************************************************************************************************************** def download_smiles(myList,intv=1) : """Retrieve canonical SMILES strings for a list of input INCHIKEYS. Will return only one SMILES string per INCHIKEY. If there are multiple values returned, the first is retained and the others are returned in a the discard_lst. INCHIKEYS that fail to return a SMILES string are put in the fail_lst Args: myList (list): List of INCHIKEYS intv (1) : number of INCHIKEYS to submit queries for in one request, default is 1 Returns: list of SMILES strings corresponding to INCHIKEYS list of INCHIKEYS, which failed to return a SMILES string list of CIDs and SMILES, which were returned beyond the first CID and SMILE found for input INCHIKEY """ ncmpds=len(myList) smiles_lst,cid_lst,inchikey_lst=[],[],[] sublst="" fail_lst=[] discard_lst=[] for it in range(0,ncmpds,intv) : if (it+intv) > ncmpds : upbnd=ncmpds else : upbnd=it+intv sublst=myList[it:upbnd] inchikey = ','.join(map(str,sublst)) url="https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/inchikey/"+inchikey+"/property/CanonicalSMILES/CSV" try : response = urlreq.urlopen(url) html = response.read() except : fail_lst.append(inchikey) continue f=io.BytesIO(html) cnt=0 for l in f : l=l.decode("utf-8") l=l.rstrip() vals=l.split(',') if vals[0] == '"CID"' : continue if cnt > 0: #print("more than one SMILES returned, discarding. Appear to be multiple CID values",vals) #print("using",cid_lst[-1],smiles_lst[-1],inchikey_lst[-1]) discard_lst.append(vals) break cid_lst.append(vals[0]) sstr=vals[1].replace('"','') smiles_lst.append(vals[1]) inchikey_lst.append(myList[it+cnt]) cnt+=1 if cnt != len(sublst) : print("warning, multiple SMILES for this inchikey key",cnt,len(sublst),sublst) save_smiles_df=pd.DataFrame( {'CID' : cid_lst, 'standard_inchi_key' :inchikey_lst, 'smiles' : smiles_lst}) return save_smiles_df,fail_lst,discard_lst #****************************************************************************************************************************************** def download_bioactivity_assay(myList,intv=1) : """Retrieve summary info on bioactivity assays. Args: myList (list): List of PubChem AIDs (bioactivity assay ids) intv (1) : number of INCHIKEYS to submit queries for in one request, default is 1 Returns: Nothing returned yet, will return basic stats to help decide whether to use assay or not """ ncmpds=len(myList) smiles_lst,cid_lst,inchikey_lst=[],[],[] sublst="" fail_lst=[] jsn_lst=[] for it in range(0,ncmpds,intv) : if (it+intv) > ncmpds : upbnd=ncmpds else : upbnd=it+intv sublst=myList[it:upbnd] inchikey = ','.join(map(str,sublst)) url="https://pubchem.ncbi.nlm.nih.gov/rest/pug/assay/aid/"+inchikey+"/summary/JSON" try : response = urlreq.urlopen(url) html = response.read() except : fail_lst.append(inchikey) continue f=io.BytesIO(html) cnt=0 json_str="" for l in f : l=l.decode("utf-8") l=l.rstrip() json_str += l jsn_lst.append(json_str) return jsn_lst # save_smiles_df=pd.DataFrame( {'CID' : cid_lst, 'standard_inchi_key' :inchikey_lst, 'smiles' : smiles_lst}) # return save_smiles_df,fail_lst,discard_lst #****************************************************************************************************************************************** def download_SID_from_bioactivity_assay(bioassayid) : """Retrieve summary info on bioactivity assays. 
Args: a single bioactivity id: PubChem AIDs (bioactivity assay ids) Returns: Returns the sids tested on this assay """ myList=[bioassayid] ncmpds=len(myList) smiles_lst,cid_lst,inchikey_lst=[],[],[] sublst="" fail_lst=[] jsn_lst=[] intv=1 for it in range(0,ncmpds,intv) : if (it+intv) > ncmpds : upbnd=ncmpds else : upbnd=it+intv sublst=myList[it:upbnd] inchikey = ','.join(map(str,sublst)) url="https://pubchem.ncbi.nlm.nih.gov/rest/pug/assay/aid/"+inchikey+"/sids/JSON" try : response = urlreq.urlopen(url) html = response.read() except : fail_lst.append(inchikey) continue f=io.BytesIO(html) cnt=0 json_str="" for l in f : l=l.decode("utf-8") l=l.rstrip() json_str += l jsn_lst.append(json_str) res=json.loads(jsn_lst[0]) res_lst=res["InformationList"]['Information'][0]['SID'] return res_lst #https://pubchem.ncbi.nlm.nih.gov/rest/pug/assay/aid/504526/doseresponse/CSV?sid=104169547,109967232 #****************************************************************************************************************************************** def download_dose_response_from_bioactivity(aid,sidlst) : """Retrieve data for assays for a select list of sids. Args: myList (list): a bioactivity id (aid) sidlst (list): list of sids specified as integers Returns: Nothing returned yet, will return basic stats to help decide whether to use assay or not """ sidstr= "," . join(str(val) for val in sidlst) myList=[sidstr] ncmpds=len(myList) smiles_lst,cid_lst,inchikey_lst=[],[],[] sublst="" fail_lst=[] jsn_lst=[] intv=1 for it in range(0,ncmpds,intv) : if (it+intv) > ncmpds : upbnd=ncmpds else : upbnd=it+intv sublst=myList[it:upbnd] inchikey = ','.join(map(str,sublst)) url="https://pubchem.ncbi.nlm.nih.gov/rest/pug/assay/aid/"+aid+"/doseresponse/CSV?sid="+inchikey try : response = urlreq.urlopen(url) html = response.read() except : fail_lst.append(inchikey) continue f=io.BytesIO(html) cnt=0 json_str="" df=pd.read_csv(f) jsn_lst.append(df) return jsn_lst #****************************************************************************************************************************************** def download_activitytype(aid,sid) : """Retrieve data for assays for a select list of sids. Args: myList (list): a bioactivity id (aid) sidlst (list): list of sids specified as integers Returns: Nothing returned yet, will return basic stats to help decide whether to use assay or not """ myList=[sid] ncmpds=len(myList) smiles_lst,cid_lst,inchikey_lst=[],[],[] sublst="" fail_lst=[] jsn_lst=[] intv=1 for it in range(0,ncmpds,intv) : if (it+intv) > ncmpds : upbnd=ncmpds else : upbnd=it+intv sublst=myList[it:upbnd] inchikey = ','.join(map(str,sublst)) url="https://pubchem.ncbi.nlm.nih.gov/rest/pug/assay/aid/"+aid+"/CSV?sid="+inchikey #url="https://pubchem.ncbi.nlm.nih.gov/rest/pug/assay/aid/"+aid+"/doseresponse/CSV?sid="+inchikey try : response = urlreq.urlopen(url) html = response.read() except : fail_lst.append(inchikey) continue f=io.BytesIO(html) cnt=0 json_str="" df=
pd.read_csv(f)
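#******************************************************************************************************************************************
# Hedged usage sketch (not part of the original module).  Shows how the
# helpers above are meant to be chained; the InChIKey below is an illustrative
# placeholder, the AID is the one quoted in the example URL comment above, and
# every call needs live network access to the PubChem PUG REST service.
#******************************************************************************************************************************************
if __name__ == "__main__":
    inchikeys = ["BSYNRYMUTXBXSQ-UHFFFAOYSA-N"]        # placeholder list of InChIKeys
    smiles_df, failed, discarded = download_smiles(inchikeys)
    print(smiles_df.head())
    print(f"{len(failed)} lookups failed, {len(discarded)} extra CID/SMILES rows discarded")

    aid = "504526"                                     # AID taken from the example URL above
    sids = download_SID_from_bioactivity_assay(aid)
    dose_response = download_dose_response_from_bioactivity(aid, sids[:5])
    print(dose_response[0].head())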
# -*- coding: utf-8 -*- import pandas as pd try: from .graph import Graph except ImportError: from graph import Graph try: from .timeseries import IntervalTimeSeries except ImportError: from timeseries import IntervalTimeSeries class EsoGraph(Graph): """A class which can hold .eso data as a graph ONLY WORKS FOR SIMULATION VARIABLES AT PRESENT NOT DAILY, MONTHLY OR RUNPERIOD REPORT VARIABLES """ def __init__(self,fp=None): Graph.__init__(self) if fp: self.read_eso(fp) def read_eso(self,fp): """Reads the eso file and places the information in a graph Arguments: - fp (str): the filepath of an eso file """ #setup flag_data_dictionary=True self._node_dict={} # reads the lines in the eso file for line in open(fp,'r'): if line.startswith('End of Data Dictionary'): flag_data_dictionary=False continue if flag_data_dictionary: self._read_data_dictionary(line) else: self._read_data(line) #creates the time series and cleans up for n in self.nodes: # creates pd.Series from 'index' and 'data' properties index=n.properties['index'] data=n.properties['data'] s=pd.Series(index=index,data=data) # set timestamps to the start of the interval, not the end interval=n.properties['ts'].interval s.index=s.index-
pd.Timedelta(interval)
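# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original class): the index shift above simply
# relabels each value from the end of its reporting interval to its start.
# The one-hour interval string is an assumption for illustration only.
# ---------------------------------------------------------------------------
import pandas as pd

end_stamps = pd.to_datetime(["2021-01-01 01:00", "2021-01-01 02:00", "2021-01-01 03:00"])
s = pd.Series([1.0, 2.0, 3.0], index=end_stamps)   # values stamped at interval end
s.index = s.index - pd.Timedelta("1h")             # now stamped at interval start
print(s)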
#!/usr/bin/env python #Links: #https://developers.google.com/analytics/devguides/reporting/core/v4/rest/v4/reports/batchGet#ReportData.FIELDS.sampling_space_sizes #https://note.nkmk.me/python-google-analytics-reporting-api-download/ # https://stackoverflow.com/questions/44296648/using-lists-in-pandas-to-replace-column-names # http://code.markedmondson.me/googleAnalyticsR/v4.html # https://www.themarketingtechnologist.co/getting-started-with-the-google-analytics-reporting-api-in-python/ # https://stackoverflow.com/questions/38084770/converting-google-analytics-reporting-api-v4-request-results-to-csv-with-python # https://medium.com/analytics-for-humans/submitting-your-first-google-analytics-reporting-api-request-cdda19969940 # #https://stackoverflow.com/questions/44296648/using-lists-in-pandas-to-replace-column-names #Libraries from apiclient.discovery import build from oauth2client.service_account import ServiceAccountCredentials from datetime import datetime, timedelta import pandas as pd import numpy import json from pandas.io.json import json_normalize import sys SCOPES = ['https://www.googleapis.com/auth/analytics.readonly'] KEY_FILE_LOCATION = 'client_secrets.json' VIEW_ID = ['yor_VIEW_ID'] """Google Analytics Reporting API V4.""" print("Enter startDate:") startDate = input() print("Enter timedelta(days):") x=input() start=datetime.strptime(startDate,'%Y-%m-%d') endDate=datetime.strftime(start + timedelta(days = x),'%Y-%m-%d') def initialize_analyticsreporting(): """Initializes an Analytics Reporting API V4 service object. Returns: An authorized Analytics Reporting API V4 service object. """ credentials = ServiceAccountCredentials.from_json_keyfile_name( KEY_FILE_LOCATION, SCOPES) #Build the service object. analytics = build('analyticsreporting', 'v4', credentials=credentials) return analytics #first chunk def get_report(analytics): """Queries the Analytics Reporting API V4. Args: analytics: An authorized Analytics Reporting API V4 service object. Returns: The Analytics Reporting API V4 response. """ pageToken='1' pageSize=99999 return analytics.reports().batchGet( body={ 'reportRequests': [ { 'viewId': VIEW_ID, 'dateRanges': [{'startDate': startDate, 'endDate': endDate}], 'metrics': [{'expression': 'ga:Sessions'},{'expression': 'ga:Pageviews'},{'expression': 'ga:timeOnPage'},{'expression': 'ga:pageviewsPerSession'}], 'dimensions': [{'name': 'ga:dimension4'},{'name': 'ga:dateHourMinute'},{'name': 'ga:pageTitle'}, {'name':'ga:dimension5'},{'name':'ga:dimension6'},{'name':'ga:contentGroup1'}, {'name':'ga:dimension10'}], 'samplingLevel':'LARGE', 'pageToken':pageToken, 'pageSize': pageSize }] } ).execute() def parse_data(response): sample_sizes=response['reports'][0]['data'].get('samplesReadCounts') sample_spaces=response['reports'][0]['data'].get('samplingSpaceSizes') if sample_sizes and sample_spaces: sys.exit('Sampled Data!') else: print ('No Sampled Data') reports = response['reports'][0] columnHeader = reports['columnHeader']['dimensions'] metricHeader = reports['columnHeader']['metricHeader']['metricHeaderEntries'] columns = columnHeader for metric in metricHeader: columns.append(metric['name']) data =
json_normalize(reports['data']['rows'])
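# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original script): illustrates what
# json_normalize does to the `rows` payload of an Analytics Reporting API v4
# response.  The two rows below are a hand-written stand-in for a real reply;
# note that json_normalize is importable from the pandas top level in newer
# releases, while the script above uses the older pandas.io.json import path.
# ---------------------------------------------------------------------------
import pandas as pd
from pandas import json_normalize

rows = [
    {"dimensions": ["user-1", "202101011200", "Home"],
     "metrics": [{"values": ["3", "7", "120.0", "2.33"]}]},
    {"dimensions": ["user-2", "202101011305", "Pricing"],
     "metrics": [{"values": ["1", "2", "45.0", "2.0"]}]},
]
df = json_normalize(rows)
print(df.columns.tolist())   # ['dimensions', 'metrics'] -- nested lists are kept as objects
print(df.head())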
import os import logging import numpy as np import pandas as pd import seaborn as sb import torch from matplotlib import pyplot as plt from functools import partial from simulation.observers import dynamics_traj_observer from simulation.observer_functions import EKF_ODE from NN_for_ODEs.NODE_utils import make_init_state_obs, \ update_trajs_init_state_obs from utils.utils import RMS, log_multivariate_normal_likelihood, reshape_pt1, \ reshape_dim1, list_torch_to_numpy, interpolate_func sb.set_style('whitegrid') # Some useful plotting functions to run open-loop rollouts (trajectory of GP # predictions given a true and a control trajectory) # Run open-loop rollouts of GP model def model_rollout(dyn_GP, init_state, control_traj, true_mean, rollout_length=100, only_prior=False): device = true_mean.device rollout_length = int(np.min([rollout_length, len(true_mean) - 1])) predicted_mean = torch.zeros((rollout_length + 1, init_state.shape[1]), device=device) predicted_lowconf = torch.zeros((rollout_length + 1, init_state.shape[1]), device=device) predicted_uppconf = torch.zeros((rollout_length + 1, init_state.shape[1]), device=device) predicted_var = torch.zeros((rollout_length + 1, init_state.shape[1]), device=device) predicted_mean[0] = init_state predicted_lowconf[0] = init_state predicted_uppconf[0] = init_state predicted_var[0] = torch.zeros((1, init_state.shape[1]), device=device) for t in range(rollout_length): control = control_traj[t] if 'Michelangelo' in dyn_GP.system: # True and predicted trajectory over time (random start, random # control) with Euler to get xt+1 from GP xt, ut->phit mean_next, varnext, next_lowconf, next_uppconf = \ dyn_GP.predict_euler_Michelangelo(predicted_mean[t], control, only_prior=only_prior) elif ('justvelocity' in dyn_GP.system) and not dyn_GP.continuous_model: # True and predicted trajectory over time (random start, random # control) with Euler to get xt+1 from GP xt, ut->xn_t+1 mean_next, varnext, next_lowconf, next_uppconf = \ dyn_GP.predict_euler_discrete_justvelocity( predicted_mean[t], control, only_prior=only_prior) elif ('justvelocity' in dyn_GP.system) and dyn_GP.continuous_model: # True and predicted trajectory over time (random start, random # control) with Euler to get xt+1 from GP xt, ut->xdot_t mean_next, varnext, next_lowconf, next_uppconf = \ dyn_GP.predict_euler_continuous_justvelocity( predicted_mean[t], control, only_prior=only_prior) else: # True and predicted trajectory over time (random start, random # control) mean_next, varnext, next_lowconf, next_uppconf = dyn_GP.predict( predicted_mean[t], control, only_prior=only_prior) predicted_mean[t + 1] = mean_next predicted_lowconf[t + 1] = next_lowconf predicted_uppconf[t + 1] = next_uppconf predicted_var[t + 1] = varnext RMSE = RMS(predicted_mean - true_mean) log_likelihood = log_multivariate_normal_likelihood(true_mean[1:, :], predicted_mean[1:, :], predicted_var[1:, :]) return init_state, control_traj, true_mean, predicted_mean, predicted_var, \ predicted_lowconf, predicted_uppconf, RMSE, log_likelihood # Run open-loop rollouts of NODE model def NODE_rollout(NODE, init_state, control_traj, xtraj_true, rollout_length=100, only_prior=False, scale=True): device = xtraj_true.device with torch.no_grad(): rollout_length = int(np.min([rollout_length, len(xtraj_true)])) time = torch.arange(0., rollout_length * NODE.dt, step=NODE.dt, device=device) if NODE.ground_truth_approx: y_observed_true = xtraj_true else: y_observed_true = NODE.observe_data(xtraj_true) if NODE.true_meas_noise_var != 0: 
y_observed_true += torch.normal(0, np.sqrt( NODE.true_meas_noise_var), size=y_observed_true.shape) t_u = torch.cat((reshape_dim1(time), reshape_dim1(control_traj)), dim=1) controller = interpolate_func( x=t_u, t0=time[0], init_value=reshape_pt1(control_traj[0])) if NODE.init_state_model: if NODE.ground_truth_approx: # If ground_truth_approx, init_state contains the test inputs # for the given recognition model (taken from Xtest) x0_estim = NODE.init_state_model(init_state) else: # Otherwise, need to create the inputs for the recognition # model given the rollout trajectories obs0 = make_init_state_obs(y_observed_true, control_traj, init_state, time, NODE.config) xtraj_true, y_observed_true, control_traj, time = \ update_trajs_init_state_obs( xtraj_true, y_observed_true, control_traj, time, NODE.config) x0_estim = NODE.init_state_model(obs0) else: x0_estim = init_state xtraj_estim = NODE.NODE_model.forward_traj( x0_estim, controller, time[0], time, reshape_pt1(control_traj[0])) y_pred = NODE.observe_data_x(xtraj_estim) if scale: y_pred = NODE.scaler_Y.transform(y_pred) y_observed_true = NODE.scaler_Y.transform(y_observed_true) RMSE_output = RMS(y_pred - y_observed_true) if NODE.ground_truth_approx: RMSE = RMSE_output RMSE_init = RMS( reshape_pt1(y_pred[0]) - reshape_pt1(y_observed_true[0])) else: RMSE = RMS(xtraj_estim - xtraj_true) RMSE_init = RMS(x0_estim - reshape_pt1(xtraj_true[0])) return init_state, control_traj, xtraj_true, xtraj_estim, RMSE, \ RMSE_init, RMSE_output # Run open-loop rollouts of NODE model def NODE_EKF_rollout(NODE, init_state, control_traj, xtraj_true, rollout_length=100, only_prior=False, scale=True): device = xtraj_true.device if NODE.config.get('prior_kwargs').get('EKF_added_meas_noise_var') is None: meas_noise_var = NODE.true_meas_noise_var else: meas_noise_var = \ NODE.config.get('prior_kwargs').get('EKF_added_meas_noise_var') with torch.no_grad(): rollout_length = int(np.min([rollout_length, len(xtraj_true)])) time = torch.arange(0., rollout_length * NODE.dt, step=NODE.dt, device=device) if NODE.ground_truth_approx: y_observed_true = xtraj_true else: y_observed_true = NODE.observe_data(xtraj_true) if NODE.config.get('prior_kwargs').get( 'EKF_observe_data') is not None: logging.info('New measurement function for testing EKF') y_EKF = NODE.config.get('prior_kwargs').get( 'EKF_observe_data')(y_observed_true) else: y_EKF = y_observed_true if meas_noise_var != 0: y_EKF += torch.normal(0, np.sqrt(meas_noise_var), size=y_EKF.shape) t_y = torch.cat((reshape_dim1(time), reshape_dim1(y_EKF)), dim=1) measurement = interpolate_func( x=t_y, t0=time[0], init_value=reshape_pt1(y_EKF[0])) t_u = torch.cat((reshape_dim1(time), reshape_dim1(control_traj)), dim=1) controller = interpolate_func( x=t_u, t0=time[0], init_value=reshape_pt1(control_traj[0])) if NODE.init_state_model: if NODE.ground_truth_approx: # If ground_truth_approx, init_state contains the test inputs # for the given recognition model (taken from Xtest) x0_estim = NODE.init_state_model(init_state) else: # Otherwise, need to create the inputs for the recognition # model given the rollout trajectories obs0 = make_init_state_obs(y_observed_true, control_traj, init_state, time, NODE.config) xtraj_true, y_observed_true, control_traj, time = \ update_trajs_init_state_obs( xtraj_true, y_observed_true, control_traj, time, NODE.config) x0_estim = NODE.init_state_model(obs0) else: x0_estim = init_state covar0 = NODE.config.get('prior_kwargs').get('EKF_init_covar') x0_estim = torch.cat((x0_estim, 
reshape_pt1(torch.flatten(covar0))), dim=1) # EKF state = (x, covar) observer = EKF_ODE(device, NODE.config) xtraj_estim = dynamics_traj_observer( x0=x0_estim, u=controller, y=measurement, t0=time[0], dt=NODE.dt, init_control=reshape_pt1(control_traj[0]), discrete=False, version=observer, t_eval=time, GP=NODE.NODE_model, kwargs=NODE.config) xtraj_estim = reshape_pt1(xtraj_estim[:, :NODE.n]) # get rid of covar y_pred = NODE.observe_data_x(xtraj_estim) if scale: y_pred = NODE.scaler_Y.transform(y_pred) y_observed_true = NODE.scaler_Y.transform(y_observed_true) RMSE_output = RMS(y_pred - y_observed_true) if NODE.ground_truth_approx: RMSE = RMSE_output RMSE_init = RMS( reshape_pt1(y_pred[0]) - reshape_pt1(y_observed_true[0])) else: RMSE = RMS(xtraj_estim - xtraj_true) RMSE_init = RMS(x0_estim - reshape_pt1(xtraj_true[0])) return init_state, control_traj, xtraj_true, xtraj_estim, RMSE, \ RMSE_init, RMSE_output # Save the results of rollouts def save_rollout_variables(model_object, results_folder, nb_rollouts, rollout_list, step, results=False, ground_truth_approx=False, plots=True, title=None, NODE=False): """ Save all rollout variables (true, predicted, upper and lower confidence bounds...), eventually plot them. If ground_truth_approx for NODE, plot only predicted values for full state because only observations are available on test trajectories. """ if title: folder = os.path.join(results_folder, title + '_' + str(step)) else: folder = os.path.join(results_folder, 'Rollouts_' + str(step)) os.makedirs(folder, exist_ok=True) rollout_list = list_torch_to_numpy(rollout_list) # transfer all back to CPU for i in range(len(rollout_list)): rollout_folder = os.path.join(folder, 'Rollout_' + str(i)) os.makedirs(rollout_folder, exist_ok=True) if results: if not NODE: filename = 'Predicted_mean_traj.csv' file = pd.DataFrame(reshape_pt1(rollout_list[i][3])) file.to_csv(os.path.join(rollout_folder, filename), header=False) filename = 'Predicted_var_traj.csv' file = pd.DataFrame(reshape_pt1(rollout_list[i][4])) file.to_csv(os.path.join(rollout_folder, filename), header=False) filename = 'Predicted_lowconf_traj.csv' file = pd.DataFrame(reshape_pt1(rollout_list[i][5])) file.to_csv(os.path.join(rollout_folder, filename), header=False) filename = 'Predicted_uppconf_traj.csv' file = pd.DataFrame(reshape_pt1(rollout_list[i][6])) file.to_csv(os.path.join(rollout_folder, filename), header=False) if not os.path.isfile( os.path.join(rollout_folder, 'True_traj.csv')): filename = 'Init_state.csv' file = pd.DataFrame(rollout_list[i][0]) file.to_csv(os.path.join(rollout_folder, filename), header=False) filename = 'Control_traj.csv' file = pd.DataFrame(rollout_list[i][1]) file.to_csv(os.path.join(rollout_folder, filename), header=False) filename = 'True_traj.csv' file =
pd.DataFrame(rollout_list[i][2])
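# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original module): the rollout CSVs above are
# written with header=False, so reading them back requires header=None and the
# first column used as the index.  The file name mirrors one used above; the
# array is an illustrative stand-in for a rollout trajectory.
# ---------------------------------------------------------------------------
import numpy as np
import pandas as pd

traj = np.random.rand(5, 2)                              # stand-in trajectory (5 steps, 2 states)
pd.DataFrame(traj).to_csv("True_traj.csv", header=False)
reloaded = pd.read_csv("True_traj.csv", header=None, index_col=0).values
assert np.allclose(traj, reloaded)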
import pandas as pd from .datastore import merge_postcodes from .types import ErrorDefinition from .utils import add_col_to_tables_CONTINUOUSLY_LOOKED_AFTER as add_CLA_column # Check 'Episodes' present before use! def validate_165(): error = ErrorDefinition( code = '165', description = 'Data entry for mother status is invalid.', affected_fields = ['MOTHER', 'SEX', 'ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM'] ) def _validate(dfs): if 'Header' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs: return {} else: header = dfs['Header'] episodes = dfs['Episodes'] oc3 = dfs['OC3'] collection_start = dfs['metadata']['collection_start'] collection_end = dfs['metadata']['collection_end'] valid_values = ['0','1'] # prepare to merge oc3.reset_index(inplace=True) header.reset_index(inplace=True) episodes.reset_index(inplace=True) collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce') collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce') episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') episodes['EPS'] = (episodes['DECOM']>=collection_start) & (episodes['DECOM']<=collection_end) episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum') merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']).merge(oc3, on='CHILD', how='left') # Raise error if provided <MOTHER> is not a valid value. value_validity = merged['MOTHER'].notna() & (~merged['MOTHER'].isin(valid_values)) # If not provided female = (merged['SEX']=='1') eps_in_year = (merged['EPS_COUNT']>0) none_provided = (merged['ACTIV'].isna()& merged['ACCOM'].isna()& merged['IN_TOUCH'].isna()) # If provided <MOTHER> must be a valid value. If not provided <MOTHER> then either <GENDER> is male or no episode record for current year and any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided mask = value_validity | (merged['MOTHER'].isna() & (female & (eps_in_year | none_provided))) # That is, if value not provided and child is a female with eps in current year or no values of IN_TOUCH, ACTIV and ACCOM, then raise error. 
error_locs_eps = merged.loc[mask, 'index_eps'] error_locs_header = merged.loc[mask, 'index_er'] error_locs_oc3 = merged.loc[mask, 'index'] return {'Header':error_locs_header.dropna().unique().tolist(), 'OC3':error_locs_oc3.dropna().unique().tolist()} return error, _validate def validate_1014(): error = ErrorDefinition( code='1014', description='UASC information is not required for care leavers', affected_fields=['ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM'] ) def _validate(dfs): if 'UASC' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs: return {} else: uasc = dfs['UASC'] episodes = dfs['Episodes'] oc3 = dfs['OC3'] collection_start = dfs['metadata']['collection_start'] collection_end = dfs['metadata']['collection_end'] # prepare to merge oc3.reset_index(inplace=True) uasc.reset_index(inplace=True) episodes.reset_index(inplace=True) collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce') collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce') episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce') date_check = ( ((episodes['DECOM'] >= collection_start) & (episodes['DECOM'] <= collection_end)) | ((episodes['DEC'] >= collection_start) & (episodes['DEC'] <= collection_end)) | ((episodes['DECOM'] <= collection_start) & episodes['DEC'].isna()) ) episodes['EPS'] = date_check episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum') # inner merge to take only episodes of children which are also found on the uasc table merged = episodes.merge(uasc, on='CHILD', how='inner', suffixes=['_eps', '_sc']).merge(oc3, on='CHILD', how='left') # adding suffixes with the secondary merge here does not go so well yet. some_provided = (merged['ACTIV'].notna() | merged['ACCOM'].notna() | merged['IN_TOUCH'].notna()) mask = (merged['EPS_COUNT'] == 0) & some_provided error_locs_uasc = merged.loc[mask, 'index_sc'] error_locs_oc3 = merged.loc[mask, 'index'] return {'UASC': error_locs_uasc.unique().tolist(), 'OC3': error_locs_oc3.unique().tolist()} return error, _validate # !# not sure what this rule is actually supposed to be getting at - description is confusing def validate_197B(): error = ErrorDefinition( code='197B', description="SDQ score or reason for no SDQ should be reported for 4- or 17-year-olds.", affected_fields=['SDQ_REASON', 'DOB'], ) def _validate(dfs): if 'OC2' not in dfs or 'Episodes' not in dfs: return {} oc2 = add_CLA_column(dfs, 'OC2') start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce') endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce') oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce') ERRRR = ( ( (oc2['DOB'] + pd.DateOffset(years=4) == start) # ??? 
| (oc2['DOB'] + pd.DateOffset(years=17) == start) ) & oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2['SDQ_SCORE'].isna() & oc2['SDQ_REASON'].isna() ) return {'OC2': oc2[ERRRR].index.to_list()} return error, _validate def validate_157(): error = ErrorDefinition( code='157', description="Child is aged 4 years or over at the beginning of the year or 16 years or under at the end of the " "year and Strengths and Difficulties Questionnaire (SDQ) 1 has been recorded as the reason for no " "Strengths and Difficulties Questionnaire (SDQ) score.", affected_fields=['SDQ_REASON', 'DOB'], ) def _validate(dfs): if 'OC2' not in dfs or 'Episodes' not in dfs: return {} oc2 = add_CLA_column(dfs, 'OC2') start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce') endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce') oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce') ERRRR = ( oc2['CONTINUOUSLY_LOOKED_AFTER'] & (oc2['DOB'] + pd.DateOffset(years=4) <= start) & (oc2['DOB'] + pd.DateOffset(years=16) >= endo) & oc2['SDQ_SCORE'].isna() & (oc2['SDQ_REASON'] == 'SDQ1') ) return {'OC2': oc2[ERRRR].index.to_list()} return error, _validate def validate_357(): error = ErrorDefinition( code='357', description='If this is the first episode ever for this child, reason for new episode must be S. ' 'Check whether there is an episode immediately preceding this one, which has been left out. ' 'If not the reason for new episode code must be amended to S.', affected_fields=['RNE'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} eps = dfs['Episodes'] eps['DECOM'] = pd.to_datetime(eps['DECOM'], format='%d/%m/%Y', errors='coerce') eps = eps.loc[eps['DECOM'].notnull()] first_eps = eps.loc[eps.groupby('CHILD')['DECOM'].idxmin()] errs = first_eps[first_eps['RNE'] != 'S'].index.to_list() return {'Episodes': errs} return error, _validate def validate_117(): error = ErrorDefinition( code='117', description='Date of decision that a child should/should no longer be placed for adoption is beyond the current collection year or after the child ceased to be looked after.', affected_fields=['DATE_PLACED_CEASED', 'DATE_PLACED', 'DEC', 'REC', 'DECOM'], ) def _validate(dfs): if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs: return {} else: episodes = dfs['Episodes'] placed_adoption = dfs['PlacedAdoption'] collection_end = dfs['metadata']['collection_end'] # datetime placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'], format='%d/%m/%Y', errors='coerce') placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y', errors='coerce') collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce') episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce') episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') # Drop nans and continuing episodes episodes = episodes.dropna(subset=['DECOM']) episodes = episodes[episodes['REC'] != 'X1'] episodes = episodes.loc[episodes.groupby('CHILD')['DECOM'].idxmax()] # prepare to merge placed_adoption.reset_index(inplace=True) episodes.reset_index(inplace=True) p4a_cols = ['DATE_PLACED', 'DATE_PLACED_CEASED'] # latest episodes merged = episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa']) mask = ( (merged['DATE_PLACED'] > collection_end) | (merged['DATE_PLACED'] > merged['DEC']) | (merged['DATE_PLACED_CEASED'] > 
collection_end) | (merged['DATE_PLACED_CEASED'] > merged['DEC']) ) # If provided <DATE_PLACED> and/or <DATE_PLACED_CEASED> must not be > <COLLECTION_END_DATE> or <DEC> of latest episode where <REC> not = 'X1' pa_error_locs = merged.loc[mask, 'index_pa'] eps_error_locs = merged.loc[mask, 'index_eps'] return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()} return error, _validate def validate_118(): error = ErrorDefinition( code='118', description='Date of decision that a child should no longer be placed for adoption is before the current collection year or before the date the child started to be looked after.', affected_fields=['DECOM', 'DECOM', 'LS'] ) def _validate(dfs): if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs): return {} else: placed_adoption = dfs['PlacedAdoption'] episodes = dfs['Episodes'] collection_start = dfs['metadata']['collection_start'] code_list = ['V3', 'V4'] # datetime episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'], format='%d/%m/%Y', errors='coerce') collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce') # <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4' filter_by_ls = episodes[~(episodes['LS'].isin(code_list))] earliest_episode_idxs = filter_by_ls.groupby('CHILD')['DECOM'].idxmin() earliest_episodes = episodes[episodes.index.isin(earliest_episode_idxs)] # prepare to merge placed_adoption.reset_index(inplace=True) earliest_episodes.reset_index(inplace=True) # merge merged = earliest_episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa']) # drop rows where DATE_PLACED_CEASED is not provided merged = merged.dropna(subset=['DATE_PLACED_CEASED']) # If provided <DATE_PLACED_CEASED> must not be prior to <COLLECTION_START_DATE> or <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4' mask = (merged['DATE_PLACED_CEASED'] < merged['DECOM']) | (merged['DATE_PLACED_CEASED'] < collection_start) # error locations pa_error_locs = merged.loc[mask, 'index_pa'] eps_error_locs = merged.loc[mask, 'index_eps'] return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()} return error, _validate def validate_352(): error = ErrorDefinition( code='352', description='Child who started to be looked after was aged 18 or over.', affected_fields=['DECOM', 'RNE'], ) def _validate(dfs): if 'Header' not in dfs: return {} if 'Episodes' not in dfs: return {} else: header = dfs['Header'] episodes = dfs['Episodes'] header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce') episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') header['DOB18'] = header['DOB'] + pd.DateOffset(years=18) episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'), indicator=True).set_index('index') care_start = episodes_merged['RNE'].str.upper().astype(str).isin(['S']) started_over_18 = episodes_merged['DOB18'] <= episodes_merged['DECOM'] error_mask = care_start & started_over_18 error_locations = episodes.index[error_mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_209(): error = ErrorDefinition( code='209', description='Child looked after is of school age and should not have an unknown Unique Pupil Number (UPN) code of UN1.', affected_fields=['UPN', 'DOB'] ) def _validate(dfs): 
if 'Header' not in dfs: return {} else: header = dfs['Header'] collection_start = dfs['metadata']['collection_start'] # convert to datetime header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce') collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce') yr = collection_start.year - 1 reference_date = pd.to_datetime('31/08/' + str(yr), format='%d/%m/%Y', errors='coerce') # If <DOB> >= 4 years prior to 31/08/YYYY then <UPN> should not be 'UN1' Note: YYYY in this instance refers to the year prior to the collection start (for collection year 2019-2020, it would be looking at the 31/08/2018). mask = (reference_date >= (header['DOB'] + pd.offsets.DateOffset(years=4))) & (header['UPN'] == 'UN1') # error locations error_locs_header = header.index[mask] return {'Header': error_locs_header.tolist()} return error, _validate def validate_198(): error = ErrorDefinition( code='198', description="Child has not been looked after continuously for at least 12 months at 31 March but a reason " "for no Strengths and Difficulties (SDQ) score has been completed. ", affected_fields=['SDQ_REASON'], ) def _validate(dfs): if 'Episodes' not in dfs or 'OC2' not in dfs: return {} oc2 = add_CLA_column(dfs, 'OC2') error_mask = oc2['SDQ_REASON'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER'] error_locs = oc2.index[error_mask].to_list() return {'OC2': error_locs} return error, _validate def validate_185(): error = ErrorDefinition( code='185', description="Child has not been looked after continuously for at least 12 months at " + "31 March but a Strengths and Difficulties (SDQ) score has been completed.", affected_fields=['SDQ_SCORE'], ) def _validate(dfs): if 'Episodes' not in dfs or 'OC2' not in dfs: return {} oc2 = add_CLA_column(dfs, 'OC2') error_mask = oc2['SDQ_SCORE'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER'] error_locs = oc2.index[error_mask].to_list() return {'OC2': error_locs} return error, _validate def validate_186(): error = ErrorDefinition( code='186', description="Children aged 4 or over at the start of the year and children aged under 17 at the " + "end of the year and who have been looked after for at least 12 months continuously " + "should have a Strengths and Difficulties (SDQ) score completed.", affected_fields=['SDQ_SCORE'], ) def _validate(dfs): if 'Episodes' not in dfs or 'OC2' not in dfs: return {} oc2 = dfs['OC2'] collection_start_str = dfs['metadata']['collection_start'] collection_end_str = dfs['metadata']['collection_end'] collection_start = pd.to_datetime(collection_start_str, format='%d/%m/%Y', errors='coerce') collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce') oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce') oc2 = add_CLA_column(dfs, 'OC2') oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4) oc2['17th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=17) error_mask = ( (oc2['4th_bday'] <= collection_start) & (oc2['17th_bday'] > collection_end) & oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2['SDQ_SCORE'].isna() ) oc2_errors = oc2.loc[error_mask].index.to_list() return {'OC2': oc2_errors} return error, _validate def validate_187(): error = ErrorDefinition( code='187', description="Child cannot be looked after continuously for 12 months at " + "31 March (OC2) and have any of adoption or care leavers returns completed.", affected_fields=['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR', # OC3 'IN_TOUCH', 'ACTIV', 'ACCOM'], # AD1 ) def _validate(dfs): if 
( 'OC3' not in dfs or 'AD1' not in dfs or 'Episodes' not in dfs ): return {} # add 'CONTINUOUSLY_LOOKED_AFTER' column ad1, oc3 = add_CLA_column(dfs, ['AD1', 'OC3']) # OC3 should_be_blank = ['IN_TOUCH', 'ACTIV', 'ACCOM'] oc3_mask = oc3['CONTINUOUSLY_LOOKED_AFTER'] & oc3[should_be_blank].notna().any(axis=1) oc3_error_locs = oc3[oc3_mask].index.to_list() # AD1 should_be_blank = ['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR'] ad1_mask = ad1['CONTINUOUSLY_LOOKED_AFTER'] & ad1[should_be_blank].notna().any(axis=1) ad1_error_locs = ad1[ad1_mask].index.to_list() return {'AD1': ad1_error_locs, 'OC3': oc3_error_locs} return error, _validate def validate_188(): error = ErrorDefinition( code='188', description="Child is aged under 4 years at the end of the year, " "but a Strengths and Difficulties (SDQ) score or a reason " "for no SDQ score has been completed. ", affected_fields=['SDQ_SCORE', 'SDQ_REASON'], ) def _validate(dfs): if 'OC2' not in dfs: return {} oc2 = dfs['OC2'] collection_end_str = dfs['metadata']['collection_end'] collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce') oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce') oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4) error_mask = ( (oc2['4th_bday'] > collection_end) & oc2[['SDQ_SCORE', 'SDQ_REASON']].notna().any(axis=1) ) oc2_errors = oc2.loc[error_mask].index.to_list() return {'OC2': oc2_errors} return error, _validate def validate_190(): error = ErrorDefinition( code='190', description="Child has not been looked after continuously for at least 12 months at 31 March but one or more " "data items relating to children looked after for 12 months have been completed.", affected_fields=['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'] , # AD1 ) def _validate(dfs): if ( 'OC2' not in dfs or 'Episodes' not in dfs ): return {} # add 'CONTINUOUSLY_LOOKED_AFTER' column oc2 = add_CLA_column(dfs, 'OC2') should_be_blank = ['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'] mask = ~oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_blank].notna().any(axis=1) error_locs = oc2[mask].index.to_list() return {'OC2': error_locs} return error, _validate def validate_191(): error = ErrorDefinition( code='191', description="Child has been looked after continuously for at least 12 months at 31 March but one or more " "data items relating to children looked after for 12 months have been left blank.", affected_fields=['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'], # OC2 ) def _validate(dfs): if ( 'OC2' not in dfs or 'Episodes' not in dfs ): return {} # add 'CONTINUOUSLY_LOOKED_AFTER' column oc2 = add_CLA_column(dfs, 'OC2') should_be_present = ['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'] mask = oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_present].isna().any(axis=1) error_locs = oc2[mask].index.to_list() return {'OC2': error_locs} return error, _validate def validate_607(): error = ErrorDefinition( code='607', description='Child ceased to be looked after in the year, but mother field has not been completed.', affected_fields=['DEC', 'REC', 'MOTHER', 'LS', 'SEX'] ) def _validate(dfs): if 'Header' not in dfs or 'Episodes' not in dfs: return {} else: header = dfs['Header'] episodes = dfs['Episodes'] 
            collection_start = dfs['metadata']['collection_start']
            collection_end = dfs['metadata']['collection_end']
            code_list = ['V3', 'V4']

            # convert to datetime format
            episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
            collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
            collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')

            # prepare to merge
            episodes.reset_index(inplace=True)
            header.reset_index(inplace=True)
            merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])

            # CEASED_TO_BE_LOOKED_AFTER = DEC is not null and REC is filled but not equal to X1
            CEASED_TO_BE_LOOKED_AFTER = merged['DEC'].notna() & ((merged['REC'] != 'X1') & merged['REC'].notna())
            # and <LS> not = 'V3' or 'V4'
            check_LS = ~(merged['LS'].isin(code_list))
            # and <DEC> is in <CURRENT_COLLECTION_YEAR>
            check_DEC = (collection_start <= merged['DEC']) & (merged['DEC'] <= collection_end)
            # Where <CEASED_TO_BE_LOOKED_AFTER> = 'Y', and <LS> not = 'V3' or 'V4' and <DEC> is in
            # <CURRENT_COLLECTION_YEAR> and <SEX> = '2' then <MOTHER> should be provided.
            mask = CEASED_TO_BE_LOOKED_AFTER & check_LS & check_DEC & (merged['SEX'] == '2') & (merged['MOTHER'].isna())
            header_error_locs = merged.loc[mask, 'index_er']
            eps_error_locs = merged.loc[mask, 'index_eps']
            return {'Episodes': eps_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}

    return error, _validate


def validate_210():
    error = ErrorDefinition(
        code='210',
        description='Children looked after for more than a week at 31 March should not have an unknown Unique Pupil Number (UPN) code of UN4.',
        affected_fields=['UPN', 'DECOM']
    )

    def _validate(dfs):
        if 'Header' not in dfs or 'Episodes' not in dfs:
            return {}
        else:
            header = dfs['Header']
            episodes = dfs['Episodes']
            collection_end = dfs['metadata']['collection_end']

            # convert to datetime
            episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
            collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
            yr = collection_end.year
            reference_date = pd.to_datetime('24/03/' + str(yr), format='%d/%m/%Y', errors='coerce')

            # prepare to merge
            episodes.reset_index(inplace=True)
            header.reset_index(inplace=True)
            # the logical way would be to merge left on UPN, but that would be a one-to-many merge
            # rather than the many-to-one merge used elsewhere in this module.
            merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
            # If <UPN> = 'UN4' then no episode <DECOM> must be >= 24/03/YYYY
            # Note: YYYY refers to the current collection year.
mask = (merged['UPN'] == 'UN4') & (merged['DECOM'] >= reference_date) # error locations error_locs_header = merged.loc[mask, 'index_er'] error_locs_eps = merged.loc[mask, 'index_eps'] return {'Episodes': error_locs_eps.tolist(), 'Header': error_locs_header.unique().tolist()} return error, _validate def validate_1010(): error = ErrorDefinition( code='1010', description='This child has no episodes loaded for current year even though there was an open episode of ' + 'care at the end of the previous year, and care leaver data has been entered.', affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'], ) def _validate(dfs): if 'Episodes' not in dfs or 'Episodes_last' not in dfs or 'OC3' not in dfs: return {} else: episodes = dfs['Episodes'] episodes_last = dfs['Episodes_last'] oc3 = dfs['OC3'] # convert DECOM to datetime, drop missing/invalid sort by CHILD then DECOM, episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce') episodes_last = episodes_last.dropna(subset=['DECOM']).sort_values(['CHILD', 'DECOM'], ascending=True) # Keep only the final episode for each child (ie where the following row has a different CHILD value) episodes_last = episodes_last[ episodes_last['CHILD'].shift(-1) != episodes_last['CHILD'] ] # Keep only the final episodes that were still open episodes_last = episodes_last[episodes_last['DEC'].isna()] # The remaining children ought to have episode data in the current year if they are in OC3 has_current_episodes = oc3['CHILD'].isin(episodes['CHILD']) has_open_episode_last = oc3['CHILD'].isin(episodes_last['CHILD']) error_mask = ~has_current_episodes & has_open_episode_last validation_error_locations = oc3.index[error_mask] return {'OC3': validation_error_locations.tolist()} return error, _validate def validate_525(): error = ErrorDefinition( code='525', description='A child for whom the decision to be placed for adoption has been reversed cannot be adopted during the year.', affected_fields=['DATE_PLACED_CEASED', 'DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR'] ) def _validate(dfs): if 'PlacedAdoption' not in dfs or 'AD1' not in dfs: return {} else: placed_adoption = dfs['PlacedAdoption'] ad1 = dfs['AD1'] # prepare to merge placed_adoption.reset_index(inplace=True) ad1.reset_index(inplace=True) merged = placed_adoption.merge(ad1, on='CHILD', how='left', suffixes=['_placed', '_ad1']) # If <DATE_PLACED_CEASED> not Null, then <DATE_INT>; <DATE_MATCH>; <FOSTER_CARE>; <NB_ADOPTR>; <SEX_ADOPTR>; and <LS_ADOPTR> should not be provided mask = merged['DATE_PLACED_CEASED'].notna() & ( merged['DATE_INT'].notna() | merged['DATE_MATCH'].notna() | merged['FOSTER_CARE'].notna() | merged['NB_ADOPTR'].notna() | merged['SEX_ADOPTR'].notna() | merged['LS_ADOPTR'].notna()) # error locations pa_error_locs = merged.loc[mask, 'index_placed'] ad_error_locs = merged.loc[mask, 'index_ad1'] # return result return {'PlacedAdoption': pa_error_locs.tolist(), 'AD1': ad_error_locs.tolist()} return error, _validate def validate_335(): error = ErrorDefinition( code='335', description='The current foster value (0) suggests that child is not adopted by current foster carer, but last placement is A2, A3, or A5. 
Or the current foster value (1) suggests that child is adopted by current foster carer, but last placement is A1, A4 or A6.', affected_fields=['PLACE', 'FOSTER_CARE'] ) def _validate(dfs): if 'Episodes' not in dfs or 'AD1' not in dfs: return {} else: episodes = dfs['Episodes'] ad1 = dfs['AD1'] # prepare to merge episodes.reset_index(inplace=True) ad1.reset_index(inplace=True) merged = episodes.merge(ad1, on='CHILD', how='left', suffixes=['_eps', '_ad1']) # Where <PL> = 'A2', 'A3' or 'A5' and <DEC> = 'E1', 'E11', 'E12' <FOSTER_CARE> should not be '0'; Where <PL> = ‘A1’, ‘A4’ or ‘A6’ and <REC> = ‘E1’, ‘E11’, ‘E12’ <FOSTER_CARE> should not be ‘1’. mask = ( merged['REC'].isin(['E1', 'E11', 'E12']) & ( (merged['PLACE'].isin(['A2', 'A3', 'A5']) & (merged['FOSTER_CARE'].astype(str) == '0')) | (merged['PLACE'].isin(['A1', 'A4', 'A6']) & (merged['FOSTER_CARE'].astype(str) == '1')) ) ) eps_error_locs = merged.loc[mask, 'index_eps'] ad1_error_locs = merged.loc[mask, 'index_ad1'] # use .unique since join is many to one return {'Episodes': eps_error_locs.tolist(), 'AD1': ad1_error_locs.unique().tolist()} return error, _validate def validate_215(): error = ErrorDefinition( code='215', description='Child has care leaver information but one or more data items relating to children looked after for 12 months have been completed.', affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM', 'CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'] ) def _validate(dfs): if 'OC3' not in dfs or 'OC2' not in dfs: return {} else: oc3 = dfs['OC3'] oc2 = dfs['OC2'] # prepare to merge oc3.reset_index(inplace=True) oc2.reset_index(inplace=True) merged = oc3.merge(oc2, on='CHILD', how='left', suffixes=['_3', '_2']) # If any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided then <CONVICTED>; <HEALTH_CHECK>; <IMMUNISATIONS>; <TEETH_CHECK>; <HEALTH_ASSESSMENT>; <SUBSTANCE MISUSE>; <INTERVENTION_RECEIVED>; <INTERVENTION_OFFERED>; should not be provided mask = (merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna()) & ( merged['CONVICTED'].notna() | merged['HEALTH_CHECK'].notna() | merged['IMMUNISATIONS'].notna() | merged['TEETH_CHECK'].notna() | merged['HEALTH_ASSESSMENT'].notna() | merged[ 'SUBSTANCE_MISUSE'].notna() | merged['INTERVENTION_RECEIVED'].notna() | merged[ 'INTERVENTION_OFFERED'].notna()) # error locations oc3_error_locs = merged.loc[mask, 'index_3'] oc2_error_locs = merged.loc[mask, 'index_2'] return {'OC3': oc3_error_locs.tolist(), 'OC2': oc2_error_locs.tolist()} return error, _validate def validate_399(): error = ErrorDefinition( code='399', description='Mother field, review field or participation field are completed but ' + 'child is looked after under legal status V3 or V4.', affected_fields=['MOTHER', 'LS', 'REVIEW', 'REVIEW_CODE'] ) def _validate(dfs): if 'Episodes' not in dfs or 'Header' not in dfs or 'Reviews' not in dfs: return {} else: episodes = dfs['Episodes'] header = dfs['Header'] reviews = dfs['Reviews'] code_list = ['V3', 'V4'] # prepare to merge episodes['index_eps'] = episodes.index header['index_hdr'] = header.index reviews['index_revs'] = reviews.index # merge merged = (episodes.merge(header, on='CHILD', how='left') .merge(reviews, on='CHILD', how='left')) # If <LS> = 'V3' or 'V4' then <MOTHER>, <REVIEW> and <REVIEW_CODE> should not be provided mask = merged['LS'].isin(code_list) & ( merged['MOTHER'].notna() | merged['REVIEW'].notna() | merged['REVIEW_CODE'].notna()) # Error 
locations
            eps_errors = merged.loc[mask, 'index_eps']
            header_errors = merged.loc[mask, 'index_hdr'].unique()
            revs_errors = merged.loc[mask, 'index_revs'].unique()

            return {'Episodes': eps_errors.tolist(),
                    'Header': header_errors.tolist(),
                    'Reviews': revs_errors.tolist()}

    return error, _validate


def validate_189():
    error = ErrorDefinition(
        code='189',
        description='Child is aged 17 years or over at the beginning of the year, but a Strengths and Difficulties ' +
                    '(SDQ) score or a reason for no Strengths and Difficulties (SDQ) score has been completed.',
        affected_fields=['DOB', 'SDQ_SCORE', 'SDQ_REASON']
    )

    def _validate(dfs):
        if 'OC2' not in dfs:
            return {}
        else:
            oc2 = dfs['OC2']
            collection_start = dfs['metadata']['collection_start']

            # datetime format allows appropriate comparison between dates
            oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
            collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')

            # If <DOB> is 17 or more years prior to <COLLECTION_START_DATE> then <SDQ_SCORE> and
            # <SDQ_REASON> should not be provided
            mask = ((oc2['DOB'] + pd.offsets.DateOffset(years=17)) <= collection_start) & (
                    oc2['SDQ_REASON'].notna() | oc2['SDQ_SCORE'].notna())
            # That is, raise error if collection_start >= DOB + 17 years
            oc_error_locs = oc2.index[mask]
            return {'OC2': oc_error_locs.tolist()}

    return error, _validate


def validate_226():
    error = ErrorDefinition(
        code='226',
        description='Reason for placement change is not required.',
        affected_fields=['REASON_PLACE_CHANGE', 'PLACE']
    )

    def _validate(dfs):
        if 'Episodes' not in dfs:
            return {}
        else:
            episodes = dfs['Episodes']
            code_list = ['T0', 'T1', 'T2', 'T3', 'T4']
            episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')

            # create column to see previous REASON_PLACE_CHANGE
            episodes = episodes.sort_values(['CHILD', 'DECOM'])
            episodes['PREVIOUS_REASON'] = episodes.groupby('CHILD')['REASON_PLACE_CHANGE'].shift(1)
            # If <PL> = 'T0'; 'T1'; 'T2'; 'T3' or 'T4' then <REASON_PLACE_CHANGE> should be null
            # in the current episode and in the episode before it
            mask = episodes['PLACE'].isin(code_list) & (
                    episodes['REASON_PLACE_CHANGE'].notna() | episodes['PREVIOUS_REASON'].notna())

            # error locations
            error_locs = episodes.index[mask]
            return {'Episodes': error_locs.tolist()}

    return error, _validate


def validate_358():
    error = ErrorDefinition(
        code='358',
        description='Child with this legal status should not be under 10.',
        affected_fields=['DECOM', 'DOB', 'LS']
    )

    def _validate(dfs):
        if 'Episodes' not in dfs or 'Header' not in dfs:
            return {}
        else:
            episodes = dfs['Episodes']
            header = dfs['Header']
            code_list = ['J1', 'J2', 'J3']

            # convert dates to datetime format
            episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
            header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')

            # prepare to merge
            episodes.reset_index(inplace=True)
            header.reset_index(inplace=True)
            merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])

            # Where <LS> = 'J1', 'J2' or 'J3' then <DOB> should be <= 10 years prior to <DECOM>
            mask = merged['LS'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=10) > merged['DECOM'])
            # That is, raise error if DECOM < DOB + 10 years: the child was under 10 when the episode
            # began, which is the condition the rule description prohibits.

            # error locations
            header_error_locs = merged.loc[mask, 'index_er']
            episode_error_locs = merged.loc[mask, 'index_eps']
            # one to many join implies use .unique on the 'one'
            return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}

    return error, _validate
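# Illustrative usage sketch (not a validation rule).
# Every rule in this module follows the same factory pattern: calling validate_NNN() returns an
# (ErrorDefinition, _validate) pair, and _validate takes a dict of DataFrames keyed by table name
# (plus 'metadata' where needed) and returns {table_name: [indices of failing rows]}.
# The helper below is a minimal sketch of how rule 358 above could be exercised against in-memory
# data. The helper name and the CHILD/DOB/DECOM values are made up for illustration; it assumes
# the module-level `import pandas as pd` used throughout this file.
def _demo_validate_358():
    header = pd.DataFrame({
        'CHILD': ['101', '102'],
        'DOB': ['01/01/2005', '01/01/2015'],  # child '102' is under 10 at the episode start
    })
    episodes = pd.DataFrame({
        'CHILD': ['101', '102'],
        'LS': ['J2', 'J2'],
        'DECOM': ['01/06/2019', '01/06/2019'],
    })
    error, _validate = validate_358()
    result = _validate({'Episodes': episodes, 'Header': header})
    # expected to flag only the second row of each table, e.g. {'Episodes': [1], 'Header': [1]}
    return error, result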
def validate_407(): error = ErrorDefinition( code='407', description='Reason episode ceased is Special Guardianship Order, but child has reached age 18.', affected_fields=['DEC', 'DOB', 'REC'] ) def _validate(dfs): if 'Episodes' not in dfs or 'Header' not in dfs: return {} else: episodes = dfs['Episodes'] header = dfs['Header'] code_list = ['E45', 'E46', 'E47', 'E48'] # convert dates to datetime format episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce') header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce') # prepare to merge episodes.reset_index(inplace=True) header.reset_index(inplace=True) merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']) # If <REC> = ‘E45’ or ‘E46’ or ‘E47’ or ‘E48’ then <DOB> must be < 18 years prior to <DEC> mask = merged['REC'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=18) < merged['DEC']) # That is, raise error if DEC > DOB + 10years # error locations header_error_locs = merged.loc[mask, 'index_er'] episode_error_locs = merged.loc[mask, 'index_eps'] # one to many join implies use .unique on the 'one' return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()} return error, _validate def validate_1007(): error = ErrorDefinition( code='1007', description='Care leaver information is not required for 17- or 18-year olds who are still looked after.', affected_fields=['DEC', 'REC', 'DOB', 'IN_TOUCH', 'ACTIV', 'ACCOM'] ) def _validate(dfs): if 'Episodes' not in dfs or 'OC3' not in dfs: return {} else: episodes = dfs['Episodes'] oc3 = dfs['OC3'] collection_end = dfs['metadata']['collection_end'] # convert dates to datetime format oc3['DOB'] = pd.to_datetime(oc3['DOB'], format='%d/%m/%Y', errors='coerce') collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce') # prepare to merge episodes.reset_index(inplace=True) oc3.reset_index(inplace=True) merged = episodes.merge(oc3, on='CHILD', how='left', suffixes=['_eps', '_oc3']) # If <DOB> < 19 and >= to 17 years prior to <COLLECTION_END_DATE> and current episode <DEC> and or <REC> not provided then <IN_TOUCH>, <ACTIV> and <ACCOM> should not be provided check_age = (merged['DOB'] + pd.offsets.DateOffset(years=17) <= collection_end) & ( merged['DOB'] + pd.offsets.DateOffset(years=19) > collection_end) # That is, check that 17<=age<19 check_dec_rec = merged['REC'].isna() | merged['DEC'].isna() # if either DEC or REC are absent mask = check_age & check_dec_rec & ( merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna()) # Then raise an error if either IN_TOUCH, ACTIV, or ACCOM have been provided too # error locations oc3_error_locs = merged.loc[mask, 'index_oc3'] episode_error_locs = merged.loc[mask, 'index_eps'] # one to many join implies use .unique on the 'one' return {'Episodes': episode_error_locs.tolist(), 'OC3': oc3_error_locs.unique().tolist()} return error, _validate def validate_442(): error = ErrorDefinition( code='442', description='Unique Pupil Number (UPN) field is not completed.', affected_fields=['UPN', 'LS'] ) def _validate(dfs): if ('Episodes' not in dfs) or ('Header' not in dfs): return {} else: episodes = dfs['Episodes'] header = dfs['Header'] episodes.reset_index(inplace=True) header.reset_index(inplace=True) code_list = ['V3', 'V4'] # merge left on episodes to get all children for which episodes have been recorded even if they do not exist on the header. 
merged = episodes.merge(header, on=['CHILD'], how='left', suffixes=['_eps', '_er']) # Where any episode present, with an <LS> not = 'V3' or 'V4' then <UPN> must be provided mask = (~merged['LS'].isin(code_list)) & merged['UPN'].isna() episode_error_locs = merged.loc[mask, 'index_eps'] header_error_locs = merged.loc[mask, 'index_er'] return {'Episodes': episode_error_locs.tolist(), # Select unique values since many episodes are joined to one header # and multiple errors will be raised for the same index. 'Header': header_error_locs.dropna().unique().tolist()} return error, _validate def validate_344(): error = ErrorDefinition( code='344', description='The record shows the young person has died or returned home to live with parent(s) or someone with parental responsibility for a continuous period of 6 months or more, but activity and/or accommodation on leaving care have been completed.', affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'] ) def _validate(dfs): if 'OC3' not in dfs: return {} else: oc3 = dfs['OC3'] # If <IN_TOUCH> = 'DIED' or 'RHOM' then <ACTIV> and <ACCOM> should not be provided mask = ((oc3['IN_TOUCH'] == 'DIED') | (oc3['IN_TOUCH'] == 'RHOM')) & ( oc3['ACTIV'].notna() | oc3['ACCOM'].notna()) error_locations = oc3.index[mask] return {'OC3': error_locations.to_list()} return error, _validate def validate_345(): error = ErrorDefinition( code='345', description='The data collection record shows the local authority is in touch with this young person, but activity and/or accommodation data items are zero.', affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'] ) def _validate(dfs): if 'OC3' not in dfs: return {} else: oc3 = dfs['OC3'] # If <IN_TOUCH> = 'Yes' then <ACTIV> and <ACCOM> must be provided mask = (oc3['IN_TOUCH'] == 'YES') & (oc3['ACTIV'].isna() | oc3['ACCOM'].isna()) error_locations = oc3.index[mask] return {'OC3': error_locations.to_list()} return error, _validate def validate_384(): error = ErrorDefinition( code='384', description='A child receiving respite care cannot be in a long-term foster placement ', affected_fields=['PLACE', 'LS'] ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] # Where <LS> = 'V3' or 'V4' then <PL> must not be 'U1' or 'U4' mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & ( (episodes['PLACE'] == 'U1') | (episodes['PLACE'] == 'U4')) error_locations = episodes.index[mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_390(): error = ErrorDefinition( code='390', description='Reason episode ceased is adopted but child has not been previously placed for adoption.', affected_fields=['PLACE', 'REC'] ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] # If <REC> = 'E11' or 'E12' then <PL> must be one of 'A3', 'A4', 'A5' or 'A6' mask = ((episodes['REC'] == 'E11') | (episodes['REC'] == 'E12')) & ~( (episodes['PLACE'] == 'A3') | (episodes['PLACE'] == 'A4') | (episodes['PLACE'] == 'A5') | ( episodes['PLACE'] == 'A6')) error_locations = episodes.index[mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_378(): error = ErrorDefinition( code='378', description='A child who is placed with parent(s) cannot be looked after under a single period of accommodation under Section 20 of the Children Act 1989.', affected_fields=['PLACE', 'LS'] ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] # the & sign supercedes the ==, so brackets are necessary here mask = 
(episodes['PLACE'] == 'P1') & (episodes['LS'] == 'V2') error_locations = episodes.index[mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_398(): error = ErrorDefinition( code='398', description='Distance field completed but child looked after under legal status V3 or V4.', affected_fields=['LS', 'HOME_POST', 'PL_POST'] ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & ( episodes['HOME_POST'].notna() | episodes['PL_POST'].notna()) error_locations = episodes.index[mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_451(): error = ErrorDefinition( code='451', description='Child is still freed for adoption, but freeing orders could not be applied for since 30 December 2005.', affected_fields=['DEC', 'REC', 'LS'] ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] mask = episodes['DEC'].isna() & episodes['REC'].isna() & (episodes['LS'] == 'D1') error_locations = episodes.index[mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_519(): error = ErrorDefinition( code='519', description='Data entered on the legal status of adopters shows civil partnership couple, but data entered on genders of adopters does not show it as a couple.', affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'] ) def _validate(dfs): if 'AD1' not in dfs: return {} else: ad1 = dfs['AD1'] mask = (ad1['LS_ADOPTR'] == 'L2') & ( (ad1['SEX_ADOPTR'] != 'MM') & (ad1['SEX_ADOPTR'] != 'FF') & (ad1['SEX_ADOPTR'] != 'MF')) error_locations = ad1.index[mask] return {'AD1': error_locations.to_list()} return error, _validate def validate_520(): error = ErrorDefinition( code='520', description='Data entry on the legal status of adopters shows different gender married couple but data entry on genders of adopters shows it as a same gender couple.', affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'] ) def _validate(dfs): if 'AD1' not in dfs: return {} else: ad1 = dfs['AD1'] # check condition mask = (ad1['LS_ADOPTR'] == 'L11') & (ad1['SEX_ADOPTR'] != 'MF') error_locations = ad1.index[mask] return {'AD1': error_locations.to_list()} return error, _validate def validate_522(): error = ErrorDefinition( code='522', description='Date of decision that the child should be placed for adoption must be on or before the date that a child should no longer be placed for adoption.', affected_fields=['DATE_PLACED', 'DATE_PLACED_CEASED'] ) def _validate(dfs): if 'PlacedAdoption' not in dfs: return {} else: placed_adoption = dfs['PlacedAdoption'] # Convert to datetimes placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'], format='%d/%m/%Y', errors='coerce') placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y', errors='coerce') # Boolean mask mask = placed_adoption['DATE_PLACED_CEASED'] > placed_adoption['DATE_PLACED'] error_locations = placed_adoption.index[mask] return {'PlacedAdoption': error_locations.to_list()} return error, _validate def validate_563(): error = ErrorDefinition( code='563', description='The child should no longer be placed for adoption but the date of the decision that the child should be placed for adoption is blank', affected_fields=['DATE_PLACED', 'REASON_PLACED_CEASED', 'DATE_PLACED_CEASED'], ) def _validate(dfs): if 'PlacedAdoption' not in dfs: return {} else: placed_adoption = dfs['PlacedAdoption'] mask = 
placed_adoption['REASON_PLACED_CEASED'].notna() & placed_adoption['DATE_PLACED_CEASED'].notna() & \ placed_adoption['DATE_PLACED'].isna() error_locations = placed_adoption.index[mask] return {'PlacedAdoption': error_locations.to_list()} return error, _validate def validate_544(): error = ErrorDefinition( code='544', description="Any child who has conviction information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.", affected_fields=['CONVICTED', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'], ) def _validate(dfs): if 'OC2' not in dfs: return {} else: oc2 = dfs['OC2'] convict = oc2['CONVICTED'].astype(str) == '1' immunisations = oc2['IMMUNISATIONS'].isna() teeth_ck = oc2['TEETH_CHECK'].isna() health_ass = oc2['HEALTH_ASSESSMENT'].isna() sub_misuse = oc2['SUBSTANCE_MISUSE'].isna() error_mask = convict & (immunisations | teeth_ck | health_ass | sub_misuse) validation_error_locations = oc2.index[error_mask] return {'OC2': validation_error_locations.to_list()} return error, _validate def validate_634(): error = ErrorDefinition( code='634', description='There are entries for previous permanence options, but child has not started to be looked after from 1 April 2016 onwards.', affected_fields=['LA_PERM', 'PREV_PERM', 'DATE_PERM', 'DECOM'] ) def _validate(dfs): if 'Episodes' not in dfs or 'PrevPerm' not in dfs: return {} else: episodes = dfs['Episodes'] prevperm = dfs['PrevPerm'] collection_start = dfs['metadata']['collection_start'] # convert date field to appropriate format episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce') # the maximum date has the highest possibility of satisfying the condition episodes['LAST_DECOM'] = episodes.groupby('CHILD')['DECOM'].transform('max') # prepare to merge episodes.reset_index(inplace=True) prevperm.reset_index(inplace=True) merged = prevperm.merge(episodes, on='CHILD', how='left', suffixes=['_prev', '_eps']) # If <PREV_PERM> or <LA_PERM> or <DATE_PERM> provided, then at least 1 episode must have a <DECOM> later than 01/04/2016 mask = (merged['PREV_PERM'].notna() | merged['DATE_PERM'].notna() | merged['LA_PERM'].notna()) & ( merged['LAST_DECOM'] < collection_start) eps_error_locs = merged.loc[mask, 'index_eps'] prevperm_error_locs = merged.loc[mask, 'index_prev'] # return {'PrevPerm':prevperm_error_locs} return {'Episodes': eps_error_locs.unique().tolist(), 'PrevPerm': prevperm_error_locs.unique().tolist()} return error, _validate def validate_158(): error = ErrorDefinition( code='158', description='If a child has been recorded as receiving an intervention for their substance misuse problem, then the additional item on whether an intervention was offered should be left blank.', affected_fields=['INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'], ) def _validate(dfs): if 'OC2' not in dfs: return {} else: oc2 = dfs['OC2'] error_mask = oc2['INTERVENTION_RECEIVED'].astype(str).eq('1') & oc2['INTERVENTION_OFFERED'].notna() error_locations = oc2.index[error_mask] return {'OC2': error_locations.tolist()} return error, _validate def validate_133(): error = ErrorDefinition( code='133', description='Data entry for accommodation after leaving care is invalid. 
If reporting on a childs accommodation after leaving care the data entry must be valid', affected_fields=['ACCOM'], ) def _validate(dfs): if 'OC3' not in dfs: return {} else: oc3 = dfs['OC3'] valid_codes = ['B1', 'B2', 'C1', 'C2', 'D1', 'D2', 'E1', 'E2', 'G1', 'G2', 'H1', 'H2', 'K1', 'K2', 'R1', 'R2', 'S2', 'T1', 'T2', 'U1', 'U2', 'V1', 'V2', 'W1', 'W2', 'X2', 'Y1', 'Y2', 'Z1', 'Z2', '0'] error_mask = ~oc3['ACCOM'].isna() & ~oc3['ACCOM'].isin(valid_codes) error_locations = oc3.index[error_mask] return {'OC3': error_locations.tolist()} return error, _validate def validate_565(): error = ErrorDefinition( code='565', description='The date that the child started to be missing or away from placement without authorisation has been completed but whether the child was missing or away from placement without authorisation has not been completed.', affected_fields=['MISSING', 'MIS_START'] ) def _validate(dfs): if 'Missing' not in dfs: return {} else: missing = dfs['Missing'] mask = missing['MIS_START'].notna() & missing['MISSING'].isna() error_locations = missing.index[mask] return {'Missing': error_locations.to_list()} return error, _validate def validate_433(): error = ErrorDefinition( code='433', description='The reason for new episode suggests that this is a continuation episode, but the episode does not start on the same day as the last episode finished.', affected_fields=['RNE', 'DECOM'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] episodes['original_index'] = episodes.index episodes.sort_values(['CHILD', 'DECOM', 'DEC'], inplace=True) episodes[['PREVIOUS_DEC', 'PREVIOUS_CHILD']] = episodes[['DEC', 'CHILD']].shift(1) rne_is_ongoing = episodes['RNE'].str.upper().astype(str).isin(['P', 'L', 'T', 'U', 'B']) date_mismatch = episodes['PREVIOUS_DEC'] != episodes['DECOM'] missing_date = episodes['PREVIOUS_DEC'].isna() | episodes['DECOM'].isna() same_child = episodes['PREVIOUS_CHILD'] == episodes['CHILD'] error_mask = rne_is_ongoing & (date_mismatch | missing_date) & same_child error_locations = episodes['original_index'].loc[error_mask].sort_values() return {'Episodes': error_locations.to_list()} return error, _validate def validate_437(): error = ErrorDefinition( code='437', description='Reason episode ceased is child has died or is aged 18 or over but there are further episodes.', affected_fields=['REC'], ) # !# potential false negatives, as this only operates on the current year's data def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') episodes.sort_values(['CHILD', 'DECOM'], inplace=True) episodes[['NEXT_DECOM', 'NEXT_CHILD']] = episodes[['DECOM', 'CHILD']].shift(-1) # drop rows with missing DECOM as invalid/missing values can lead to errors episodes = episodes.dropna(subset=['DECOM']) ceased_e2_e15 = episodes['REC'].str.upper().astype(str).isin(['E2', 'E15']) has_later_episode = episodes['CHILD'] == episodes['NEXT_CHILD'] error_mask = ceased_e2_e15 & has_later_episode error_locations = episodes.index[error_mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_547(): error = ErrorDefinition( code='547', description="Any child who has health promotion information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.", affected_fields=['HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 
'SUBSTANCE_MISUSE'], ) def _validate(dfs): if 'OC2' not in dfs: return {} else: oc2 = dfs['OC2'] healthck = oc2['HEALTH_CHECK'].astype(str) == '1' immunisations = oc2['IMMUNISATIONS'].isna() teeth_ck = oc2['TEETH_CHECK'].isna() health_ass = oc2['HEALTH_ASSESSMENT'].isna() sub_misuse = oc2['SUBSTANCE_MISUSE'].isna() error_mask = healthck & (immunisations | teeth_ck | health_ass | sub_misuse) validation_error_locations = oc2.index[error_mask] return {'OC2': validation_error_locations.to_list()} return error, _validate def validate_635(): error = ErrorDefinition( code='635', description='There are entries for date of order and local authority code where previous permanence option was arranged but previous permanence code is Z1', affected_fields=['LA_PERM', 'DATE_PERM', 'PREV_PERM'] ) def _validate(dfs): if 'PrevPerm' not in dfs: return {} else: prev_perm = dfs['PrevPerm'] # raise and error if either LA_PERM or DATE_PERM are present, yet PREV_PERM is absent. mask = ((prev_perm['LA_PERM'].notna() | prev_perm['DATE_PERM'].notna()) & prev_perm['PREV_PERM'].isna()) error_locations = prev_perm.index[mask] return {'PrevPerm': error_locations.to_list()} return error, _validate def validate_550(): error = ErrorDefinition( code='550', description='A placement provider code of PR0 can only be associated with placement P1.', affected_fields=['PLACE', 'PLACE_PROVIDER'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] mask = (episodes['PLACE'] != 'P1') & episodes['PLACE_PROVIDER'].eq('PR0') validation_error_locations = episodes.index[mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_217(): error = ErrorDefinition( code='217', description='Children who are placed for adoption with current foster carers (placement types A3 or A5) must have a reason for new episode of S, T or U.', affected_fields=['PLACE', 'DECOM', 'RNE'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') max_decom_allowed = pd.to_datetime('01/04/2015', format='%d/%m/%Y', errors='coerce') reason_new_ep = ['S', 'T', 'U'] place_codes = ['A3', 'A5'] mask = (episodes['PLACE'].isin(place_codes) & (episodes['DECOM'] >= max_decom_allowed)) & ~episodes[ 'RNE'].isin(reason_new_ep) validation_error_mask = mask validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_518(): error = ErrorDefinition( code='518', description='If reporting legal status of adopters is L4 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females.', affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'], ) def _validate(dfs): if 'AD1' not in dfs: return {} else: AD1 = dfs['AD1'] error_mask = AD1['LS_ADOPTR'].eq('L4') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF']) error_locations = AD1.index[error_mask] return {'AD1': error_locations.tolist()} return error, _validate def validate_517(): error = ErrorDefinition( code='517', description='If reporting legal status of adopters is L3 then the genders of adopters should be coded as MF. 
MF = the adopting couple are male and female.', affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'], ) def _validate(dfs): if 'AD1' not in dfs: return {} else: AD1 = dfs['AD1'] error_mask = AD1['LS_ADOPTR'].eq('L3') & ~AD1['SEX_ADOPTR'].isin(['MF']) error_locations = AD1.index[error_mask] return {'AD1': error_locations.tolist()} return error, _validate def validate_558(): error = ErrorDefinition( code='558', description='If a child has been adopted, then the decision to place them for adoption has not been disrupted and the date of the decision that a child should no longer be placed for adoption should be left blank. if the REC code is either E11 or E12 then the DATE PLACED CEASED date should not be provided', affected_fields=['DATE_PLACED_CEASED', 'REC'], ) def _validate(dfs): if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs: return {} else: episodes = dfs['Episodes'] placedAdoptions = dfs['PlacedAdoption'] episodes = episodes.reset_index() rec_codes = ['E11', 'E12'] placeEpisodes = episodes[episodes['REC'].isin(rec_codes)] merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index') episodes_with_errors = merged[merged['DATE_PLACED_CEASED'].notna()] error_mask = episodes.index.isin(episodes_with_errors.index) error_locations = episodes.index[error_mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_453(): error = ErrorDefinition( code='453', description='Contradiction between placement distance in the last episode of the previous year and in the first episode of the current year.', affected_fields=['PL_DISTANCE'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} if 'Episodes_last' not in dfs: return {} else: episodes = dfs['Episodes'] episodes_last = dfs['Episodes_last'] episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce') episodes['PL_DISTANCE'] = pd.to_numeric(episodes['PL_DISTANCE'], errors='coerce') episodes_last['PL_DISTANCE'] = pd.to_numeric(episodes_last['PL_DISTANCE'], errors='coerce') # drop rows with missing DECOM before finding idxmin/max, as invalid/missing values can lead to errors episodes = episodes.dropna(subset=['DECOM']) episodes_last = episodes_last.dropna(subset=['DECOM']) episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin() episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax() episodes = episodes[episodes.index.isin(episodes_min)] episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)] episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'], suffixes=('', '_last'), indicator=True).set_index('index') in_both_years = episodes_merged['_merge'] == 'both' same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last'] last_year_open = episodes_merged['DEC_last'].isna() different_pl_dist = abs(episodes_merged['PL_DISTANCE'] - episodes_merged['PL_DISTANCE_last']) >= 0.2 error_mask = in_both_years & same_rne & last_year_open & different_pl_dist validation_error_locations = episodes.index[error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_516(): error = ErrorDefinition( code='516', description='The episode data submitted for this child does not show that he/she was with their former foster carer(s) during the year.If the code in the reason episode ceased is E45 or E46 the child must have a placement code of U1 to U6.', affected_fields=['REC', 
'PLACE'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] place_codes = ['U1', 'U2', 'U3', 'U4', 'U5', 'U6'] rec_codes = ['E45', 'E46'] error_mask = episodes['REC'].isin(rec_codes) & ~episodes['PLACE'].isin(place_codes) validation_error_locations = episodes.index[error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_511(): error = ErrorDefinition( code='511', description='If reporting that the number of person(s) adopting the looked after child is two adopters then the code should only be MM, FF or MF. MM = the adopting couple are both males; FF = the adopting couple are both females; MF = The adopting couple are male and female.', affected_fields=['NB_ADOPTR', 'SEX_ADOPTR'], ) def _validate(dfs): if 'AD1' not in dfs: return {} else: AD1 = dfs['AD1'] mask = AD1['NB_ADOPTR'].astype(str).eq('2') & AD1['SEX_ADOPTR'].isin(['M1', 'F1']) validation_error_mask = mask validation_error_locations = AD1.index[validation_error_mask] return {'AD1': validation_error_locations.tolist()} return error, _validate def validate_524(): error = ErrorDefinition( code='524', description='If reporting legal status of adopters is L12 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females', affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'], ) def _validate(dfs): if 'AD1' not in dfs: return {} else: AD1 = dfs['AD1'] error_mask = AD1['LS_ADOPTR'].eq('L12') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF']) error_locations = AD1.index[error_mask] return {'AD1': error_locations.tolist()} return error, _validate def validate_441(): error = ErrorDefinition( code='441', description='Participation method indicates child was 4 years old or over at the time of the review, but the date of birth and review date indicates the child was under 4 years old.', affected_fields=['DOB', 'REVIEW', 'REVIEW_CODE'], ) def _validate(dfs): if 'Reviews' not in dfs: return {} else: reviews = dfs['Reviews'] reviews['DOB'] = pd.to_datetime(reviews['DOB'], format='%d/%m/%Y', errors='coerce') reviews['REVIEW'] = pd.to_datetime(reviews['REVIEW'], format='%d/%m/%Y', errors='coerce') reviews = reviews.dropna(subset=['REVIEW', 'DOB']) mask = reviews['REVIEW_CODE'].isin(['PN1', 'PN2', 'PN3', 'PN4', 'PN5', 'PN6', 'PN7']) & ( reviews['REVIEW'] < reviews['DOB'] + pd.offsets.DateOffset(years=4)) validation_error_mask = mask validation_error_locations = reviews.index[validation_error_mask] return {'Reviews': validation_error_locations.tolist()} return error, _validate def validate_184(): error = ErrorDefinition( code='184', description='Date of decision that a child should be placed for adoption is before the child was born.', affected_fields=['DATE_PLACED', # PlacedAdoptino 'DOB'], # Header ) def _validate(dfs): if 'Header' not in dfs or 'PlacedAdoption' not in dfs: return {} else: child_record = dfs['Header'] placed_for_adoption = dfs['PlacedAdoption'] all_data = (placed_for_adoption .reset_index() .merge(child_record, how='left', on='CHILD', suffixes=[None, '_P4A'])) all_data['DATE_PLACED'] = pd.to_datetime(all_data['DATE_PLACED'], format='%d/%m/%Y', errors='coerce') all_data['DOB'] = pd.to_datetime(all_data['DOB'], format='%d/%m/%Y', errors='coerce') mask = (all_data['DATE_PLACED'] >= all_data['DOB']) | all_data['DATE_PLACED'].isna() validation_error = ~mask validation_error_locations = all_data[validation_error]['index'].unique() return {'PlacedAdoption': validation_error_locations.tolist()} 
return error, _validate def validate_612(): error = ErrorDefinition( code='612', description="Date of birth field has been completed but mother field indicates child is not a mother.", affected_fields=['SEX', 'MOTHER', 'MC_DOB'], ) def _validate(dfs): if 'Header' not in dfs: return {} else: header = dfs['Header'] error_mask = ( ((header['MOTHER'].astype(str) == '0') | header['MOTHER'].isna()) & (header['SEX'].astype(str) == '2') & header['MC_DOB'].notna() ) validation_error_locations = header.index[error_mask] return {'Header': validation_error_locations.tolist()} return error, _validate def validate_552(): """ This error checks that the first adoption episode is after the last decision ! If there are multiple of either there may be unexpected results ! """ error = ErrorDefinition( code="552", description="Date of Decision to place a child for adoption should be on or prior to the date that the child was placed for adoption.", # Field that defines date of decision to place a child for adoption is DATE_PLACED and the start of adoption is defined by DECOM with 'A' placement types. affected_fields=['DATE_PLACED', 'DECOM'], ) def _validate(dfs): if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs): return {} else: # get the required datasets placed_adoption = dfs['PlacedAdoption'] episodes = dfs['Episodes'] # keep index values so that they stay the same when needed later on for error locations placed_adoption.reset_index(inplace=True) episodes.reset_index(inplace=True) adoption_eps = episodes[episodes['PLACE'].isin(['A3', 'A4', 'A5', 'A6'])].copy() # find most recent adoption decision placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y', errors='coerce') # remove rows where either of the required values have not been filled. placed_adoption = placed_adoption[placed_adoption['DATE_PLACED'].notna()] placed_adoption_inds = placed_adoption.groupby('CHILD')['DATE_PLACED'].idxmax(skipna=True) last_decision = placed_adoption.loc[placed_adoption_inds] # first time child started adoption adoption_eps["DECOM"] = pd.to_datetime(adoption_eps['DECOM'], format='%d/%m/%Y', errors='coerce') adoption_eps = adoption_eps[adoption_eps['DECOM'].notna()] adoption_eps_inds = adoption_eps.groupby('CHILD')['DECOM'].idxmin(skipna=True) # full information of first adoption first_adoption = adoption_eps.loc[adoption_eps_inds] # date of decision and date of start of adoption (DECOM) have to be put in one table merged = first_adoption.merge(last_decision, on=['CHILD'], how='left', suffixes=['_EP', '_PA']) # check to see if date of decision to place is less than or equal to date placed. decided_after_placed = merged["DECOM"] < merged["DATE_PLACED"] # find the corresponding location of error values per file. 
episode_error_locs = merged.loc[decided_after_placed, 'index_EP'] placedadoption_error_locs = merged.loc[decided_after_placed, 'index_PA'] return {"PlacedAdoption": placedadoption_error_locs.to_list(), "Episodes": episode_error_locs.to_list()} return error, _validate def validate_551(): error = ErrorDefinition( code='551', description='Child has been placed for adoption but there is no date of the decision that the child should be placed for adoption.', affected_fields=['DATE_PLACED', 'PLACE'], ) def _validate(dfs): if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs: return {} else: episodes = dfs['Episodes'] placedAdoptions = dfs['PlacedAdoption'] episodes = episodes.reset_index() place_codes = ['A3', 'A4', 'A5', 'A6'] placeEpisodes = episodes[episodes['PLACE'].isin(place_codes)] merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index') episodes_with_errors = merged[merged['DATE_PLACED'].isna()] error_mask = episodes.index.isin(episodes_with_errors.index) error_locations = episodes.index[error_mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_557(): error = ErrorDefinition( code='557', description="Child for whom the decision was made that they should be placed for adoption has left care " + "but was not adopted and information on the decision that they should no longer be placed for " + "adoption items has not been completed.", affected_fields=['DATE_PLACED_CEASED', 'REASON_PLACED_CEASED', # PlacedAdoption 'PLACE', 'LS', 'REC'], # Episodes ) def _validate(dfs): if 'Episodes' not in dfs: return {} if 'PlacedAdoption' not in dfs: return {} else: eps = dfs['Episodes'] placed = dfs['PlacedAdoption'] eps = eps.reset_index() placed = placed.reset_index() child_placed = eps['PLACE'].isin(['A3', 'A4', 'A5', 'A6']) order_granted = eps['LS'].isin(['D1', 'E1']) not_adopted = ~eps['REC'].isin(['E11', 'E12']) & eps['REC'].notna() placed['ceased_incomplete'] = ( placed['DATE_PLACED_CEASED'].isna() | placed['REASON_PLACED_CEASED'].isna() ) eps = eps[(child_placed | order_granted) & not_adopted] eps = eps.merge(placed, on='CHILD', how='left', suffixes=['_EP', '_PA'], indicator=True) eps = eps[(eps['_merge'] == 'left_only') | eps['ceased_incomplete']] EP_errors = eps['index_EP'] PA_errors = eps['index_PA'].dropna() return { 'Episodes': EP_errors.to_list(), 'PlacedAdoption': PA_errors.to_list(), } return error, _validate def validate_207(): error = ErrorDefinition( code='207', description='Mother status for the current year disagrees with the mother status already recorded for this child.', affected_fields=['MOTHER'], ) def _validate(dfs): if 'Header' not in dfs or 'Header_last' not in dfs: return {} else: header = dfs['Header'] header_last = dfs['Header_last'] header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'), indicator=True).set_index('index') in_both_years = header_merged['_merge'] == 'both' mother_is_different = header_merged['MOTHER'].astype(str) != header_merged['MOTHER_last'].astype(str) mother_was_true = header_merged['MOTHER_last'].astype(str) == '1' error_mask = in_both_years & mother_is_different & mother_was_true error_locations = header.index[error_mask] return {'Header': error_locations.to_list()} return error, _validate def validate_523(): error = ErrorDefinition( code='523', description="Date of decision that the child should be placed for adoption should be the same date as the decision that adoption is in the best interest (date should be placed).", 
affected_fields=['DATE_PLACED', 'DATE_INT'], ) def _validate(dfs): if ("AD1" not in dfs) or ("PlacedAdoption" not in dfs): return {} else: placed_adoption = dfs["PlacedAdoption"] ad1 = dfs["AD1"] # keep initial index values to be reused for locating errors later on. placed_adoption.reset_index(inplace=True) ad1.reset_index(inplace=True) # convert to datetime to enable comparison placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format="%d/%m/%Y", errors='coerce') ad1["DATE_INT"] = pd.to_datetime(ad1['DATE_INT'], format='%d/%m/%Y', errors='coerce') # drop rows where either of the required values have not been filled. placed_adoption = placed_adoption[placed_adoption["DATE_PLACED"].notna()] ad1 = ad1[ad1["DATE_INT"].notna()] # bring corresponding values together from both dataframes merged_df = placed_adoption.merge(ad1, on=['CHILD'], how='inner', suffixes=["_AD", "_PA"]) # find error values different_dates = merged_df['DATE_INT'] != merged_df['DATE_PLACED'] # map error locations to corresponding indices pa_error_locations = merged_df.loc[different_dates, 'index_PA'] ad1_error_locations = merged_df.loc[different_dates, 'index_AD'] return {"PlacedAdoption": pa_error_locations.to_list(), "AD1": ad1_error_locations.to_list()} return error, _validate def validate_3001(): error = ErrorDefinition( code='3001', description='Where care leavers information is being returned for a young person around their 17th birthday, the accommodation cannot be with their former foster carer(s).', affected_fields=['REC'], ) def _validate(dfs): if 'Header' not in dfs: return {} if 'OC3' not in dfs: return {} else: header = dfs['Header'] oc3 = dfs['OC3'] collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce') collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce') header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce') header['DOB17'] = header['DOB'] + pd.DateOffset(years=17) oc3_merged = oc3.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'), indicator=True).set_index('index') accom_foster = oc3_merged['ACCOM'].str.upper().astype(str).isin(['Z1', 'Z2']) age_17_in_year = (oc3_merged['DOB17'] <= collection_end) & (oc3_merged['DOB17'] >= collection_start) error_mask = accom_foster & age_17_in_year error_locations = oc3.index[error_mask] return {'OC3': error_locations.to_list()} return error, _validate def validate_389(): error = ErrorDefinition( code='389', description='Reason episode ceased is that child transferred to care of adult social care services, but child is aged under 16.', affected_fields=['REC'], ) def _validate(dfs): if 'Header' not in dfs: return {} if 'Episodes' not in dfs: return {} else: header = dfs['Header'] episodes = dfs['Episodes'] header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce') episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce') header['DOB16'] = header['DOB'] + pd.DateOffset(years=16) episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'), indicator=True).set_index('index') ceased_asc = episodes_merged['REC'].str.upper().astype(str).isin(['E7']) ceased_over_16 = episodes_merged['DOB16'] <= episodes_merged['DEC'] error_mask = ceased_asc & ~ceased_over_16 error_locations = episodes.index[error_mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_387(): error = 
ErrorDefinition( code='387', description='Reason episode ceased is child moved into independent living arrangement, but the child is aged under 14.', affected_fields=['REC'], ) def _validate(dfs): if 'Header' not in dfs: return {} if 'Episodes' not in dfs: return {} else: header = dfs['Header'] episodes = dfs['Episodes'] header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce') episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce') header['DOB14'] = header['DOB'] + pd.DateOffset(years=14) episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'), indicator=True).set_index('index') ceased_indep = episodes_merged['REC'].str.upper().astype(str).isin(['E5', 'E6']) ceased_over_14 = episodes_merged['DOB14'] <= episodes_merged['DEC'] dec_present = episodes_merged['DEC'].notna() error_mask = ceased_indep & ~ceased_over_14 & dec_present error_locations = episodes.index[error_mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_452(): error = ErrorDefinition( code='452', description='Contradiction between local authority of placement code in the last episode of the previous year and in the first episode of the current year.', affected_fields=['PL_LA'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} if 'Episodes_last' not in dfs: return {} else: episodes = dfs['Episodes'] episodes_last = dfs['Episodes_last'] episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce') episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin() episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax() episodes = episodes[episodes.index.isin(episodes_min)] episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)] episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'], suffixes=('', '_last'), indicator=True).set_index('index') in_both_years = episodes_merged['_merge'] == 'both' same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last'] last_year_open = episodes_merged['DEC_last'].isna() different_pl_la = episodes_merged['PL_LA'].astype(str) != episodes_merged['PL_LA_last'].astype(str) error_mask = in_both_years & same_rne & last_year_open & different_pl_la validation_error_locations = episodes.index[error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_386(): error = ErrorDefinition( code='386', description='Reason episode ceased is adopted but child has reached age 18.', affected_fields=['REC'], ) def _validate(dfs): if 'Header' not in dfs: return {} if 'Episodes' not in dfs: return {} else: header = dfs['Header'] episodes = dfs['Episodes'] header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce') episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce') header['DOB18'] = header['DOB'] + pd.DateOffset(years=18) episodes_merged = ( episodes .reset_index() .merge(header, how='left', on=['CHILD'], suffixes=('', '_header'), indicator=True) .set_index('index') .dropna(subset=['DOB18', 'DEC']) ) ceased_adopted = episodes_merged['REC'].str.upper().astype(str).isin(['E11', 'E12']) ceased_under_18 = episodes_merged['DOB18'] > episodes_merged['DEC'] error_mask = ceased_adopted & ~ceased_under_18 error_locations = episodes_merged.index[error_mask] return {'Episodes': 
error_locations.to_list()} return error, _validate def validate_363(): error = ErrorDefinition( code='363', description='Child assessment order (CAO) lasted longer than 7 days allowed in the Children Act 1989.', affected_fields=['LS', 'DECOM', 'DEC'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} episodes = dfs['Episodes'] collection_end_str = dfs['metadata']['collection_end'] L2_eps = episodes[episodes['LS'] == 'L3'].copy() L2_eps['original_index'] = L2_eps.index L2_eps = L2_eps[L2_eps['DECOM'].notna()] L2_eps.loc[L2_eps['DEC'].isna(), 'DEC'] = collection_end_str L2_eps['DECOM'] = pd.to_datetime(L2_eps['DECOM'], format='%d/%m/%Y', errors='coerce') L2_eps = L2_eps.dropna(subset=['DECOM']) L2_eps['DEC'] = pd.to_datetime(L2_eps['DEC'], format='%d/%m/%Y', errors='coerce') L2_eps = L2_eps.sort_values(['CHILD', 'DECOM']) L2_eps['index'] = pd.RangeIndex(0, len(L2_eps)) L2_eps['index+1'] = L2_eps['index'] + 1 L2_eps = L2_eps.merge(L2_eps, left_on='index', right_on='index+1', how='left', suffixes=[None, '_prev']) L2_eps = L2_eps[['original_index', 'DECOM', 'DEC', 'DEC_prev', 'CHILD', 'CHILD_prev', 'LS']] L2_eps['new_period'] = ( (L2_eps['DECOM'] > L2_eps['DEC_prev']) | (L2_eps['CHILD'] != L2_eps['CHILD_prev']) ) L2_eps['duration'] = (L2_eps['DEC'] - L2_eps['DECOM']).dt.days L2_eps['period_id'] = L2_eps['new_period'].astype(int).cumsum() L2_eps['period_duration'] = L2_eps.groupby('period_id')['duration'].transform(sum) error_mask = L2_eps['period_duration'] > 7 return {'Episodes': L2_eps.loc[error_mask, 'original_index'].to_list()} return error, _validate def validate_364(): error = ErrorDefinition( code='364', description='Sections 41-46 of Police and Criminal Evidence (PACE; 1984) severely limit ' + 'the time a child can be detained in custody in Local Authority (LA) accommodation.', affected_fields=['LS', 'DECOM', 'DEC'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} episodes = dfs['Episodes'] collection_end_str = dfs['metadata']['collection_end'] J2_eps = episodes[episodes['LS'] == 'J2'].copy() J2_eps['original_index'] = J2_eps.index J2_eps['DECOM'] = pd.to_datetime(J2_eps['DECOM'], format='%d/%m/%Y', errors='coerce') J2_eps = J2_eps[J2_eps['DECOM'].notna()] J2_eps.loc[J2_eps['DEC'].isna(), 'DEC'] = collection_end_str J2_eps['DEC'] = pd.to_datetime(J2_eps['DEC'], format='%d/%m/%Y', errors='coerce') J2_eps = J2_eps.sort_values(['CHILD', 'DECOM']) J2_eps['index'] = pd.RangeIndex(0, len(J2_eps)) J2_eps['index_prev'] = J2_eps['index'] + 1 J2_eps = J2_eps.merge(J2_eps, left_on='index', right_on='index_prev', how='left', suffixes=[None, '_prev']) J2_eps = J2_eps[['original_index', 'DECOM', 'DEC', 'DEC_prev', 'CHILD', 'CHILD_prev', 'LS']] J2_eps['new_period'] = ( (J2_eps['DECOM'] > J2_eps['DEC_prev']) | (J2_eps['CHILD'] != J2_eps['CHILD_prev']) ) J2_eps['duration'] = (J2_eps['DEC'] - J2_eps['DECOM']).dt.days J2_eps['period_id'] = J2_eps['new_period'].astype(int).cumsum() J2_eps['period_duration'] = J2_eps.groupby('period_id')['duration'].transform(sum) error_mask = J2_eps['period_duration'] > 21 return {'Episodes': J2_eps.loc[error_mask, 'original_index'].to_list()} return error, _validate def validate_365(): error = ErrorDefinition( code='365', description='Any individual short-term respite placement must not exceed 17 days.', affected_fields=['LS', 'DECOM', 'DEC'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} episodes = dfs['Episodes'] collection_end_str = dfs['metadata']['collection_end'] episodes.loc[episodes['DEC'].isna(), 'DEC'] = collection_end_str episodes['DECOM'] =
pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce') over_17_days = episodes['DEC'] > episodes['DECOM'] + pd.DateOffset(days=17) error_mask = (episodes['LS'] == 'V3') & over_17_days return {'Episodes': episodes.index[error_mask].to_list()} return error, _validate def validate_367(): error = ErrorDefinition( code='367', description='The maximum amount of respite care allowable is 75 days in any 12-month period.', affected_fields=['LS', 'DECOM', 'DEC'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} episodes = dfs['Episodes'] V3_eps = episodes[episodes['LS'] == 'V3'] V3_eps = V3_eps.dropna(subset=['DECOM']) # missing DECOM should get fixed before looking for this error collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce') collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce') V3_eps['DECOM_dt'] = pd.to_datetime(V3_eps['DECOM'], format='%d/%m/%Y', errors='coerce') V3_eps['DEC_dt'] = pd.to_datetime(V3_eps['DEC'], format='%d/%m/%Y', errors='coerce') # truncate episode start/end dates to collection start/end respectively V3_eps.loc[V3_eps['DEC'].isna() | (V3_eps['DEC_dt'] > collection_end), 'DEC_dt'] = collection_end V3_eps.loc[V3_eps['DECOM_dt'] < collection_start, 'DECOM_dt'] = collection_start V3_eps['duration'] = (V3_eps['DEC_dt'] - V3_eps['DECOM_dt']).dt.days V3_eps = V3_eps[V3_eps['duration'] > 0] V3_eps['year_total_duration'] = V3_eps.groupby('CHILD')['duration'].transform(sum) error_mask = V3_eps['year_total_duration'] > 75 return {'Episodes': V3_eps.index[error_mask].to_list()} return error, _validate def validate_440(): error = ErrorDefinition( code='440', description='Participation method indicates child was under 4 years old at the time of the review, but date of birth and review date indicates the child was 4 years old or over.', affected_fields=['DOB', 'REVIEW', 'REVIEW_CODE'], ) def _validate(dfs): if 'Reviews' not in dfs: return {} else: reviews = dfs['Reviews'] reviews['DOB'] = pd.to_datetime(reviews['DOB'], format='%d/%m/%Y', errors='coerce') reviews['REVIEW'] = pd.to_datetime(reviews['REVIEW'], format='%d/%m/%Y', errors='coerce') mask = reviews['REVIEW_CODE'].eq('PN0') & ( reviews['REVIEW'] > reviews['DOB'] + pd.offsets.DateOffset(years=4)) validation_error_mask = mask validation_error_locations = reviews.index[validation_error_mask] return {'Reviews': validation_error_locations.tolist()} return error, _validate def validate_445(): error = ErrorDefinition( code='445', description='D1 is not a valid code for episodes starting after December 2005.', affected_fields=['LS', 'DECOM'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') max_decom_allowed = pd.to_datetime('31/12/2005', format='%d/%m/%Y', errors='coerce') mask = episodes['LS'].eq('D1') & (episodes['DECOM'] > max_decom_allowed) validation_error_mask = mask validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_446(): error = ErrorDefinition( code='446', description='E1 is not a valid code for episodes starting before December 2005.', affected_fields=['LS', 'DECOM'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] episodes['DECOM'] = 
pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') min_decom_allowed = pd.to_datetime('01/12/2005', format='%d/%m/%Y', errors='coerce') mask = episodes['LS'].eq('E1') & (episodes['DECOM'] < min_decom_allowed) validation_error_mask = mask validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_208(): error = ErrorDefinition( code='208', description='Unique Pupil Number (UPN) for the current year disagrees with the Unique Pupil Number (UPN) already recorded for this child.', affected_fields=['UPN'], ) def _validate(dfs): if 'Header' not in dfs or 'Header_last' not in dfs: return {} else: header = dfs['Header'] header_last = dfs['Header_last'] header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'), indicator=True).set_index('index') in_both_years = header_merged['_merge'] == 'both' upn_is_different = header_merged['UPN'].str.upper().astype(str) != header_merged[ 'UPN_last'].str.upper().astype(str) upn_not_recorded = header_merged['UPN'].str.upper().astype(str).isin(['UN2', 'UN3', 'UN4', 'UN5', 'UN6']) & \ header_merged['UPN_last'].str.upper().astype(str).isin(['UN1']) error_mask = in_both_years & upn_is_different & ~upn_not_recorded error_locations = header.index[error_mask] return {'Header': error_locations.to_list()} return error, _validate def validate_204(): error = ErrorDefinition( code='204', description='Ethnic origin code disagrees with the ethnic origin already recorded for this child.', affected_fields=['ETHNIC'], ) def _validate(dfs): if 'Header' not in dfs or 'Header_last' not in dfs: return {} else: header = dfs['Header'] header_last = dfs['Header_last'] header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'), indicator=True).set_index('index') in_both_years = header_merged['_merge'] == 'both' ethnic_is_different = header_merged['ETHNIC'].astype(str).str.upper() != header_merged[ 'ETHNIC_last'].astype(str).str.upper() error_mask = in_both_years & ethnic_is_different error_locations = header.index[error_mask] return {'Header': error_locations.to_list()} return error, _validate def validate_203(): error = ErrorDefinition( code='203', description='Date of birth disagrees with the date of birth already recorded for this child.', affected_fields=['DOB'], ) def _validate(dfs): if 'Header' not in dfs or 'Header_last' not in dfs: return {} else: header = dfs['Header'] header_last = dfs['Header_last'] header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce') header_last['DOB'] = pd.to_datetime(header_last['DOB'], format='%d/%m/%Y', errors='coerce') header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'), indicator=True).set_index('index') in_both_years = header_merged['_merge'] == 'both' dob_is_different = header_merged['DOB'].astype(str) != header_merged['DOB_last'].astype(str) error_mask = in_both_years & dob_is_different error_locations = header.index[error_mask] return {'Header': error_locations.to_list()} return error, _validate def validate_530(): error = ErrorDefinition( code='530', description="A placement provider code of PR4 cannot be associated with placement P1.", affected_fields=['PLACE', 'PLACE_PROVIDER'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] mask = episodes['PLACE'].eq('P1') & episodes['PLACE_PROVIDER'].eq('PR4') 
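        # Note on rule 530 above: .eq() never matches a missing value, so episodes with a blank
        # PLACE or PLACE_PROVIDER are not flagged by this mask; blanks are left to the generic
        # field-validity rules. Illustrative sketch (hypothetical rows, not taken from any return):
        # pd.DataFrame({'PLACE': ['P1', 'P1'], 'PLACE_PROVIDER': ['PR4', 'PR1']}) would flag only
        # the first row.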
validation_error_mask = mask validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_571(): error = ErrorDefinition( code='571', description='The date that the child ceased to be missing or away from placement without authorisation is before the start or after the end of the collection year.', affected_fields=['MIS_END'], ) def _validate(dfs): if 'Missing' not in dfs: return {} else: missing = dfs['Missing'] collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce') collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce') missing['fMIS_END'] = pd.to_datetime(missing['MIS_END'], format='%d/%m/%Y', errors='coerce') end_date_before_year = missing['fMIS_END'] < collection_start end_date_after_year = missing['fMIS_END'] > collection_end error_mask = end_date_before_year | end_date_after_year error_locations = missing.index[error_mask] return {'Missing': error_locations.to_list()} return error, _validate def validate_1005(): error = ErrorDefinition( code='1005', description='The end date of the missing episode or episode that the child was away from placement without authorisation is not a valid date.', affected_fields=['MIS_END'], ) def _validate(dfs): if 'Missing' not in dfs: return {} else: missing = dfs['Missing'] missing['fMIS_END'] = pd.to_datetime(missing['MIS_END'], format='%d/%m/%Y', errors='coerce') missing_end_date = missing['MIS_END'].isna() invalid_end_date = missing['fMIS_END'].isna() error_mask = ~missing_end_date & invalid_end_date error_locations = missing.index[error_mask] return {'Missing': error_locations.to_list()} return error, _validate def validate_1004(): error = ErrorDefinition( code='1004', description='The start date of the missing episode or episode that the child was away from placement without authorisation is not a valid date.', affected_fields=['MIS_START'], ) def _validate(dfs): if 'Missing' not in dfs: return {} else: missing = dfs['Missing'] missing['fMIS_START'] = pd.to_datetime(missing['MIS_START'], format='%d/%m/%Y', errors='coerce') missing_start_date = missing['MIS_START'].isna() invalid_start_date = missing['fMIS_START'].isna() error_mask = missing_start_date | invalid_start_date error_locations = missing.index[error_mask] return {'Missing': error_locations.to_list()} return error, _validate def validate_202(): error = ErrorDefinition( code='202', description='The gender code conflicts with the gender already recorded for this child.', affected_fields=['SEX'], ) def _validate(dfs): if 'Header' not in dfs or 'Header_last' not in dfs: return {} else: header = dfs['Header'] header_last = dfs['Header_last'] header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'), indicator=True).set_index('index') in_both_years = header_merged['_merge'] == 'both' sex_is_different = header_merged['SEX'].astype(str) != header_merged['SEX_last'].astype(str) error_mask = in_both_years & sex_is_different error_locations = header.index[error_mask] return {'Header': error_locations.to_list()} return error, _validate def validate_621(): error = ErrorDefinition( code='621', description="Mother’s field has been completed but date of birth shows that the mother is younger than her child.", affected_fields=['DOB', 'MC_DOB'], ) def _validate(dfs): if 'Header' not in dfs: return {} else: header = dfs['Header'] header['MC_DOB'] = 
pd.to_datetime(header['MC_DOB'], format='%d/%m/%Y', errors='coerce') header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce') mask = (header['MC_DOB'] > header['DOB']) | header['MC_DOB'].isna() validation_error_mask = ~mask validation_error_locations = header.index[validation_error_mask] return {'Header': validation_error_locations.tolist()} return error, _validate def validate_556(): error = ErrorDefinition( code='556', description='Date of decision that the child should be placed for adoption should be on or prior to the date that the freeing order was granted.', affected_fields=['DATE_PLACED', 'DECOM'], ) def _validate(dfs): if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs: return {} else: episodes = dfs['Episodes'] placedAdoptions = dfs['PlacedAdoption'] episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') placedAdoptions['DATE_PLACED'] = pd.to_datetime(placedAdoptions['DATE_PLACED'], format='%d/%m/%Y', errors='coerce') episodes = episodes.reset_index() D1Episodes = episodes[episodes['LS'] == 'D1'] merged = D1Episodes.reset_index().merge(placedAdoptions, how='left', on='CHILD', ).set_index('index') episodes_with_errors = merged[merged['DATE_PLACED'] > merged['DECOM']] error_mask = episodes.index.isin(episodes_with_errors.index) error_locations = episodes.index[error_mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_393(): error = ErrorDefinition( code='393', description='Child is looked after but mother field is not completed.', affected_fields=['MOTHER'], ) def _validate(dfs): if 'Header' not in dfs or 'Episodes' not in dfs: return {} else: header = dfs['Header'] episodes = dfs['Episodes'] header_female = header[header['SEX'].astype(str) == '2'] applicable_episodes = episodes[~episodes['LS'].str.upper().isin(['V3', 'V4'])] error_mask = header_female['CHILD'].isin(applicable_episodes['CHILD']) & header_female['MOTHER'].isna() error_locations = header_female.index[error_mask] return {'Header': error_locations.to_list()} return error, _validate def validate_NoE(): error = ErrorDefinition( code='NoE', description='This child has no episodes loaded for previous year even though child started to be looked after before this current year.', affected_fields=['DECOM'], ) def _validate(dfs): if 'Episodes' not in dfs or 'Episodes_last' not in dfs: return {} else: episodes = dfs['Episodes'] episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce') episodes_last = dfs['Episodes_last'] episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce') collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce') episodes_before_year = episodes[episodes['DECOM'] < collection_start] episodes_merged = episodes_before_year.reset_index().merge(episodes_last, how='left', on=['CHILD'], indicator=True).set_index('index') episodes_not_matched = episodes_merged[episodes_merged['_merge'] == 'left_only'] error_mask = episodes.index.isin(episodes_not_matched.index) error_locations = episodes.index[error_mask] return {'Episodes': error_locations.to_list()} return error, _validate def validate_356(): error = ErrorDefinition( code='356', description='The date the episode ceased is before the date the same episode started.', affected_fields=['DECOM'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], 
format='%d/%m/%Y', errors='coerce') episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce') error_mask = episodes['DEC'].notna() & (episodes['DEC'] < episodes['DECOM']) return {'Episodes': episodes.index[error_mask].to_list()} return error, _validate def validate_611(): error = ErrorDefinition( code='611', description="Date of birth field is blank, but child is a mother.", affected_fields=['MOTHER', 'MC_DOB'], ) def _validate(dfs): if 'Header' not in dfs: return {} else: header = dfs['Header'] validation_error_mask = header['MOTHER'].astype(str).isin(['1']) & header['MC_DOB'].isna() validation_error_locations = header.index[validation_error_mask] return {'Header': validation_error_locations.tolist()} return error, _validate def validate_1009(): error = ErrorDefinition( code='1009', description='Reason for placement change is not a valid code.', affected_fields=['REASON_PLACE_CHANGE'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} episodes = dfs['Episodes'] code_list = [ 'CARPL', 'CLOSE', 'ALLEG', 'STAND', 'APPRR', 'CREQB', 'CREQO', 'CHILD', 'LAREQ', 'PLACE', 'CUSTOD', 'OTHER' ] mask = episodes['REASON_PLACE_CHANGE'].isin(code_list) | episodes['REASON_PLACE_CHANGE'].isna() validation_error_mask = ~mask validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_1006(): error = ErrorDefinition( code='1006', description='Missing type invalid.', affected_fields=['MISSING'], ) def _validate(dfs): if 'Missing' not in dfs: return {} missing_from_care = dfs['Missing'] code_list = ['M', 'A'] mask = missing_from_care['MISSING'].isin(code_list) | missing_from_care['MISSING'].isna() validation_error_mask = ~mask validation_error_locations = missing_from_care.index[validation_error_mask] return {'Missing': validation_error_locations.tolist()} return error, _validate def validate_631(): error = ErrorDefinition( code='631', description='Previous permanence option not a valid value.', affected_fields=['PREV_PERM'], ) def _validate(dfs): if 'PrevPerm' not in dfs: return {} previous_permanence = dfs['PrevPerm'] code_list = ['P1', 'P2', 'P3', 'P4', 'Z1'] mask = previous_permanence['PREV_PERM'].isin(code_list) | previous_permanence['PREV_PERM'].isna() validation_error_mask = ~mask validation_error_locations = previous_permanence.index[validation_error_mask] return {'PrevPerm': validation_error_locations.tolist()} return error, _validate def validate_196(): error = ErrorDefinition( code='196', description='Strengths and Difficulties (SDQ) reason is not a valid code.', affected_fields=['SDQ_REASON'], ) def _validate(dfs): if 'OC2' not in dfs: return {} oc2 = dfs['OC2'] code_list = ['SDQ1', 'SDQ2', 'SDQ3', 'SDQ4', 'SDQ5'] mask = oc2['SDQ_REASON'].isin(code_list) | oc2['SDQ_REASON'].isna() validation_error_mask = ~mask validation_error_locations = oc2.index[validation_error_mask] return {'OC2': validation_error_locations.tolist()} return error, _validate def validate_177(): error = ErrorDefinition( code='177', description='The legal status of adopter(s) code is not a valid code.', affected_fields=['LS_ADOPTR'], ) def _validate(dfs): if 'AD1' not in dfs: return {} adoptions = dfs['AD1'] code_list = ['L0', 'L11', 'L12', 'L2', 'L3', 'L4'] mask = adoptions['LS_ADOPTR'].isin(code_list) | adoptions['LS_ADOPTR'].isna() validation_error_mask = ~mask validation_error_locations = adoptions.index[validation_error_mask] return {'AD1': validation_error_locations.tolist()} return error, 
_validate def validate_176(): error = ErrorDefinition( code='176', description='The gender of adopter(s) at the date of adoption code is not a valid code.', affected_fields=['SEX_ADOPTR'], ) def _validate(dfs): if 'AD1' not in dfs: return {} adoptions = dfs['AD1'] code_list = ['M1', 'F1', 'MM', 'FF', 'MF'] mask = adoptions['SEX_ADOPTR'].isin(code_list) | adoptions['SEX_ADOPTR'].isna() validation_error_mask = ~mask validation_error_locations = adoptions.index[validation_error_mask] return {'AD1': validation_error_locations.tolist()} return error, _validate def validate_175(): error = ErrorDefinition( code='175', description='The number of adopter(s) code is not a valid code.', affected_fields=['NB_ADOPTR'], ) def _validate(dfs): if 'AD1' not in dfs: return {} adoptions = dfs['AD1'] code_list = ['1', '2'] mask = adoptions['NB_ADOPTR'].astype(str).isin(code_list) | adoptions['NB_ADOPTR'].isna() validation_error_mask = ~mask validation_error_locations = adoptions.index[validation_error_mask] return {'AD1': validation_error_locations.tolist()} return error, _validate def validate_132(): error = ErrorDefinition( code='132', description='Data entry for activity after leaving care is invalid.', affected_fields=['ACTIV'], ) def _validate(dfs): if 'OC3' not in dfs: return {} care_leavers = dfs['OC3'] code_list = [ 'F1', 'P1', 'F2', 'P2', 'F4', 'P4', 'F5', 'P5', 'G4', 'G5', 'G6', '0' ] mask = care_leavers['ACTIV'].astype(str).isin(code_list) | care_leavers['ACTIV'].isna() validation_error_mask = ~mask validation_error_locations = care_leavers.index[validation_error_mask] return {'OC3': validation_error_locations.tolist()} return error, _validate def validate_131(): error = ErrorDefinition( code='131', description='Data entry for being in touch after leaving care is invalid.', affected_fields=['IN_TOUCH'], ) def _validate(dfs): if 'OC3' not in dfs: return {} care_leavers = dfs['OC3'] code_list = [ 'YES', 'NO', 'DIED', 'REFU', 'NREQ', 'RHOM' ] mask = care_leavers['IN_TOUCH'].isin(code_list) | care_leavers['IN_TOUCH'].isna() validation_error_mask = ~mask validation_error_locations = care_leavers.index[validation_error_mask] return {'OC3': validation_error_locations.tolist()} return error, _validate def validate_120(): error = ErrorDefinition( code='120', description='The reason for the reversal of the decision that the child should be placed for adoption code is not valid.', affected_fields=['REASON_PLACED_CEASED'], ) def _validate(dfs): if 'PlacedAdoption' not in dfs: return {} placed_adoptions = dfs['PlacedAdoption'] code_list = ['RD1', 'RD2', 'RD3', 'RD4'] mask = placed_adoptions['REASON_PLACED_CEASED'].isin(code_list) | placed_adoptions[ 'REASON_PLACED_CEASED'].isna() validation_error_mask = ~mask validation_error_locations = placed_adoptions.index[validation_error_mask] return {'PlacedAdoption': validation_error_locations.tolist()} return error, _validate def validate_114(): error = ErrorDefinition( code='114', description='Data entry to record the status of former carer(s) of an adopted child is invalid.', affected_fields=['FOSTER_CARE'], ) def _validate(dfs): if 'AD1' not in dfs: return {} adoptions = dfs['AD1'] code_list = ['0', '1'] mask = adoptions['FOSTER_CARE'].astype(str).isin(code_list) | adoptions['FOSTER_CARE'].isna() validation_error_mask = ~mask validation_error_locations = adoptions.index[validation_error_mask] return {'AD1': validation_error_locations.tolist()} return error, _validate def validate_178(): error = ErrorDefinition( code='178', description='Placement provider code is not 
a valid code.', affected_fields=['PLACE_PROVIDER'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} episodes = dfs['Episodes'] code_list_placement_provider = ['PR0', 'PR1', 'PR2', 'PR3', 'PR4', 'PR5'] code_list_placement_with_no_provider = ['T0', 'T1', 'T2', 'T3', 'T4', 'Z1'] place_provider_needed_and_correct = episodes['PLACE_PROVIDER'].isin(code_list_placement_provider) & ~episodes[ 'PLACE'].isin(code_list_placement_with_no_provider) place_provider_not_provided = episodes['PLACE_PROVIDER'].isna() place_provider_not_needed = episodes['PLACE_PROVIDER'].isna() & episodes['PLACE'].isin( code_list_placement_with_no_provider) mask = place_provider_needed_and_correct | place_provider_not_provided | place_provider_not_needed validation_error_mask = ~mask validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_103(): error = ErrorDefinition( code='103', description='The ethnicity code is either not valid or has not been entered.', affected_fields=['ETHNIC'], ) def _validate(dfs): if 'Header' not in dfs: return {} header = dfs['Header'] code_list = [ 'WBRI', 'WIRI', 'WOTH', 'WIRT', 'WROM', 'MWBC', 'MWBA', 'MWAS', 'MOTH', 'AIND', 'APKN', 'ABAN', 'AOTH', 'BCRB', 'BAFR', 'BOTH', 'CHNE', 'OOTH', 'REFU', 'NOBT' ] mask = header['ETHNIC'].isin(code_list) validation_error_mask = ~mask validation_error_locations = header.index[validation_error_mask] return {'Header': validation_error_locations.tolist()} return error, _validate def validate_143(): error = ErrorDefinition( code='143', description='The reason for new episode code is not a valid code.', affected_fields=['RNE'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} episodes = dfs['Episodes'] code_list = ['S', 'P', 'L', 'T', 'U', 'B'] mask = episodes['RNE'].isin(code_list) | episodes['RNE'].isna() validation_error_mask = ~mask validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_144(): error = ErrorDefinition( code='144', description='The legal status code is not a valid code.', affected_fields=['LS'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} episodes = dfs['Episodes'] code_list = [ 'C1', 'C2', 'D1', 'E1', 'V2', 'V3', 'V4', 'J1', 'J2', 'J3', 'L1', 'L2', 'L3' ] mask = episodes['LS'].isin(code_list) | episodes['LS'].isna() validation_error_mask = ~mask validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_145(): error = ErrorDefinition( code='145', description='Category of need code is not a valid code.', affected_fields=['CIN'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} episodes = dfs['Episodes'] code_list = [ 'N1', 'N2', 'N3', 'N4', 'N5', 'N6', 'N7', 'N8', ] mask = episodes['CIN'].isin(code_list) | episodes['CIN'].isna() validation_error_mask = ~mask validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_146(): error = ErrorDefinition( code='146', description='Placement type code is not a valid code.', affected_fields=['PLACE'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} episodes = dfs['Episodes'] code_list = [ 'A3', 'A4', 'A5', 'A6', 'H5', 'K1', 'K2', 'P1', 'P2', 'P3', 'R1', 'R2', 'R3', 'R5', 'S1', 'T0', 'T1', 'T2', 'T3', 'T4', 'U1', 'U2', 'U3', 'U4', 'U5', 'U6', 
'Z1' ] mask = episodes['PLACE'].isin(code_list) | episodes['PLACE'].isna() validation_error_mask = ~mask validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_149(): error = ErrorDefinition( code='149', description='Reason episode ceased code is not valid. ', affected_fields=['REC'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} episodes = dfs['Episodes'] code_list = [ 'E11', 'E12', 'E2', 'E3', 'E4A', 'E4B', 'E13', 'E41', 'E45', 'E46', 'E47', 'E48', 'E5', 'E6', 'E7', 'E8', 'E9', 'E14', 'E15', 'E16', 'E17', 'X1' ] mask = episodes['REC'].isin(code_list) | episodes['REC'].isna() validation_error_mask = ~mask validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_167(): error = ErrorDefinition( code='167', description='Data entry for participation is invalid or blank.', affected_fields=['REVIEW_CODE'], ) def _validate(dfs): if 'Reviews' not in dfs: return {} review = dfs['Reviews'] code_list = ['PN0', 'PN1', 'PN2', 'PN3', 'PN4', 'PN5', 'PN6', 'PN7'] mask = review['REVIEW'].notna() & review['REVIEW_CODE'].isin(code_list) | review['REVIEW'].isna() & review[ 'REVIEW_CODE'].isna() validation_error_mask = ~mask validation_error_locations = review.index[validation_error_mask] return {'Reviews': validation_error_locations.tolist()} return error, _validate def validate_101(): error = ErrorDefinition( code='101', description='Gender code is not valid.', affected_fields=['SEX'], ) def _validate(dfs): if 'Header' not in dfs: return {} header = dfs['Header'] code_list = ['1', '2'] mask = header['SEX'].astype(str).isin(code_list) validation_error_mask = ~mask validation_error_locations = header.index[validation_error_mask] return {'Header': validation_error_locations.tolist()} return error, _validate def validate_141(): error = ErrorDefinition( code='141', description='Date episode began is not a valid date.', affected_fields=['DECOM'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] mask = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce').notna() na_location = episodes['DECOM'].isna() validation_error_mask = ~mask & ~na_location validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_147(): error = ErrorDefinition( code='147', description='Date episode ceased is not a valid date.', affected_fields=['DEC'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] mask = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce').notna() na_location = episodes['DEC'].isna() validation_error_mask = ~mask & ~na_location validation_error_locations = episodes.index[validation_error_mask] return {'Episodes': validation_error_locations.tolist()} return error, _validate def validate_171(): error = ErrorDefinition( code='171', description="Date of birth of mother's child is not a valid date.", affected_fields=['MC_DOB'], ) def _validate(dfs): if 'Header' not in dfs: return {} else: header = dfs['Header'] mask = pd.to_datetime(header['MC_DOB'], format='%d/%m/%Y', errors='coerce').notna() na_location = header['MC_DOB'].isna() validation_error_mask = ~mask & ~na_location validation_error_locations = header.index[validation_error_mask] return {'Header': validation_error_locations.tolist()} 
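        # Rules 141, 147 and 171 above share a pattern: errors='coerce' turns an unparseable date
        # string into NaT, and excluding na_location means a genuinely blank field is not flagged
        # by these format checks (contrast rule 102 below, where a blank DOB is also flagged
        # because its mask has no blank-field exclusion).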
return error, _validate def validate_102(): error = ErrorDefinition( code='102', description='Date of birth is not a valid date.', affected_fields=['DOB'], ) def _validate(dfs): if 'Header' not in dfs: return {} else: header = dfs['Header'] mask = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce').notna() validation_error_mask = ~mask validation_error_locations = header.index[validation_error_mask] return {'Header': validation_error_locations.tolist()} return error, _validate def validate_112(): error = ErrorDefinition( code='112', description='Date should be placed for adoption is not a valid date.', affected_fields=['DATE_INT'], ) def _validate(dfs): if 'AD1' not in dfs: return {} else: ad1 = dfs['AD1'] mask = pd.to_datetime(ad1['DATE_INT'], format='%d/%m/%Y', errors='coerce').notna() na_location = ad1['DATE_INT'].isna() validation_error_mask = ~mask & ~na_location validation_error_locations = ad1.index[validation_error_mask] return {'AD1': validation_error_locations.tolist()} return error, _validate def validate_115(): error = ErrorDefinition( code='115', description="Date of Local Authority's (LA) decision that a child should be placed for adoption is not a valid date.", affected_fields=['DATE_PLACED'], ) def _validate(dfs): if 'PlacedAdoption' not in dfs: return {} else: adopt = dfs['PlacedAdoption'] mask = pd.to_datetime(adopt['DATE_PLACED'], format='%d/%m/%Y', errors='coerce').notna() na_location = adopt['DATE_PLACED'].isna() validation_error_mask = ~mask & ~na_location validation_error_locations = adopt.index[validation_error_mask] return {'PlacedAdoption': validation_error_locations.tolist()} return error, _validate def validate_116(): error = ErrorDefinition( code='116', description="Date of Local Authority's (LA) decision that a child should no longer be placed for adoption is not a valid date.", affected_fields=['DATE_PLACED_CEASED'], ) def _validate(dfs): if 'PlacedAdoption' not in dfs: return {} else: adopt = dfs['PlacedAdoption'] mask = pd.to_datetime(adopt['DATE_PLACED_CEASED'], format='%d/%m/%Y', errors='coerce').notna() na_location = adopt['DATE_PLACED_CEASED'].isna() validation_error_mask = ~mask & ~na_location validation_error_locations = adopt.index[validation_error_mask] return {'PlacedAdoption': validation_error_locations.tolist()} return error, _validate def validate_392c(): error = ErrorDefinition( code='392c', description='Postcode(s) provided are invalid.', affected_fields=['HOME_POST', 'PL_POST'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: episodes = dfs['Episodes'] home_provided = episodes['HOME_POST'].notna() home_details = merge_postcodes(episodes, "HOME_POST") home_valid = home_details['pcd'].notna() pl_provided = episodes['PL_POST'].notna() pl_details = merge_postcodes(episodes, "PL_POST") pl_valid = pl_details['pcd'].notna() error_mask = (home_provided & ~home_valid) | (pl_provided & ~pl_valid) return {'Episodes': episodes.index[error_mask].tolist()} return error, _validate def validate_213(): error = ErrorDefinition( code='213', description='Placement provider information not required.', affected_fields=['PLACE_PROVIDER'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] mask = df['PLACE'].isin(['T0', 'T1', 'T2', 'T3', 'T4', 'Z1']) & df['PLACE_PROVIDER'].notna() return {'Episodes': df.index[mask].tolist()} return error, _validate def validate_168(): error = ErrorDefinition( code='168', description='Unique Pupil Number (UPN) is not valid. 
If unknown, default codes should be UN1, UN2, UN3, UN4 or UN5.', affected_fields=['UPN'], ) def _validate(dfs): if 'Header' not in dfs: return {} else: df = dfs['Header'] mask = df['UPN'].str.match(r'(^((?![IOS])[A-Z]){1}(\d{12}|\d{11}[A-Z]{1})$)|^(UN[1-5])$', na=False) mask = ~mask return {'Header': df.index[mask].tolist()} return error, _validate def validate_388(): error = ErrorDefinition( code='388', description='Reason episode ceased is coded new episode begins, but there is no continuation episode.', affected_fields=['REC'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] df['DECOM'] = pd.to_datetime(df['DECOM'], format='%d/%m/%Y', errors='coerce') df['DEC'] = pd.to_datetime(df['DEC'], format='%d/%m/%Y', errors='coerce') df['DECOM'] = df['DECOM'].fillna('01/01/1901') # Watch for potential future issues df = df.sort_values(['CHILD', 'DECOM']) df['DECOM_NEXT_EPISODE'] = df.groupby(['CHILD'])['DECOM'].shift(-1) # The max DECOM for each child is also the one with no next episode # And we also add the skipna option # grouped_decom_by_child = df.groupby(['CHILD'])['DECOM'].idxmax(skipna=True) no_next = df.DECOM_NEXT_EPISODE.isna() & df.CHILD.notna() # Dataframe with the maximum DECOM removed max_decom_removed = df[~no_next] # Dataframe with the maximum DECOM only max_decom_only = df[no_next] # Case 1: If reason episode ceased is coded X1 there must be a subsequent episode # starting on the same day. case1 = max_decom_removed[(max_decom_removed['REC'] == 'X1') & (max_decom_removed['DEC'].notna()) & (max_decom_removed['DECOM_NEXT_EPISODE'].notna()) & (max_decom_removed['DEC'] != max_decom_removed['DECOM_NEXT_EPISODE'])] # Case 2: If an episode ends but the child continues to be looked after, a new # episode should start on the same day.The reason episode ceased code of # the episode which ends must be X1. case2 = max_decom_removed[(max_decom_removed['REC'] != 'X1') & (max_decom_removed['REC'].notna()) & (max_decom_removed['DEC'].notna()) & (max_decom_removed['DECOM_NEXT_EPISODE'].notna()) & (max_decom_removed['DEC'] == max_decom_removed['DECOM_NEXT_EPISODE'])] # Case 3: If a child ceases to be looked after reason episode ceased code X1 must # not be used. 
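            # Illustrative example (hypothetical rows, not drawn from real data): a child's final
            # episode with DEC filled in and REC == 'X1' falls into case 3, while an earlier
            # episode whose REC == 'X1' but whose DEC does not equal the next episode's DECOM
            # falls into case 1.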
case3 = max_decom_only[(max_decom_only['DEC'].notna()) & (max_decom_only['REC'] == 'X1')] mask_case1 = case1.index.tolist() mask_case2 = case2.index.tolist() mask_case3 = case3.index.tolist() mask = mask_case1 + mask_case2 + mask_case3 mask.sort() return {'Episodes': mask} return error, _validate def validate_113(): error = ErrorDefinition( code='113', description='Date matching child and adopter(s) is not a valid date.', affected_fields=['DATE_MATCH'], ) def _validate(dfs): if 'AD1' not in dfs: return {} else: ad1 = dfs['AD1'] mask = pd.to_datetime(ad1['DATE_MATCH'], format='%d/%m/%Y', errors='coerce').notna() na_location = ad1['DATE_MATCH'].isna() validation_error_mask = ~mask & ~na_location validation_error_locations = ad1.index[validation_error_mask] return {'AD1': validation_error_locations.tolist()} return error, _validate def validate_134(): error = ErrorDefinition( code='134', description='Data on adoption should not be entered for the OC3 cohort.', affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM', 'DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR'], ) def _validate(dfs): if 'OC3' not in dfs or 'AD1' not in dfs: return {} else: oc3 = dfs['OC3'] ad1 = dfs['AD1'] ad1['ad1_index'] = ad1.index all_data = ad1.merge(oc3, how='left', on='CHILD') na_oc3_data = ( all_data['IN_TOUCH'].isna() & all_data['ACTIV'].isna() & all_data['ACCOM'].isna() ) na_ad1_data = ( all_data['DATE_INT'].isna() & all_data['DATE_MATCH'].isna() & all_data['FOSTER_CARE'].isna() & all_data['NB_ADOPTR'].isna() & all_data['SEX_ADOPTR'].isna() & all_data['LS_ADOPTR'].isna() ) validation_error = ~na_oc3_data & ~na_ad1_data validation_error_locations = all_data.loc[validation_error, 'ad1_index'].unique() return {'AD1': validation_error_locations.tolist()} return error, _validate def validate_119(): error = ErrorDefinition( code='119', description='If the decision is made that a child should no longer be placed for adoption, then the date of this decision and the reason why this decision was made must be completed.', affected_fields=['REASON_PLACED_CEASED', 'DATE_PLACED_CEASED'], ) def _validate(dfs): if 'PlacedAdoption' not in dfs: return {} else: adopt = dfs['PlacedAdoption'] na_placed_ceased = adopt['DATE_PLACED_CEASED'].isna() na_reason_ceased = adopt['REASON_PLACED_CEASED'].isna() validation_error = (na_placed_ceased & ~na_reason_ceased) | (~na_placed_ceased & na_reason_ceased) validation_error_locations = adopt.index[validation_error] return {'PlacedAdoption': validation_error_locations.tolist()} return error, _validate def validate_159(): error = ErrorDefinition( code='159', description='If a child has been recorded as not receiving an intervention for their substance misuse problem, then the additional item on whether an intervention was offered should be completed as well.', affected_fields=['SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'], ) def _validate(dfs): if 'OC2' not in dfs: return {} else: oc2 = dfs['OC2'] mask1 = oc2['SUBSTANCE_MISUSE'].astype(str) == '1' mask2 = oc2['INTERVENTION_RECEIVED'].astype(str) == '0' mask3 = oc2['INTERVENTION_OFFERED'].isna() validation_error = mask1 & mask2 & mask3 validation_error_locations = oc2.index[validation_error] return {'OC2': validation_error_locations.tolist()} return error, _validate def validate_142(): error = ErrorDefinition( code='142', description='A new episode has started, but the previous episode has not ended.', affected_fields=['DEC', 'REC'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = 
dfs['Episodes'] df['DECOM'] = pd.to_datetime(df['DECOM'], format='%d/%m/%Y', errors='coerce') df['DEC'] = pd.to_datetime(df['DEC'], format='%d/%m/%Y', errors='coerce') df['DECOM'] = df['DECOM'].fillna('01/01/1901') # Watch for potential future issues df['DECOM'] = df['DECOM'].replace('01/01/1901', pd.NA) last_episodes = df.sort_values('DECOM').reset_index().groupby(['CHILD'])['index'].last() ended_episodes_df = df.loc[~df.index.isin(last_episodes)] ended_episodes_df = ended_episodes_df[(ended_episodes_df['DEC'].isna() | ended_episodes_df['REC'].isna()) & ended_episodes_df['CHILD'].notna() & ended_episodes_df[ 'DECOM'].notna()] mask = ended_episodes_df.index.tolist() return {'Episodes': mask} return error, _validate def validate_148(): error = ErrorDefinition( code='148', description='Date episode ceased and reason episode ceased must both be coded, or both left blank.', affected_fields=['DEC', 'REC'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] df['DEC'] = pd.to_datetime(df['DEC'], format='%d/%m/%Y', errors='coerce') mask = ((df['DEC'].isna()) & (df['REC'].notna())) | ((df['DEC'].notna()) & (df['REC'].isna())) return {'Episodes': df.index[mask].tolist()} return error, _validate def validate_151(): error = ErrorDefinition( code='151', description='All data items relating to a childs adoption must be coded or left blank.', affected_fields=['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTER', 'SEX_ADOPTR', 'LS_ADOPTR'], ) def _validate(dfs): if 'AD1' not in dfs: return {} else: ad1 = dfs['AD1'] na_date_int = ad1['DATE_INT'].isna() na_date_match = ad1['DATE_MATCH'].isna() na_foster_care = ad1['FOSTER_CARE'].isna() na_nb_adoptr = ad1['NB_ADOPTR'].isna() na_sex_adoptr = ad1['SEX_ADOPTR'].isna() na_lsadoptr = ad1['LS_ADOPTR'].isna() ad1_not_null = ( ~na_date_int & ~na_date_match & ~na_foster_care & ~na_nb_adoptr & ~na_sex_adoptr & ~na_lsadoptr) validation_error = ( ~na_date_int | ~na_date_match | ~na_foster_care | ~na_nb_adoptr | ~na_sex_adoptr | ~na_lsadoptr) & ~ad1_not_null validation_error_locations = ad1.index[validation_error] return {'AD1': validation_error_locations.tolist()} return error, _validate def validate_182(): error = ErrorDefinition( code='182', description='Data entries on immunisations, teeth checks, health assessments and substance misuse problem identified should be completed or all OC2 fields should be left blank.', affected_fields=['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'CONVICTED', 'HEALTH_CHECK', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'], ) def _validate(dfs): if 'OC2' not in dfs: return {} else: oc2 = dfs['OC2'] mask1 = ( oc2['IMMUNISATIONS'].isna() | oc2['TEETH_CHECK'].isna() | oc2['HEALTH_ASSESSMENT'].isna() | oc2['SUBSTANCE_MISUSE'].isna() ) mask2 = ( oc2['CONVICTED'].isna() & oc2['HEALTH_CHECK'].isna() & oc2['INTERVENTION_RECEIVED'].isna() & oc2['INTERVENTION_OFFERED'].isna() ) validation_error = mask1 & ~mask2 validation_error_locations = oc2.index[validation_error] return {'OC2': validation_error_locations.tolist()} return error, _validate def validate_214(): error = ErrorDefinition( code='214', description='Placement location information not required.', affected_fields=['PL_POST', 'URN'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] mask = df['LS'].isin(['V3', 'V4']) & ((df['PL_POST'].notna()) | (df['URN'].notna())) return {'Episodes': df.index[mask].tolist()} return error, _validate def validate_222(): error = ErrorDefinition( code='222', 
description='Ofsted Unique reference number (URN) should not be recorded for this placement type.', affected_fields=['URN'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] place_code_list = ['H5', 'P1', 'P2', 'P3', 'R1', 'R2', 'R5', 'T0', 'T1', 'T2', 'T3', 'T4', 'Z1'] mask = (df['PLACE'].isin(place_code_list)) & (df['URN'].notna()) & (df['URN'] != 'XXXXXX') return {'Episodes': df.index[mask].tolist()} return error, _validate def validate_366(): error = ErrorDefinition( code='366', description='A child cannot change placement during the course of an individual short-term respite break.', affected_fields=['RNE'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] mask = (df['LS'] == 'V3') & (df['RNE'] != 'S') return {'Episodes': df.index[mask].tolist()} return error, _validate def validate_628(): error = ErrorDefinition( code='628', description='Motherhood details are not required for care leavers who have not been looked after during the year.', affected_fields=['MOTHER'], ) def _validate(dfs): if 'Episodes' not in dfs or 'Header' not in dfs or 'OC3' not in dfs: return {} else: hea = dfs['Header'] epi = dfs['Episodes'] oc3 = dfs['OC3'] hea = hea.reset_index() oc3_no_nulls = oc3[oc3[['IN_TOUCH', 'ACTIV', 'ACCOM']].notna().any(axis=1)] hea_merge_epi = hea.merge(epi, how='left', on='CHILD', indicator=True) hea_not_in_epi = hea_merge_epi[hea_merge_epi['_merge'] == 'left_only'] cohort_to_check = hea_not_in_epi.merge(oc3_no_nulls, how='inner', on='CHILD') error_cohort = cohort_to_check[cohort_to_check['MOTHER'].notna()] error_list = list(set(error_cohort['index'].to_list())) error_list.sort() return {'Header': error_list} return error, _validate def validate_164(): error = ErrorDefinition( code='164', description='Distance is not valid. Please check a valid postcode has been entered.', affected_fields=['PL_DISTANCE'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] is_short_term = df['LS'].isin(['V3', 'V4']) distance = pd.to_numeric(df['PL_DISTANCE'], errors='coerce') # Use a bit of tolerance in these bounds distance_valid = distance.gt(-0.2) & distance.lt(1001.0) mask = ~is_short_term & ~distance_valid return {'Episodes': df.index[mask].tolist()} return error, _validate def validate_169(): error = ErrorDefinition( code='169', description='Local Authority (LA) of placement is not valid or is missing. 
Please check a valid postcode has been entered.', affected_fields=['PL_LA'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] is_short_term = df['LS'].isin(['V3', 'V4']) # Because PL_LA is derived, it will always be valid if present mask = ~is_short_term & df['PL_LA'].isna() return {'Episodes': df.index[mask].tolist()} return error, _validate def validate_179(): error = ErrorDefinition( code='179', description='Placement location code is not a valid code.', affected_fields=['PL_LOCATION'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] is_short_term = df['LS'].isin(['V3', 'V4']) # Because PL_LOCATION is derived, it will always be valid if present mask = ~is_short_term & df['PL_LOCATION'].isna() return {'Episodes': df.index[mask].tolist()} return error, _validate def validate_1015(): error = ErrorDefinition( code='1015', description='Placement provider is own provision but child not placed in own LA.', affected_fields=['PL_LA'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] local_authority = dfs['metadata']['localAuthority'] placement_fostering_or_adoption = df['PLACE'].isin([ 'A3', 'A4', 'A5', 'A6', 'U1', 'U2', 'U3', 'U4', 'U5', 'U6', ]) own_provision = df['PLACE_PROVIDER'].eq('PR1') is_short_term = df['LS'].isin(['V3', 'V4']) is_pl_la = df['PL_LA'].eq(local_authority) checked_episodes = ~placement_fostering_or_adoption & ~is_short_term & own_provision checked_episodes = checked_episodes & df['LS'].notna() & df['PLACE'].notna() mask = checked_episodes & ~is_pl_la return {'Episodes': df.index[mask].tolist()} return error, _validate def validate_411(): error = ErrorDefinition( code='411', description='Placement location code disagrees with LA of placement.', affected_fields=['PL_LOCATION'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] local_authority = dfs['metadata']['localAuthority'] mask = df['PL_LOCATION'].eq('IN') & df['PL_LA'].ne(local_authority) return {'Episodes': df.index[mask].tolist()} return error, _validate def validate_420(): error = ErrorDefinition( code='420', description='LA of placement completed but child is looked after under legal status V3 or V4.', affected_fields=['PL_LA'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] is_short_term = df['LS'].isin(['V3', 'V4']) mask = is_short_term & df['PL_LA'].notna() return {'Episodes': df.index[mask].tolist()} return error, _validate def validate_355(): error = ErrorDefinition( code='355', description='Episode appears to have lasted for less than 24 hours', affected_fields=['DECOM', 'DEC'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: df = dfs['Episodes'] mask = df['DECOM'].astype(str) == df['DEC'].astype(str) return {'Episodes': df.index[mask].tolist()} return error, _validate def validate_586(): error = ErrorDefinition( code='586', description='Dates of missing periods are before child’s date of birth.', affected_fields=['MIS_START'], ) def _validate(dfs): if 'Missing' not in dfs: return {} else: df = dfs['Missing'] df['DOB'] = pd.to_datetime(df['DOB'], format='%d/%m/%Y', errors='coerce') df['MIS_START'] = pd.to_datetime(df['MIS_START'], format='%d/%m/%Y', errors='coerce') error_mask = df['MIS_START'].notna() & (df['MIS_START'] <= df['DOB']) return {'Missing': df.index[error_mask].to_list()} return error, _validate def validate_630(): error = ErrorDefinition( code='630', description='Information on previous permanence 
option should be returned.', affected_fields=['RNE'], ) def _validate(dfs): if 'PrevPerm' not in dfs or 'Episodes' not in dfs: return {} else: epi = dfs['Episodes'] pre = dfs['PrevPerm'] epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce') collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce') epi = epi.reset_index() # Form the episode dataframe which has an 'RNE' of 'S' in this financial year epi_has_rne_of_S_in_year = epi[(epi['RNE'] == 'S') & (epi['DECOM'] >= collection_start)] # Merge to see # 1) which CHILD ids are missing from the PrevPerm file # 2) which CHILD are in the prevPerm file, but don't have the LA_PERM/DATE_PERM field completed where they should be # 3) which CHILD are in the PrevPerm file, but don't have the PREV_PERM field completed. merged_epi_preperm = epi_has_rne_of_S_in_year.merge(pre, on='CHILD', how='left', indicator=True) error_not_in_preperm = merged_epi_preperm['_merge'] == 'left_only' error_wrong_values_in_preperm = (merged_epi_preperm['PREV_PERM'] != 'Z1') & ( merged_epi_preperm[['LA_PERM', 'DATE_PERM']].isna().any(axis=1)) error_null_prev_perm = (merged_epi_preperm['_merge'] == 'both') & (merged_epi_preperm['PREV_PERM'].isna()) error_mask = error_not_in_preperm | error_wrong_values_in_preperm | error_null_prev_perm error_list = merged_epi_preperm[error_mask]['index'].to_list() error_list = list(set(error_list)) error_list.sort() return {'Episodes': error_list} return error, _validate def validate_501(): error = ErrorDefinition( code='501', description='A new episode has started before the end date of the previous episode.', affected_fields=['DECOM', 'DEC'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: epi = dfs['Episodes'] epi = epi.reset_index() epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce') epi['DEC'] = pd.to_datetime(epi['DEC'], format='%d/%m/%Y', errors='coerce') epi = epi.sort_values(['CHILD', 'DECOM']) epi_lead = epi.shift(1) epi_lead = epi_lead.reset_index() m_epi = epi.merge(epi_lead, left_on='index', right_on='level_0', suffixes=('', '_prev')) error_cohort = m_epi[(m_epi['CHILD'] == m_epi['CHILD_prev']) & (m_epi['DECOM'] < m_epi['DEC_prev'])] error_list = error_cohort['index'].to_list() error_list.sort() return {'Episodes': error_list} return error, _validate def validate_502(): error = ErrorDefinition( code='502', description='Last year’s record ended with an open episode. 
The date on which that episode started does not match the start date of the first episode on this year’s record.', affected_fields=['DECOM'], ) def _validate(dfs): if 'Episodes' not in dfs or 'Episodes_last' not in dfs: return {} else: epi = dfs['Episodes'] epi_last = dfs['Episodes_last'] epi = epi.reset_index() epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce') epi_last['DECOM'] = pd.to_datetime(epi_last['DECOM'], format='%d/%m/%Y', errors='coerce') epi_last_no_dec = epi_last[epi_last['DEC'].isna()] epi_min_decoms_index = epi[['CHILD', 'DECOM']].groupby(['CHILD'])['DECOM'].idxmin() epi_min_decom_df = epi.loc[epi_min_decoms_index, :] merged_episodes = epi_min_decom_df.merge(epi_last_no_dec, on='CHILD', how='inner') error_cohort = merged_episodes[merged_episodes['DECOM_x'] != merged_episodes['DECOM_y']] error_list = error_cohort['index'].to_list() error_list = list(set(error_list)) error_list.sort() return {'Episodes': error_list} return error, _validate def validate_153(): error = ErrorDefinition( code='153', description="All data items relating to a child's activity or accommodation after leaving care must be coded or left blank.", affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'], ) def _validate(dfs): if 'OC3' not in dfs: return {} oc3 = dfs['OC3'] oc3_not_na = ( oc3['IN_TOUCH'].notna() & oc3['ACTIV'].notna() & oc3['ACCOM'].notna() ) oc3_all_na = ( oc3['IN_TOUCH'].isna() & oc3['ACTIV'].isna() & oc3['ACCOM'].isna() ) validation_error = ~oc3_not_na & ~oc3_all_na validation_error_locations = oc3.index[validation_error] return {'OC3': validation_error_locations.to_list()} return error, _validate def validate_166(): error = ErrorDefinition( code='166', description="Date of review is invalid or blank.", affected_fields=['REVIEW'], ) def _validate(dfs): if 'Reviews' not in dfs: return {} else: review = dfs['Reviews'] error_mask = pd.to_datetime(review['REVIEW'], format='%d/%m/%Y', errors='coerce').isna() validation_error_locations = review.index[error_mask] return {'Reviews': validation_error_locations.to_list()} return error, _validate def validate_174(): error = ErrorDefinition( code='174', description="Mother's child date of birth is recorded but gender shows that the child is a male.", affected_fields=['SEX', 'MC_DOB'], ) def _validate(dfs): if 'Header' not in dfs: return {} else: header = dfs['Header'] child_is_male = header['SEX'].astype(str) == '1' mc_dob_recorded = header['MC_DOB'].notna() error_mask = child_is_male & mc_dob_recorded validation_error_locations = header.index[error_mask] return {'Header': validation_error_locations.to_list()} return error, _validate def validate_180(): error = ErrorDefinition( code='180', description="Data entry for the strengths and difficulties questionnaire (SDQ) score is invalid.", affected_fields=['SDQ_SCORE'], ) def _validate(dfs): if 'OC2' not in dfs: return {} else: oc2 = dfs['OC2'] oc2['SDQ_SCORE'] = pd.to_numeric(oc2['SDQ_SCORE'], errors='coerce') error_mask = oc2['SDQ_SCORE'].notna() & ~oc2['SDQ_SCORE'].isin(range(41)) validation_error_locations = oc2.index[error_mask] return {'OC2': validation_error_locations.to_list()} return error, _validate def validate_181(): error = ErrorDefinition( code='181', description="Data items relating to children looked after continuously for 12 months should be completed with a 0 or 1.", affected_fields=['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'], ) def _validate(dfs): if 'OC2' 
not in dfs: return {} else: oc2 = dfs['OC2'] code_list = ['0', '1'] fields_of_interest = ['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'] error_mask = ( oc2[fields_of_interest].notna() & ~oc2[fields_of_interest].astype(str).isin(['0', '1']) ).any(axis=1) validation_error_locations = oc2.index[error_mask] return {'OC2': validation_error_locations.tolist()} return error, _validate def validate_192(): error = ErrorDefinition( code='192', description="Child has been identified as having a substance misuse problem but the additional item on whether an intervention was received has been left blank.", affected_fields=['SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED'], ) def _validate(dfs): if 'OC2' not in dfs: return {} else: oc2 = dfs['OC2'] misuse = oc2['SUBSTANCE_MISUSE'].astype(str) == '1' intervention_blank = oc2['INTERVENTION_RECEIVED'].isna() error_mask = misuse & intervention_blank validation_error_locations = oc2.index[error_mask] return {'OC2': validation_error_locations.to_list()} return error, _validate def validate_193(): error = ErrorDefinition( code='193', description="Child not identified as having a substance misuse problem but at least one of the two additional items on whether an intervention were offered and received have been completed.", affected_fields=['SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'], ) def _validate(dfs): if 'OC2' not in dfs: return {} else: oc2 = dfs['OC2'] no_substance_misuse = oc2['SUBSTANCE_MISUSE'].isna() | (oc2['SUBSTANCE_MISUSE'].astype(str) == '0') intervention_not_blank = oc2['INTERVENTION_RECEIVED'].notna() | oc2['INTERVENTION_OFFERED'].notna() error_mask = no_substance_misuse & intervention_not_blank validation_error_locations = oc2.index[error_mask] return {'OC2': validation_error_locations.tolist()} return error, _validate def validate_197a(): error = ErrorDefinition( code='197a', description="Reason for no Strengths and Difficulties (SDQ) score is not required if Strengths and Difficulties Questionnaire score is filled in.", affected_fields=['SDQ_SCORE', 'SDQ_REASON'], ) def _validate(dfs): if 'OC2' not in dfs: return {} else: oc2 = dfs['OC2'] sdq_filled_in = oc2['SDQ_SCORE'].notna() reason_filled_in = oc2['SDQ_REASON'].notna() error_mask = sdq_filled_in & reason_filled_in validation_error_locations = oc2.index[error_mask] return {'OC2': validation_error_locations.tolist()} return error, _validate def validate_567(): error = ErrorDefinition( code='567', description='The date that the missing episode or episode that the child was away from placement without authorisation ended is before the date that it started.', affected_fields=['MIS_START', 'MIS_END'], ) def _validate(dfs): if 'Missing' not in dfs: return {} else: mis = dfs['Missing'] mis['MIS_START'] = pd.to_datetime(mis['MIS_START'], format='%d/%m/%Y', errors='coerce') mis['MIS_END'] = pd.to_datetime(mis['MIS_END'], format='%d/%m/%Y', errors='coerce') mis_error = mis[mis['MIS_START'] > mis['MIS_END']] return {'Missing': mis_error.index.to_list()} return error, _validate def validate_304(): error = ErrorDefinition( code='304', description='Date unaccompanied asylum-seeking child (UASC) status ceased must be on or before the 18th birthday of a child.', affected_fields=['DUC'], ) def _validate(dfs): if 'UASC' not in dfs: return {} else: uasc = dfs['UASC'] uasc['DOB'] = pd.to_datetime(uasc['DOB'], format='%d/%m/%Y', errors='coerce') uasc['DUC'] = pd.to_datetime(uasc['DUC'], 
format='%d/%m/%Y', errors='coerce') mask = uasc['DUC'].notna() & (uasc['DUC'] > uasc['DOB'] + pd.offsets.DateOffset(years=18)) return {'UASC': uasc.index[mask].to_list()} return error, _validate def validate_333(): error = ErrorDefinition( code='333', description='Date should be placed for adoption must be on or prior to the date of matching child with adopter(s).', affected_fields=['DATE_INT'], ) def _validate(dfs): if 'AD1' not in dfs: return {} else: adt = dfs['AD1'] adt['DATE_MATCH'] = pd.to_datetime(adt['DATE_MATCH'], format='%d/%m/%Y', errors='coerce') adt['DATE_INT'] = pd.to_datetime(adt['DATE_INT'], format='%d/%m/%Y', errors='coerce') # If <DATE_MATCH> provided, then <DATE_INT> must also be provided and be <= <DATE_MATCH> mask1 = adt['DATE_MATCH'].notna() & adt['DATE_INT'].isna() mask2 = adt['DATE_MATCH'].notna() & adt['DATE_INT'].notna() & (adt['DATE_INT'] > adt['DATE_MATCH']) mask = mask1 | mask2 return {'AD1': adt.index[mask].to_list()} return error, _validate def validate_1011(): error = ErrorDefinition( code='1011', description='This child is recorded as having his/her care transferred to another local authority for the final episode and therefore should not have the care leaver information completed.', affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'], ) def _validate(dfs): if 'OC3' not in dfs or 'Episodes' not in dfs: return {} else: epi = dfs['Episodes'] oc3 = dfs['OC3'] epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce') # If final <REC> = 'E3' then <IN_TOUCH>; <ACTIV> and <ACCOM> should not be provided epi.sort_values(['CHILD', 'DECOM'], inplace=True) grouped_decom_by_child = epi.groupby(['CHILD'])['DECOM'].idxmax(skipna=True) max_decom_only = epi.loc[epi.index.isin(grouped_decom_by_child), :] E3_is_last = max_decom_only[max_decom_only['REC'] == 'E3'] oc3.reset_index(inplace=True) cohort_to_check = oc3.merge(E3_is_last, on='CHILD', how='inner') error_mask = cohort_to_check[['IN_TOUCH', 'ACTIV', 'ACCOM']].notna().any(axis=1) error_list = cohort_to_check['index'][error_mask].to_list() error_list = list(set(error_list)) error_list.sort() return {'OC3': error_list} return error, _validate def validate_574(): error = ErrorDefinition( code='574', description='A new missing/away from placement without authorisation period cannot start when the previous missing/away from placement without authorisation period is still open. Missing/away from placement without authorisation periods should also not overlap.', affected_fields=['MIS_START', 'MIS_END'], ) def _validate(dfs): if 'Missing' not in dfs: return {} else: mis = dfs['Missing'] mis['MIS_START'] = pd.to_datetime(mis['MIS_START'], format='%d/%m/%Y', errors='coerce') mis['MIS_END'] = pd.to_datetime(mis['MIS_END'], format='%d/%m/%Y', errors='coerce') mis.sort_values(['CHILD', 'MIS_START'], inplace=True) mis.reset_index(inplace=True) mis.reset_index(inplace=True) # Twice on purpose mis['LAG_INDEX'] = mis['level_0'].shift(-1) lag_mis = mis.merge(mis, how='inner', left_on='level_0', right_on='LAG_INDEX', suffixes=['', '_PREV']) # We're only interested in cases where there is more than one row for a child. 
lag_mis = lag_mis[lag_mis['CHILD'] == lag_mis['CHILD_PREV']] # A previous MIS_END date is null mask1 = lag_mis['MIS_END_PREV'].isna() # MIS_START is before previous MIS_END (overlapping dates) mask2 = lag_mis['MIS_START'] < lag_mis['MIS_END_PREV'] mask = mask1 | mask2 error_list = lag_mis['index'][mask].to_list() error_list.sort() return {'Missing': error_list} return error, _validate def validate_564(): error = ErrorDefinition( code='564', description='Child was missing or away from placement without authorisation and the date started is blank.', affected_fields=['MISSING', 'MIS_START'], ) def _validate(dfs): if 'Missing' not in dfs: return {} else: mis = dfs['Missing'] error_mask = mis['MISSING'].isin(['M', 'A', 'm', 'a']) & mis['MIS_START'].isna() return {'Missing': mis.index[error_mask].to_list()} return error, _validate def validate_566(): error = ErrorDefinition( code='566', description='The date that the child' + chr( 39) + 's episode of being missing or away from placement without authorisation ended has been completed but whether the child was missing or away without authorisation has not been completed.', affected_fields=['MISSING', 'MIS_END'], ) def _validate(dfs): if 'Missing' not in dfs: return {} else: mis = dfs['Missing'] error_mask = mis['MISSING'].isna() & mis['MIS_END'].notna() return {'Missing': mis.index[error_mask].to_list()} return error, _validate def validate_436(): error = ErrorDefinition( code='436', description='Reason for new episode is that both child’s placement and legal status have changed, but this is not reflected in the episode data.', affected_fields=['RNE', 'LS', 'PLACE', 'PL_POST', 'URN', 'PLACE_PROVIDER'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: epi = dfs['Episodes'] epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce') epi.sort_values(['CHILD', 'DECOM'], inplace=True) epi.reset_index(inplace=True) epi.reset_index(inplace=True) epi['LAG_INDEX'] = epi['level_0'].shift(-1) epi.fillna(value={"LS": '*', "PLACE": '*', "PL_POST": '*', "URN": '*', "PLACE_PROVIDER": '*'}, inplace=True) epi_merge = epi.merge(epi, how='inner', left_on='level_0', right_on='LAG_INDEX', suffixes=['', '_PRE']) epi_multi_row = epi_merge[epi_merge['CHILD'] == epi_merge['CHILD_PRE']] epi_has_B_U = epi_multi_row[epi_multi_row['RNE'].isin(['U', 'B'])] mask_ls = epi_has_B_U['LS'] == epi_has_B_U['LS_PRE'] mask1 = epi_has_B_U['PLACE'] == epi_has_B_U['PLACE_PRE'] mask2 = epi_has_B_U['PL_POST'] == epi_has_B_U['PL_POST_PRE'] mask3 = epi_has_B_U['URN'] == epi_has_B_U['URN_PRE'] mask4 = epi_has_B_U['PLACE_PROVIDER'] == epi_has_B_U['PLACE_PROVIDER_PRE'] error_mask = mask_ls | (mask1 & mask2 & mask3 & mask4) error_list = epi_has_B_U[error_mask]['index'].to_list() error_list.sort() return {'Episodes': error_list} return error, _validate def validate_570(): error = ErrorDefinition( code='570', description='The date that the child started to be missing or away from placement without authorisation is after the end of the collection year.', affected_fields=['MIS_START'], ) def _validate(dfs): if 'Missing' not in dfs: return {} else: mis = dfs['Missing'] collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce') mis['MIS_START'] = pd.to_datetime(mis['MIS_START'], format='%d/%m/%Y', errors='coerce') error_mask = mis['MIS_START'] > collection_end return {'Missing': mis.index[error_mask].to_list()} return error, _validate def validate_531(): error = ErrorDefinition( code='531', description='A placement provider 
code of PR5 cannot be associated with placements P1.', affected_fields=['PLACE', 'PLACE_PROVIDER'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: epi = dfs['Episodes'] error_mask = (epi['PLACE'] == 'P1') & (epi['PLACE_PROVIDER'] == 'PR5') return {'Episodes': epi.index[error_mask].to_list()} return error, _validate def validate_542(): error = ErrorDefinition( code='542', description='A child aged under 10 at 31 March should not have conviction information completed.', affected_fields=['CONVICTED'], ) def _validate(dfs): if 'OC2' not in dfs: return {} else: oc2 = dfs['OC2'] oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce') collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce') error_mask = (oc2['DOB'] + pd.offsets.DateOffset(years=10) > collection_end) & oc2['CONVICTED'].notna() return {'OC2': oc2.index[error_mask].to_list()} return error, _validate def validate_620(): error = ErrorDefinition( code='620', description='Child has been recorded as a mother, but date of birth shows that the mother is under 11 years of age.', affected_fields=['DOB', 'MOTHER'], ) def _validate(dfs): if 'Header' not in dfs: return {} else: hea = dfs['Header'] collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce') hea['DOB'] = pd.to_datetime(hea['DOB'], format='%d/%m/%Y', errors='coerce') hea_mother = hea[hea['MOTHER'].astype(str) == '1'] error_cohort = (hea_mother['DOB'] + pd.offsets.DateOffset(years=11)) > collection_start return {'Header': hea_mother.index[error_cohort].to_list()} return error, _validate def validate_225(): error = ErrorDefinition( code='225', description='Reason for placement change must be recorded.', affected_fields=['REASON_PLACE_CHANGE'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: epi = dfs['Episodes'] epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce') epi.sort_values(['CHILD', 'DECOM'], inplace=True) epi.reset_index(inplace=True) epi.reset_index(inplace=True) epi['LAG_INDEX'] = epi['level_0'].shift(1) m_epi = epi.merge(epi, how='inner', left_on='level_0', right_on='LAG_INDEX', suffixes=['', '_NEXT']) m_epi = m_epi[m_epi['CHILD'] == m_epi['CHILD_NEXT']] mask_is_X1 = m_epi['REC'] == 'X1' mask_null_place_chg = m_epi['REASON_PLACE_CHANGE'].isna() mask_place_not_T = ~m_epi['PLACE'].isin(['T0', 'T1', 'T2', 'T3', 'T4']) mask_next_is_PBTU = m_epi['RNE_NEXT'].isin(['P', 'B', 'T', 'U']) mask_next_place_not_T = ~m_epi['PLACE_NEXT'].isin(['T0', 'T1', 'T2', 'T3', 'T4']) error_mask = mask_is_X1 & mask_null_place_chg & mask_place_not_T & mask_next_is_PBTU & mask_next_place_not_T error_list = m_epi['index'][error_mask].to_list() return {'Episodes': error_list} return error, _validate def validate_353(): error = ErrorDefinition( code='353', description='No episode submitted can start before 14 October 1991.', affected_fields=['DECOM'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: epi = dfs['Episodes'] epi['DECOM'] = pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce') min_decom_allowed = pd.to_datetime('14/10/1991', format='%d/%m/%Y', errors='coerce') error_mask = epi['DECOM'] < min_decom_allowed return {'Episodes': epi.index[error_mask].to_list()} return error, _validate def validate_528(): error = ErrorDefinition( code='528', description='A placement provider code of PR2 cannot be associated with placements P1, R2 or R5.', affected_fields=['PLACE', 'PLACE_PROVIDER'], ) def 
_validate(dfs): if 'Episodes' not in dfs: return {} else: epi = dfs['Episodes'] error_mask = (epi['PLACE'].isin(['P1', 'R2', 'R5'])) & (epi['PLACE_PROVIDER'] == 'PR2') return {'Episodes': epi.index[error_mask].to_list()} return error, _validate def validate_527(): error = ErrorDefinition( code='527', description='A placement provider code of PR1 cannot be associated with placements P1, R2 or R5.', affected_fields=['PLACE', 'PLACE_PROVIDER'], ) def _validate(dfs): if 'Episodes' not in dfs: return {} else: epi = dfs['Episodes'] error_mask = (epi['PLACE'].isin(['P1', 'R2', 'R5'])) & (epi['PLACE_PROVIDER'] == 'PR1') return {'Episodes': epi.index[error_mask].to_list()} return error, _validate def validate_359(): error = ErrorDefinition( code='359', description='Child being looked after following 18th birthday must be accommodated under section 20(5) of the Children Act 1989 in a community home.', affected_fields=['DEC', 'LS', 'PLACE'], ) def _validate(dfs): if 'Episodes' not in dfs or 'Header' not in dfs: return {} else: epi = dfs['Episodes'] hea = dfs['Header'] hea['DOB'] = pd.to_datetime(hea['DOB'], format='%d/%m/%Y', errors='coerce') collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce') epi.reset_index(inplace=True) epi = epi.merge(hea, on='CHILD', how='left', suffixes=['', '_HEA']) mask_older_18 = (epi['DOB'] + pd.offsets.DateOffset(years=18)) < collection_end mask_null_dec = epi['DEC'].isna() mask_is_V2_K2 = (epi['LS'] == 'V2') & (epi['PLACE'] == 'K2') error_mask = mask_older_18 & mask_null_dec & ~mask_is_V2_K2 error_list = epi['index'][error_mask].to_list() error_list = list(set(error_list)) return {'Episodes': error_list} return error, _validate def validate_562(): error = ErrorDefinition( code='562', description='Episode commenced before the start of the current collection year but there is a missing continuous episode in the previous year.', affected_fields=['DECOM'], ) def _validate(dfs): if 'Episodes' not in dfs or 'Episodes_last' not in dfs: return {} else: epi = dfs['Episodes'] epi_last = dfs['Episodes_last'] epi['DECOM'] =
pd.to_datetime(epi['DECOM'], format='%d/%m/%Y', errors='coerce')
pandas.to_datetime
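The completion for this row is the date-parsing idiom that the validation rules above repeat throughout: pd.to_datetime(..., format='%d/%m/%Y', errors='coerce') turns malformed dates into NaT, and NaT compares as False, so unparseable rows silently drop out of comparison-based masks. A minimal, self-contained sketch of that behaviour, patterned on the rule for code 353 above; the CHILD/DECOM values are made up for illustration:

import pandas as pd

epi = pd.DataFrame({
    "CHILD": ["101", "102", "103"],
    "DECOM": ["01/04/2020", "31/02/2020", "13/10/1985"],  # second value is not a real date
})

# Invalid dates become NaT instead of raising.
epi["DECOM"] = pd.to_datetime(epi["DECOM"], format="%d/%m/%Y", errors="coerce")
min_decom_allowed = pd.to_datetime("14/10/1991", format="%d/%m/%Y", errors="coerce")

# NaT rows evaluate False in the comparison, so only the genuinely early episode is flagged.
error_mask = epi["DECOM"] < min_decom_allowed
print(epi.index[error_mask].to_list())  # [2]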
import sys import pandas as pd import numpy as np import time from utils import com2sys """Parse ZEV expression data in tall format into expression matrix: gene x tf (or sample). Args: dirpath - Path to ZEV data directory. time_point - Time point of which ZEV induction was measured. fc_type - Type of fold change data. Choose from ['cleaned', 'shrunken', 'prepert'] Example: ## Shrunken data at 15min python3 HELPER_SCRIPTS/parse_zev_expr_matrix.py RESOURCES/Yeast_ZEV_IDEA/ 15 shrunken RESOURCES/Yeast_genome/orf_name_conversion.tab ## Pre-perturbation data at 0min (log2 ratio of red/green channels) python3 HELPER_SCRIPTS/parse_zev_expr_matrix.py RESOURCES/Yeast_ZEV_IDEA/ 0 prepert RESOURCES/Yeast_genome/orf_name_conversion.tab ## Pre-perturbation data at 0min (Red channel) python3 HELPER_SCRIPTS/parse_zev_expr_matrix.py RESOURCES/Yeast_ZEV_IDEA/ 0 prepertRed RESOURCES/Yeast_genome/orf_name_conversion.tab """ TF_BLACKLIST = ['Z3EV'] RESTRICTION = 'P' FC_DICT = { 'cleaned': ([10], 'ratio'), 'shrunken': ([14], 'timecourses'), 'prepert': [[7, 8]], 'prepertRed': [[8]]} ## Input args dirpath = sys.argv[1] time_point = int(sys.argv[2]) fc_type = sys.argv[3] if fc_type not in FC_DICT: sys.exit('{} not in {}'.format(fc_type, FC_DICT)) gene_table_filepath = sys.argv[4] if len(sys.argv) > 4 else None ## Load dataframe t0 = time.time() df = pd.read_csv( '{}/idea_tall_expression_data.tsv'.format(dirpath), sep='\t', usecols=list(range(7)) + FC_DICT[fc_type][0]) t1 = time.time() print('Elapsed loading time = {}'.format(t1 - t0)) print('Loaded dataframe = {}'.format(df.shape)) ## Gene name conversion convert_gene_names = False if gene_table_filepath is not None: com2sys_dict = com2sys(gene_table_filepath) convert_gene_names = True ## Allow some other common names com2sys_dict.update( {'PHO88': 'YBR106W', 'FRA2': 'YGL220W', 'PET10': 'YKR046C', 'OSW5': 'YMR148W'}) ## Query time point df = df.loc[(~df['TF'].isin(TF_BLACKLIST)) & \ (df['restriction'] == RESTRICTION) & \ (df['time'] == time_point)] ## Create a output df if fc_type == 'prepert': expr_col = 'log2_r_g_ratio' df[expr_col] = np.log2(df['red_median'] / df['green_median']) elif fc_type == 'prepertRed': expr_col = 'red_median' df[expr_col] = df[expr_col] else: expr_col = 'log2_{}_{}'.format(fc_type, FC_DICT[fc_type][1]) out_df = pd.DataFrame(index=
pd.unique(df['GeneName'])
pandas.unique
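This row's prompt stops right at out_df = pd.DataFrame(index=, and the completion seeds the gene x TF matrix with pd.unique, which (unlike numpy.unique) keeps values in first-appearance order and does not sort. A small sketch of that difference and of one plausible way such a matrix could then be filled; the gene names come from the script's own alias table, but the TF labels and expression values are invented:

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "GeneName": ["YKR046C", "YBR106W", "YKR046C", "YGL220W"],
    "TF": ["TF1", "TF1", "TF2", "TF2"],
    "log2_shrunken_timecourses": [0.5, -1.2, 0.7, 0.1],
})

print(pd.unique(df["GeneName"]))  # ['YKR046C' 'YBR106W' 'YGL220W']  -- first-appearance order
print(np.unique(df["GeneName"]))  # ['YBR106W' 'YGL220W' 'YKR046C']  -- sorted

# Empty gene x TF matrix; columns can be added one TF at a time and align on the gene index.
out_df = pd.DataFrame(index=pd.unique(df["GeneName"]))
out_df["TF1"] = df.loc[df["TF"] == "TF1"].set_index("GeneName")["log2_shrunken_timecourses"]
print(out_df)  # YGL220W gets NaN because TF1 has no measurement for it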
import datetime import enum import pandas from numpy import nan, array, isnan, full, nanmean, mean from sklearn.base import clone from sklearn.model_selection import KFold from hydromet_forecasting.timeseries import FixedIndexTimeseries from hydromet_forecasting.evaluating import Evaluator, SeasonalEvaluator from sklearn import preprocessing from monthdelta import monthdelta from stldecompose import decompose as decomp import itertools import scipy.special as scisp class RegressionModel(object): """Sets up the Predictor Model from sklearn, etc. Workflow: 1. RegressionModel.SupportedModels.list_models(): returns dictionary of available models as name,value pairs 2. model=RegressionModel.build_regression_model(RegressionModel.SupportedModels(value)): imports necessary classes (sklearn etc.) 3. model.selectable_parameters: dictionary of possible parameters as parameter_type and "list of possible value" pairs. model.default_parameters: dictionary of default parameters as parameter_type and default value pairs. 4. configured_model=model.configure(parameters): returns configured model with parameters. Attributes: default_parameters: dictionary of default parameters as parameter_type and default value pairs. selectable_parameters: dictionary of parameters as parameter_type and a list of possible values [v1,v2,v3,v4,...] """ def __init__(self, model_class, selectable_parameters, default_parameters): self.model_class = model_class self.selectable_parameters = selectable_parameters self.default_parameters = default_parameters def configure(self, parameters=None): """Initialises model with parameters check Instance.selectable_parameters for possible parameters. Args: parameters: When no parameters are given, default parameters as defined in Instance.default_parameters are used. Returns: configured model object Raises: ValueError: when any parameter in parameters is invalid for this specific model. """ if parameters is None: return self.model_class(**self.default_parameters) else: for key in self.default_parameters: if not key in parameters.keys(): parameters.update({key: self.default_parameters[key]}) else: if not any(map(lambda x: x == parameters[key],self.selectable_parameters[key])): raise ValueError("The given value for %s must be a member of the class attribte selectable parameters." 
%(key)) return self.model_class(**parameters) class SupportedModels(enum.Enum): """Enum class for available models: list_models(): Returns: dictionary of available models as name,value pairs """ LinearRegression = 1 Lasso = 2 ExtraTreesRegressor = 3 @classmethod def list_models(self): out = dict() for model in (self): out[model.name] = model.value return out @classmethod def build_regression_model(cls, model): """Returns an instance of RegressionModel Args: model: An instance of RegressionModel.SupportedModels Returns: instance of RegressionModel Raises: ValueError: if model is not recognized """ if model == cls.SupportedModels.LinearRegression: from sklearn import linear_model return cls(linear_model.LinearRegression, {'fit_intercept': [True, False]}, {'fit_intercept': True}) elif model == cls.SupportedModels.Lasso: from sklearn import linear_model return cls(linear_model.Lasso, {'alpha': [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1]}, {'alpha': 1}) elif model == cls.SupportedModels.ExtraTreesRegressor: from sklearn import ensemble return cls(ensemble.ExtraTreesRegressor, {'n_estimators': range(1, 41, 1), 'random_state': range(1,10)}, {'n_estimators': 10, 'random_state': 1}) else: raise ValueError class Forecaster(object): """Forecasting class for timeseries that can be handled/read by FixedIndexTimeseries. This class enables the complete workflow from setting up a timeseries model, training, evaluating and forecasting values. It should work with all machine learning objects that know the methods fit() and predict(). It was designed to work with the FixedIndexTimeseries class which handles timeseries that have annual periodicity. In that sense, FixedIndex means, that each year has the same number of periods and that every period takes the same position in every year, e.g. monthes or semi-monthes etc. It does not work for timeseries with periods of strict length and as such, might overlap New Year. However, if the option multimodel is set to False, it can work with arbitrary timeseries that are handled by a class that replicates the methods in FixedIndexTimeseries. Attributes: trainingdates: a list of datetime.date objects of the periods which where used for training. Is None before training evaluator: An Evaluator object of the last evaluation done for this Forecaster instance. Is None before training trained: Boolean, False when instance has not yet been trained """ def __init__(self, model, y, X, laglength, lag=0, multimodel=True, decompose=False): """Initialising the Forecaster Instance Args: model: A model instance that knows the method fit() and predict() for a targetvector y and a feature array X y: A FixedIndexTimeseries Instance that is the target data X: A list of FixedIndexTimeseries Instances that represent the feature data laglength: A list of integers that define the number of past periods that are used from the feature set. Must have the same length as X lag: (int): when negative: the difference in days between forecasting date and the first day of the forecasted period (backwards in time) when positive: the difference in days between forecasting date and the first day of the period preceding the forecasted period (forward in time) Example: forecasted, decadal period is: 11.10-20.10, lag=0, laglength=1: The forecast is done on 11.10. The period 1.10 to 10.10 is used as feature. lag=-4, laglength=2: The forecast is done on 7.10. The periods 21.9-30.9 and 11.9-20.9 is used as feature lag=3, laglength=1: The forecast is done on 4.10. The period 21.9 to 30.9 is used as feature. 
multimodel: boolean. If true, a individual model is trained for each period of the year. Is used to build different models when the characteristics of the target timeseries have strong seasonality decompose: boolean: If true, the target timeseries is decomposed into seasonality and residual. The forecast is only done for the residual. Returns: A Forecaster instance with the methods train, predict and cross_validate Raises: ValueError: When the list "laglength" is of different length than the list X. """ self.__model = clone(model) self._multimodel = multimodel if not self._multimodel: self._maxindex = 1 self.__model = [self.__model] else: self._maxindex = y.maxindex self.__model = [clone(self.__model) for i in range(self._maxindex)] self._decompose = decompose self._seasonal = [0 for i in range(y.maxindex)] self._y = y self._y.timeseries.columns = ["target"] if type(X) is not list: self._X = [X] else: self._X = X self._X_type = [x.mode for x in self._X] self._lag = -lag # switches the sign of lag as argument, makes it easier to understand if type(laglength) is not list: self._laglength = [laglength] else: self._laglength = laglength if not len(self._laglength) == len(X): raise ValueError("The arguments laglength and X must be lists of the same length") self._y_scaler = [preprocessing.StandardScaler() for i in range(self._maxindex)] self._X_scaler = [preprocessing.StandardScaler() for i in range(self._maxindex)] assert len(self._X) > 0, "predictor dataset must contain at least one feature" self.trainingdates = None self.evaluator = None self.trained = False def _aggregate_featuredates(self, targetdate): """Given a targetdate, returns the list of required dates from the featuresets. Decadal forecast, lag 0, laglength 2: targetdate=datetime.date(2017,8,21) --> [datetime.date(2017,8,11),datetime.date(2017,8,1)] Args: targetdate: a datetime.date that is member of the targetperiod. Returns: A list of lists with datetime.date objects in the order of the featureset. Raises: None """ if self._lag < 0: targetdate = self._y.shift_date_by_period(targetdate, -1) targetdate = self._y.shift_date_by_period(targetdate, 0) - datetime.timedelta(self._lag) featuredates = [] for i, x in enumerate(self._X): x_targetdate = x.shift_date_by_period(targetdate, 0) dates = [] for shift in range(0, self._laglength[i]): dates.append(x.shift_date_by_period(targetdate, -(1 + shift))) featuredates.append(dates) return featuredates def _aggregate_features(self, featuredates, X): """Returns a 1D array of features for all dates in featuredates and features in X. The output array is in the order: feature1_t-1,feature1_t-2,feature1_t-3,feature2_t-1,feature2_t-2, and so on... Args: featuredates: A list of lists with the dates for which the data from X should be extracted X: A list of FixedIndexTimeseriesobjects. Its length must correspond to the length of 1st-level list of featuredates. Returns: An array with feature values Raises: None """ X_values = full(sum(self._laglength), nan) k = 0 for i, x in enumerate(X): try: ts = x.timeseries.reindex(featuredates[i]) # avoids the FutureWarning by pandas X_values[k:k + self._laglength[i]] = ts[featuredates[i]].values except KeyError: pass k = k + self._laglength[i] return X_values def train(self, y=None): """Trains the model with X and y as training set Args: y: A FixedIndexTimeseries instance that contains the target data on which the model shall be trained. Is meant to be used for cross validation or if not all availabe data shall be used for training. 
Default: None (the complete available dataset given when the instance was initialised is used.) Returns: None Raises: InsufficientData: is raised when there is not enough data to train the model for one complete year. """ if not y: y = self._y freq = len(self._seasonal) if self._decompose and freq>1: dec = decomp(y.timeseries.values, period=freq) y = FixedIndexTimeseries(pandas.Series(dec.resid+dec.trend, index=y.timeseries.index), mode=y.mode) seasonal = FixedIndexTimeseries(
pandas.Series(dec.seasonal, index=y.timeseries.index)
pandas.Series
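The completion here wraps the bare seasonal array from the decomposition back into pandas.Series(values, index=y.timeseries.index), so the component stays date-aligned with the target series, just as the residual+trend array is wrapped a few lines earlier. A self-contained sketch of that alignment idiom; a crude per-month mean stands in for the STL output (stldecompose itself is not reproduced here), and the dates and values are made up:

import numpy as np
import pandas as pd

# Three years of monthly values with a yearly cycle plus noise.
idx = pd.date_range("2015-01-01", periods=36, freq="MS")
values = 10 + 3 * np.sin(2 * np.pi * idx.month / 12) + np.random.RandomState(0).normal(0, 0.2, 36)
y = pd.Series(values, index=idx)

# Stand-in seasonal component: mean per calendar month, broadcast to every timestamp, as a plain ndarray.
monthly_means = y.groupby(y.index.month).transform("mean").values

# Re-attaching the array to the original index keeps it date-aligned,
# so the deseasonalised series is just a subtraction.
seasonal = pd.Series(monthly_means, index=y.index)
residual_plus_trend = y - seasonal
print(seasonal.head(3))
print(residual_plus_trend.head(3))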
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from distutils.version import LooseVersion import inspect import numpy as np import pandas as pd import pyspark import databricks.koalas as ks from databricks.koalas.exceptions import PandasNotImplementedError from databricks.koalas.missing.indexes import MissingPandasLikeIndex, MissingPandasLikeMultiIndex from databricks.koalas.testing.utils import ReusedSQLTestCase, TestUtils class IndexesTest(ReusedSQLTestCase, TestUtils): @property def pdf(self): return pd.DataFrame( {"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0],}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9], ) @property def kdf(self): return ks.from_pandas(self.pdf) def test_index(self): for pdf in [ pd.DataFrame(np.random.randn(10, 5), index=list("abcdefghij")), pd.DataFrame( np.random.randn(10, 5), index=pd.date_range("2011-01-01", freq="D", periods=10) ), pd.DataFrame(np.random.randn(10, 5), columns=list("abcde")).set_index(["a", "b"]), ]: kdf = ks.from_pandas(pdf) self.assert_eq(kdf.index, pdf.index) def test_index_getattr(self): kidx = self.kdf.index item = "databricks" expected_error_message = "'Index' object has no attribute '{}'".format(item) with self.assertRaisesRegex(AttributeError, expected_error_message): kidx.__getattr__(item) def test_multi_index_getattr(self): arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]] idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color")) pdf = pd.DataFrame(np.random.randn(4, 5), idx) kdf = ks.from_pandas(pdf) kidx = kdf.index item = "databricks" expected_error_message = "'MultiIndex' object has no attribute '{}'".format(item) with self.assertRaisesRegex(AttributeError, expected_error_message): kidx.__getattr__(item) def test_to_series(self): pidx = self.pdf.index kidx = self.kdf.index self.assert_eq(kidx.to_series(), pidx.to_series()) self.assert_eq(repr(kidx.to_series(name="a")), repr(pidx.to_series(name="a"))) # With name pidx.name = "Koalas" kidx.name = "Koalas" self.assert_eq(repr(kidx.to_series()), repr(pidx.to_series())) self.assert_eq(repr(kidx.to_series(name=("x", "a"))), repr(pidx.to_series(name=("x", "a")))) # With tupled name pidx.name = ("x", "a") kidx.name = ("x", "a") self.assert_eq(repr(kidx.to_series()), repr(pidx.to_series())) self.assert_eq(repr(kidx.to_series(name="a")), repr(pidx.to_series(name="a"))) self.assert_eq((kidx + 1).to_series(), (pidx + 1).to_series()) pidx = self.pdf.set_index("b", append=True).index kidx = self.kdf.set_index("b", append=True).index with self.sql_conf({"spark.sql.execution.arrow.enabled": False}): self.assert_eq(kidx.to_series(), pidx.to_series()) self.assert_eq(kidx.to_series(name="a"), pidx.to_series(name="a")) def test_to_frame(self): pidx = self.pdf.index kidx = self.kdf.index self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame())) self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False))) pidx.name = "a" kidx.name = "a" self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame())) 
self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False))) if LooseVersion(pd.__version__) >= LooseVersion("0.24"): # The `name` argument is added in pandas 0.24. self.assert_eq(repr(kidx.to_frame(name="x")), repr(pidx.to_frame(name="x"))) self.assert_eq( repr(kidx.to_frame(index=False, name="x")), repr(pidx.to_frame(index=False, name="x")), ) pidx = self.pdf.set_index("b", append=True).index kidx = self.kdf.set_index("b", append=True).index self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame())) self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False))) if LooseVersion(pd.__version__) >= LooseVersion("0.24"): # The `name` argument is added in pandas 0.24. self.assert_eq( repr(kidx.to_frame(name=["x", "y"])), repr(pidx.to_frame(name=["x", "y"])) ) self.assert_eq( repr(kidx.to_frame(index=False, name=["x", "y"])), repr(pidx.to_frame(index=False, name=["x", "y"])), ) def test_index_names(self): kdf = self.kdf self.assertIsNone(kdf.index.name) idx = pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name="x") pdf = pd.DataFrame(np.random.randn(10, 5), index=idx, columns=list("abcde")) kdf = ks.from_pandas(pdf) pser = pdf.a kser = kdf.a self.assertEqual(kdf.index.name, pdf.index.name) self.assertEqual(kdf.index.names, pdf.index.names) pidx = pdf.index kidx = kdf.index pidx.name = "renamed" kidx.name = "renamed" self.assertEqual(kidx.name, pidx.name) self.assertEqual(kidx.names, pidx.names) self.assert_eq(kidx, pidx) self.assertEqual(kdf.index.name, pdf.index.name) self.assertEqual(kdf.index.names, pdf.index.names) self.assertEqual(kser.index.names, pser.index.names) pidx.name = None kidx.name = None self.assertEqual(kidx.name, pidx.name) self.assertEqual(kidx.names, pidx.names) self.assert_eq(kidx, pidx) self.assertEqual(kdf.index.name, pdf.index.name) self.assertEqual(kdf.index.names, pdf.index.names) self.assertEqual(kser.index.names, pser.index.names) with self.assertRaisesRegex(ValueError, "Names must be a list-like"): kidx.names = "hi" expected_error_message = "Length of new names must be {}, got {}".format( len(kdf._internal.index_map), len(["0", "1"]) ) with self.assertRaisesRegex(ValueError, expected_error_message): kidx.names = ["0", "1"] def test_multi_index_names(self): arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]] idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color")) pdf = pd.DataFrame(np.random.randn(4, 5), idx) kdf = ks.from_pandas(pdf) self.assertEqual(kdf.index.names, pdf.index.names) pidx = pdf.index kidx = kdf.index pidx.names = ["renamed_number", "renamed_color"] kidx.names = ["renamed_number", "renamed_color"] self.assertEqual(kidx.names, pidx.names) pidx.names = ["renamed_number", None] kidx.names = ["renamed_number", None] self.assertEqual(kidx.names, pidx.names) if LooseVersion(pyspark.__version__) < LooseVersion("2.4"): # PySpark < 2.4 does not support struct type with arrow enabled. 
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}): self.assert_eq(kidx, pidx) else: self.assert_eq(kidx, pidx) with self.assertRaises(PandasNotImplementedError): kidx.name with self.assertRaises(PandasNotImplementedError): kidx.name = "renamed" def test_index_rename(self): pdf = pd.DataFrame( np.random.randn(10, 5), index=pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name="x") ) kdf = ks.from_pandas(pdf) pidx = pdf.index kidx = kdf.index self.assert_eq(kidx.rename("y"), pidx.rename("y")) self.assert_eq(kdf.index.names, pdf.index.names) kidx.rename("z", inplace=True) pidx.rename("z", inplace=True) self.assert_eq(kidx, pidx) self.assert_eq(kdf.index.names, pdf.index.names) self.assert_eq(kidx.rename(None), pidx.rename(None)) self.assert_eq(kdf.index.names, pdf.index.names) def test_multi_index_rename(self): arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]] idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color")) pdf = pd.DataFrame(np.random.randn(4, 5), idx) kdf = ks.from_pandas(pdf) pmidx = pdf.index kmidx = kdf.index self.assert_eq(kmidx.rename(["n", "c"]), pmidx.rename(["n", "c"])) self.assert_eq(kdf.index.names, pdf.index.names) kmidx.rename(["num", "col"], inplace=True) pmidx.rename(["num", "col"], inplace=True) self.assert_eq(kmidx, pmidx) self.assert_eq(kdf.index.names, pdf.index.names) self.assert_eq(kmidx.rename([None, None]), pmidx.rename([None, None])) self.assert_eq(kdf.index.names, pdf.index.names) self.assertRaises(TypeError, lambda: kmidx.rename("number")) self.assertRaises(ValueError, lambda: kmidx.rename(["number"])) def test_multi_index_levshape(self): pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2)]) kidx = ks.from_pandas(pidx) self.assertEqual(pidx.levshape, kidx.levshape) def test_index_unique(self): kidx = self.kdf.index # here the output is different than pandas in terms of order expected = [0, 1, 3, 5, 6, 8, 9] self.assert_eq(expected, sorted(kidx.unique().to_pandas())) self.assert_eq(expected, sorted(kidx.unique(level=0).to_pandas())) expected = [1, 2, 4, 6, 7, 9, 10] self.assert_eq(expected, sorted((kidx + 1).unique().to_pandas())) with self.assertRaisesRegex(IndexError, "Too many levels*"): kidx.unique(level=1) with self.assertRaisesRegex(KeyError, "Requested level (hi)*"): kidx.unique(level="hi") def test_multi_index_copy(self): arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]] idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color")) pdf = pd.DataFrame(np.random.randn(4, 5), idx) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.index.copy(), pdf.index.copy()) def test_drop_duplicates(self): pidx = pd.Index([4, 2, 4, 1, 4, 3]) kidx = ks.from_pandas(pidx) self.assert_eq(kidx.drop_duplicates().sort_values(), pidx.drop_duplicates().sort_values()) self.assert_eq( (kidx + 1).drop_duplicates().sort_values(), (pidx + 1).drop_duplicates().sort_values() ) def test_dropna(self): pidx = pd.Index([np.nan, 2, 4, 1, np.nan, 3]) kidx = ks.from_pandas(pidx) self.assert_eq(kidx.dropna(), pidx.dropna()) self.assert_eq((kidx + 1).dropna(), (pidx + 1).dropna()) def test_index_symmetric_difference(self): pidx1 = pd.Index([1, 2, 3, 4]) pidx2 = pd.Index([2, 3, 4, 5]) kidx1 = ks.from_pandas(pidx1) kidx2 = ks.from_pandas(pidx2) self.assert_eq( kidx1.symmetric_difference(kidx2).sort_values(), pidx1.symmetric_difference(pidx2).sort_values(), ) self.assert_eq( (kidx1 + 1).symmetric_difference(kidx2).sort_values(), (pidx1 + 1).symmetric_difference(pidx2).sort_values(), ) pmidx1 = pd.MultiIndex( [["lama", "cow", "falcon"], ["speed", "weight", 
"length"]], [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 0, 1, 2]], ) pmidx2 = pd.MultiIndex( [["koalas", "cow", "falcon"], ["speed", "weight", "length"]], [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 0, 1, 2]], ) kmidx1 = ks.from_pandas(pmidx1) kmidx2 = ks.from_pandas(pmidx2) self.assert_eq( kmidx1.symmetric_difference(kmidx2).sort_values(), pmidx1.symmetric_difference(pmidx2).sort_values(), ) idx = ks.Index(["a", "b", "c"]) midx = ks.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")]) with self.assertRaisesRegex(NotImplementedError, "Doesn't support*"): idx.symmetric_difference(midx) def test_multi_index_symmetric_difference(self): idx = ks.Index(["a", "b", "c"]) midx = ks.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")]) midx_ = ks.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")]) self.assert_eq( midx.symmetric_difference(midx_), midx.to_pandas().symmetric_difference(midx_.to_pandas()), ) with self.assertRaisesRegex(NotImplementedError, "Doesn't support*"): midx.symmetric_difference(idx) def test_missing(self): kdf = ks.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}) # Index functions missing_functions = inspect.getmembers(MissingPandasLikeIndex, inspect.isfunction) unsupported_functions = [ name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function" ] for name in unsupported_functions: with self.assertRaisesRegex( PandasNotImplementedError, "method.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name), ): getattr(kdf.set_index("a").index, name)() deprecated_functions = [ name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function" ] for name in deprecated_functions: with self.assertRaisesRegex( PandasNotImplementedError, "method.*Index.*{}.*is deprecated".format(name) ): getattr(kdf.set_index("a").index, name)() # MultiIndex functions missing_functions = inspect.getmembers(MissingPandasLikeMultiIndex, inspect.isfunction) unsupported_functions = [ name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function" ] for name in unsupported_functions: with self.assertRaisesRegex( PandasNotImplementedError, "method.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name), ): getattr(kdf.set_index(["a", "b"]).index, name)() deprecated_functions = [ name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function" ] for name in deprecated_functions: with self.assertRaisesRegex( PandasNotImplementedError, "method.*Index.*{}.*is deprecated".format(name) ): getattr(kdf.set_index(["a", "b"]).index, name)() # Index properties missing_properties = inspect.getmembers( MissingPandasLikeIndex, lambda o: isinstance(o, property) ) unsupported_properties = [ name for (name, type_) in missing_properties if type_.fget.__name__ == "unsupported_property" ] for name in unsupported_properties: with self.assertRaisesRegex( PandasNotImplementedError, "property.*Index.*{}.*not implemented( yet\\.|\\. 
.+)".format(name), ): getattr(kdf.set_index("a").index, name) deprecated_properties = [ name for (name, type_) in missing_properties if type_.fget.__name__ == "deprecated_property" ] for name in deprecated_properties: with self.assertRaisesRegex( PandasNotImplementedError, "property.*Index.*{}.*is deprecated".format(name) ): getattr(kdf.set_index("a").index, name) # MultiIndex properties missing_properties = inspect.getmembers( MissingPandasLikeMultiIndex, lambda o: isinstance(o, property) ) unsupported_properties = [ name for (name, type_) in missing_properties if type_.fget.__name__ == "unsupported_property" ] for name in unsupported_properties: with self.assertRaisesRegex( PandasNotImplementedError, "property.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name), ): getattr(kdf.set_index(["a", "b"]).index, name) deprecated_properties = [ name for (name, type_) in missing_properties if type_.fget.__name__ == "deprecated_property" ] for name in deprecated_properties: with self.assertRaisesRegex( PandasNotImplementedError, "property.*Index.*{}.*is deprecated".format(name) ): getattr(kdf.set_index(["a", "b"]).index, name) def test_index_has_duplicates(self): indexes = [("a", "b", "c"), ("a", "a", "c"), (1, 3, 3), (1, 2, 3)] names = [None, "ks", "ks", None] has_dup = [False, True, True, False] for idx, name, expected in zip(indexes, names, has_dup): pdf = pd.DataFrame({"a": [1, 2, 3]}, index=pd.Index(idx, name=name)) kdf = ks.from_pandas(pdf) self.assertEqual(kdf.index.has_duplicates, expected) def test_multiindex_has_duplicates(self): indexes = [ [list("abc"), list("edf")], [list("aac"), list("edf")], [list("aac"), list("eef")], [[1, 4, 4], [4, 6, 6]], ] has_dup = [False, False, True, True] for idx, expected in zip(indexes, has_dup): pdf = pd.DataFrame({"a": [1, 2, 3]}, index=idx) kdf = ks.from_pandas(pdf) self.assertEqual(kdf.index.has_duplicates, expected) def test_multi_index_not_supported(self): kdf = ks.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}) with self.assertRaisesRegex(TypeError, "cannot perform any with this index type"): kdf.set_index(["a", "b"]).index.any() with self.assertRaisesRegex(TypeError, "cannot perform all with this index type"): kdf.set_index(["a", "b"]).index.all() def test_index_nlevels(self): pdf = pd.DataFrame({"a": [1, 2, 3]}, index=pd.Index(["a", "b", "c"])) kdf = ks.from_pandas(pdf) self.assertEqual(kdf.index.nlevels, 1) def test_multiindex_nlevel(self): pdf = pd.DataFrame({"a": [1, 2, 3]}, index=[list("abc"), list("def")]) kdf = ks.from_pandas(pdf) self.assertEqual(kdf.index.nlevels, 2) def test_multiindex_from_arrays(self): arrays = [["a", "a", "b", "b"], ["red", "blue", "red", "blue"]] pidx = pd.MultiIndex.from_arrays(arrays) kidx = ks.MultiIndex.from_arrays(arrays) self.assert_eq(pidx, kidx) def test_multiindex_swaplevel(self): pidx = pd.MultiIndex.from_arrays([["a", "b"], [1, 2]]) kidx = ks.from_pandas(pidx) self.assert_eq(pidx.swaplevel(0, 1), kidx.swaplevel(0, 1)) pidx = pd.MultiIndex.from_arrays([["a", "b"], [1, 2]], names=["word", "number"]) kidx = ks.from_pandas(pidx) self.assert_eq(pidx.swaplevel(0, 1), kidx.swaplevel(0, 1)) pidx = pd.MultiIndex.from_arrays([["a", "b"], [1, 2]], names=["word", None]) kidx = ks.from_pandas(pidx) self.assert_eq(pidx.swaplevel(-2, -1), kidx.swaplevel(-2, -1)) self.assert_eq(pidx.swaplevel(0, 1), kidx.swaplevel(0, 1)) self.assert_eq(pidx.swaplevel("word", 1), kidx.swaplevel("word", 1)) with self.assertRaisesRegex(IndexError, "Too many levels: Index"): kidx.swaplevel(-3, "word") with 
self.assertRaisesRegex(IndexError, "Too many levels: Index"): kidx.swaplevel(0, 2) with self.assertRaisesRegex(IndexError, "Too many levels: Index"): kidx.swaplevel(0, -3) with self.assertRaisesRegex(KeyError, "Level work not found"): kidx.swaplevel(0, "work") def test_multiindex_droplevel(self): pidx = pd.MultiIndex.from_tuples( [("a", "x", 1), ("b", "y", 2)], names=["level1", "level2", "level3"] ) kidx = ks.from_pandas(pidx) with self.assertRaisesRegex(IndexError, "Too many levels: Index has only 3 levels, not 5"): kidx.droplevel(4) with self.assertRaisesRegex(KeyError, "Level level4 not found"): kidx.droplevel("level4") with self.assertRaisesRegex(KeyError, "Level.*level3.*level4.*not found"): kidx.droplevel([("level3", "level4")]) with self.assertRaisesRegex( ValueError, "Cannot remove 4 levels from an index with 3 levels: at least one " "level must be left.", ): kidx.droplevel([0, 0, 1, 2]) with self.assertRaisesRegex( ValueError, "Cannot remove 3 levels from an index with 3 levels: at least one " "level must be left.", ): kidx.droplevel([0, 1, 2]) self.assert_eq(pidx.droplevel(0), kidx.droplevel(0)) self.assert_eq(pidx.droplevel([0, 1]), kidx.droplevel([0, 1])) self.assert_eq(pidx.droplevel([0, "level2"]), kidx.droplevel([0, "level2"])) def test_index_fillna(self): pidx = pd.Index([1, 2, None]) kidx = ks.from_pandas(pidx) self.assert_eq(pidx.fillna(0), kidx.fillna(0)) self.assert_eq(pidx.rename("name").fillna(0), kidx.rename("name").fillna(0)) with self.assertRaisesRegex(TypeError, "Unsupported type <class 'list'>"): kidx.fillna([1, 2]) def test_index_drop(self): pidx = pd.Index([1, 2, 3]) kidx = ks.from_pandas(pidx) self.assert_eq(pidx.drop(1), kidx.drop(1)) self.assert_eq(pidx.drop([1, 2]), kidx.drop([1, 2])) def test_multiindex_drop(self): pidx = pd.MultiIndex.from_tuples( [("a", "x"), ("b", "y"), ("c", "z")], names=["level1", "level2"] ) kidx = ks.from_pandas(pidx) self.assert_eq(pidx.drop("a"), kidx.drop("a")) self.assert_eq(pidx.drop(["a", "b"]), kidx.drop(["a", "b"])) self.assert_eq(pidx.drop(["x", "y"], level=1), kidx.drop(["x", "y"], level=1)) self.assert_eq(pidx.drop(["x", "y"], level="level2"), kidx.drop(["x", "y"], level="level2")) pidx.names = ["lv1", "lv2"] kidx.names = ["lv1", "lv2"] self.assert_eq(pidx.drop(["x", "y"], level="lv2"), kidx.drop(["x", "y"], level="lv2")) self.assertRaises(IndexError, lambda: kidx.drop(["a", "b"], level=2)) self.assertRaises(KeyError, lambda: kidx.drop(["a", "b"], level="level")) kidx.names = ["lv", "lv"] self.assertRaises(ValueError, lambda: kidx.drop(["x", "y"], level="lv")) def test_sort_values(self): pidx = pd.Index([-10, -100, 200, 100]) kidx = ks.from_pandas(pidx) self.assert_eq(pidx.sort_values(), kidx.sort_values()) self.assert_eq(pidx.sort_values(ascending=False), kidx.sort_values(ascending=False)) pidx.name = "koalas" kidx.name = "koalas" self.assert_eq(pidx.sort_values(), kidx.sort_values()) self.assert_eq(pidx.sort_values(ascending=False), kidx.sort_values(ascending=False)) pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)]) kidx = ks.from_pandas(pidx) pidx.names = ["hello", "koalas", "goodbye"] kidx.names = ["hello", "koalas", "goodbye"] self.assert_eq(pidx.sort_values(), kidx.sort_values()) self.assert_eq(pidx.sort_values(ascending=False), kidx.sort_values(ascending=False)) def test_index_drop_duplicates(self): pidx = pd.Index([1, 1, 2]) kidx = ks.from_pandas(pidx) self.assert_eq(pidx.drop_duplicates().sort_values(), kidx.drop_duplicates().sort_values()) pidx = pd.MultiIndex.from_tuples([(1, 1), 
(1, 1), (2, 2)], names=["level1", "level2"]) kidx = ks.from_pandas(pidx) self.assert_eq(pidx.drop_duplicates().sort_values(), kidx.drop_duplicates().sort_values()) def test_index_sort(self): idx = ks.Index([1, 2, 3, 4, 5]) midx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2)]) with self.assertRaisesRegex( TypeError, "cannot sort an Index object in-place, use sort_values instead" ): idx.sort() with self.assertRaisesRegex( TypeError, "cannot sort an Index object in-place, use sort_values instead" ): midx.sort() def test_multiindex_isna(self): kidx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)]) with self.assertRaisesRegex(NotImplementedError, "isna is not defined for MultiIndex"): kidx.isna() with self.assertRaisesRegex(NotImplementedError, "isna is not defined for MultiIndex"): kidx.isnull() with self.assertRaisesRegex(NotImplementedError, "notna is not defined for MultiIndex"): kidx.notna() with self.assertRaisesRegex(NotImplementedError, "notna is not defined for MultiIndex"): kidx.notnull() def test_index_nunique(self): pidx = pd.Index([1, 1, 2, None]) kidx = ks.from_pandas(pidx) self.assert_eq(pidx.nunique(), kidx.nunique()) self.assert_eq(pidx.nunique(dropna=True), kidx.nunique(dropna=True)) def test_multiindex_nunique(self): kidx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)]) with self.assertRaisesRegex(NotImplementedError, "notna is not defined for MultiIndex"): kidx.notnull() def test_multiindex_rename(self): pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)]) kidx = ks.from_pandas(pidx) pidx = pidx.rename(list("ABC")) kidx = kidx.rename(list("ABC")) self.assert_eq(pidx, kidx) pidx = pidx.rename(["my", "name", "is"]) kidx = kidx.rename(["my", "name", "is"]) self.assert_eq(pidx, kidx) def test_multiindex_set_names(self): pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)]) kidx = ks.from_pandas(pidx) pidx = pidx.set_names(["set", "new", "names"]) kidx = kidx.set_names(["set", "new", "names"]) self.assert_eq(pidx, kidx) pidx.set_names(["set", "new", "names"], inplace=True) kidx.set_names(["set", "new", "names"], inplace=True) self.assert_eq(pidx, kidx) pidx = pidx.set_names("first", level=0) kidx = kidx.set_names("first", level=0) self.assert_eq(pidx, kidx) pidx = pidx.set_names("second", level=1) kidx = kidx.set_names("second", level=1) self.assert_eq(pidx, kidx) pidx = pidx.set_names("third", level=2) kidx = kidx.set_names("third", level=2) self.assert_eq(pidx, kidx) pidx.set_names("first", level=0, inplace=True) kidx.set_names("first", level=0, inplace=True) self.assert_eq(pidx, kidx) pidx.set_names("second", level=1, inplace=True) kidx.set_names("second", level=1, inplace=True) self.assert_eq(pidx, kidx) pidx.set_names("third", level=2, inplace=True) kidx.set_names("third", level=2, inplace=True) self.assert_eq(pidx, kidx) def test_multiindex_from_tuples(self): tuples = [(1, "red"), (1, "blue"), (2, "red"), (2, "blue")] pidx = pd.MultiIndex.from_tuples(tuples) kidx = ks.MultiIndex.from_tuples(tuples) self.assert_eq(pidx, kidx) def test_multiindex_from_product(self): iterables = [[0, 1, 2], ["green", "purple"]] pidx = pd.MultiIndex.from_product(iterables) kidx = ks.MultiIndex.from_product(iterables) self.assert_eq(pidx, kidx) def test_multiindex_tuple_column_name(self): column_labels = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")]) pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=column_labels) pdf.set_index(("a", "x"), append=True, 
inplace=True) kdf = ks.from_pandas(pdf) self.assert_eq(pdf, kdf) def test_len(self): pidx = pd.Index(range(10000)) kidx = ks.from_pandas(pidx) self.assert_eq(len(pidx), len(kidx)) pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)]) kidx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)]) self.assert_eq(len(pidx), len(kidx)) def test_delete(self): pidx = pd.Index([10, 9, 8, 7, 6, 7, 8, 9, 10]) kidx = ks.Index([10, 9, 8, 7, 6, 7, 8, 9, 10]) self.assert_eq(pidx.delete(5).sort_values(), kidx.delete(5).sort_values()) self.assert_eq(pidx.delete(-5).sort_values(), kidx.delete(-5).sort_values()) if LooseVersion(np.__version__) < LooseVersion("1.19"): self.assert_eq( pidx.delete([0, 10000]).sort_values(), kidx.delete([0, 10000]).sort_values() ) self.assert_eq( pidx.delete([10000, 20000]).sort_values(), kidx.delete([10000, 20000]).sort_values() ) else: self.assert_eq(pidx.delete([0]).sort_values(), kidx.delete([0, 10000]).sort_values()) self.assert_eq(pidx.delete([]).sort_values(), kidx.delete([10000, 20000]).sort_values()) with self.assertRaisesRegex(IndexError, "index 10 is out of bounds for axis 0 with size 9"): kidx.delete(10) pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)]) kidx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)]) self.assert_eq(pidx.delete(1).sort_values(), kidx.delete(1).sort_values()) self.assert_eq(pidx.delete(-1).sort_values(), kidx.delete(-1).sort_values()) if LooseVersion(np.__version__) < LooseVersion("1.19"): self.assert_eq( pidx.delete([0, 10000]).sort_values(), kidx.delete([0, 10000]).sort_values() ) self.assert_eq( pidx.delete([10000, 20000]).sort_values(), kidx.delete([10000, 20000]).sort_values() ) else: self.assert_eq(pidx.delete([0]).sort_values(), kidx.delete([0, 10000]).sort_values()) self.assert_eq(pidx.delete([]).sort_values(), kidx.delete([10000, 20000]).sort_values()) def test_append(self): # Index pidx = pd.Index(range(10000)) kidx = ks.from_pandas(pidx) self.assert_eq(pidx.append(pidx), kidx.append(kidx)) # Index with name pidx1 = pd.Index(range(10000), name="a") pidx2 = pd.Index(range(10000), name="b") kidx1 = ks.from_pandas(pidx1) kidx2 = ks.from_pandas(pidx2) self.assert_eq(pidx1.append(pidx2), kidx1.append(kidx2)) self.assert_eq(pidx2.append(pidx1), kidx2.append(kidx1)) # Index from DataFrame pdf1 = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["a", "b", "c"]) pdf2 = pd.DataFrame({"a": [7, 8, 9], "d": [10, 11, 12]}, index=["x", "y", "z"]) kdf1 = ks.from_pandas(pdf1) kdf2 = ks.from_pandas(pdf2) pidx1 = pdf1.set_index("a").index pidx2 = pdf2.set_index("d").index kidx1 = kdf1.set_index("a").index kidx2 = kdf2.set_index("d").index self.assert_eq(pidx1.append(pidx2), kidx1.append(kidx2)) self.assert_eq(pidx2.append(pidx1), kidx2.append(kidx1)) # Index from DataFrame with MultiIndex columns pdf1 = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) pdf2 = pd.DataFrame({"a": [7, 8, 9], "d": [10, 11, 12]}) pdf1.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y")]) pdf2.columns = pd.MultiIndex.from_tuples([("a", "x"), ("d", "y")]) kdf1 = ks.from_pandas(pdf1) kdf2 = ks.from_pandas(pdf2) pidx1 = pdf1.set_index(("a", "x")).index pidx2 = pdf2.set_index(("d", "y")).index kidx1 = kdf1.set_index(("a", "x")).index kidx2 = kdf2.set_index(("d", "y")).index self.assert_eq(pidx1.append(pidx2), kidx1.append(kidx2)) self.assert_eq(pidx2.append(pidx1), kidx2.append(kidx1)) # MultiIndex pmidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 
3)]) kmidx = ks.from_pandas(pmidx) self.assert_eq(pmidx.append(pmidx), kmidx.append(kmidx)) # MultiIndex with names pmidx1 = pd.MultiIndex.from_tuples( [("a", "x", 1), ("b", "y", 2), ("c", "z", 3)], names=["x", "y", "z"] ) pmidx2 = pd.MultiIndex.from_tuples( [("a", "x", 1), ("b", "y", 2), ("c", "z", 3)], names=["p", "q", "r"] ) kmidx1 = ks.from_pandas(pmidx1) kmidx2 = ks.from_pandas(pmidx2) self.assert_eq(pmidx1.append(pmidx2), kmidx1.append(kmidx2)) self.assert_eq(pmidx2.append(pmidx1), kmidx2.append(kmidx1)) self.assert_eq(pmidx1.append(pmidx2).names, kmidx1.append(kmidx2).names) self.assert_eq(pmidx1.append(pmidx2).names, kmidx1.append(kmidx2).names) # Index & MultiIndex currently is not supported expected_error_message = r"append\(\) between Index & MultiIndex currently is not supported" with self.assertRaisesRegex(NotImplementedError, expected_error_message): kidx.append(kmidx) with self.assertRaisesRegex(NotImplementedError, expected_error_message): kmidx.append(kidx) def test_argmin(self): pidx = pd.Index([100, 50, 10, 20, 30, 60, 0, 50, 0, 100, 100, 100, 20, 0, 0]) kidx = ks.from_pandas(pidx) self.assert_eq(pidx.argmin(), kidx.argmin()) # MultiIndex kidx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)]) with self.assertRaisesRegex( TypeError, "reduction operation 'argmin' not allowed for this dtype" ): kidx.argmin() def test_argmax(self): pidx = pd.Index([100, 50, 10, 20, 30, 60, 0, 50, 0, 100, 100, 100, 20, 0, 0]) kidx = ks.from_pandas(pidx) self.assert_eq(pidx.argmax(), kidx.argmax()) # MultiIndex kidx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)]) with self.assertRaisesRegex( TypeError, "reduction operation 'argmax' not allowed for this dtype" ): kidx.argmax() def test_monotonic(self): # test monotonic_increasing & monotonic_decreasing for MultiIndex. # Since the Behavior for null value was changed in pandas >= 1.0.0, # several cases are tested differently. 
datas = [] # increasing / decreasing ordered each index level with string datas.append([("w", "a"), ("x", "b"), ("y", "c"), ("z", "d")]) datas.append([("w", "d"), ("x", "c"), ("y", "b"), ("z", "a")]) datas.append([("z", "a"), ("y", "b"), ("x", "c"), ("w", "d")]) datas.append([("z", "d"), ("y", "c"), ("x", "b"), ("w", "a")]) # mixed order each index level with string datas.append([("z", "a"), ("x", "b"), ("y", "c"), ("w", "d")]) datas.append([("z", "a"), ("y", "c"), ("x", "b"), ("w", "d")]) # increasing / decreasing ordered each index level with integer datas.append([(1, 100), (2, 200), (3, 300), (4, 400), (5, 500)]) datas.append([(1, 500), (2, 400), (3, 300), (4, 200), (5, 100)]) datas.append([(5, 100), (4, 200), (3, 300), (2, 400), (1, 500)]) datas.append([(5, 500), (4, 400), (3, 300), (2, 200), (1, 100)]) # mixed order each index level with integer datas.append([(1, 500), (3, 400), (2, 300), (4, 200), (5, 100)]) datas.append([(1, 100), (2, 300), (3, 200), (4, 400), (5, 500)]) # integer / negative mixed tests datas.append([("a", -500), ("b", -400), ("c", -300), ("d", -200), ("e", -100)]) datas.append([("e", -500), ("d", -400), ("c", -300), ("b", -200), ("a", -100)]) datas.append([(-5, "a"), (-4, "b"), (-3, "c"), (-2, "d"), (-1, "e")]) datas.append([(-5, "e"), (-4, "d"), (-3, "c"), (-2, "b"), (-1, "a")]) datas.append([(-5, "e"), (-3, "d"), (-2, "c"), (-4, "b"), (-1, "a")]) datas.append([(-5, "e"), (-4, "c"), (-3, "b"), (-2, "d"), (-1, "a")]) # None type tests (None type is treated as the smallest value) datas.append([(1, 100), (2, 200), (None, 300), (4, 400), (5, 500)]) datas.append([(5, None), (4, 200), (3, 300), (2, 400), (1, 500)]) datas.append([(5, 100), (4, 200), (3, None), (2, 400), (1, 500)]) datas.append([(5, 100), (4, 200), (3, 300), (2, 400), (1, None)]) datas.append([(1, 100), (2, 200), (None, None), (4, 400), (5, 500)]) datas.append([(-5, None), (-4, None), (-3, None), (-2, None), (-1, None)]) datas.append([(None, "e"), (None, "c"), (None, "b"), (None, "d"), (None, "a")]) datas.append([(None, None), (None, None), (None, None), (None, None), (None, None)]) # duplicated index value tests datas.append([("x", "d"), ("y", "c"), ("y", "b"), ("z", "a")]) datas.append([("x", "d"), ("y", "b"), ("y", "c"), ("z", "a")]) datas.append([("x", "d"), ("y", "c"), ("y", None), ("z", "a")]) datas.append([("x", "d"), ("y", None), ("y", None), ("z", "a")]) datas.append([("x", "d"), ("y", "c"), ("y", "b"), (None, "a")]) datas.append([("x", "d"), ("y", "b"), ("y", "c"), (None, "a")]) # more depth tests datas.append([("x", "d", "o"), ("y", "c", "p"), ("y", "c", "q"), ("z", "a", "r")]) datas.append([("x", "d", "o"), ("y", "c", "q"), ("y", "c", "p"), ("z", "a", "r")]) datas.append([("x", "d", "o"), ("y", "c", "p"), ("y", "c", None), ("z", "a", "r")]) datas.append([("x", "d", "o"), ("y", "c", None), ("y", "c", None), ("z", "a", "r")]) for data in datas: with self.subTest(data=data): pmidx = pd.MultiIndex.from_tuples(data) kmidx = ks.from_pandas(pmidx) self.assert_eq(kmidx.is_monotonic_increasing, pmidx.is_monotonic_increasing) self.assert_eq(kmidx.is_monotonic_decreasing, pmidx.is_monotonic_decreasing) # The datas below are showing different result depends on pandas version. # Because the behavior of handling null values is changed in pandas >= 1.0.0. 
datas = [] datas.append([(None, 100), (2, 200), (3, 300), (4, 400), (5, 500)]) datas.append([(1, 100), (2, 200), (3, 300), (4, 400), (None, 500)]) datas.append([(None, None), (2, 200), (3, 300), (4, 400), (5, 500)]) datas.append([(1, 100), (2, 200), (3, 300), (4, 400), (None, None)]) datas.append([("x", "d"), ("y", None), ("y", "c"), ("z", "a")]) datas.append([("x", "d", "o"), ("y", "c", None), ("y", "c", "q"), ("z", "a", "r")]) for data in datas: with self.subTest(data=data): pmidx = pd.MultiIndex.from_tuples(data) kmidx = ks.from_pandas(pmidx) expected_increasing_result = pmidx.is_monotonic_increasing if LooseVersion(pd.__version__) < LooseVersion("1.0.0"): expected_increasing_result = not expected_increasing_result self.assert_eq(kmidx.is_monotonic_increasing, expected_increasing_result) self.assert_eq(kmidx.is_monotonic_decreasing, pmidx.is_monotonic_decreasing) def test_difference(self): # Index kidx1 = ks.Index([1, 2, 3, 4], name="koalas") kidx2 = ks.Index([3, 4, 5, 6], name="koalas") pidx1 = kidx1.to_pandas() pidx2 = kidx2.to_pandas() self.assert_eq(kidx1.difference(kidx2).sort_values(), pidx1.difference(pidx2).sort_values()) self.assert_eq( kidx1.difference([3, 4, 5, 6]).sort_values(), pidx1.difference([3, 4, 5, 6]).sort_values(), ) self.assert_eq( kidx1.difference((3, 4, 5, 6)).sort_values(), pidx1.difference((3, 4, 5, 6)).sort_values(), ) self.assert_eq( kidx1.difference({3, 4, 5, 6}).sort_values(), pidx1.difference({3, 4, 5, 6}).sort_values(), ) self.assert_eq( kidx1.difference({3: 1, 4: 2, 5: 3, 6: 4}).sort_values(), pidx1.difference({3: 1, 4: 2, 5: 3, 6: 4}).sort_values(), ) # Exceptions for Index with self.assertRaisesRegex(TypeError, "Input must be Index or array-like"): kidx1.difference("1234") with self.assertRaisesRegex(TypeError, "Input must be Index or array-like"): kidx1.difference(1234) with self.assertRaisesRegex(TypeError, "Input must be Index or array-like"): kidx1.difference(12.34) with self.assertRaisesRegex(TypeError, "Input must be Index or array-like"): kidx1.difference(None) with self.assertRaisesRegex(TypeError, "Input must be Index or array-like"): kidx1.difference(np.nan) with self.assertRaisesRegex( ValueError, "The 'sort' keyword only takes the values of None or True; 1 was passed." 
): kidx1.difference(kidx2, sort=1) # MultiIndex kidx1 = ks.MultiIndex.from_tuples( [("a", "x", 1), ("b", "y", 2), ("c", "z", 3)], names=["hello", "koalas", "world"] ) kidx2 = ks.MultiIndex.from_tuples( [("a", "x", 1), ("b", "z", 2), ("k", "z", 3)], names=["hello", "koalas", "world"] ) pidx1 = kidx1.to_pandas() pidx2 = kidx2.to_pandas() self.assert_eq(kidx1.difference(kidx2).sort_values(), pidx1.difference(pidx2).sort_values()) self.assert_eq( kidx1.difference({("a", "x", 1)}).sort_values(), pidx1.difference({("a", "x", 1)}).sort_values(), ) self.assert_eq( kidx1.difference({("a", "x", 1): [1, 2, 3]}).sort_values(), pidx1.difference({("a", "x", 1): [1, 2, 3]}).sort_values(), ) # Exceptions for MultiIndex with self.assertRaisesRegex(TypeError, "other must be a MultiIndex or a list of tuples"): kidx1.difference(["b", "z", "2"]) def test_repeat(self): pidx = pd.Index(["a", "b", "c"]) kidx = ks.from_pandas(pidx) self.assert_eq(kidx.repeat(3).sort_values(), pidx.repeat(3).sort_values()) self.assert_eq(kidx.repeat(0).sort_values(), pidx.repeat(0).sort_values()) self.assert_eq((kidx + "x").repeat(3).sort_values(), (pidx + "x").repeat(3).sort_values()) self.assertRaises(ValueError, lambda: kidx.repeat(-1)) self.assertRaises(ValueError, lambda: kidx.repeat("abc")) pmidx = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")]) kmidx = ks.from_pandas(pmidx) self.assert_eq(kmidx.repeat(3).sort_values(), pmidx.repeat(3).sort_values()) self.assert_eq(kmidx.repeat(0).sort_values(), pmidx.repeat(0).sort_values()) self.assertRaises(ValueError, lambda: kmidx.repeat(-1)) self.assertRaises(ValueError, lambda: kmidx.repeat("abc")) def test_unique(self): pidx = pd.Index(["a", "b", "a"]) kidx = ks.from_pandas(pidx) self.assert_eq(kidx.unique().sort_values(), pidx.unique().sort_values()) self.assert_eq(kidx.unique().sort_values(), pidx.unique().sort_values()) pmidx = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("x", "a")]) kmidx = ks.from_pandas(pmidx) self.assert_eq(kmidx.unique().sort_values(), pmidx.unique().sort_values()) self.assert_eq(kmidx.unique().sort_values(), pmidx.unique().sort_values()) def test_asof(self): # Increasing values pidx = pd.Index(["2013-12-31", "2014-01-02", "2014-01-03"]) kidx = ks.from_pandas(pidx) self.assert_eq(kidx.asof("2014-01-01"), pidx.asof("2014-01-01")) self.assert_eq(kidx.asof("2014-01-02"), pidx.asof("2014-01-02")) self.assert_eq(repr(kidx.asof("1999-01-02")), repr(pidx.asof("1999-01-02"))) # Decreasing values pidx = pd.Index(["2014-01-03", "2014-01-02", "2013-12-31"]) kidx = ks.from_pandas(pidx) self.assert_eq(kidx.asof("2014-01-01"), pidx.asof("2014-01-01")) self.assert_eq(kidx.asof("2014-01-02"), pidx.asof("2014-01-02")) self.assert_eq(kidx.asof("1999-01-02"), pidx.asof("1999-01-02")) self.assert_eq(repr(kidx.asof("2015-01-02")), repr(pidx.asof("2015-01-02"))) # Not increasing, neither decreasing (ValueError) kidx = ks.Index(["2013-12-31", "2015-01-02", "2014-01-03"]) self.assertRaises(ValueError, lambda: kidx.asof("2013-12-31")) kmidx = ks.MultiIndex.from_tuples([("a", "a"), ("a", "b"), ("a", "c")]) self.assertRaises(NotImplementedError, lambda: kmidx.asof(("a", "b"))) def test_union(self): # Index pidx1 = pd.Index([1, 2, 3, 4]) pidx2 = pd.Index([3, 4, 5, 6]) kidx1 = ks.from_pandas(pidx1) kidx2 = ks.from_pandas(pidx2) self.assert_eq(kidx1.union(kidx2), pidx1.union(pidx2)) self.assert_eq(kidx2.union(kidx1), pidx2.union(pidx1)) self.assert_eq( kidx1.union([3, 4, 5, 6]), pidx1.union([3, 4, 5, 6]), ) self.assert_eq( kidx2.union([1, 2, 3, 4]), pidx2.union([1, 
2, 3, 4]), ) self.assert_eq( kidx1.union(ks.Series([3, 4, 5, 6])), pidx1.union(pd.Series([3, 4, 5, 6])), ) self.assert_eq( kidx2.union(ks.Series([1, 2, 3, 4])), pidx2.union(pd.Series([1, 2, 3, 4])), ) # Testing if the result is correct after sort=False. # The `sort` argument is added in pandas 0.24. if LooseVersion(pd.__version__) >= LooseVersion("0.24"): self.assert_eq( kidx1.union(kidx2, sort=False).sort_values(), pidx1.union(pidx2, sort=False).sort_values(), ) self.assert_eq( kidx2.union(kidx1, sort=False).sort_values(), pidx2.union(pidx1, sort=False).sort_values(), ) self.assert_eq( kidx1.union([3, 4, 5, 6], sort=False).sort_values(), pidx1.union([3, 4, 5, 6], sort=False).sort_values(), ) self.assert_eq( kidx2.union([1, 2, 3, 4], sort=False).sort_values(), pidx2.union([1, 2, 3, 4], sort=False).sort_values(), ) self.assert_eq( kidx1.union(ks.Series([3, 4, 5, 6]), sort=False).sort_values(), pidx1.union(pd.Series([3, 4, 5, 6]), sort=False).sort_values(), ) self.assert_eq( kidx2.union(ks.Series([1, 2, 3, 4]), sort=False).sort_values(), pidx2.union(pd.Series([1, 2, 3, 4]), sort=False).sort_values(), ) # Duplicated values for Index is supported in pandas >= 1.0.0 if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"): pidx1 = pd.Index([1, 2, 3, 4, 3, 4, 3, 4]) pidx2 = pd.Index([3, 4, 3, 4, 5, 6]) kidx1 = ks.from_pandas(pidx1) kidx2 = ks.from_pandas(pidx2) self.assert_eq(kidx1.union(kidx2), pidx1.union(pidx2)) self.assert_eq(kidx2.union(kidx1), pidx2.union(pidx1)) self.assert_eq( kidx1.union([3, 4, 3, 3, 5, 6]), pidx1.union([3, 4, 3, 4, 5, 6]), ) self.assert_eq( kidx2.union([1, 2, 3, 4, 3, 4, 3, 4]), pidx2.union([1, 2, 3, 4, 3, 4, 3, 4]), ) self.assert_eq( kidx1.union(ks.Series([3, 4, 3, 3, 5, 6])), pidx1.union(pd.Series([3, 4, 3, 4, 5, 6])), ) self.assert_eq( kidx2.union(ks.Series([1, 2, 3, 4, 3, 4, 3, 4])), pidx2.union(pd.Series([1, 2, 3, 4, 3, 4, 3, 4])), ) # MultiIndex pmidx1 = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("x", "a"), ("x", "b")]) pmidx2 = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("x", "c"), ("x", "d")]) pmidx3 = pd.MultiIndex.from_tuples([(1, 1), (1, 2), (1, 3), (1, 4), (1, 3), (1, 4)]) pmidx4 = pd.MultiIndex.from_tuples([(1, 3), (1, 4), (1, 5), (1, 6)]) kmidx1 = ks.from_pandas(pmidx1) kmidx2 = ks.from_pandas(pmidx2) kmidx3 = ks.from_pandas(pmidx3) kmidx4 = ks.from_pandas(pmidx4) self.assert_eq(kmidx1.union(kmidx2), pmidx1.union(pmidx2)) self.assert_eq(kmidx2.union(kmidx1), pmidx2.union(pmidx1)) self.assert_eq(kmidx3.union(kmidx4), pmidx3.union(pmidx4)) self.assert_eq(kmidx4.union(kmidx3), pmidx4.union(pmidx3)) self.assert_eq( kmidx1.union([("x", "a"), ("x", "b"), ("x", "c"), ("x", "d")]), pmidx1.union([("x", "a"), ("x", "b"), ("x", "c"), ("x", "d")]), ) self.assert_eq( kmidx2.union([("x", "a"), ("x", "b"), ("x", "a"), ("x", "b")]), pmidx2.union([("x", "a"), ("x", "b"), ("x", "a"), ("x", "b")]), ) self.assert_eq( kmidx3.union([(1, 3), (1, 4), (1, 5), (1, 6)]), pmidx3.union([(1, 3), (1, 4), (1, 5), (1, 6)]), ) self.assert_eq( kmidx4.union([(1, 1), (1, 2), (1, 3), (1, 4), (1, 3), (1, 4)]), pmidx4.union([(1, 1), (1, 2), (1, 3), (1, 4), (1, 3), (1, 4)]), ) # Testing if the result is correct after sort=False. # The `sort` argument is added in pandas 0.24. 
if LooseVersion(pd.__version__) >= LooseVersion("0.24"): self.assert_eq( kmidx1.union(kmidx2, sort=False).sort_values(), pmidx1.union(pmidx2, sort=False).sort_values(), ) self.assert_eq( kmidx2.union(kmidx1, sort=False).sort_values(), pmidx2.union(pmidx1, sort=False).sort_values(), ) self.assert_eq( kmidx3.union(kmidx4, sort=False).sort_values(), pmidx3.union(pmidx4, sort=False).sort_values(), ) self.assert_eq( kmidx4.union(kmidx3, sort=False).sort_values(), pmidx4.union(pmidx3, sort=False).sort_values(), ) self.assert_eq( kmidx1.union( [("x", "a"), ("x", "b"), ("x", "c"), ("x", "d")], sort=False ).sort_values(), pmidx1.union( [("x", "a"), ("x", "b"), ("x", "c"), ("x", "d")], sort=False ).sort_values(), ) self.assert_eq( kmidx2.union( [("x", "a"), ("x", "b"), ("x", "a"), ("x", "b")], sort=False ).sort_values(), pmidx2.union( [("x", "a"), ("x", "b"), ("x", "a"), ("x", "b")], sort=False ).sort_values(), ) self.assert_eq( kmidx3.union([(1, 3), (1, 4), (1, 5), (1, 6)], sort=False).sort_values(), pmidx3.union([(1, 3), (1, 4), (1, 5), (1, 6)], sort=False).sort_values(), ) self.assert_eq( kmidx4.union( [(1, 1), (1, 2), (1, 3), (1, 4), (1, 3), (1, 4)], sort=False ).sort_values(), pmidx4.union( [(1, 1), (1, 2), (1, 3), (1, 4), (1, 3), (1, 4)], sort=False ).sort_values(), ) self.assertRaises(NotImplementedError, lambda: kidx1.union(kmidx1)) self.assertRaises(TypeError, lambda: kmidx1.union(kidx1)) self.assertRaises(TypeError, lambda: kmidx1.union(["x", "a"])) self.assertRaises(ValueError, lambda: kidx1.union(ks.range(2))) def test_take(self): # Index pidx = pd.Index([100, 200, 300, 400, 500], name="Koalas") kidx = ks.from_pandas(pidx) self.assert_eq(kidx.take([0, 2, 4]).sort_values(), pidx.take([0, 2, 4]).sort_values()) self.assert_eq( kidx.take(range(0, 5, 2)).sort_values(), pidx.take(range(0, 5, 2)).sort_values() ) self.assert_eq(kidx.take([-4, -2, 0]).sort_values(), pidx.take([-4, -2, 0]).sort_values()) self.assert_eq( kidx.take(range(-4, 1, 2)).sort_values(), pidx.take(range(-4, 1, 2)).sort_values() ) # MultiIndex pmidx = pd.MultiIndex.from_tuples( [("x", "a"), ("x", "b"), ("x", "c")], names=["hello", "Koalas"] ) kmidx = ks.from_pandas(pmidx) self.assert_eq(kmidx.take([0, 2]).sort_values(), pmidx.take([0, 2]).sort_values()) self.assert_eq( kmidx.take(range(0, 4, 2)).sort_values(), pmidx.take(range(0, 4, 2)).sort_values() ) self.assert_eq(kmidx.take([-2, 0]).sort_values(), pmidx.take([-2, 0]).sort_values()) self.assert_eq( kmidx.take(range(-2, 1, 2)).sort_values(), pmidx.take(range(-2, 1, 2)).sort_values() ) # Checking the type of indices. self.assertRaises(ValueError, lambda: kidx.take(1)) self.assertRaises(ValueError, lambda: kidx.take("1")) self.assertRaises(ValueError, lambda: kidx.take({1, 2})) self.assertRaises(ValueError, lambda: kidx.take({1: None, 2: None})) self.assertRaises(ValueError, lambda: kmidx.take(1)) self.assertRaises(ValueError, lambda: kmidx.take("1")) self.assertRaises(ValueError, lambda: kmidx.take({1, 2})) self.assertRaises(ValueError, lambda: kmidx.take({1: None, 2: None})) def test_index_get_level_values(self): pidx = pd.Index([1, 2, 3], name="ks") kidx = ks.from_pandas(pidx) for level in [0, "ks"]: self.assert_eq(kidx.get_level_values(level), pidx.get_level_values(level)) def test_multiindex_get_level_values(self): pmidx =
pd.MultiIndex.from_tuples([("a", "d"), ("b", "e"), ("c", "f")])
pandas.MultiIndex.from_tuples
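# A minimal, self-contained sketch of pandas.MultiIndex.from_tuples, the API
# labelled above. The tuple values and level names are illustrative only and
# are not taken from the test file.
import pandas as pd

pmidx = pd.MultiIndex.from_tuples(
    [("a", 1), ("b", 2), ("c", 3)], names=["letter", "number"]
)
print(pmidx.names)                       # ['letter', 'number']
print(pmidx.get_level_values("letter"))  # Index(['a', 'b', 'c'], dtype='object', name='letter')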
from itertools import groupby, zip_longest from fractions import Fraction from random import sample import json import pandas as pd import numpy as np import music21 as m21 from music21.meter import TimeSignatureException m21.humdrum.spineParser.flavors['JRP'] = True from collections import defaultdict #song has no meter class UnknownPGramType(Exception): def __init__(self, arg): self.arg = arg def __str__(self): return f"Unknown pgram type: {self.arg}." #compute features: def compute_completesmeasure_phrase(seq, ix, start_ix): endpos = Fraction(seq['features']['beatinphrase'][ix]) - \ Fraction(seq['features']['beatinphrase'][start_ix]) + \ Fraction(seq['features']['IOI_beatfraction'][ix]) return endpos % seq['features']['beatspermeasure'][ix] == 0 def compute_completesbeat_phrase(seq, ix, start_ix): endpos = Fraction(seq['features']['beatinphrase'][ix]) - \ Fraction(seq['features']['beatinphrase'][start_ix]) + \ Fraction(seq['features']['IOI_beatfraction'][ix]) return endpos % 1 == 0 def compute_completesmeasure_song(seq, ix): endpos = Fraction(seq['features']['beatinphrase'][ix]) - \ Fraction(seq['features']['beatinphrase'][0]) + \ Fraction(seq['features']['IOI_beatfraction'][ix]) return endpos % seq['features']['beatspermeasure'][ix] == 0 def compute_completesbeat_song(seq, ix): endpos = Fraction(seq['features']['beatinphrase'][ix]) - \ Fraction(seq['features']['beatinphrase'][0]) + \ Fraction(seq['features']['IOI_beatfraction'][ix]) return endpos % 1 == 0 #extract IOI in units of beat #IOI_beatfraction[i] is IOI from start of ith note till start of (i+1)th note #for last note: beatfraction is taken #Also to be interpreted as duration of note + duration of following rests (except for rests at end of melody) # #extract beats per measure def extractFeatures(seq_iter, vocalfeatures=True): count = 0 for seq in seq_iter: count += 1 if count % 100 == 0: print(count, end=' ') pairs = zip(seq['features']['beatinsong'],seq['features']['beatinsong'][1:]) #this possibly includes rests IOI_beatfraction = [Fraction(o[1])-Fraction(o[0]) for o in pairs] IOI_beatfraction = [str(bf) for bf in IOI_beatfraction] + [seq['features']['beatfraction'][-1]] seq['features']['IOI_beatfraction'] = IOI_beatfraction beatspermeasure = [m21.meter.TimeSignature(ts).beatCount for ts in seq['features']['timesignature']] seq['features']['beatspermeasure'] = beatspermeasure phrasepos = seq['features']['phrasepos'] phrasestart_ix=[0]*len(phrasepos) for ix in range(1,len(phrasestart_ix)): if phrasepos[ix] < phrasepos[ix-1]: phrasestart_ix[ix] = ix else: phrasestart_ix[ix] = phrasestart_ix[ix-1] seq['features']['phrasestart_ix'] = phrasestart_ix endOfPhrase = [x[1]<x[0] for x in zip(phrasepos, phrasepos[1:])] + [True] seq['features']['endOfPhrase'] = endOfPhrase cm_p = [compute_completesmeasure_phrase(seq, ix, phrasestart_ix[ix]) for ix in range(len(phrasepos))] cb_p = [compute_completesbeat_phrase(seq, ix, phrasestart_ix[ix]) for ix in range(len(phrasepos))] cm_s = [compute_completesmeasure_song(seq, ix) for ix in range(len(phrasepos))] cb_s = [compute_completesbeat_song(seq, ix) for ix in range(len(phrasepos))] seq['features']['completesmeasure_phrase'] = cm_p seq['features']['completesbeat_phrase'] = cb_p seq['features']['completesmeasure_song'] = cm_s seq['features']['completesbeat_song'] = cb_s if vocalfeatures: #move lyric features to end of melisma: #rhymes, rhymescontentwords, wordstress, noncontentword, wordend #and compute rhyme_noteoffset and rhyme_beatoffset if 'melismastate' in seq['features'].keys(): #vocal? 
lyrics = seq['features']['lyrics'] phoneme = seq['features']['phoneme'] melismastate = seq['features']['melismastate'] rhymes = seq['features']['rhymes'] rhymescontentwords = seq['features']['rhymescontentwords'] wordend = seq['features']['wordend'] noncontentword = seq['features']['noncontentword'] wordstress = seq['features']['wordstress'] rhymes_endmelisma, rhymescontentwords_endmelisma = [], [] wordend_endmelisma, noncontentword_endmelisma, wordstress_endmelisma = [], [], [] lyrics_endmelisma, phoneme_endmelisma = [], [] from_ix = 0 inmelisma = False for ix in range(len(phrasepos)): if melismastate[ix] == 'start': from_ix = ix inmelisma = True if melismastate[ix] == 'end': if not inmelisma: from_ix = ix inmelisma = False rhymes_endmelisma.append(rhymes[from_ix]) rhymescontentwords_endmelisma.append(rhymescontentwords[from_ix]) wordend_endmelisma.append(wordend[from_ix]) noncontentword_endmelisma.append(noncontentword[from_ix]) wordstress_endmelisma.append(wordstress[from_ix]) lyrics_endmelisma.append(lyrics[from_ix]) phoneme_endmelisma.append(phoneme[from_ix]) else: rhymes_endmelisma.append(False) rhymescontentwords_endmelisma.append(False) wordend_endmelisma.append(False) noncontentword_endmelisma.append(False) wordstress_endmelisma.append(False) lyrics_endmelisma.append(None) phoneme_endmelisma.append(None) seq['features']['rhymes_endmelisma'] = rhymes_endmelisma seq['features']['rhymescontentwords_endmelisma'] = rhymescontentwords_endmelisma seq['features']['wordend_endmelisma'] = wordend_endmelisma seq['features']['noncontentword_endmelisma'] = noncontentword_endmelisma seq['features']['wordstress_endmelisma'] = wordstress_endmelisma seq['features']['lyrics_endmelisma'] = lyrics_endmelisma seq['features']['phoneme_endmelisma'] = phoneme_endmelisma #compute rhyme_noteoffset and rhyme_beatoffset rhyme_noteoffset = [0] rhyme_beatoffset = [0.0] previous = 0 previousbeat = float(Fraction(seq['features']['beatinsong'][0])) for ix in range(1,len(rhymescontentwords_endmelisma)): if rhymescontentwords_endmelisma[ix-1]: #previous rhymes previous = ix previousbeat = float(Fraction(seq['features']['beatinsong'][ix])) rhyme_noteoffset.append(ix - previous) rhyme_beatoffset.append(float(Fraction(seq['features']['beatinsong'][ix])) - previousbeat) seq['features']['rhymescontentwords_noteoffset'] = rhyme_noteoffset seq['features']['rhymescontentwords_beatoffset'] = rhyme_beatoffset else: #vocal features requested, but not present. #skip melody continue #Or do this? 
if False: length = len(phrasepos) seq['features']['rhymes_endmelisma'] = [None] * length seq['features']['rhymescontentwords_endmelisma'] = [None] * length seq['features']['wordend_endmelisma'] = [None] * length seq['features']['noncontentword_endmelisma'] = [None] * length seq['features']['wordstress_endmelisma'] = [None] * length seq['features']['lyrics_endmelisma'] = [None] * length seq['features']['phoneme_endmelisma'] = [None] * length yield seq class NoFeaturesError(Exception): def __init__(self, arg): self.args = arg class NoTrigramsError(Exception): def __init__(self, arg): self.args = arg def __str__(self): return repr(self.value) #endix is index of last note + 1 def computeSumFractions(fractions, startix, endix): res = 0.0 for fr in fractions[startix:endix]: res = res + float(Fraction(fr)) return res #make groups of indices with the same successive pitch, but (optionally) not crossing phrase boundaries <- 20200331 crossing phrase boundaries should be allowed (contourfourth) #returns tuples (ix of first note in group, ix of last note in group + 1) #crossPhraseBreak=False splits on phrase break. N.B. Is Using GroundTruth! def breakpitchlist(midipitch, phrase_ix, crossPhraseBreak=False): res = [] if crossPhraseBreak: for _, g in groupby( enumerate(midipitch), key=lambda x:x[1]): glist = list(g) res.append( (glist[0][0], glist[-1][0]+1) ) else: #N.B. This uses the ground truth for _, g in groupby( enumerate(zip(midipitch,phrase_ix)), key=lambda x:(x[1][0],x[1][1])): glist = list(g) res.append( (glist[0][0], glist[-1][0]+1) ) return res #True if no phrase end at first or second item (span) in the trigram #trigram looks like ((8, 10), (10, 11), (11, 12)) def noPhraseBreak(tr, endOfPhrase): return not ( ( True in endOfPhrase[tr[0][0]:tr[0][1]] ) or \ ( True in endOfPhrase[tr[1][0]:tr[1][1]] ) ) #pgram_type : "pitch", "note" def extractPgramsFromCorpus(corpus, pgram_type="pitch", startat=0, endat=None): pgrams = {} arfftype = {} for ix, seq in enumerate(corpus): if endat is not None: if ix >= endat: continue if ix < startat: continue if not ix%100: print(ix, end=' ') songid = seq['id'] try: pgrams[songid], arfftype_new = extractPgramsFromMelody(seq, pgram_type=pgram_type) _,_ = addCrossRelations(pgrams[songid], arfftype_new, 'interval', newname='intervalsize', typeconv=lambda x: abs(int(x))) _,_ = addCrossRelations(pgrams[songid], arfftype_new, 'interval', newname='intervaldir', typeconv=np.sign) _,_ = addCrossRelations(pgrams[songid], arfftype_new, 'diatonicpitch', typeconv=int) _,_ = addCrossRelations(pgrams[songid], arfftype_new, 'VosHarmony', typeconv=int) _,_ = addCrossRelations(pgrams[songid], arfftype_new, 'beatstrength', typeconv=float) _,_ = addCrossRelations(pgrams[songid], arfftype_new, 'IOIbeatfraction', typeconv=float) if 'melismastate' in seq['features'].keys(): _,_ = addCrossRelations(pgrams[songid], arfftype_new, 'wordstress', typeconv=int) if 'informationcontent' in seq['features'].keys(): _,_ = addCrossRelations(pgrams[songid], arfftype_new, 'informationcontent', typeconv=float) except NoFeaturesError: print(songid, ": No features extracted.") except NoTrigramsError: print(songid, ": No trigrams extracted") #if ix > startat: # if arfftype.keys() != arfftype_new.keys(): # print("Warning: Melodies have different feature sets.") # print(list(zip_longest(arfftype.keys(), arfftype_new.keys()))) #Keep largest set of features possible. N.B. no guarantee that all features in arfftype are in each sequence. 
arfftype.update(arfftype_new) #concat melodies pgrams = pd.concat([v for v in pgrams.values()]) return pgrams, arfftype def extractPgramsFromMelody(seq, pgram_type, skipPhraseCrossing=False): # some aliases scaledegree = seq['features']['scaledegree'] endOfPhrase = seq['features']['endOfPhrase'] midipitch = seq['features']['midipitch'] phrase_ix = seq['features']['phrase_ix'] if pgram_type == "pitch": event_spans = breakpitchlist(midipitch, phrase_ix) #allow pitches to cross phrase break elif pgram_type == "note": event_spans = list(zip(range(len(scaledegree)),range(1,len(scaledegree)+1))) else: raise UnknownPGramType(pgram_type) # make trigram of spans event_spans = event_spans + [(None, None), (None, None)] pgram_span_ixs = list(zip(event_spans,event_spans[1:],event_spans[2:],event_spans[3:],event_spans[4:])) # If skipPhraseCrossing prune trigrams crossing phrase boundaries. WHY? #Why actually? e.g. kindr154 prhases of 2 pitches if skipPhraseCrossing: pgram_span_ixs = [ixs for ixs in pgram_span_ixs if noPhraseBreak(ixs,endOfPhrase)] if len(pgram_span_ixs) == 0: raise NoTrigramsError(seq['id']) # create dataframe with pgram names as index pgram_ids = [seq["id"]+'_'+str(ixs[0][0]).zfill(3) for ixs in pgram_span_ixs] pgrams = pd.DataFrame(index=pgram_ids) pgrams['ix0_0'] = pd.array([ix[0][0] for ix in pgram_span_ixs], dtype="Int16") pgrams['ix0_1'] = pd.array([ix[0][1] for ix in pgram_span_ixs], dtype="Int16") pgrams['ix1_0'] = pd.array([ix[1][0] for ix in pgram_span_ixs], dtype="Int16") pgrams['ix1_1'] = pd.array([ix[1][1] for ix in pgram_span_ixs], dtype="Int16") pgrams['ix2_0'] = pd.array([ix[2][0] for ix in pgram_span_ixs], dtype="Int16") pgrams['ix2_1'] = pd.array([ix[2][1] for ix in pgram_span_ixs], dtype="Int16") pgrams['ix3_0'] = pd.array([ix[3][0] for ix in pgram_span_ixs], dtype="Int16") pgrams['ix3_1'] = pd.array([ix[3][1] for ix in pgram_span_ixs], dtype="Int16") pgrams['ix4_0'] = pd.array([ix[4][0] for ix in pgram_span_ixs], dtype="Int16") pgrams['ix4_1'] = pd.array([ix[4][1] for ix in pgram_span_ixs], dtype="Int16") #add tune family ids and songids pgrams['tunefamily'] = seq['tunefamily'] pgrams['songid'] = seq['id'] pgrams, arfftype = extractPgramFeatures(pgrams, seq) return pgrams, arfftype def getBeatDuration(timesig): try: dur = float(m21.meter.TimeSignature(timesig).beatDuration.quarterLength) except TimeSignatureException: dur = float(Fraction(timesig) / Fraction('1/4')) return dur def oneCrossRelation(el1, el2, typeconv): if pd.isna(el1) or pd.isna(el2): return np.nan return '-' if typeconv(el2) < typeconv(el1) else '=' if typeconv(el1) == typeconv(el2) else '+' def addCrossRelations(pgrams, arfftype, featurename, newname=None, typeconv=int): postfixes = { 1 : 'first', 2 : 'second', 3 : 'third', 4 : 'fourth', 5 : 'fifth' } if newname is None: newname = featurename for ix1 in range(1,6): for ix2 in range(ix1+1,6): featname = newname + postfixes[ix1] + postfixes[ix2] source = zip(pgrams[featurename + postfixes[ix1]], pgrams[featurename + postfixes[ix2]]) pgrams[featname] = [oneCrossRelation(el1, el2, typeconv) for (el1, el2) in source] arfftype[featname] = '{-,=,+}' return pgrams, arfftype def extractPgramFeatures(pgrams, seq): # vocal? 
vocal = False if 'melismastate' in seq['features'].keys(): vocal = True arfftype = {} # some aliases scaledegree = seq['features']['scaledegree'] beatstrength = seq['features']['beatstrength'] diatonicpitch = seq['features']['diatonicpitch'] midipitch = seq['features']['midipitch'] chromaticinterval = seq['features']['chromaticinterval'] timesig = seq['features']['timesignature'] metriccontour = seq['features']['metriccontour'] beatinsong = seq['features']['beatinsong'] beatinphrase = seq['features']['beatinphrase'] endOfPhrase = seq['features']['endOfPhrase'] phrasestart_ix = seq['features']['phrasestart_ix'] phrase_ix = seq['features']['phrase_ix'] completesmeasure_song = seq['features']['completesmeasure_song'] completesbeat_song = seq['features']['completesbeat_song'] completesmeasure_phrase = seq['features']['completesmeasure_phrase'] completesbeat_phrase = seq['features']['completesbeat_phrase'] IOIbeatfraction = seq['features']['IOI_beatfraction'] nextisrest = seq['features']['nextisrest'] gpr2a = seq['features']['gpr2a_Frankland'] gpr2b = seq['features']['gpr2b_Frankland'] gpr3a = seq['features']['gpr3a_Frankland'] gpr3d = seq['features']['gpr3d_Frankland'] gprsum = seq['features']['gpr_Frankland_sum'] pprox = seq['features']['pitchproximity'] prev = seq['features']['pitchreversal'] lbdmpitch = seq['features']['lbdm_spitch'] lbdmioi = seq['features']['lbdm_sioi'] lbdmrest = seq['features']['lbdm_srest'] lbdm = seq['features']['lbdm_boundarystrength'] if vocal: wordstress = seq['features']['wordstress_endmelisma'] noncontentword = seq['features']['noncontentword_endmelisma'] wordend = seq['features']['wordend_endmelisma'] rhymescontentwords = seq['features']['rhymescontentwords_endmelisma'] rhymescontentwords_noteoffset = seq['features']['rhymescontentwords_noteoffset'] rhymescontentwords_beatoffset = seq['features']['rhymescontentwords_beatoffset'] melismastate = seq['features']['melismastate'] phrase_count = max(phrase_ix) + 1 pgrams['scaledegreefirst'] = pd.array([scaledegree[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16") pgrams['scaledegreesecond'] = pd.array([scaledegree[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16") pgrams['scaledegreethird'] = pd.array([scaledegree[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16") pgrams['scaledegreefourth'] = pd.array([scaledegree[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16") pgrams['scaledegreefifth'] = pd.array([scaledegree[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16") arfftype['scaledegreefirst'] = 'numeric' arfftype['scaledegreesecond'] = 'numeric' arfftype['scaledegreethird'] = 'numeric' arfftype['scaledegreefourth'] = 'numeric' arfftype['scaledegreefifth'] = 'numeric' pgrams['diatonicpitchfirst'] = pd.array([diatonicpitch[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16") pgrams['diatonicpitchsecond'] = pd.array([diatonicpitch[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16") pgrams['diatonicpitchthird'] = pd.array([diatonicpitch[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16") pgrams['diatonicpitchfourth'] = pd.array([diatonicpitch[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16") pgrams['diatonicpitchfifth'] = pd.array([diatonicpitch[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16") arfftype['diatonicpitchfirst'] = 'numeric' arfftype['diatonicpitchsecond'] = 'numeric' arfftype['diatonicpitchthird'] = 'numeric' arfftype['diatonicpitchfourth'] = 'numeric' 
arfftype['diatonicpitchfifth'] = 'numeric' pgrams['midipitchfirst'] = pd.array([midipitch[int(ix)] for ix in pgrams['ix0_0']], dtype="Int16") pgrams['midipitchsecond'] = pd.array([midipitch[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16") pgrams['midipitchthird'] = pd.array([midipitch[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16") pgrams['midipitchfourth'] = pd.array([midipitch[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16") pgrams['midipitchfifth'] = pd.array([midipitch[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16") arfftype['midipitchfirst'] = 'numeric' arfftype['midipitchsecond'] = 'numeric' arfftype['midipitchthird'] = 'numeric' arfftype['midipitchfourth'] = 'numeric' arfftype['midipitchfifth'] = 'numeric' pgrams['intervalfirst'] = pd.array([chromaticinterval[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix0_0']], dtype="Int16") pgrams['intervalsecond'] = pd.array([chromaticinterval[int(ix)] for ix in pgrams['ix1_0']], dtype="Int16") pgrams['intervalthird'] = pd.array([chromaticinterval[int(ix)] for ix in pgrams['ix2_0']], dtype="Int16") pgrams['intervalfourth'] = pd.array([chromaticinterval[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']], dtype="Int16") pgrams['intervalfifth'] = pd.array([chromaticinterval[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']], dtype="Int16") arfftype['intervalfirst'] = 'numeric' arfftype['intervalsecond'] = 'numeric' arfftype['intervalthird'] = 'numeric' arfftype['intervalfourth'] = 'numeric' arfftype['intervalfifth'] = 'numeric' parsons = {-1:'-', 0:'=', 1:'+'} #intervalcontour is not a good feature. Pitchcontour would be better. This will be in the cross-relations #pgrams['intervalcontoursecond'] = [parsons[np.sign(int2 - int1)] if not pd.isna(int1) else np.nan for int1, int2 in \ # zip(pgrams['intervalfirst'],pgrams['intervalsecond'])] #pgrams['intervalcontourthird'] = [parsons[np.sign(int2 - int1)] for int1, int2 in \ # zip(pgrams['intervalsecond'],pgrams['intervalthird'])] #pgrams['intervalcontourfourth'] = [parsons[np.sign(int2 - int1)] if not pd.isna(int2) else np.nan for int1, int2 in \ # zip(pgrams['intervalthird'],pgrams['intervalfourth'])] #pgrams['intervalcontourfifth'] = [parsons[np.sign(int2 - int1)] if not pd.isna(int2) else np.nan for int1, int2 in \ # zip(pgrams['intervalfourth'],pgrams['intervalfifth'])] #arfftype['intervalcontoursecond'] = '{-,=,+}' #arfftype['intervalcontourthird'] = '{-,=,+}' #arfftype['intervalcontourfourth'] = '{-,=,+}' #arfftype['intervalcontourfifth'] = '{-,=,+}' #intervals of which second tone has center of gravity according to Vos 2002 + octave equivalents VosCenterGravityASC = np.array([1, 5, 8]) VosCenterGravityDESC = np.array([-2, -4, -6, -7, -11]) VosCenterGravity = list(VosCenterGravityDESC-24) + \ list(VosCenterGravityDESC-12) + \ list(VosCenterGravityDESC) + \ list(VosCenterGravityASC) + \ list(VosCenterGravityASC+12) + \ list(VosCenterGravityASC+24) pgrams['VosCenterGravityfirst'] = [interval in VosCenterGravity if not pd.isna(interval) else np.nan for interval in pgrams['intervalfirst']] pgrams['VosCenterGravitysecond'] = [interval in VosCenterGravity for interval in pgrams['intervalsecond']] pgrams['VosCenterGravitythird'] = [interval in VosCenterGravity for interval in pgrams['intervalthird']] pgrams['VosCenterGravityfourth'] = [interval in VosCenterGravity if not pd.isna(interval) else np.nan for interval in pgrams['intervalfourth']] pgrams['VosCenterGravityfifth'] = 
[interval in VosCenterGravity if not pd.isna(interval) else np.nan for interval in pgrams['intervalfifth']] arfftype['VosCenterGravityfirst'] = '{True, False}' arfftype['VosCenterGravitysecond'] = '{True, False}' arfftype['VosCenterGravitythird'] = '{True, False}' arfftype['VosCenterGravityfourth'] = '{True, False}' arfftype['VosCenterGravityfifth'] = '{True, False}' VosHarmony = { 0: 0, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 1, 7: 6, 8: 5, 9: 4, 10: 3, 11: 2, 12: 7 } #interval modulo one octave, but 0 only for absolute unison (Vos 2002, p.633) def vosint(intervals): return [((np.sign(i)*i-1)%12+1 if i!=0 else 0) if not pd.isna(i) else np.nan for i in intervals] pgrams['VosHarmonyfirst'] = pd.array([VosHarmony[interval] if not pd.isna(interval) else np.nan for interval in vosint(pgrams['intervalfirst'])], dtype="Int16") pgrams['VosHarmonysecond'] = pd.array([VosHarmony[interval] for interval in vosint(pgrams['intervalsecond'])], dtype="Int16") pgrams['VosHarmonythird'] = pd.array([VosHarmony[interval] for interval in vosint(pgrams['intervalthird'])], dtype="Int16") pgrams['VosHarmonyfourth'] = pd.array([VosHarmony[interval] if not pd.isna(interval) else np.nan for interval in vosint(pgrams['intervalfourth'])], dtype="Int16") pgrams['VosHarmonyfifth'] = pd.array([VosHarmony[interval] if not pd.isna(interval) else np.nan for interval in vosint(pgrams['intervalfifth'])], dtype="Int16") arfftype['VosHarmonyfirst'] = 'numeric' arfftype['VosHarmonysecond'] = 'numeric' arfftype['VosHarmonythird'] = 'numeric' arfftype['VosHarmonyfourth'] = 'numeric' arfftype['VosHarmonyfifth'] = 'numeric' if 'informationcontent' in seq['features'].keys(): informationcontent = seq['features']['informationcontent'] pgrams['informationcontentfirst'] = [informationcontent[int(ix)] for ix in pgrams['ix0_0']] pgrams['informationcontentsecond'] = [informationcontent[int(ix)] for ix in pgrams['ix1_0']] pgrams['informationcontentthird'] = [informationcontent[int(ix)] for ix in pgrams['ix2_0']] pgrams['informationcontentfourth'] = [informationcontent[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']] pgrams['informationcontentfifth'] = [informationcontent[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']] arfftype['informationcontentfirst'] = 'numeric' arfftype['informationcontentsecond'] = 'numeric' arfftype['informationcontentthird'] = 'numeric' arfftype['informationcontentfourth'] = 'numeric' arfftype['informationcontentfifth'] = 'numeric' pgrams['contourfirst'] = [parsons[np.sign(i)] if not pd.isna(i) else np.nan for i in pgrams['intervalfirst']] pgrams['contoursecond'] = [parsons[np.sign(i)] for i in pgrams['intervalsecond']] pgrams['contourthird'] = [parsons[np.sign(i)] for i in pgrams['intervalthird']] pgrams['contourfourth'] = [parsons[np.sign(i)] if not pd.isna(i) else np.nan for i in pgrams['intervalfourth']] pgrams['contourfifth'] = [parsons[np.sign(i)] if not pd.isna(i) else np.nan for i in pgrams['intervalfifth']] arfftype['contourfirst'] = '{-,=,+}' arfftype['contoursecond'] = '{-,=,+}' arfftype['contourthird'] = '{-,=,+}' arfftype['contourfourth'] = '{-,=,+}' arfftype['contourfifth'] = '{-,=,+}' ###########################################3 #derived features from Interval and Contour pgrams['registraldirectionchange'] = [cont_sec != cont_third for cont_sec, cont_third in \ zip(pgrams['contoursecond'], pgrams['contourthird'])] arfftype['registraldirectionchange'] = '{True, False}' pgrams['largetosmall'] = [int_first >= 6 and int_second <=4 for int_first, int_second in \ 
zip(pgrams['intervalsecond'], pgrams['intervalthird'])] arfftype['largetosmall'] = '{True, False}' pgrams['contourreversal'] = [(i[0] == '-' and i[1] == '+') or (i[0]=='+' and i[1]=='-') \ for i in zip(pgrams['contoursecond'], pgrams['contourthird'])] arfftype['contourreversal'] = '{True, False}' pgrams['isascending'] = \ (pgrams['diatonicpitchfirst'] < pgrams['diatonicpitchsecond']) & \ (pgrams['diatonicpitchsecond'] < pgrams['diatonicpitchthird']) arfftype['isascending'] = '{True, False}' pgrams['isdescending'] = \ (pgrams['diatonicpitchfirst'] > pgrams['diatonicpitchsecond']) & \ (pgrams['diatonicpitchsecond'] > pgrams['diatonicpitchthird']) arfftype['isdescending'] = '{True, False}' diat = pgrams[['diatonicpitchfirst','diatonicpitchsecond','diatonicpitchthird']].values pgrams['ambitus'] = diat.max(1) - diat.min(1) arfftype['ambitus'] = 'numeric' pgrams['containsleap'] = \ (abs(pgrams['diatonicpitchsecond'] - pgrams['diatonicpitchfirst']) > 1) | \ (abs(pgrams['diatonicpitchthird'] - pgrams['diatonicpitchsecond']) > 1) arfftype['containsleap'] = '{True, False}' ###########################################3 pgrams['numberofnotesfirst'] = pd.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix0_0'],pgrams['ix0_1'])], dtype="Int16") pgrams['numberofnotessecond'] = pd.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix1_0'],pgrams['ix1_1'])], dtype="Int16") pgrams['numberofnotesthird'] = pd.array([ix2 - ix1 for ix1, ix2 in zip(pgrams['ix2_0'],pgrams['ix2_1'])], dtype="Int16") pgrams['numberofnotesfourth'] = pd.array([ix2 - ix1 if not pd.isna(ix1) else np.nan for ix1, ix2 in zip(pgrams['ix3_0'],pgrams['ix3_1'])], dtype="Int16") pgrams['numberofnotesfifth'] = pd.array([ix2 - ix1 if not pd.isna(ix1) else np.nan for ix1, ix2 in zip(pgrams['ix4_0'],pgrams['ix4_1'])], dtype="Int16") arfftype['numberofnotesfirst'] = 'numeric' arfftype['numberofnotessecond'] = 'numeric' arfftype['numberofnotesthird'] = 'numeric' arfftype['numberofnotesfourth'] = 'numeric' arfftype['numberofnotesfifth'] = 'numeric' if seq['freemeter']: pgrams['meternumerator'] = pd.array([np.nan for ix in pgrams['ix0_0']], dtype="Int16") pgrams['meterdenominator'] = pd.array([np.nan for ix in pgrams['ix0_0']], dtype="Int16") else: pgrams['meternumerator'] = pd.array([int(timesig[ix].split('/')[0]) for ix in pgrams['ix0_0']], dtype="Int16") pgrams['meterdenominator'] = pd.array([int(timesig[ix].split('/')[1]) for ix in pgrams['ix0_0']], dtype="Int16") arfftype['meternumerator'] = 'numeric' arfftype['meterdenominator'] = 'numeric' pgrams['nextisrestfirst'] = [nextisrest[ix-1] for ix in pgrams['ix0_1']] pgrams['nextisrestsecond'] = [nextisrest[ix-1] for ix in pgrams['ix1_1']] pgrams['nextisrestthird'] = [nextisrest[ix-1] for ix in pgrams['ix2_1']] pgrams['nextisrestfourth'] = [nextisrest[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']] pgrams['nextisrestfifth'] = [nextisrest[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']] arfftype['nextisrestfirst'] = '{True, False}' arfftype['nextisrestsecond'] = '{True, False}' arfftype['nextisrestthird'] = '{True, False}' arfftype['nextisrestfourth'] = '{True, False}' arfftype['nextisrestfifth'] = '{True, False}' pgrams['beatstrengthfirst'] = [beatstrength[int(ix)] for ix in pgrams['ix0_0']] pgrams['beatstrengthsecond'] = [beatstrength[int(ix)] for ix in pgrams['ix1_0']] pgrams['beatstrengththird'] = [beatstrength[int(ix)] for ix in pgrams['ix2_0']] pgrams['beatstrengthfourth'] = [beatstrength[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']] 
pgrams['beatstrengthfifth'] = [beatstrength[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']] arfftype['beatstrengthfirst'] = 'numeric' arfftype['beatstrengthsecond'] = 'numeric' arfftype['beatstrengththird'] = 'numeric' arfftype['beatstrengthfourth'] = 'numeric' arfftype['beatstrengthfifth'] = 'numeric' #these will be in crossrelations: beatstrengthfirstsecond, etc. #pgrams['metriccontourfirst'] = [metriccontour[int(ix)] for ix in pgrams['ix0_0']] #pgrams['metriccontoursecond'] = [metriccontour[int(ix)] for ix in pgrams['ix1_0']] #pgrams['metriccontourthird'] = [metriccontour[int(ix)] for ix in pgrams['ix2_0']] #pgrams['metriccontourfourth'] = [metriccontour[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']] #pgrams['metriccontourfifth'] = [metriccontour[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']] #arfftype['metriccontourfirst'] = '{-,=,+}' #arfftype['metriccontoursecond'] = '{-,=,+}' #arfftype['metriccontourthird'] = '{-,=,+}' #arfftype['metriccontourfourth'] = '{-,=,+}' #arfftype['metriccontourfifth'] = '{-,=,+}' pgrams['IOIbeatfractionfirst'] = [computeSumFractions(IOIbeatfraction, startix, endix) for \ startix, endix in zip(pgrams['ix0_0'],pgrams['ix0_1'])] pgrams['IOIbeatfractionsecond'] = [computeSumFractions(IOIbeatfraction, startix, endix) for \ startix, endix in zip(pgrams['ix1_0'],pgrams['ix1_1'])] pgrams['IOIbeatfractionthird'] = [computeSumFractions(IOIbeatfraction, startix, endix) for \ startix, endix in zip(pgrams['ix2_0'],pgrams['ix2_1'])] pgrams['IOIbeatfractionfourth'] = [computeSumFractions(IOIbeatfraction, startix, endix) if not pd.isna(startix) else np.nan for \ startix, endix in zip(pgrams['ix3_0'],pgrams['ix3_1'])] pgrams['IOIbeatfractionfifth'] = [computeSumFractions(IOIbeatfraction, startix, endix) if not pd.isna(startix) else np.nan for \ startix, endix in zip(pgrams['ix4_0'],pgrams['ix4_1'])] arfftype['IOIbeatfractionfirst'] = 'numeric' arfftype['IOIbeatfractionsecond'] = 'numeric' arfftype['IOIbeatfractionthird'] = 'numeric' arfftype['IOIbeatfractionfourth'] = 'numeric' arfftype['IOIbeatfractionfifth'] = 'numeric' pgrams['durationcummulation'] = [((d2 > d1) and (d3 > d2)) for d1, d2, d3 in \ zip(pgrams['IOIbeatfractionfirst'],pgrams['IOIbeatfractionsecond'],pgrams['IOIbeatfractionthird'])] arfftype['durationcummulation'] = '{True, False}' #these will be in crossrelation: IOIbeatfractionfirstsecond, etc. 
#pgrams['durationcontoursecond'] = [parsons[np.sign(dur2 - dur1)] for dur1, dur2 in \ # zip(pgrams['IOIbeatfractionfirst'],pgrams['IOIbeatfractionsecond'])] #pgrams['durationcontourthird'] = [parsons[np.sign(dur2 - dur1)] for dur1, dur2 in \ # zip(pgrams['IOIbeatfractionsecond'],pgrams['IOIbeatfractionthird'])] #pgrams['durationcontourfourth'] = [parsons[np.sign(dur2 - dur1)] if not pd.isna(dur2) else np.nan for dur1, dur2 in \ # zip(pgrams['IOIbeatfractionthird'],pgrams['IOIbeatfractionfourth'])] #pgrams['durationcontourfifth'] = [parsons[np.sign(dur2 - dur1)] if not pd.isna(dur2) else np.nan for dur1, dur2 in \ # zip(pgrams['IOIbeatfractionfourth'],pgrams['IOIbeatfractionfifth'])] #arfftype['durationcontoursecond'] = '{-,=,+}' #arfftype['durationcontourthird'] = '{-,=,+}' #arfftype['durationcontourfourth'] = '{-,=,+}' #arfftype['durationcontourfifth'] = '{-,=,+}' pgrams['onthebeatfirst'] = [Fraction(beatinsong[int(ix)]) % 1 == 0 for ix in pgrams['ix0_0']] pgrams['onthebeatsecond'] = [Fraction(beatinsong[int(ix)]) % 1 == 0 for ix in pgrams['ix1_0']] pgrams['onthebeatthird'] = [Fraction(beatinsong[int(ix)]) % 1 == 0 for ix in pgrams['ix2_0']] pgrams['onthebeatfourth'] = [Fraction(beatinsong[int(ix)]) % 1 == 0 if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']] pgrams['onthebeatfifth'] = [Fraction(beatinsong[int(ix)]) % 1 == 0 if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']] arfftype['onthebeatfirst'] = '{True, False}' arfftype['onthebeatsecond'] = '{True, False}' arfftype['onthebeatthird'] = '{True, False}' arfftype['onthebeatfourth'] = '{True, False}' arfftype['onthebeatfifth'] = '{True, False}' pgrams['completesmeasurephrase'] = [completesmeasure_phrase[ix-1] for ix in pgrams['ix2_1']] pgrams['completesmeasuresong'] = [completesmeasure_song[ix-1] for ix in pgrams['ix2_1']] pgrams['completesbeatphrase'] = [completesbeat_phrase[ix-1] for ix in pgrams['ix2_1']] pgrams['completesbeatsong'] = [completesbeat_song[ix-1] for ix in pgrams['ix2_1']] arfftype['completesmeasurephrase'] = '{True, False}' arfftype['completesmeasuresong'] = '{True, False}' arfftype['completesbeatphrase'] = '{True, False}' arfftype['completesbeatsong'] = '{True, False}' if 'grouper' in seq['features'].keys(): grouper = seq['features']['grouper'] pgrams['grouperfirst'] = [grouper[int(ix)] for ix in pgrams['ix0_0']] pgrams['groupersecond'] = [grouper[int(ix)] for ix in pgrams['ix1_0']] pgrams['grouperthird'] = [grouper[int(ix)] for ix in pgrams['ix2_0']] pgrams['grouperfourth'] = [grouper[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_0']] pgrams['grouperfifth'] = [grouper[int(ix)] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_0']] arfftype['grouperfirst'] = '{True, False}' arfftype['groupersecond'] = '{True, False}' arfftype['grouperthird'] = '{True, False}' arfftype['grouperfourth'] = '{True, False}' arfftype['grouperfifth'] = '{True, False}' #values for final note of third group pgrams['noteoffset'] = pd.array([(ix-1) - phrasestart_ix[(ix-1)] for ix in pgrams['ix2_1']], dtype="Int16") pgrams['beatoffset'] = [float(Fraction(beatinphrase[ix-1])) - \ float(Fraction(beatinphrase[phrasestart_ix[(ix-1)]])) \ for ix in pgrams['ix2_1']] arfftype['noteoffset'] = 'numeric' arfftype['beatoffset'] = 'numeric' pgrams['beatduration'] = [getBeatDuration(timesig[int(ix)]) for ix in pgrams['ix0_0']] pgrams['beatcount'] = pd.array([m21.meter.TimeSignature(timesig[int(ix)]).beatCount for ix in pgrams['ix0_0']], dtype="Int16") arfftype['beatduration'] = 'numeric' arfftype['beatcount'] = 'numeric' #get 
values for the last note! pgrams['gpr2afirst'] = [gpr2a[ix-1] for ix in pgrams['ix0_1']] pgrams['gpr2asecond'] = [gpr2a[ix-1] for ix in pgrams['ix1_1']] pgrams['gpr2athird'] = [gpr2a[ix-1] for ix in pgrams['ix2_1']] pgrams['gpr2afourth'] = [gpr2a[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']] pgrams['gpr2afifth'] = [gpr2a[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']] arfftype['gpr2afirst'] = 'numeric' arfftype['gpr2asecond'] = 'numeric' arfftype['gpr2athird'] = 'numeric' arfftype['gpr2afourth'] = 'numeric' arfftype['gpr2afifth'] = 'numeric' pgrams['gpr2bfirst'] = [gpr2b[ix-1] for ix in pgrams['ix0_1']] pgrams['gpr2bsecond'] = [gpr2b[ix-1] for ix in pgrams['ix1_1']] pgrams['gpr2bthird'] = [gpr2b[ix-1] for ix in pgrams['ix2_1']] pgrams['gpr2bfourth'] = [gpr2b[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']] pgrams['gpr2bfifth'] = [gpr2b[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']] arfftype['gpr2bfirst'] = 'numeric' arfftype['gpr2bsecond'] = 'numeric' arfftype['gpr2bthird'] = 'numeric' arfftype['gpr2bfourth'] = 'numeric' arfftype['gpr2bfifth'] = 'numeric' pgrams['gpr3afirst'] = [gpr3a[ix-1] for ix in pgrams['ix0_1']] pgrams['gpr3asecond'] = [gpr3a[ix-1] for ix in pgrams['ix1_1']] pgrams['gpr3athird'] = [gpr3a[ix-1] for ix in pgrams['ix2_1']] pgrams['gpr3afourth'] = [gpr3a[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']] pgrams['gpr3afifth'] = [gpr3a[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']] arfftype['gpr3afirst'] = 'numeric' arfftype['gpr3asecond'] = 'numeric' arfftype['gpr3athird'] = 'numeric' arfftype['gpr3afourth'] = 'numeric' arfftype['gpr3afifth'] = 'numeric' pgrams['gpr3dfirst'] = [gpr3d[ix-1] for ix in pgrams['ix0_1']] pgrams['gpr3dsecond'] = [gpr3d[ix-1] for ix in pgrams['ix1_1']] pgrams['gpr3dthird'] = [gpr3d[ix-1] for ix in pgrams['ix2_1']] pgrams['gpr3dfourth'] = [gpr3d[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']] pgrams['gpr3dfifth'] = [gpr3d[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']] arfftype['gpr3dfirst'] = 'numeric' arfftype['gpr3dsecond'] = 'numeric' arfftype['gpr3dthird'] = 'numeric' arfftype['gpr3dfourth'] = 'numeric' arfftype['gpr3dfifth'] = 'numeric' pgrams['gprsumfirst'] = [gprsum[ix-1] for ix in pgrams['ix0_1']] pgrams['gprsumsecond'] = [gprsum[ix-1] for ix in pgrams['ix1_1']] pgrams['gprsumthird'] = [gprsum[ix-1] for ix in pgrams['ix2_1']] pgrams['gprsumfourth'] = [gprsum[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix3_1']] pgrams['gprsumfifth'] = [gprsum[ix-1] if not pd.isna(ix) else np.nan for ix in pgrams['ix4_1']] arfftype['gprsumfirst'] = 'numeric' arfftype['gprsumsecond'] = 'numeric' arfftype['gprsumthird'] = 'numeric' arfftype['gprsumfourth'] = 'numeric' arfftype['gprsumfifth'] = 'numeric' pgrams['pitchproximityfirst'] = pd.array([pprox[ix] for ix in pgrams['ix0_0']], dtype="Int16") pgrams['pitchproximitysecond'] =
pd.array([pprox[ix] for ix in pgrams['ix1_0']], dtype="Int16")
pandas.array
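# A minimal sketch of pandas.array with a nullable integer dtype, the API
# labelled above. The capital-I "Int16" dtype yields an extension array that
# stores missing entries as pd.NA rather than casting everything to float,
# which is why the feature-extraction code above can mix np.nan into its
# integer-valued pgram columns. The values below are illustrative only.
import numpy as np
import pandas as pd

vals = pd.array([1, 2, np.nan, 4], dtype="Int16")
print(vals.dtype)     # Int16
print(pd.isna(vals))  # [False False  True False]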
from gsflow import PrmsData, GsflowModel
from gsflow.prms import PrmsDay
import pandas as pd
import numpy as np
import os

# workspace: the directory this test file lives in
ws = os.path.abspath(os.path.dirname(__file__))


def test_empty_prms_data():
    # an empty parameter dict should still construct a valid PrmsData object
    data = PrmsData({})
    assert isinstance(data, PrmsData)


def test_build_prms_data():
    data_df =
pd.DataFrame()
pandas.DataFrame
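# A minimal sketch of pandas.DataFrame construction, the API labelled above.
# The column names and values are placeholders, not the actual gsflow
# parameter schema.
import pandas as pd

# construct directly from a dict of columns...
df = pd.DataFrame({"name": ["p1", "p2"], "value": [1.0, 2.0]})

# ...or start empty and assign columns afterwards, as the truncated test does
empty = pd.DataFrame()
empty["name"] = ["p1", "p2"]
print(df.shape, empty.shape)  # (2, 2) (2, 1)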
""" Performs a k means clustering and creates a graph over time of the topics """ import pymongo from pymongo import MongoClient import numpy as np import pandas as pd from time import time import matplotlib.pyplot as plt import seaborn as sns from sklearn.cluster import KMeans from sklearn.feature_extraction.text import TfidfVectorizer # and the number of clusters: N_CLUSTERS = 5 # Wrap everything in a if __name__ == "__main__": # to avoid threading problems if __name__ == "__main__": # Connect to mongodb, and use the enron_mail_2 db. This is 5k emails cn = MongoClient("localhost") db = cn.enron_mail_2 # get all of the messages emails = db.mail.find() # put it into a dataframe to feed into scikit learn emails_df = pd.DataFrame(list(emails)) print(emails_df.shape) # print(emails_df.head(5)) # use a term frequency vectorizer vect = TfidfVectorizer(sublinear_tf=True, min_df=0.05, max_df=0.5, stop_words="english", token_pattern=r"\b[A-Za-z]{3}[A-Za-z]*\b") # k-means clustering with N_CLUSTERS, 50 iterations should do it. Initialise 10 times to make sure clf = KMeans(n_clusters=N_CLUSTERS, random_state=0, max_iter=50, init="k-means++", n_init=10, verbose=True, n_jobs=-1) print("Clustering data with %s" % clf) # run clustering, takes about 20 minutes on a fast PC. t0 = time() word_vec = vect.fit_transform(emails_df["text"]) labels = clf.fit_predict(word_vec) print("done in %0.3fs" % (time() - t0)) word_vec_2d = word_vec.todense() emails_df["label"] = labels # get the centroids of the clusters and order them order_centroids = clf.cluster_centers_.argsort()[ :, ::-1] terms = vect.get_feature_names() # get the top terms for each of the clusters for i in range(N_CLUSTERS): print("Cluster %d:" % i, end="") for ind in order_centroids[i, :10]: print(" %s" % terms[ind], end="") print() # arrange the summary dataframe to pivot by month and date summary_df = emails_df summary_df["date"] =
pd.to_datetime(summary_df["date"])
pandas.to_datetime
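# A minimal sketch of pandas.to_datetime, the API labelled above. The sample
# timestamps are made up; errors="coerce" turns unparseable strings into NaT,
# and the parsed column can then be bucketed by month, which is what the
# clustering script needs for its per-month pivot.
import pandas as pd

dates = pd.Series(["2001-05-14 16:39:00", "not a date"])
parsed = pd.to_datetime(dates, errors="coerce")
print(parsed.dt.to_period("M"))  # 2001-05, then NaT for the bad entry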
import operator from enum import Enum from typing import Union, Any, Optional, Hashable import numpy as np import pandas as pd import pandas_flavor as pf from pandas.core.construction import extract_array from pandas.core.dtypes.common import ( is_categorical_dtype, is_datetime64_dtype, is_dtype_equal, is_extension_array_dtype, is_numeric_dtype, is_string_dtype, ) from pandas.core.reshape.merge import _MergeOperation from janitor.utils import check, check_column @pf.register_dataframe_method def conditional_join( df: pd.DataFrame, right: Union[pd.DataFrame, pd.Series], *conditions, how: str = "inner", sort_by_appearance: bool = False, df_columns: Optional[Any] = None, right_columns: Optional[Any] = None, ) -> pd.DataFrame: """ This is a convenience function that operates similarly to `pd.merge`, but allows joins on inequality operators, or a combination of equi and non-equi joins. Join solely on equality are not supported. If the join is solely on equality, `pd.merge` function covers that; if you are interested in nearest joins, or rolling joins, or the first match (lowest or highest) - `pd.merge_asof` covers that. There is also the IntervalIndex, which is usually more efficient for range joins, especially if the intervals do not overlap. Column selection in `df_columns` and `right_columns` is possible using the [`select_columns`][janitor.functions.select_columns.select_columns] syntax. This function returns rows, if any, where values from `df` meet the condition(s) for values from `right`. The conditions are passed in as a variable argument of tuples, where the tuple is of the form `(left_on, right_on, op)`; `left_on` is the column label from `df`, `right_on` is the column label from `right`, while `op` is the operator. For multiple conditions, the and(`&`) operator is used to combine the results of the individual conditions. The operator can be any of `==`, `!=`, `<=`, `<`, `>=`, `>`. A binary search is used to get the relevant rows for non-equi joins; this avoids a cartesian join, and makes the process less memory intensive. For equi-joins, Pandas internal merge function is used. The join is done only on the columns. MultiIndex columns are not supported. For non-equi joins, only numeric and date columns are supported. Only `inner`, `left`, and `right` joins are supported. If the columns from `df` and `right` have nothing in common, a single index column is returned; else, a MultiIndex column is returned. Example: >>> import pandas as pd >>> import janitor >>> df1 = pd.DataFrame({"value_1": [2, 5, 7, 1, 3, 4]}) >>> df2 = pd.DataFrame({"value_2A": [0, 3, 7, 12, 0, 2, 3, 1], ... "value_2B": [1, 5, 9, 15, 1, 4, 6, 3], ... }) >>> df1 value_1 0 2 1 5 2 7 3 1 4 3 5 4 >>> df2 value_2A value_2B 0 0 1 1 3 5 2 7 9 3 12 15 4 0 1 5 2 4 6 3 6 7 1 3 >>> df1.conditional_join( ... df2, ... ("value_1", "value_2A", ">="), ... ("value_1", "value_2B", "<=") ... ) value_1 value_2A value_2B 0 2 1 3 1 2 2 4 2 5 3 5 3 5 3 6 4 7 7 9 5 1 0 1 6 1 0 1 7 1 1 3 8 3 1 3 9 3 2 4 10 3 3 5 11 3 3 6 12 4 2 4 13 4 3 5 14 4 3 6 :param df: A pandas DataFrame. :param right: Named Series or DataFrame to join to. :param conditions: Variable argument of tuple(s) of the form `(left_on, right_on, op)`, where `left_on` is the column label from `df`, `right_on` is the column label from `right`, while `op` is the operator. The operator can be any of `==`, `!=`, `<=`, `<`, `>=`, `>`. For multiple conditions, the and(`&`) operator is used to combine the results of the individual conditions. 
:param how: Indicates the type of join to be performed. It can be one of `inner`, `left`, `right`. Full join is not supported. Defaults to `inner`. :param sort_by_appearance: Default is `False`. This is useful for strictly non-equi joins, where the user wants the original order maintained. If True, values from `df` and `right` that meet the join condition will be returned in the final dataframe in the same order that they were before the join. :param df_columns: Columns to select from `df`. It can be a single column or a list of columns. It is also possible to rename the output columns via a dictionary. :param right_columns: Columns to select from `right`. It can be a single column or a list of columns. It is also possible to rename the output columns via a dictionary. :returns: A pandas DataFrame of the two merged Pandas objects. """ return _conditional_join_compute( df, right, conditions, how, sort_by_appearance, df_columns, right_columns, ) class _JoinOperator(Enum): """ List of operators used in conditional_join. """ GREATER_THAN = ">" LESS_THAN = "<" GREATER_THAN_OR_EQUAL = ">=" LESS_THAN_OR_EQUAL = "<=" STRICTLY_EQUAL = "==" NOT_EQUAL = "!=" class _JoinTypes(Enum): """ List of join types for conditional_join. """ INNER = "inner" LEFT = "left" RIGHT = "right" operator_map = { _JoinOperator.STRICTLY_EQUAL.value: operator.eq, _JoinOperator.LESS_THAN.value: operator.lt, _JoinOperator.LESS_THAN_OR_EQUAL.value: operator.le, _JoinOperator.GREATER_THAN.value: operator.gt, _JoinOperator.GREATER_THAN_OR_EQUAL.value: operator.ge, _JoinOperator.NOT_EQUAL.value: operator.ne, } less_than_join_types = { _JoinOperator.LESS_THAN.value, _JoinOperator.LESS_THAN_OR_EQUAL.value, } greater_than_join_types = { _JoinOperator.GREATER_THAN.value, _JoinOperator.GREATER_THAN_OR_EQUAL.value, } def _check_operator(op: str): """ Check that operator is one of `>`, `>=`, `==`, `!=`, `<`, `<=`. Used in `conditional_join`. """ sequence_of_operators = {op.value for op in _JoinOperator} if op not in sequence_of_operators: raise ValueError( "The conditional join operator " f"should be one of {sequence_of_operators}" ) def _conditional_join_preliminary_checks( df: pd.DataFrame, right: Union[pd.DataFrame, pd.Series], conditions: tuple, how: str, sort_by_appearance: bool, df_columns: Any, right_columns: Any, ) -> tuple: """ Preliminary checks for conditional_join are conducted here. Checks include differences in number of column levels, length of conditions, existence of columns in dataframe, etc. """ check("right", right, [pd.DataFrame, pd.Series]) df = df.copy() right = right.copy() if isinstance(right, pd.Series): if not right.name: raise ValueError( "Unnamed Series are not supported for conditional_join." ) right = right.to_frame() if df.columns.nlevels != right.columns.nlevels: raise ValueError( "The number of column levels " "from the left and right frames must match. " "The number of column levels from the left dataframe " f"is {df.columns.nlevels}, while the number of column levels " f"from the right dataframe is {right.columns.nlevels}." ) if not conditions: raise ValueError("Kindly provide at least one join condition.") for condition in conditions: check("condition", condition, [tuple]) len_condition = len(condition) if len_condition != 3: raise ValueError( "condition should have only three elements; " f"{condition} however is of length {len_condition}." 
) for left_on, right_on, op in conditions: check("left_on", left_on, [Hashable]) check("right_on", right_on, [Hashable]) check("operator", op, [str]) check_column(df, [left_on]) check_column(right, [right_on]) _check_operator(op) if all( (op == _JoinOperator.STRICTLY_EQUAL.value for *_, op in conditions) ): raise ValueError("Equality only joins are not supported.") check("how", how, [str]) checker = {jointype.value for jointype in _JoinTypes} if how not in checker: raise ValueError(f"'how' should be one of {checker}.") check("sort_by_appearance", sort_by_appearance, [bool]) if (df.columns.nlevels > 1) and ( isinstance(df_columns, dict) or isinstance(right_columns, dict) ): raise ValueError( "Column renaming with a dictionary is not supported " "for MultiIndex columns." ) return ( df, right, conditions, how, sort_by_appearance, df_columns, right_columns, ) def _conditional_join_type_check( left_column: pd.Series, right_column: pd.Series, op: str ) -> None: """ Raise error if column type is not any of numeric or datetime or string. """ permitted_types = { is_datetime64_dtype, is_numeric_dtype, is_string_dtype, is_categorical_dtype, } for func in permitted_types: if func(left_column): break else: raise ValueError( "conditional_join only supports " "string, category, numeric, or date dtypes (without timezone) - " f"'{left_column.name} is of type {left_column.dtype}." ) lk_is_cat = is_categorical_dtype(left_column) rk_is_cat = is_categorical_dtype(right_column) if lk_is_cat & rk_is_cat: if not left_column.array._categories_match_up_to_permutation( right_column.array ): raise ValueError( f"'{left_column.name}' and '{right_column.name}' " "should have the same categories, and the same order." ) elif not is_dtype_equal(left_column, right_column): raise ValueError( f"Both columns should have the same type - " f"'{left_column.name}' has {left_column.dtype} type;" f"'{right_column.name}' has {right_column.dtype} type." ) if (op in less_than_join_types.union(greater_than_join_types)) & ( (is_string_dtype(left_column) | is_categorical_dtype(left_column)) ): raise ValueError( "non-equi joins are supported " "only for datetime and numeric dtypes. " f"{left_column.name} in condition " f"({left_column.name}, {right_column.name}, {op}) " f"has a dtype {left_column.dtype}." ) return None def _conditional_join_compute( df: pd.DataFrame, right: pd.DataFrame, conditions: list, how: str, sort_by_appearance: bool, df_columns: Any, right_columns: Any, ) -> pd.DataFrame: """ This is where the actual computation for the conditional join takes place. A pandas DataFrame is returned. 
""" ( df, right, conditions, how, sort_by_appearance, df_columns, right_columns, ) = _conditional_join_preliminary_checks( df, right, conditions, how, sort_by_appearance, df_columns, right_columns, ) eq_check = False le_lt_check = False for condition in conditions: left_on, right_on, op = condition _conditional_join_type_check(df[left_on], right[right_on], op) if op == _JoinOperator.STRICTLY_EQUAL.value: eq_check = True elif op in less_than_join_types.union(greater_than_join_types): le_lt_check = True df.index = range(len(df)) right.index = range(len(right)) multiple_conditions = len(conditions) > 1 if not multiple_conditions: left_on, right_on, op = conditions[0] result = _generic_func_cond_join( df[left_on], right[right_on], op, multiple_conditions ) if result is None: return _create_conditional_join_empty_frame( df, right, how, df_columns, right_columns ) return _create_conditional_join_frame( df, right, *result, how, sort_by_appearance, df_columns, right_columns, ) if eq_check: result = _multiple_conditional_join_eq(df, right, conditions) elif le_lt_check: result = _multiple_conditional_join_le_lt(df, right, conditions) else: result = _multiple_conditional_join_ne(df, right, conditions) if result is None: return _create_conditional_join_empty_frame( df, right, how, df_columns, right_columns ) return _create_conditional_join_frame( df, right, *result, how, sort_by_appearance, df_columns, right_columns ) def _less_than_indices( left_c: pd.Series, right_c: pd.Series, strict: bool, ) -> tuple: """ Use binary search to get indices where left_c is less than or equal to right_c. If strict is True, then only indices where `left_c` is less than (but not equal to) `right_c` are returned. A tuple of integer indexes for left_c and right_c is returned. """ # no point going through all the hassle if left_c.min() > right_c.max(): return None any_nulls = pd.isna(right_c) if any_nulls.any(): right_c = right_c[~any_nulls] if right_c.empty: return None any_nulls = pd.isna(left_c) if any_nulls.any(): left_c = left_c[~any_nulls] if left_c.empty: return None any_nulls = None if not right_c.is_monotonic_increasing: right_c = right_c.sort_values(kind="stable") left_index = left_c.index.to_numpy(dtype=int, copy=False) left_c = extract_array(left_c, extract_numpy=True) right_index = right_c.index.to_numpy(dtype=int, copy=False) right_c = extract_array(right_c, extract_numpy=True) search_indices = right_c.searchsorted(left_c, side="left") # if any of the positions in `search_indices` # is equal to the length of `right_keys` # that means the respective position in `left_c` # has no values from `right_c` that are less than # or equal, and should therefore be discarded len_right = right_c.size rows_equal = search_indices == len_right if rows_equal.any(): left_c = left_c[~rows_equal] left_index = left_index[~rows_equal] search_indices = search_indices[~rows_equal] # the idea here is that if there are any equal values # shift to the right to the immediate next position # that is not equal if strict: rows_equal = right_c[search_indices] rows_equal = left_c == rows_equal # replace positions where rows are equal # with positions from searchsorted('right') # positions from searchsorted('right') will never # be equal and will be the furthermost in terms of position # example : right_c -> [2, 2, 2, 3], and we need # positions where values are not equal for 2; # the furthermost will be 3, and searchsorted('right') # will return position 3. 
if rows_equal.any(): replacements = right_c.searchsorted(left_c, side="right") # now we can safely replace values # with strictly less than positions search_indices = np.where(rows_equal, replacements, search_indices) # check again if any of the values # have become equal to length of right_c # and get rid of them rows_equal = search_indices == len_right if rows_equal.any(): left_c = left_c[~rows_equal] left_index = left_index[~rows_equal] search_indices = search_indices[~rows_equal] if not search_indices.size: return None right_c = [right_index[ind:len_right] for ind in search_indices] right_c = np.concatenate(right_c) left_c = np.repeat(left_index, len_right - search_indices) return left_c, right_c def _greater_than_indices( left_c: pd.Series, right_c: pd.Series, strict: bool, multiple_conditions: bool, ) -> tuple: """ Use binary search to get indices where left_c is greater than or equal to right_c. If strict is True, then only indices where `left_c` is greater than (but not equal to) `right_c` are returned. if multiple_conditions is False, a tuple of integer indexes for left_c and right_c is returned; else a tuple of the index for left_c, right_c, as well as the positions of left_c in right_c is returned. """ # quick break, avoiding the hassle if left_c.max() < right_c.min(): return None any_nulls = pd.isna(right_c) if any_nulls.any(): right_c = right_c[~any_nulls] if right_c.empty: return None any_nulls = pd.isna(left_c) if any_nulls.any(): left_c = left_c[~any_nulls] if left_c.empty: return None any_nulls = None if not right_c.is_monotonic_increasing: right_c = right_c.sort_values(kind="stable") left_index = left_c.index.to_numpy(dtype=int, copy=False) left_c = extract_array(left_c, extract_numpy=True) right_index = right_c.index.to_numpy(dtype=int, copy=False) right_c = extract_array(right_c, extract_numpy=True) search_indices = right_c.searchsorted(left_c, side="right") # if any of the positions in `search_indices` # is equal to 0 (less than 1), it implies that # left_c[position] is not greater than any value # in right_c rows_equal = search_indices < 1 if rows_equal.any(): left_c = left_c[~rows_equal] left_index = left_index[~rows_equal] search_indices = search_indices[~rows_equal] # the idea here is that if there are any equal values # shift downwards to the immediate next position # that is not equal if strict: rows_equal = right_c[search_indices - 1] rows_equal = left_c == rows_equal # replace positions where rows are equal with # searchsorted('left'); # however there can be scenarios where positions # from searchsorted('left') would still be equal; # in that case, we shift down by 1 if rows_equal.any(): replacements = right_c.searchsorted(left_c, side="left") # return replacements # `left` might result in values equal to len right_c replacements = np.where( replacements == right_c.size, replacements - 1, replacements ) # now we can safely replace values # with strictly greater than positions search_indices = np.where(rows_equal, replacements, search_indices) # any value less than 1 should be discarded # since the lowest value for binary search # with side='right' should be 1 rows_equal = search_indices < 1 if rows_equal.any(): left_c = left_c[~rows_equal] left_index = left_index[~rows_equal] search_indices = search_indices[~rows_equal] if not search_indices.size: return None if multiple_conditions: return left_index, right_index, search_indices right_c = [right_index[:ind] for ind in search_indices] right_c = np.concatenate(right_c) left_c = np.repeat(left_index, 
search_indices) return left_c, right_c def _not_equal_indices(left_c: pd.Series, right_c: pd.Series) -> tuple: """ Use binary search to get indices where `left_c` is exactly not equal to `right_c`. It is a combination of strictly less than and strictly greater than indices. A tuple of integer indexes for left_c and right_c is returned. """ dummy = np.array([], dtype=int) # deal with nulls l1_nulls = dummy r1_nulls = dummy l2_nulls = dummy r2_nulls = dummy any_left_nulls = left_c.isna() any_right_nulls = right_c.isna() if any_left_nulls.any(): l1_nulls = left_c.index[any_left_nulls.array] l1_nulls = l1_nulls.to_numpy(copy=False) r1_nulls = right_c.index # avoid NAN duplicates if any_right_nulls.any(): r1_nulls = r1_nulls[~any_right_nulls.array] r1_nulls = r1_nulls.to_numpy(copy=False) nulls_count = l1_nulls.size # blow up nulls to match length of right l1_nulls = np.tile(l1_nulls, r1_nulls.size) # ensure length of right matches left if nulls_count > 1: r1_nulls = np.repeat(r1_nulls, nulls_count) if any_right_nulls.any(): r2_nulls = right_c.index[any_right_nulls.array] r2_nulls = r2_nulls.to_numpy(copy=False) l2_nulls = left_c.index nulls_count = r2_nulls.size # blow up nulls to match length of left r2_nulls = np.tile(r2_nulls, l2_nulls.size) # ensure length of left matches right if nulls_count > 1: l2_nulls = np.repeat(l2_nulls, nulls_count) l1_nulls = np.concatenate([l1_nulls, l2_nulls]) r1_nulls = np.concatenate([r1_nulls, r2_nulls]) outcome = _less_than_indices(left_c, right_c, strict=True) if outcome is None: lt_left = dummy lt_right = dummy else: lt_left, lt_right = outcome outcome = _greater_than_indices( left_c, right_c, strict=True, multiple_conditions=False ) if outcome is None: gt_left = dummy gt_right = dummy else: gt_left, gt_right = outcome left_c = np.concatenate([lt_left, gt_left, l1_nulls]) right_c = np.concatenate([lt_right, gt_right, r1_nulls]) if (not left_c.size) & (not right_c.size): return None return left_c, right_c def _eq_indices( left_c: pd.Series, right_c: pd.Series, ) -> tuple: """ Use binary search to get indices where left_c is equal to right_c. Returns a tuple of the left_index, right_index, lower_boundary and upper_boundary. """ # no point going through all the hassle if left_c.min() > right_c.max(): return None if left_c.max() < right_c.min(): return None any_nulls = pd.isna(right_c) if any_nulls.any(): right_c = right_c[~any_nulls] if right_c.empty: return None any_nulls = pd.isna(left_c) if any_nulls.any(): left_c = left_c[~any_nulls] if left_c.empty: return None any_nulls = None if not right_c.is_monotonic_increasing: right_c = right_c.sort_values(kind="stable") left_index = left_c.index.to_numpy(dtype=int, copy=False) left_c = extract_array(left_c, extract_numpy=True) right_index = right_c.index.to_numpy(dtype=int, copy=False) right_c = extract_array(right_c, extract_numpy=True) lower_boundary = right_c.searchsorted(left_c, side="left") upper_boundary = right_c.searchsorted(left_c, side="right") keep_rows = lower_boundary < upper_boundary if not keep_rows.any(): return None if not keep_rows.all(): left_index = left_index[keep_rows] lower_boundary = lower_boundary[keep_rows] upper_boundary = upper_boundary[keep_rows] return left_index, right_index, lower_boundary, upper_boundary def _generic_func_cond_join( left_c: pd.Series, right_c: pd.Series, op: str, multiple_conditions: bool, ) -> tuple: """ Generic function to call any of the individual functions (_less_than_indices, _greater_than_indices, or _not_equal_indices). 
""" strict = False if op in { _JoinOperator.GREATER_THAN.value, _JoinOperator.LESS_THAN.value, _JoinOperator.NOT_EQUAL.value, }: strict = True if op in less_than_join_types: return _less_than_indices(left_c, right_c, strict) elif op in greater_than_join_types: return _greater_than_indices( left_c, right_c, strict, multiple_conditions ) elif op == _JoinOperator.NOT_EQUAL.value: return _not_equal_indices(left_c, right_c) def _generate_indices( left_index: np.ndarray, right_index: np.ndarray, conditions: list ) -> tuple: """ Run a for loop to get the final indices. This iteratively goes through each condition, builds a boolean array, and gets indices for rows that meet the condition requirements. `conditions` is a list of tuples, where a tuple is of the form: `(Series from df, Series from right, operator)`. """ for condition in conditions: left_c, right_c, op = condition left_c = extract_array(left_c, extract_numpy=True)[left_index] right_c = extract_array(right_c, extract_numpy=True)[right_index] op = operator_map[op] mask = op(left_c, right_c) if not mask.any(): return None if is_extension_array_dtype(mask): mask = mask.to_numpy(dtype=bool, na_value=False) if not mask.all(): left_index = left_index[mask] right_index = right_index[mask] return left_index, right_index def _multiple_conditional_join_ne( df: pd.DataFrame, right: pd.DataFrame, conditions: list ) -> tuple: """ Get indices for multiple conditions, where all the operators are `!=`. Returns a tuple of (left_index, right_index) """ # currently, there is no optimization option here # not equal typically combines less than # and greater than, so a lot more rows are returned # than just less than or greater than # here we get indices for the first condition in conditions # then use those indices to get the final indices, # using _generate_indices first, *rest = conditions left_on, right_on, op = first # get indices from the first condition result = _generic_func_cond_join( df[left_on], right[right_on], op, multiple_conditions=False ) if result is None: return None rest = ( (df[left_on], right[right_on], op) for left_on, right_on, op in rest ) return _generate_indices(*result, rest) def _multiple_conditional_join_eq( df: pd.DataFrame, right: pd.DataFrame, conditions: list ) -> tuple: """ Get indices for multiple conditions, if any of the conditions has an `==` operator. Returns a tuple of (df_index, right_index) """ # TODO # this uses the idea in the `_range_indices` function # for less than and greater than; # I'd like to believe there is a smarter/more efficient way of doing this # where the filter occurs within the join, and avoids a blow-up # the current implementation uses # a list comprehension to find first matches # in a bid to reduce the blow up size ... # this applies only to integers/dates # and only offers advantages in scenarios # where the right is duplicated # for one to many joins, # or one to one or strings/category, use merge # as it is significantly faster than a binary search eqs = [ (left_on, right_on) for left_on, right_on, op in conditions if op == _JoinOperator.STRICTLY_EQUAL.value ] left_on, right_on = zip(*eqs) left_on = [*left_on] right_on = [*right_on] strings_or_category = any( col for col in left_on if (
is_string_dtype(df[col])
pandas.core.dtypes.common.is_string_dtype
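# --- Illustrative aside (not a dataset row) ---------------------------------
# A hedged, minimal sketch of the binary-search idea the conditional_join
# snippet above describes: for a strict `left < right` join, sort the right
# keys once and use searchsorted to find where each left value's matching
# right rows begin, instead of materialising a cartesian product. This is a
# simplified variant of the snippet's _less_than_indices; the toy data below
# is invented.
import numpy as np
import pandas as pd

left = pd.Series([2, 5, 7], name="value_1")
right = pd.Series([3, 1, 6, 4], name="value_2").sort_values(kind="stable")

right_vals = right.to_numpy()
start = right_vals.searchsorted(left.to_numpy(), side="right")  # strict "<"

# Each left row pairs with every right row from its start position onwards.
left_idx = np.repeat(left.index.to_numpy(), len(right_vals) - start)
right_idx = np.concatenate([right.index.to_numpy()[s:] for s in start])
print(list(zip(left_idx, right_idx)))  # [(0, 0), (0, 3), (0, 2), (1, 2)]
# -----------------------------------------------------------------------------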
""" OBJECT RECOGNITION USING A SPIKING NEURAL NETWORK. * The data preparation module. @author: atenagm1375 """ import os import numpy as np import pandas as pd from torch.utils.data import Dataset import cv2 class CaltechDataset(Dataset): """ CaltechDataset class. Attributes ---------- caltech_dataset_loader : utils.data.CaltechDatasetLoader An instance of CaltechDatasetLoader. train : bool, optional Defines whether to load the train instances or the test. The default is True. Keyword Arguments ----------------- size_low : int The size of first GaussianBlur filter. size_high : int The size of second GaussianBlur filter. """ def __init__(self, caltech_dataset_loader, train=True, **kwargs): self._cdl = caltech_dataset_loader if kwargs: self._cdl.apply_DoG(kwargs.get("size_low", 0), kwargs.get("size_high", 0)) self.dataframe = self._cdl.data_frame.iloc[ self._cdl.train_idx] if train else \ self._cdl.data_frame.iloc[self._cdl.test_idx] def __len__(self): """ Get number of instances in the dataset. Returns ------- int number of instances in the dataset. """ return len(self.dataframe) def __getitem__(self, index): """ Get value(s) at the described index. Returns the image matrix and one-hot encoded label of the instance(s) at location index. Parameters ---------- index : int The index to return values of. Returns ------- tuple of two numpy.arrays The tuple of image matrix and the label array. """ return self.dataframe["x"].iloc[index].astype(np.float32), \ self.dataframe[self._cdl.classes].iloc[index].values.astype( np.float32) class CaltechDatasetLoader: """ Loads the Caltech dataset. Attributes ---------- path : str Path to Caltech image folders. classes: list of str List of classes. image_size: tuple, optional The input image size. All images are resized to the specified size. The default is (100, 100). """ def __init__(self, path, classes, image_size=(100, 100)): self.classes = classes self.n_classes = len(classes) self.data_frame = pd.DataFrame() self.train_idx = [] self.test_idx = [] x = [] y = [] for obj in classes: cls_path = path + ("/" if path[-1] != "/" else "") + obj + "/" for img_path in os.listdir(cls_path): img = cv2.imread(cls_path + img_path, 0) img = cv2.resize(img, image_size, interpolation=cv2.INTER_CUBIC) x.append(img.reshape((1, *image_size))) y.append(obj) self.n_samples = len(y) self.data_frame = pd.DataFrame({"x": x, "y": y}, columns=["x", "y"]) enc = pd.get_dummies(self.data_frame["y"]) self.data_frame = pd.concat([self.data_frame, enc], axis=1) def apply_DoG(self, size_low, size_high): """ Apply DoG filter on input images. Parameters ---------- size_low : int The size of first GaussianBlur filter. size_high : int The size of second GaussianBlur filter. Returns ------- None. """ try: s1, s2 = (size_low, size_low), (size_high, size_high) self.data_frame["x"] = self.data_frame.x.apply( lambda im: cv2.GaussianBlur(im, s1, 0) - cv2.GaussianBlur(im, s2, 0)) except cv2.error: print("DoG failed to apply") pass def split_train_test(self, test_ratio=0.3): """ Split train and test samples. Parameters ---------- test_ratio : float, optional The ratio of test samples. The default is 0.3. Returns ------- x_train : numpy.array Train image data. x_test : numpy.array Test image data. y_train : numpy.array Train class labels. y_test : numpy.array Test class labels. """ train_df =
pd.DataFrame(columns=["x", *self.classes])
pandas.DataFrame
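# --- Illustrative aside (not a dataset row) ---------------------------------
# The CaltechDatasetLoader row above is cut off just as split_train_test
# starts building `train_df`. This is a hypothetical, self-contained sketch of
# one way a ratio-based split can be done with pandas; the tiny frame, its
# one-hot class columns and the 0.25 ratio are invented here and may not match
# the author's actual implementation.
import pandas as pd

df = pd.DataFrame({
    "x": [[0, 1], [1, 0], [1, 1], [0, 0]],   # stand-in image data
    "cat": [1, 0, 1, 0],                      # one-hot labels, as in the loader
    "dog": [0, 1, 0, 1],
})

test_idx = df.sample(frac=0.25, random_state=0).index   # hold out test rows
train_df = df.drop(index=test_idx)                      # remainder is training
test_df = df.loc[test_idx]
print(len(train_df), len(test_df))  # 3 1
# -----------------------------------------------------------------------------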
""" Implements a series of technical indicators used in finance and trading. """ import pandas as pd def ADX(data, ma, full_output=False): """ Calculate average directional index (ADX) for a given ohlc dataframe. Parameters ---------- data: pd.DataFrame DataFrame containing OHLC data. Columns must have the following labels: High, Low, Close. Open column is not mandatory. ma: int How many obversations will be used to calculate moving average full_output: bool Returns input data and support series used in calculation Returns ---------- pd.DataFrame With columns adx and dx For full output, dm_pos, dm_neg, tr, roll_tr, roll_dmp, roll_dmn, di_pos, di_neg, di_sum, di_diff are shown too """ # Handles input data if not isinstance(data, pd.DataFrame): raise TypeError('Input data is not a pandas DataFrame.') if not set(['High', 'Low', 'Close']).issubset(data.columns): raise IndexError('Missing necessary columns (High, Low or Close).') # Handles parameter input if not isinstance(ma, int): raise TypeError('ma parameter is not integer type.') full_df = pd.DataFrame() # Compute true range full_df['diff_hl'] = abs(data['High'] - data['Low']) full_df['diff_hc'] = abs(data['High'] - data['Close'].shift(1)) full_df['diff_lc'] = abs(data['Low'] - data['Close'].shift(1)) full_df['tr'] = full_df[['diff_hl', 'diff_hc', 'diff_lc']].max(axis=1) # Delete diff columns full_df = full_df.drop(['diff_hl', 'diff_hc', 'diff_lc'], axis=1) # Compute directional momentum full_df['dm_pos'] = data['High'] - data['High'].shift(1) full_df['dm_neg'] = data['Low'].shift(1) - data['Low'] # Only positive values full_df.loc[full_df['dm_pos'] < 0, 'dm_pos'] = 0 full_df.loc[full_df['dm_neg'] < 0, 'dm_neg'] = 0 # Take rolling sum full_df['roll_tr'] = full_df['tr'].rolling(ma).sum() full_df['roll_dmp'] = full_df['dm_pos'].rolling(ma).sum() full_df['roll_dmn'] = full_df['dm_neg'].rolling(ma).sum() # Compute new rolling sum roll_tr = [None for i in range(ma)] roll_dmp = [None for i in range(ma)] roll_dmn = [None for i in range(ma)] roll_tr.append(full_df['roll_tr'].iloc[ma]) roll_dmp.append(full_df['roll_dmp'].iloc[ma]) roll_dmn.append(full_df['roll_dmn'].iloc[ma]) # Don't know if there is a vector method to do that for i in range(ma+1, full_df.shape[0]): temp_tr = (roll_tr[-1] - (roll_tr[-1] / ma) + full_df.iloc[i, -6]) roll_tr.append(temp_tr) temp_dmp = (roll_dmp[-1] - (roll_dmp[-1] / ma) + full_df.iloc[i, -5]) roll_dmp.append(temp_dmp) temp_dmn = (roll_dmn[-1] - (roll_dmn[-1] / ma) + full_df.iloc[i, -4]) roll_dmn.append(temp_dmn) # Change series in df full_df['roll_tr'] = roll_tr full_df['roll_dmp'] = roll_dmp full_df['roll_dmn'] = roll_dmn # Compute directional indicator full_df['di_pos'] = 100 * (full_df['roll_dmp'] / full_df['roll_tr']) full_df['di_neg'] = 100 * (full_df['roll_dmn'] / full_df['roll_tr']) # Compute sum and diff full_df['di_sum'] = full_df['di_pos'] + full_df['di_neg'] full_df['di_diff'] = abs(full_df['di_pos'] - full_df['di_neg']) # Compute dx and rolling for adx full_df['dx'] = (full_df['di_diff'] / full_df['di_sum']) * 100 full_df['adx'] = full_df['dx'].rolling(ma).mean() # Same trick as for roll series adx = [None for i in range(2*ma-1)] adx.append(full_df['adx'].iloc[2*ma-1]) for i in range((2*ma), full_df.shape[0]): temp = (adx[-1] * (ma - 1) + full_df.iloc[i, -2]) / ma adx.append(temp) full_df['adx'] = adx # Prepares return df if full_output == True: df = data.copy() df = pd.concat([df, full_df], axis=1) else: df = full_df[['adx', 'dx']] return df def ATR(data, ma, full_output=False): """ Calculate average 
true range (ATR) for a given ohlc data. Parameters ---------- data: pd.DataFrame DataFrame containing OHLC data. Columns must have the following labels: High, Low, Close. Open column is not mandatory. ma: int How many obversations will be used to calculate moving average full_output: bool Returns input data and support series used in calculation Returns ---------- pd.DataFrame With columns atr and tr (true range) For full output, dff_hl, dff_hc and dff_lc are shown too """ # Handles input data if not isinstance(data, pd.DataFrame): raise TypeError('Input data is not a pandas DataFrame.') if not set(['High', 'Low', 'Close']).issubset(data.columns): raise IndexError('Missing necessary columns (High, Low or Close).') # Handles parameter input if not isinstance(ma, int): raise TypeError('ma parameter is not integer type.') full_df = pd.DataFrame() # Compute ranges full_df['dff_hl'] = abs(data['High'] - data['Low']) full_df['dff_hc'] = abs(data['High'] - data['Close'].shift(1)) full_df['dff_lc'] = abs(data['Low'] - data['Close'].shift(1)) full_df['tr'] = full_df[['dff_hl', 'dff_hc', 'dff_lc']].max(axis=1) full_df['atr'] = full_df['tr'].rolling(ma).mean() # Prepares return df if full_output == True: df = data.copy() df = pd.concat([df, full_df], axis=1) else: df = full_df[['atr', 'tr']] return df def bollband(data, ma, full_output=False): ''' Calculate bollinger bands for a given series. Parameters ---------- data: pd.Series/pd.DataFrame Series or dataframe to calculate bands. If df is passed, it must have a close or adjusted close column with the following labels: - Close - close - Adj Close - adj close ma: int Moving average parameter Returns ---------- pd.DataFrame With columns bollband_up and bollband_low. For full output, ma is shown too ''' # Handles input data if isinstance(data, pd.DataFrame): # All possibles names for close column possible_cols = ['Close', 'close', 'Adj Close', 'adj close'] # Select them cols = cols = [col for col in data.columns if col in possible_cols] # Check if there's only one close column if len(cols) > 1: raise KeyError('Ambiguous number of possible close prices column.') elif len(cols) == 0: raise IndexError('No close column. Pass desired column as a pd.Series.') # Copy data as series series = data[cols[0]].copy() elif isinstance(data, pd.Series): series = data.copy() else: raise TypeError('Input data is not a pandas Series or DataFrame.') # Handles parameter input if not isinstance(ma, int): raise TypeError('ma parameter is not integer type.') full_df = pd.DataFrame() full_df['ma'] = series.rolling(ma).mean() full_df['bollband_up'] = full_df['ma'] + 2 * full_df['ma'].rolling(ma).std() full_df['bollband_low'] = full_df['ma'] - 2 * full_df['ma'].rolling(ma).std() # Prepares return df if full_output == True: df = data.copy() df = pd.concat([df, full_df], axis=1) else: df = full_df[['bollband_up', 'bollband_low']] return df def MACD(data, slow, fast, ma, full_output=False): """ Calculate moving average convergence divergence (MACD) for a given time series (usually close prices). Parameters ---------- data: pd.Series/pd.DataFrame Series or dataframe to calculate MACD. 
If df is passed, it must have a close or adjusted close column with the following labels: - Close - close - Adj Close - adj close slow: int How many observations the slow line will look back fast: int How many obversations the fast line will look back ma: int How many obversations will be used to calculate moving average full_output: bool Returns input data and support series used in calculation Returns ---------- pd.DataFrame With columns macd_line and macd_signal For full output, slow_ma and fast_ma are shown too """ # Handles input data if isinstance(data, pd.DataFrame): # All possibles names for close column possible_cols = ['Close', 'close', 'Adj Close', 'adj close'] # Select them cols = cols = [col for col in data.columns if col in possible_cols] # Check if there's only one close column if len(cols) > 1: raise KeyError('Ambiguous number of possible close prices column.') elif len(cols) == 0: raise IndexError('No close column. Pass desired column as a pd.Series.') # Copy data as series series = data[cols[0]].copy() elif isinstance(data, pd.Series): series = data.copy() else: raise TypeError('Input data is not a pandas Series or DataFrame.') # Handles parameters inputs for parameter in [slow, fast, ma]: if not isinstance(parameter, int): raise TypeError('One or more parameters are not integer type.') if slow <= fast: raise ValueError('Slow line must have a value bigger than fast line') full_df =
pd.DataFrame()
pandas.DataFrame
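# --- Illustrative aside (not a dataset row) ---------------------------------
# The MACD function above is truncated right after `full_df = pd.DataFrame()`,
# before its moving averages are computed. As a hedged illustration only, this
# sketch shows the common exponential-moving-average formulation of the MACD
# line and signal; the original may instead use simple rolling means, and the
# price series and spans below are invented.
import pandas as pd

close = pd.Series([10.0, 10.5, 10.2, 10.8, 11.0, 10.9, 11.3, 11.6, 11.4, 11.8])
slow, fast, ma = 6, 3, 4

slow_ma = close.ewm(span=slow, adjust=False).mean()
fast_ma = close.ewm(span=fast, adjust=False).mean()

macd = pd.DataFrame({
    "macd_line": fast_ma - slow_ma,
    "macd_signal": (fast_ma - slow_ma).ewm(span=ma, adjust=False).mean(),
})
print(macd.tail(3))
# -----------------------------------------------------------------------------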
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Jun 9 16:26:30 2021 @author: mike """ import os import yaml import numpy as np import pandas as pd from hilltoppy import web_service as ws import requests from time import sleep # from sklearn.neighbors import LocalOutlierFactor pd.options.display.max_columns = 10 ###########################################################3 ### Parameters base_path = os.path.realpath(os.path.dirname(__file__)) with open(os.path.join(base_path, 'parameters.yml')) as param: param = yaml.safe_load(param) mtypes = param['source']['mtypes'] base_url = param['source']['api_endpoint'] hts = param['source']['hts'] std_factor = param['source']['std_factor'] iqr_factor = param['source']['iqr_factor'] date1 = pd.Timestamp.now().round('s').isoformat() stats_csv = 'summary_stats_{date}.csv'.format(date=date1) std_csv = 'std_outliers_{date}.csv'.format(date=date1) iqr_csv = 'iqr_outliers_{date}.csv'.format(date=date1) dtl_csv = 'detection_limit_outliers_{date}.csv'.format(date=date1) min_max_csv = 'min_max_outliers_{date}.csv'.format(date=date1) ########################################################### ### Helper functions def get_stations(base_url, hts, mtype): """ Function to get the stations/sites associated with a particular measurement type. Parameters ---------- base_url : str The endpoint url for the Hilltop server. hts : str The hts "file" that is added to the end of the base_url. mtype : str The measurement type to query. Returns ------- DataFrame """ stns1 = ws.site_list(base_url, hts, location='LatLong') # There's a problem with Hilltop that requires running the site list without a measurement first... stns1 = ws.site_list(base_url, hts, location='LatLong', measurement=mtype) stns2 = stns1[(stns1.lat > -47.5) & (stns1.lat < -34) & (stns1.lon > 166) & (stns1.lon < 179)].dropna().copy() stns2.rename(columns={'SiteName': 'ref'}, inplace=True) return stns2 def get_results(base_url, hts, mtype, ref): """ Function to get the time series results and associated stats from one or many sites associated with a particular measurement type. Parameters ---------- base_url : str The endpoint url for the Hilltop server. hts : str The hts "file" that is added to the end of the base_url. mtype : str The measurement type to query. ref : str The reference id of the site. 
Returns ------- Three DataFrames results, detection limits, and stats """ ### Get data res_list = [] for s in ref: timer = 5 while timer > 0: try: res = ws.get_data(base_url, hts, s, mtype).Value break except requests.exceptions.ConnectionError as err: print(s + ' and ' + mtype + ' error: ' + str(err)) timer = timer - 1 sleep(30) except ValueError as err: print(s + ' and ' + mtype + ' error: ' + str(err)) break except Exception as err: print(str(err)) timer = timer - 1 sleep(30) if timer == 0: raise ValueError('The Hilltop request tried too many times...the server is probably down') res_list.append(res) res1 = pd.concat(res_list) ### Process DTLs dtl1 = res1[res1.str.contains('<')] dtl1 = pd.to_numeric(dtl1.str.replace('<', '')).to_frame() dtl1['censored'] = '<' dtl2 = res1[res1.str.contains('>')] dtl2 = pd.to_numeric(dtl2.str.replace('>', '')).to_frame() dtl2['censored'] = '>' dtl3 = pd.concat([dtl1, dtl2]) ### Remove DTLs from results res2 = res1.loc[~res1.index.isin(dtl3.index)] res2 = pd.to_numeric(res2, errors='coerce').dropna() ### Run stats grp1 = res2.reset_index().groupby(['Site', 'Measurement']) dtl_count = dtl3.reset_index().groupby(['Site', 'Measurement']).Value.count() dtl_count.name = 'DTL count' data_count = grp1.Value.count() total_count = data_count.add(dtl_count, fill_value=0).astype(int) total_count.name = 'total count' mean1 = grp1.Value.mean().round(3) mean1.name = 'mean' median1 = grp1.Value.median().round(3) median1.name = 'median' max1 = grp1.Value.max().round(3) max1.name = 'max' min1 = grp1.Value.min().round(3) min1.name = 'min' q1 = grp1.Value.quantile(0.25).round(3) q1.name = 'Q1' q3 = grp1.Value.quantile(0.75).round(3) q3.name = 'Q3' std1 = grp1.Value.std().round(3) std1.name = 'standard deviation' from_date = grp1['DateTime'].min() from_date.name = 'start date' to_date = grp1['DateTime'].max() to_date.name = 'end date' ### Make stats df stats_df1 = pd.concat([total_count, dtl_count, from_date, to_date, min1, q1, median1, mean1, q3, max1, std1], axis=1) ### return return res2, dtl3, stats_df1 def std_outliers(res, stats, factor): """ Function to assess outliers according to the number of standard deviations from the mean. Parameters ---------- res : DataFrame the time series results from the get_results function. stats : DataFrame the stats results from the get_results function. factor : int, float The number of standard deviations to use. Returns ------- DataFrame """ col_name1 = 'mean + std*' + str(factor) std1 = (stats['mean'] + (stats['standard deviation']*factor)) std1.name = col_name1 col_name2 = 'mean - std*' + str(factor) std2 = (stats['mean'] - (stats['standard deviation']*factor)) std2.name = col_name2 std2.loc[std2 < 0] = 0 std = pd.concat([std1, std2], axis=1) data1 = pd.merge(res.reset_index(), std.reset_index(), on=['Site', 'Measurement']) data2 = data1[data1['Value'] > data1[col_name1]] data3 = data1[data1['Value'] < data1[col_name2]] data4 = pd.concat([data2, data3]) return data4 def iqr_outliers(res, stats, factor): """ Function to assess outliers according to the number of interquartile ranges (IQR) from the 3rd quartile. Parameters ---------- res : DataFrame the time series results from the get_results function. stats : DataFrame the stats results from the get_results function. factor : int, float The number of IQRs to use. 
Returns ------- DataFrame """ col_name1 = 'Q3 + IQR*' + str(factor) std1 = (stats['Q3'] + (stats['Q3'] - stats['Q1'])*factor) std1.name = col_name1 col_name2 = 'Q3 - IQR*' + str(factor) std2 = (stats['Q3'] - (stats['Q3'] - stats['Q1'])*factor) std2.name = col_name2 std2.loc[std2 < 0] = 0 std =
pd.concat([std1, std2], axis=1)
pandas.concat
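# --- Illustrative aside (not a dataset row) ---------------------------------
# Minimal sketch of the IQR fence logic the truncated iqr_outliers function
# appears to follow, mirroring the std_outliers function shown in full above:
# build upper/lower bounds per (Site, Measurement), clip the lower bound at
# zero, and concatenate them side by side. The stats values here are invented.
import pandas as pd

stats = pd.DataFrame(
    {"Q1": [2.0, 0.1], "Q3": [6.0, 0.4]},
    index=pd.MultiIndex.from_tuples(
        [("SiteA", "Nitrate"), ("SiteB", "E. coli")], names=["Site", "Measurement"]
    ),
)
factor = 1.5
iqr = stats["Q3"] - stats["Q1"]
upper = (stats["Q3"] + iqr * factor).rename("Q3 + IQR*" + str(factor))
lower = (stats["Q3"] - iqr * factor).clip(lower=0).rename("Q3 - IQR*" + str(factor))

fences = pd.concat([upper, lower], axis=1)
print(fences)
# -----------------------------------------------------------------------------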
# Import required libraries import os, re from random import randint import flask from flask import Flask, render_template import dash import base64, io #from dash.dependencies import Input, Output import dash_core_components as dcc import dash_html_components as html from dash_table import DataTable import chart_studio import chart_studio.plotly as py import plotly.graph_objects as go import plotly.figure_factory as ff from sklearn import * from sklearn import svm from sklearn.model_selection import train_test_split, cross_val_score from sklearn.metrics import confusion_matrix, classification_report, \ precision_score, jaccard_score, matthews_corrcoef, f1_score import pandas as pd from numpy import * import datetime import itertools from datetime import datetime as dt layout = """ <!doctype html> <!--suppress ALL --> <html> <head> <title>Precipitable Water Model</title> <link rel="icon" href="https://github.com/physicsgoddess1972/Precipitable-Water-Model/blob/docs/docs/assets/img/icon.png?raw=true"> <link rel="shortcut icon" type="image/png" href="https://github.com/physicsgoddess1972/Precipitable-Water-Model/blob/docs/docs/assets/img/icon.png?raw=true"> {%scripts%} <script type='text/javascript' src='assets/js/jquery.min.js'></script> <script type='text/javascript' href='assets/js/materialize.min.js'></script> <script type='text/javascript' href='assets/js/sidenav.js'></script> <link rel="stylesheet" href="https://cdn.jsdelivr.net/gh/devicons/[email protected]/devicon.min.css"> <link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.6.0/css/all.css"> <link rel="stylesheet" href="https://fonts.googleapis.com/icon?family=Material+Icons" > <link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Source+Code+Pro&display=swap"> {%css%} </head> <body role='flatdoc'> <div class="demo-layout mdl-layout mdl-js-layout mdl-layout--fixed-drawer mdl-layout--fixed-header"> <header class="demo-header mdl-layout__header mdl-color--grey-100 mdl-color-text--grey-600"> <div class="mdl-layout__header-row"> <span class="mdl-layout-title">Precipitable Water Model</span> <div class="mdl-layout-spacer" style="padding-right: 50%;"></div> <a href="changelog.html#v2"> <div class="chip" style="height: 67%"> <div style="display: flex"> <div style="width: 10%;"> <i class="material-icons" style="padding-top: 8px">new_releases</i> </div> <div style="width: 90%; padding-left: 20px; padding-top: 6px"> <b>Version 2 is available</b> </div> </div> </div> </a> </div> </header> <div class="demo-drawer mdl-layout__drawer mdl-color--blue-grey-900 mdl-color-text--blue-grey-50"> <nav class="demo-navigation mdl-navigation mdl-color--blue-grey-900 sidebar"> <ul class="nav flex-column" id="nav_accordion"> <a class="mdl-navigation__link" id="dash-html" href="dash.html"><i class="material-icons" role="presentation">dashboard</i>Home</a> <li class="nav-item has-submenu"> <a class="mdl-navigation__link nav-link" href="#">Main Project<i class="material-icons" role="presentation">expand_more</i></a> <ul class="submenu collapse"> <a class="mdl-navigation__link" id="index-html" href="https://physicsgoddess1972.github.io/Precipitable-Water-Model/index.html"><i class="material-icons" role="presentation">chrome_reader_mode</i>Documentation</a> <a class="mdl-navigation__link" id="contrib-html" href="https://physicsgoddess1972.github.io/Precipitable-Water-Model/contrib.html"><i class="material-icons" role="presentation">group_add</i>Contribute</a> <a class="mdl-navigation__link" id="code-html" 
href="https://physicsgoddess1972.github.io/Precipitable-Water-Model/code.html"><i class="devicon-r-plain material-icons"></i>R Features</a> <a class="mdl-navigation__link" id="deployment-html" href="https://physicsgoddess1972.github.io/Precipitable-Water-Model/deployment.html"><i class="material-icons" role="presentation">build</i>Deployment</a> <a class="mdl-navigation__link" id="changelog-html" href="https://physicsgoddess1972.github.io/Precipitable-Water-Model/changelog.html"><i class="material-icons" role="presentation">new_releases</i>Changelog</a> </ul> </li> <li class="nav-item has-submenu"> <a class="mdl-navigation__link nav-link" data-toggle="dropdown" href="#">Side Projects <i class="material-icons" role="presentation">expand_more</i></a> <ul class="submenu collapse"> <a class="mdl-navigation__link" id="machine_learning-html" href="https://physicsgoddess1972.github.io/Precipitable-Water-Model/machine_learning.html"><i class="material-icons">memory</i>Machine Learning</a> <a class="mdl-navigation__link" id="automation-html" href="https://physicsgoddess1972.github.io/Precipitable-Water-Model/automation.html"><i class="material-icons">smart_toy</i>Automation</a> </ul> </li> <a class="mdl-navigation__link" id="research-html" href="https://physicsgoddess1972.github.io/Precipitable-Water-Model/research.html"><i class="material-icons">science</i>Research</a> <a class="mdl-navigation__link" onclick="$('#maintainers').modal('open');"><i class="material-icons" role="presentation">face</i>The Maintainers</a> <hr> <a class="mdl-navigation__link" href="https://github.com/physicsgoddess1972/Precipitable-Water-Model"><i class="material-icons"><i class="fab fa-github big-icon"></i></i> View on Github</a> <a class="mdl-navigation__link" href="https://github.com/physicsgoddess1972/Precipitable-Water-Model/archive/master.zip"><i class="material-icons" role="presentation">cloud_download</i>Download the Repo</a> <a class="mdl-navigation__link" href="https://github.com/physicsgoddess1972/Precipitable-Water-Model/issues"><i class="material-icons" role="presentation">bug_report</i>Bug Report</a> </ul> </nav> </div> <main class="mdl-layout__content"> <div id="modal-maintainers"></div> <div id="modal-introduction"></div> <div class="menubar" style="padding-right: -100%;"></div> <div class='content'> <a id="top"></a> <div class="collapsible"> <div class="collapsible-header"> <h2>Machine Learning Dashboard</h2> </div> <div class="panel"> {%app_entry%} </div> </div> </div> <nav class="bottom-nav" style="width: 100%;"> <a class="bottom-nav__action" href="#top"> <svg class="bottom-nav__icon" viewBox="0 0 24 24"> <path d="M4 12l1.41 1.41L11 7.83V20h2V7.83l5.58 5.59L20 12l-8-8-8 8z"/> </svg> <span class="bottom-nav__label">Back to Top</span> </a> <a class="bottom-nav__action--current" href="https://pw-ml-dash.uc.r.appspot.com/"> <i class="bottom-nav__icon material-icons" role="presentation" style="margin-bottom: -10px; margin-top: -18px">memory</i> <span class="bottom-nav__label">Machine Learning</span> </a> <a class="bottom-nav__action" href="https://pw-data-dash.uc.r.appspot.com/"> <i class="bottom-nav__icon material-icons" role="presentation" style="margin-bottom: -10px; margin-top: -18px">insights</i> <span class="bottom-nav__label">Data Dashboard</span> </a> <a class="bottom-nav__action" href="https://pw-map-dash.uc.r.appspot.com/"> <i class="bottom-nav__icon material-icons" role="presentation" style="margin-bottom: -10px; margin-top: -18px">travel_explore</i> <span class="bottom-nav__label">Import 
Config</span> </a> </nav> </main> </div> {%config%} <script src="https://code.getmdl.io/1.3.0/material.min.js"></script> <script src="./assets/js/modal1.js"></script> {%scripts%} {%renderer%} </body> </html> """ app = dash.Dash(__name__, assets_folder='assets', index_string=layout, external_scripts=['https://code.getmdl.io/1.3.0/material.min.js']) server = app.server df = pd.read_csv("https://raw.githubusercontent.com/physicsgoddess1972/Precipitable-Water-Model/pmat-socorro-nm/data/ml_data.csv") def parse_data(contents, filename, clear): if clear == 0: try: content_type, content_string = contents.split(',') decoded = base64.b64decode(content_string) df = pd.read_csv(io.StringIO(decoded.decode('utf-8'))) except AttributeError: df = pd.read_csv("https://raw.githubusercontent.com/physicsgoddess1972/Precipitable-Water-Model/pmat-socorro-nm/data/ml_data.csv") elif clear > 0: df = pd.read_csv("https://raw.githubusercontent.com/physicsgoddess1972/Precipitable-Water-Model/pmat-socorro-nm/data/ml_data.csv") return df def analysis(randstate, setting, checkopt, trainsize, data): ## Shoving data and labels into an array X = pd.DataFrame(array(data[data.columns[1:3]])) Y = pd.DataFrame(array(data[data.columns[-1]])) ## Redefining data labels to be -1 or 1 Y[Y == "clear sky"] = -1 Y[Y == "overcast"] = 1 X_train, X_test, y_train, y_test = train_test_split(X, Y, train_size=trainsize, random_state=randstate) svc = svm.SVC(kernel='linear', degree=5, C=2).fit(X_train.to_numpy().tolist(), y_train.to_numpy().tolist()) # # Minimum and Maximum values for the testing data x_min, x_max = X[0].min() - 1, X[0].max() + 1 y_min, y_max = 0, X[1].max() + 1 # # Analysis coefficients w = svc.coef_[0] a = -w[0] / w[1] xx, yy = meshgrid(arange(x_min, x_max, 0.2), arange(y_min, y_max, 0.2)) # # X-components of the support vectors and decision boundary db_x = linspace(x_min, x_max) # # X-component of the decision boundary db_y = a * db_x - svc.intercept_[0] / w[1] # # Y-components of the support vectors sv1_y = a * db_x - (svc.intercept_[0] - 1) / w[1] sv2_y = a * db_x - (svc.intercept_[0] + 1) / w[1] df_x1 = pd.DataFrame({'SV1': db_x}) df_y1 = pd.DataFrame({'SV1': sv1_y}) df_l1 = pd.DataFrame({'SV1': ["rgb(0, 0, 0)"]}) df_x2 = pd.DataFrame({'SV2': db_x}) df_y2 = pd.DataFrame({'SV2': sv2_y}) df_l2 = pd.DataFrame({'SV2': ["rgb(0, 0, 0)"]}) df_x3 = pd.DataFrame({'DB': db_x}) df_y3 = pd.DataFrame({'DB': db_y}) df_l3 = pd.DataFrame({'DB': ["rgb(202, 8, 205)"]}) if setting == "Training": title = "Training Dataset Temperature vs TPW" df_x = pd.DataFrame({'Training': X_train[0]}) df_y =
pd.DataFrame({'Training': X_train[1]})
pandas.DataFrame
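# --- Illustrative aside (not a dataset row) ---------------------------------
# A small, hedged sketch of the linear-SVM boundary algebra used in the
# dashboard's analysis() above: with w = clf.coef_[0] and intercept b, the
# decision boundary w0*x + w1*y + b = 0 rearranges to y = a*x - b/w1 with
# a = -w0/w1, which is what the snippet tabulates as db_x/db_y. The toy data
# below is generated here and is not the dashboard's dataset.
import numpy as np
import pandas as pd
from sklearn import svm

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0, 1, (20, 2)), rng.normal(4, 1, (20, 2))])
y = np.array([-1] * 20 + [1] * 20)

clf = svm.SVC(kernel="linear").fit(X, y)
w = clf.coef_[0]
a = -w[0] / w[1]

db_x = np.linspace(X[:, 0].min(), X[:, 0].max(), 50)
boundary = pd.DataFrame(
    {"x": db_x, "decision_boundary_y": a * db_x - clf.intercept_[0] / w[1]}
)
print(boundary.head())
# -----------------------------------------------------------------------------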
import random import numpy as np import pytest import pandas as pd from pandas import ( Categorical, DataFrame, NaT, Timestamp, date_range, ) import pandas._testing as tm class TestDataFrameSortValues: def test_sort_values(self): frame = DataFrame( [[1, 1, 2], [3, 1, 0], [4, 5, 6]], index=[1, 2, 3], columns=list("ABC") ) # by column (axis=0) sorted_df = frame.sort_values(by="A") indexer = frame["A"].argsort().values expected = frame.loc[frame.index[indexer]] tm.assert_frame_equal(sorted_df, expected) sorted_df = frame.sort_values(by="A", ascending=False) indexer = indexer[::-1] expected = frame.loc[frame.index[indexer]] tm.assert_frame_equal(sorted_df, expected) sorted_df = frame.sort_values(by="A", ascending=False) tm.assert_frame_equal(sorted_df, expected) # GH4839 sorted_df = frame.sort_values(by=["A"], ascending=[False]) tm.assert_frame_equal(sorted_df, expected) # multiple bys sorted_df = frame.sort_values(by=["B", "C"]) expected = frame.loc[[2, 1, 3]] tm.assert_frame_equal(sorted_df, expected) sorted_df = frame.sort_values(by=["B", "C"], ascending=False) tm.assert_frame_equal(sorted_df, expected[::-1]) sorted_df = frame.sort_values(by=["B", "A"], ascending=[True, False]) tm.assert_frame_equal(sorted_df, expected) msg = "No axis named 2 for object type DataFrame" with pytest.raises(ValueError, match=msg): frame.sort_values(by=["A", "B"], axis=2, inplace=True) # by row (axis=1): GH#10806 sorted_df = frame.sort_values(by=3, axis=1) expected = frame tm.assert_frame_equal(sorted_df, expected) sorted_df = frame.sort_values(by=3, axis=1, ascending=False) expected = frame.reindex(columns=["C", "B", "A"]) tm.assert_frame_equal(sorted_df, expected) sorted_df = frame.sort_values(by=[1, 2], axis="columns") expected = frame.reindex(columns=["B", "A", "C"]) tm.assert_frame_equal(sorted_df, expected) sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=[True, False]) tm.assert_frame_equal(sorted_df, expected) sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False) expected = frame.reindex(columns=["C", "B", "A"]) tm.assert_frame_equal(sorted_df, expected) msg = r"Length of ascending \(5\) != length of by \(2\)" with pytest.raises(ValueError, match=msg): frame.sort_values(by=["A", "B"], axis=0, ascending=[True] * 5) def test_sort_values_by_empty_list(self): # https://github.com/pandas-dev/pandas/issues/40258 expected = DataFrame({"a": [1, 4, 2, 5, 3, 6]}) result = expected.sort_values(by=[]) tm.assert_frame_equal(result, expected) assert result is not expected def test_sort_values_inplace(self): frame = DataFrame( np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"] ) sorted_df = frame.copy() return_value = sorted_df.sort_values(by="A", inplace=True) assert return_value is None expected = frame.sort_values(by="A") tm.assert_frame_equal(sorted_df, expected) sorted_df = frame.copy() return_value = sorted_df.sort_values(by=1, axis=1, inplace=True) assert return_value is None expected = frame.sort_values(by=1, axis=1) tm.assert_frame_equal(sorted_df, expected) sorted_df = frame.copy() return_value = sorted_df.sort_values(by="A", ascending=False, inplace=True) assert return_value is None expected = frame.sort_values(by="A", ascending=False) tm.assert_frame_equal(sorted_df, expected) sorted_df = frame.copy() return_value = sorted_df.sort_values( by=["A", "B"], ascending=False, inplace=True ) assert return_value is None expected = frame.sort_values(by=["A", "B"], ascending=False) tm.assert_frame_equal(sorted_df, expected) def test_sort_values_multicolumn(self): A = 
np.arange(5).repeat(20) B = np.tile(np.arange(5), 20) random.shuffle(A) random.shuffle(B) frame = DataFrame({"A": A, "B": B, "C": np.random.randn(100)}) result = frame.sort_values(by=["A", "B"]) indexer = np.lexsort((frame["B"], frame["A"])) expected = frame.take(indexer) tm.assert_frame_equal(result, expected) result = frame.sort_values(by=["A", "B"], ascending=False) indexer = np.lexsort( (frame["B"].rank(ascending=False), frame["A"].rank(ascending=False)) ) expected = frame.take(indexer) tm.assert_frame_equal(result, expected) result = frame.sort_values(by=["B", "A"]) indexer = np.lexsort((frame["A"], frame["B"])) expected = frame.take(indexer) tm.assert_frame_equal(result, expected) def test_sort_values_multicolumn_uint64(self): # GH#9918 # uint64 multicolumn sort df = DataFrame( { "a": pd.Series([18446637057563306014, 1162265347240853609]), "b": pd.Series([1, 2]), } ) df["a"] = df["a"].astype(np.uint64) result = df.sort_values(["a", "b"]) expected = DataFrame( { "a": pd.Series([18446637057563306014, 1162265347240853609]), "b": pd.Series([1, 2]), }, index=pd.Index([1, 0]), ) tm.assert_frame_equal(result, expected) def test_sort_values_nan(self): # GH#3917 df = DataFrame( {"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]} ) # sort one column only expected = DataFrame( {"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]}, index=[2, 0, 3, 1, 6, 4, 5], ) sorted_df = df.sort_values(["A"], na_position="first") tm.assert_frame_equal(sorted_df, expected) expected = DataFrame( {"A": [np.nan, 8, 6, 4, 2, 1, 1], "B": [5, 4, 5, 5, np.nan, 9, 2]}, index=[2, 5, 4, 6, 1, 0, 3], ) sorted_df = df.sort_values(["A"], na_position="first", ascending=False) tm.assert_frame_equal(sorted_df, expected) expected = df.reindex(columns=["B", "A"]) sorted_df = df.sort_values(by=1, axis=1, na_position="first") tm.assert_frame_equal(sorted_df, expected) # na_position='last', order expected = DataFrame( {"A": [1, 1, 2, 4, 6, 8, np.nan], "B": [2, 9, np.nan, 5, 5, 4, 5]}, index=[3, 0, 1, 6, 4, 5, 2], ) sorted_df = df.sort_values(["A", "B"]) tm.assert_frame_equal(sorted_df, expected) # na_position='first', order expected = DataFrame( {"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 2, 9, np.nan, 5, 5, 4]}, index=[2, 3, 0, 1, 6, 4, 5], ) sorted_df = df.sort_values(["A", "B"], na_position="first") tm.assert_frame_equal(sorted_df, expected) # na_position='first', not order expected = DataFrame( {"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]}, index=[2, 0, 3, 1, 6, 4, 5], ) sorted_df = df.sort_values(["A", "B"], ascending=[1, 0], na_position="first") tm.assert_frame_equal(sorted_df, expected) # na_position='last', not order expected = DataFrame( {"A": [8, 6, 4, 2, 1, 1, np.nan], "B": [4, 5, 5, np.nan, 2, 9, 5]}, index=[5, 4, 6, 1, 3, 0, 2], ) sorted_df = df.sort_values(["A", "B"], ascending=[0, 1], na_position="last") tm.assert_frame_equal(sorted_df, expected) def test_sort_values_stable_descending_sort(self): # GH#6399 df = DataFrame( [[2, "first"], [2, "second"], [1, "a"], [1, "b"]], columns=["sort_col", "order"], ) sorted_df = df.sort_values(by="sort_col", kind="mergesort", ascending=False) tm.assert_frame_equal(df, sorted_df) @pytest.mark.parametrize( "expected_idx_non_na, ascending", [ [ [3, 4, 5, 0, 1, 8, 6, 9, 7, 10, 13, 14], [True, True], ], [ [0, 3, 4, 5, 1, 8, 6, 7, 10, 13, 14, 9], [True, False], ], [ [9, 7, 10, 13, 14, 6, 8, 1, 3, 4, 5, 0], [False, True], ], [ [7, 10, 13, 14, 9, 6, 8, 1, 0, 3, 4, 5], [False, False], ], ], ) @pytest.mark.parametrize("na_position", 
["first", "last"]) def test_sort_values_stable_multicolumn_sort( self, expected_idx_non_na, ascending, na_position ): # GH#38426 Clarify sort_values with mult. columns / labels is stable df = DataFrame( { "A": [1, 2, np.nan, 1, 1, 1, 6, 8, 4, 8, 8, np.nan, np.nan, 8, 8], "B": [9, np.nan, 5, 2, 2, 2, 5, 4, 5, 3, 4, np.nan, np.nan, 4, 4], } ) # All rows with NaN in col "B" only have unique values in "A", therefore, # only the rows with NaNs in "A" have to be treated individually: expected_idx = ( [11, 12, 2] + expected_idx_non_na if na_position == "first" else expected_idx_non_na + [2, 11, 12] ) expected = df.take(expected_idx) sorted_df = df.sort_values( ["A", "B"], ascending=ascending, na_position=na_position ) tm.assert_frame_equal(sorted_df, expected) def test_sort_values_stable_categorial(self): # GH#16793 df = DataFrame({"x": Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)}) expected = df.copy() sorted_df = df.sort_values("x", kind="mergesort") tm.assert_frame_equal(sorted_df, expected) def test_sort_values_datetimes(self): # GH#3461, argsort / lexsort differences for a datetime column df = DataFrame( ["a", "a", "a", "b", "c", "d", "e", "f", "g"], columns=["A"], index=date_range("20130101", periods=9), ) dts = [ Timestamp(x) for x in [ "2004-02-11", "2004-01-21", "2004-01-26", "2005-09-20", "2010-10-04", "2009-05-12", "2008-11-12", "2010-09-28", "2010-09-28", ] ] df["B"] = dts[::2] + dts[1::2] df["C"] = 2.0 df["A1"] = 3.0 df1 = df.sort_values(by="A") df2 = df.sort_values(by=["A"]) tm.assert_frame_equal(df1, df2) df1 = df.sort_values(by="B") df2 = df.sort_values(by=["B"]) tm.assert_frame_equal(df1, df2) df1 = df.sort_values(by="B") df2 = df.sort_values(by=["C", "B"]) tm.assert_frame_equal(df1, df2) def test_sort_values_frame_column_inplace_sort_exception(self, float_frame): s = float_frame["A"] with pytest.raises(ValueError, match="This Series is a view"): s.sort_values(inplace=True) cp = s.copy() cp.sort_values() # it works! def test_sort_values_nat_values_in_int_column(self): # GH#14922: "sorting with large float and multiple columns incorrect" # cause was that the int64 value NaT was considered as "na". Which is # only correct for datetime64 columns. int_values = (2, int(NaT.value)) float_values = (2.0, -1.797693e308) df = DataFrame( {"int": int_values, "float": float_values}, columns=["int", "float"] ) df_reversed = DataFrame( {"int": int_values[::-1], "float": float_values[::-1]}, columns=["int", "float"], index=[1, 0], ) # NaT is not a "na" for int64 columns, so na_position must not # influence the result: df_sorted = df.sort_values(["int", "float"], na_position="last") tm.assert_frame_equal(df_sorted, df_reversed) df_sorted = df.sort_values(["int", "float"], na_position="first") tm.assert_frame_equal(df_sorted, df_reversed) # reverse sorting order df_sorted = df.sort_values(["int", "float"], ascending=False) tm.assert_frame_equal(df_sorted, df) # and now check if NaT is still considered as "na" for datetime64 # columns: df = DataFrame( {"datetime": [Timestamp("2016-01-01"), NaT], "float": float_values}, columns=["datetime", "float"], ) df_reversed = DataFrame( {"datetime": [NaT, Timestamp("2016-01-01")], "float": float_values[::-1]}, columns=["datetime", "float"], index=[1, 0], ) df_sorted = df.sort_values(["datetime", "float"], na_position="first") tm.assert_frame_equal(df_sorted, df_reversed) df_sorted = df.sort_values(["datetime", "float"], na_position="last") tm.assert_frame_equal(df_sorted, df) # Ascending should not affect the results. 
df_sorted = df.sort_values(["datetime", "float"], ascending=False) tm.assert_frame_equal(df_sorted, df) def test_sort_nat(self): # GH 16836 d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]] d2 = [ Timestamp(x) for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"] ] df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3]) d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]] d4 = [ Timestamp(x) for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"] ] expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2]) sorted_df = df.sort_values(by=["a", "b"]) tm.assert_frame_equal(sorted_df, expected) def test_sort_values_na_position_with_categories(self): # GH#22556 # Positioning missing value properly when column is Categorical. categories = ["A", "B", "C"] category_indices = [0, 2, 4] list_of_nans = [np.nan, np.nan] na_indices = [1, 3] na_position_first = "first" na_position_last = "last" column_name = "c" reversed_categories = sorted(categories, reverse=True) reversed_category_indices = sorted(category_indices, reverse=True) reversed_na_indices = sorted(na_indices) df = DataFrame( { column_name: Categorical( ["A", np.nan, "B", np.nan, "C"], categories=categories, ordered=True ) } ) # sort ascending with na first result = df.sort_values( by=column_name, ascending=True, na_position=na_position_first ) expected = DataFrame( { column_name: Categorical( list_of_nans + categories, categories=categories, ordered=True ) }, index=na_indices + category_indices, ) tm.assert_frame_equal(result, expected) # sort ascending with na last result = df.sort_values( by=column_name, ascending=True, na_position=na_position_last ) expected = DataFrame( { column_name: Categorical( categories + list_of_nans, categories=categories, ordered=True ) }, index=category_indices + na_indices, ) tm.assert_frame_equal(result, expected) # sort descending with na first result = df.sort_values( by=column_name, ascending=False, na_position=na_position_first ) expected = DataFrame( { column_name: Categorical( list_of_nans + reversed_categories, categories=categories, ordered=True, ) }, index=reversed_na_indices + reversed_category_indices, ) tm.assert_frame_equal(result, expected) # sort descending with na last result = df.sort_values( by=column_name, ascending=False, na_position=na_position_last ) expected = DataFrame( { column_name: Categorical( reversed_categories + list_of_nans, categories=categories, ordered=True, ) }, index=reversed_category_indices + reversed_na_indices, ) tm.assert_frame_equal(result, expected) def test_sort_values_nat(self): # GH#16836 d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]] d2 = [ Timestamp(x) for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"] ] df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3]) d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]] d4 = [ Timestamp(x) for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"] ] expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2]) sorted_df = df.sort_values(by=["a", "b"]) tm.assert_frame_equal(sorted_df, expected) def test_sort_values_na_position_with_categories_raises(self): df = DataFrame( { "c": Categorical( ["A", np.nan, "B", np.nan, "C"], categories=["A", "B", "C"], ordered=True, ) } ) with pytest.raises(ValueError, match="invalid na_position: bad_position"): df.sort_values(by="c", ascending=False, na_position="bad_position") @pytest.mark.parametrize("inplace", [True, False]) 
@pytest.mark.parametrize( "original_dict, sorted_dict, ignore_index, output_index", [ ({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, True, [0, 1, 2]), ({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, False, [2, 1, 0]), ( {"A": [1, 2, 3], "B": [2, 3, 4]}, {"A": [3, 2, 1], "B": [4, 3, 2]}, True, [0, 1, 2], ), ( {"A": [1, 2, 3], "B": [2, 3, 4]}, {"A": [3, 2, 1], "B": [4, 3, 2]}, False, [2, 1, 0], ), ], ) def test_sort_values_ignore_index( self, inplace, original_dict, sorted_dict, ignore_index, output_index ): # GH 30114 df = DataFrame(original_dict) expected = DataFrame(sorted_dict, index=output_index) kwargs = {"ignore_index": ignore_index, "inplace": inplace} if inplace: result_df = df.copy() result_df.sort_values("A", ascending=False, **kwargs) else: result_df = df.sort_values("A", ascending=False, **kwargs) tm.assert_frame_equal(result_df, expected) tm.assert_frame_equal(df, DataFrame(original_dict)) def test_sort_values_nat_na_position_default(self): # GH 13230 expected = DataFrame( { "A": [1, 2, 3, 4, 4], "date": pd.DatetimeIndex( [ "2010-01-01 09:00:00", "2010-01-01 09:00:01", "2010-01-01 09:00:02", "2010-01-01 09:00:03", "NaT", ] ), } ) result = expected.sort_values(["A", "date"]) tm.assert_frame_equal(result, expected) def test_sort_values_item_cache(self, using_array_manager): # previous behavior incorrect retained an invalid _item_cache entry df = DataFrame(np.random.randn(4, 3), columns=["A", "B", "C"]) df["D"] = df["A"] * 2 ser = df["A"] if not using_array_manager: assert len(df._mgr.blocks) == 2 df.sort_values(by="A") ser.values[0] = 99 assert df.iloc[0, 0] == df["A"][0] def test_sort_values_reshaping(self): # GH 39426 values = list(range(21)) expected = DataFrame([values], columns=values) df = expected.sort_values(expected.index[0], axis=1, ignore_index=True) tm.assert_frame_equal(df, expected) class TestDataFrameSortKey: # test key sorting (issue 27237) def test_sort_values_inplace_key(self, sort_by_key): frame = DataFrame( np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"] ) sorted_df = frame.copy() return_value = sorted_df.sort_values(by="A", inplace=True, key=sort_by_key) assert return_value is None expected = frame.sort_values(by="A", key=sort_by_key) tm.assert_frame_equal(sorted_df, expected) sorted_df = frame.copy() return_value = sorted_df.sort_values( by=1, axis=1, inplace=True, key=sort_by_key ) assert return_value is None expected = frame.sort_values(by=1, axis=1, key=sort_by_key) tm.assert_frame_equal(sorted_df, expected) sorted_df = frame.copy() return_value = sorted_df.sort_values( by="A", ascending=False, inplace=True, key=sort_by_key ) assert return_value is None expected = frame.sort_values(by="A", ascending=False, key=sort_by_key) tm.assert_frame_equal(sorted_df, expected) sorted_df = frame.copy() sorted_df.sort_values( by=["A", "B"], ascending=False, inplace=True, key=sort_by_key ) expected = frame.sort_values(by=["A", "B"], ascending=False, key=sort_by_key) tm.assert_frame_equal(sorted_df, expected) def test_sort_values_key(self): df = DataFrame(np.array([0, 5, np.nan, 3, 2, np.nan])) result = df.sort_values(0) expected = df.iloc[[0, 4, 3, 1, 2, 5]] tm.assert_frame_equal(result, expected) result = df.sort_values(0, key=lambda x: x + 5) expected = df.iloc[[0, 4, 3, 1, 2, 5]] tm.assert_frame_equal(result, expected) result = df.sort_values(0, key=lambda x: -x, ascending=False) expected = df.iloc[[0, 4, 3, 1, 2, 5]] tm.assert_frame_equal(result, expected) def test_sort_values_by_key(self): df = DataFrame( { "a": np.array([0, 3, np.nan, 3, 2, np.nan]), "b": 
np.array([0, 2, np.nan, 5, 2, np.nan]), } ) result = df.sort_values("a", key=lambda x: -x) expected = df.iloc[[1, 3, 4, 0, 2, 5]] tm.assert_frame_equal(result, expected) result = df.sort_values(by=["a", "b"], key=lambda x: -x) expected = df.iloc[[3, 1, 4, 0, 2, 5]] tm.assert_frame_equal(result, expected) result = df.sort_values(by=["a", "b"], key=lambda x: -x, ascending=False) expected = df.iloc[[0, 4, 1, 3, 2, 5]] tm.assert_frame_equal(result, expected) def test_sort_values_by_key_by_name(self): df = DataFrame( { "a": np.array([0, 3, np.nan, 3, 2, np.nan]), "b": np.array([0, 2, np.nan, 5, 2, np.nan]), } ) def key(col): if col.name == "a": return -col else: return col result = df.sort_values(by="a", key=key) expected = df.iloc[[1, 3, 4, 0, 2, 5]] tm.assert_frame_equal(result, expected) result = df.sort_values(by=["a"], key=key) expected = df.iloc[[1, 3, 4, 0, 2, 5]] tm.assert_frame_equal(result, expected) result = df.sort_values(by="b", key=key) expected = df.iloc[[0, 1, 4, 3, 2, 5]] tm.assert_frame_equal(result, expected) result = df.sort_values(by=["a", "b"], key=key) expected = df.iloc[[1, 3, 4, 0, 2, 5]] tm.assert_frame_equal(result, expected) def test_sort_values_key_string(self): df = DataFrame(np.array([["hello", "goodbye"], ["hello", "Hello"]])) result = df.sort_values(1) expected = df[::-1] tm.assert_frame_equal(result, expected) result = df.sort_values([0, 1], key=lambda col: col.str.lower()) tm.assert_frame_equal(result, df) result = df.sort_values( [0, 1], key=lambda col: col.str.lower(), ascending=False ) expected = df.sort_values(1, key=lambda col: col.str.lower(), ascending=False)
tm.assert_frame_equal(result, expected)
pandas._testing.assert_frame_equal
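For reference, a minimal usage sketch of the API named above; the frames here are invented, and pandas.testing.assert_frame_equal is the equivalent public import path.

import pandas as pd
import pandas._testing as tm

left = pd.DataFrame({"a": [1, 2, 3]})
right = pd.DataFrame({"a": [1, 2, 3]})
# Raises AssertionError with a detailed diff if the two frames differ.
tm.assert_frame_equal(left, right)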
from visions.core.model import VisionsBaseType, VisionsTypeset from visions.core.implementations.types import visions_generic from visions.core.model.relations import IdentityRelation import pandas.api.types as pdt import matplotlib.pyplot as plt import pandas as pd import numpy as np class visions_statistical_set(VisionsTypeset): """Typeset that exclusively supports time related types Includes support for the following types: - visions_binary - visions_nominal - visions_ordinal - visions_interval - visions_ratio """ def __init__(self): types = { visions_binary, visions_nominal, visions_ordinal, visions_interval, visions_ratio, } super().__init__(types) class visions_binary(VisionsBaseType): @classmethod def get_relations(cls): return [IdentityRelation(visions_binary, visions_nominal)] @classmethod def contains_op(cls, series: pd.Series) -> bool: return pdt.is_bool_dtype(series) class visions_nominal(VisionsBaseType): @classmethod def get_relations(cls): return [IdentityRelation(visions_nominal, visions_generic)] @classmethod def contains_op(cls, series: pd.Series) -> bool: return (pdt.is_categorical_dtype(series) and not series.cat.ordered) or pdt.is_bool_dtype(series) class visions_ordinal(VisionsBaseType): @classmethod def get_relations(cls): return [IdentityRelation(visions_ordinal, visions_generic)] @classmethod def contains_op(cls, series: pd.Series) -> bool: return pdt.is_categorical_dtype(series) and series.cat.ordered class visions_interval(VisionsBaseType): """ Aliases """ @classmethod def get_relations(cls): return [IdentityRelation(visions_interval, visions_generic)] @classmethod def contains_op(cls, series: pd.Series) -> bool: return pdt.is_numeric_dtype(series) and not pdt.is_bool_dtype(series) class visions_ratio(VisionsBaseType): @classmethod def get_relations(cls): return [IdentityRelation(visions_ratio, visions_interval)] @classmethod def contains_op(cls, series: pd.Series) -> bool: return False # TODO: make test cases series_interval1 = pd.Series([2.4, 5.6, 3.5, 2.5, 4], name='interval 1') series_interval3 = pd.Series([2, 5, 3, 2, 4, np.nan], dtype="Int64", name='interval 1') series_interval2 = pd.Series([1, 5, 7, 9, 11], name='interval 2') series_ratio1 = pd.Series([2.4, 5.6, 3.5, 2.5, 4], name='ratio 1') series_ratio2 = pd.Series([2.3, 4, 2.3, 6.3, 7.8], name='ratio 2') series_binary = pd.Series([True, True, False, True, False], dtype=bool, name='binary') series_nominal1 = pd.Series(['kaas', 'yoghurt', 'kaas', 'melk', 'melk'], dtype='category', name='nominal 1') series_nominal2 = pd.Series([1, 2, 3, 3, 1], dtype='category', name='nominal 2') series_ordinal1 = pd.Series(pd.Categorical([1, 2, 3, 3, 1], categories=[1, 2, 3], ordered=True), name='ordinal 1') series_ordinal2 = pd.Series(pd.Categorical([2, 2, 2, 3, 4], categories=[1, 2, 3, 4], ordered=True), name='ordinal 2') series_datetime = pd.to_datetime( pd.Series(['3/11/2000', '5/12/2008', '12/2/1993', '2/12/1993', '2/4/1923'], name='datetime')) series_time_delta = pd.Series( [pd.Timedelta('1 days 00:03:43'), pd.Timedelta('5 days 12:33:57'),
pd.Timedelta('0 days 01:25:07')
pandas.Timedelta
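As a quick illustration of the completed call, a minimal pd.Timedelta sketch; the values are invented.

import pandas as pd

td = pd.Timedelta("0 days 01:25:07")   # string form, as in the completion above
print(td.total_seconds())              # 5107.0
td2 = pd.Timedelta(days=5, hours=12)   # keyword form builds the same kind of object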
import astropy.io.fits as fits from astropy.coordinates import SkyCoord from astropy import wcs import pandas as pd import os import numpy as np from skimage import filters import time from skimage import measure from scipy import ndimage import matplotlib.pyplot as plt from threading import Thread from multiprocessing import Pool from time import sleep, ctime from DensityClust.clustring_subfunc import \ kc_coord_3d, kc_coord_2d, get_xyz """ 在计算距离和梯度的时候,采用了多线程 """ class Data: def __init__(self, data_name): self.data_name = data_name self.data = None self.wcs = None self.size_x = 0 self.size_y = 0 self.size_z = 0 self.ND = 0 self.get_data_inf() def get_data_inf(self): data = fits.getdata(data_name) # self.wcs = self.get_wcs() size_x, size_y, size_z = data.shape self.size_x = size_x self.size_y = size_y self.size_z = size_z self.data = data self.ND = size_x * size_y * size_z def get_wcs(self): """ 得到wcs信息 :return: data_wcs """ data_header = fits.getheader(self.data_name) keys = data_header.keys() key = [k for k in keys if k.endswith('4')] [data_header.remove(k) for k in key] try: data_header.remove('VELREF') except: pass data_wcs = wcs.WCS(data_header) return data_wcs class LocDenCluster: def __init__(self, para, data_name): """ 根据决策图得到聚类中心和聚类中心个数 :param para: para.rhomin: Minimum density para.deltamin: Minimum delta para.v_min: Minimum volume para.noise: The noise level of the data, used for data truncation calculation para.sigma: Standard deviation of Gaussian filtering """ self.out = None self.outcat = None self.mask = None self.gradmin = para["gradmin"] self.rhomin = para["rhomin"] self.deltamin = para["deltamin"] self.v_min = para["v_min"] self.rms = para["noise"] self.dc = para['dc'] self.is_plot = para['is_plot'] self.Data = Data(data_name) ND = self.Data.ND self.Gradient = np.zeros(ND, np.float) self.IndNearNeigh = np.zeros(ND, np.int) self.delta = np.zeros(ND, np.float) def summary(self): table_title = ['rhomin', 'deltamin', 'v_min', 'gradmin', 'noise', 'dc'] para = np.array([[self.rhomin, self.deltamin, self.v_min, self.gradmin, self.rms, self.dc]]) para_pd = pd.DataFrame(para, columns=table_title) # print(para_pd) return para_pd def change_pix2word(self): """ 将算法检测的结果(像素单位)转换到天空坐标系上去 :return: outcat_wcs ['ID', 'Peak1', 'Peak2', 'Peak3', 'Cen1', 'Cen2', 'Cen3', 'Size1', 'Size2', 'Size3', 'Peak', 'Sum', 'Volume'] -->3d ['ID', 'Peak1', 'Peak2', 'Cen1', 'Cen2', 'Size1', 'Size2', 'Peak', 'Sum', 'Volume'] -->2d """ outcat = self.outcat if outcat is None: return else: outcat_column = outcat.shape[1] data_wcs = self.Data.wcs if outcat_column == 10: # 2d result peak1, peak2 = data_wcs.all_pix2world(outcat['Peak1'], outcat['Peak2'], 1) cen1, cen2 = data_wcs.all_pix2world(outcat['Cen1'], outcat['Cen2'], 1) size1, size2 = np.array([outcat['Size1'] * 30, outcat['Size2'] * 30]) clump_Peak = np.column_stack([peak1, peak2]) clump_Cen = np.column_stack([cen1, cen2]) clustSize = np.column_stack([size1, size2]) clustPeak, clustSum, clustVolume = np.array([outcat['Peak'], outcat['Sum'], outcat['Volume']]) id_clumps = [] # MWSIP017.558+00.150+020.17 分别表示:银经:17.558°, 银纬:0.15°,速度:20.17km/s for item_l, item_b in zip(cen1, cen2): str_l = 'MWSIP' + ('%.03f' % item_l).rjust(7, '0') if item_b < 0: str_b = '-' + ('%.03f' % abs(item_b)).rjust(6, '0') else: str_b = '+' + ('%.03f' % abs(item_b)).rjust(6, '0') id_clumps.append(str_l + str_b) id_clumps = np.array(id_clumps) table_title = ['ID', 'Peak1', 'Peak2', 'Cen1', 'Cen2', 'Size1', 'Size2', 'Peak', 'Sum', 'Volume'] elif outcat_column == 13: # 3d result peak1, 
peak2, peak3 = data_wcs.all_pix2world(outcat['Peak1'], outcat['Peak2'], outcat['Peak3'], 1) cen1, cen2, cen3 = data_wcs.all_pix2world(outcat['Cen1'], outcat['Cen2'], outcat['Cen3'], 1) size1, size2, size3 = np.array([outcat['Size1'] * 30, outcat['Size2'] * 30, outcat['Size3'] * 0.166]) clustPeak, clustSum, clustVolume = np.array([outcat['Peak'], outcat['Sum'], outcat['Volume']]) clump_Peak = np.column_stack([peak1, peak2, peak3 / 1000]) clump_Cen = np.column_stack([cen1, cen2, cen3 / 1000]) clustSize = np.column_stack([size1, size2, size3]) id_clumps = [] # MWISP017.558+00.150+020.17 分别表示:银经:17.558°, 银纬:0.15°,速度:20.17km/s for item_l, item_b, item_v in zip(cen1, cen2, cen3 / 1000): str_l = 'MWISP' + ('%.03f' % item_l).rjust(7, '0') if item_b < 0: str_b = '-' + ('%.03f' % abs(item_b)).rjust(6, '0') else: str_b = '+' + ('%.03f' % abs(item_b)).rjust(6, '0') if item_v < 0: str_v = '-' + ('%.03f' % abs(item_v)).rjust(6, '0') else: str_v = '+' + ('%.03f' % abs(item_v)).rjust(6, '0') id_clumps.append(str_l + str_b + str_v) id_clumps = np.array(id_clumps) table_title = ['ID', 'Peak1', 'Peak2', 'Peak3', 'Cen1', 'Cen2', 'Cen3', 'Size1', 'Size2', 'Size3', 'Peak', 'Sum', 'Volume'] else: print('outcat columns is %d' % outcat_column) return None outcat_wcs = np.column_stack((id_clumps, clump_Peak, clump_Cen, clustSize, clustPeak, clustSum, clustVolume)) outcat_wcs = pd.DataFrame(outcat_wcs, columns=table_title) return outcat_wcs def densityCluster_3d(self): data = self.Data.data k1 = 1 # 第1次计算点的邻域大小 k2 = np.ceil(self.deltamin).astype(np.int) # 第2次计算点的邻域大小 xx = get_xyz(data) # xx: 3D data coordinates 坐标原点是 1 dim = data.ndim size_x, size_y, size_z = data.shape maxed = size_x + size_y + size_z ND = size_x * size_y * size_z # Initialize the return result: mask and out mask = np.zeros_like(data, dtype=np.int) out = np.zeros_like(data, dtype=np.float) data_filter = filters.gaussian(data, self.dc) rho = data_filter.flatten() rho_Ind = np.argsort(-rho) rho_sorted = rho[rho_Ind] delta, IndNearNeigh, Gradient = np.zeros(ND, np.float), np.zeros(ND, np.int), np.zeros(ND, np.float) delta[rho_Ind[0]] = np.sqrt(size_x ** 2 + size_y ** 2 + size_z ** 2) # delta 记录距离, # IndNearNeigh 记录:两个密度点的联系 % index of nearest neighbor with higher density IndNearNeigh[rho_Ind[0]] = rho_Ind[0] t0_ = time.time() # calculating delta and Gradient for ii in range(1, ND): # 密度降序排序后,即密度第ii大的索引(在rho中) ordrho_ii = rho_Ind[ii] rho_ii = rho_sorted[ii] # 第ii大的密度值 if rho_ii >= self.rms: delta[ordrho_ii] = maxed point_ii_xy = xx[ordrho_ii, :] get_value = True # 判断是否需要在大循环中继续执行,默认需要,一旦在小循环中赋值成功,就不在大循环中运行 idex, bt = kc_coord_3d(point_ii_xy, size_z, size_y, size_x, k1) for ordrho_jj, item in zip(idex, bt): rho_jj = rho[ordrho_jj] # 根据索引在rho里面取值 dist_i_j = np.sqrt(((point_ii_xy - item) ** 2).sum()) # 计算两点间的距离 gradient = (rho_jj - rho_ii) / dist_i_j if dist_i_j <= delta[ordrho_ii] and gradient >= 0: delta[ordrho_ii] = dist_i_j Gradient[ordrho_ii] = gradient IndNearNeigh[ordrho_ii] = ordrho_jj get_value = False if get_value: # 表明,在(2 * k1 + 1) * (2 * k1 + 1) * (2 * k1 + 1)的邻域中没有找到比该点高,距离最近的点,则在更大的邻域中搜索 idex, bt = kc_coord_3d(point_ii_xy, size_z, size_y, size_x, k2) for ordrho_jj, item in zip(idex, bt): rho_jj = rho[ordrho_jj] # 根据索引在rho里面取值 dist_i_j = np.sqrt(((point_ii_xy - item) ** 2).sum()) # 计算两点间的距离 gradient = (rho_jj - rho_ii) / dist_i_j if dist_i_j <= delta[ordrho_ii] and gradient >= 0: delta[ordrho_ii] = dist_i_j Gradient[ordrho_ii] = gradient IndNearNeigh[ordrho_ii] = ordrho_jj get_value = False if get_value: delta[ordrho_ii] = k2 + 0.0001 
Gradient[ordrho_ii] = -1 IndNearNeigh[ordrho_ii] = ND else: IndNearNeigh[ordrho_ii] = ND delta_sorted = np.sort(-delta) * -1 delta[rho_Ind[0]] = delta_sorted[1] t1_ = time.time() print('delata, rho and Gradient are calculated, using %.2f seconds' % (t1_ - t0_)) # 根据密度和距离来确定类中心 clusterInd = -1 * np.ones(ND + 1) clust_index = np.intersect1d(np.where(rho > self.rhomin), np.where(delta > self.deltamin)) clust_num = len(clust_index) # icl是用来记录第i个类中心在xx中的索引值 icl = np.zeros(clust_num, dtype=int) n_clump = 0 for ii in range(clust_num): i = clust_index[ii] icl[n_clump] = i n_clump += 1 clusterInd[i] = n_clump # assignation 将其他非类中心分配到离它最近的类中心中去 # clusterInd = -1 表示该点不是类的中心点,属于其他点,等待被分配到某个类中去 # 类的中心点的梯度Gradient被指定为 - 1 if self.is_plot == 1: pass for i in range(ND): ordrho_i = rho_Ind[i] if clusterInd[ordrho_i] == -1: # not centroid clusterInd[ordrho_i] = clusterInd[IndNearNeigh[ordrho_i]] else: Gradient[ordrho_i] = -1 # 将类中心点的梯度设置为-1 clump_volume = np.zeros(n_clump) for i in range(n_clump): clump_volume[i] = clusterInd.tolist().count(i + 1) # centInd [类中心点在xx坐标下的索引值,类中心在centInd的索引值: 代表类别编号] centInd = [] for i, item in enumerate(clump_volume): if item >= self.v_min: centInd.append([icl[i], i]) centInd = np.array(centInd, np.int) mask_grad = np.where(Gradient > self.gradmin)[0] # 通过梯度确定边界后,还需要进一步利用最小体积来排除假核 n_clump = centInd.shape[0] clump_sum, clump_volume, clump_peak = np.zeros([n_clump, 1]), np.zeros([n_clump, 1]), np.zeros([n_clump, 1]) clump_Cen, clump_size = np.zeros([n_clump, dim]), np.zeros([n_clump, dim]) clump_Peak = np.zeros([n_clump, dim], np.int) clump_ii = 0 for i, item in enumerate(centInd): rho_cluster_i = np.zeros(ND) index_cluster_i = np.where(clusterInd == (item[1] + 1))[0] # centInd[i, 1] --> item[1] 表示第i个类中心的编号 index_cc = np.intersect1d(mask_grad, index_cluster_i) rho_cluster_i[index_cluster_i] = rho[index_cluster_i] rho_cc_mean = rho[index_cc].mean() * 0.2 index_cc_rho = np.where(rho_cluster_i > rho_cc_mean)[0] index_cluster_rho = np.union1d(index_cc, index_cc_rho) cl_1_index_ = xx[index_cluster_rho, :] - 1 # -1 是为了在data里面用索引取值(从0开始) # clusterInd 标记的点的编号是从1开始, 没有标记的点的编号为-1 clustNum = cl_1_index_.shape[0] cl_i = np.zeros(data.shape, np.int) for item_ in cl_1_index_: cl_i[item_[2], item_[1], item_[0]] = 1 # 形态学处理 # cl_i = morphology.closing(cl_i) # 做开闭运算会对相邻两个云核的掩膜有影响 L = ndimage.binary_fill_holes(cl_i).astype(int) L = measure.label(L) # Labeled input image. Labels with value 0 are ignored. 
STATS = measure.regionprops(L) Ar_sum = [] for region in STATS: coords = region.coords # 经过验证,坐标原点为0 temp = 0 for item_coord in coords: temp += data[item_coord[0], item_coord[1], item_coord[2]] Ar_sum.append(temp) Ar = np.array(Ar_sum) ind = np.where(Ar == Ar.max())[0] L[L != ind[0] + 1] = 0 cl_i = L / (ind[0] + 1) coords = STATS[ind[0]].coords # 最大的连通域对应的坐标 if coords.shape[0] > self.v_min: coords = coords[:, [2, 1, 0]] clump_i_ = np.zeros(coords.shape[0]) for j, item_coord in enumerate(coords): clump_i_[j] = data[item_coord[2], item_coord[1], item_coord[0]] clustsum = clump_i_.sum() + 0.0001 # 加一个0.0001 防止分母为0 clump_Cen[clump_ii, :] = np.matmul(clump_i_, coords) / clustsum clump_volume[clump_ii, 0] = clustNum clump_sum[clump_ii, 0] = clustsum x_i = coords - clump_Cen[clump_ii, :] clump_size[clump_ii, :] = 2.3548 * np.sqrt((np.matmul(clump_i_, x_i ** 2) / clustsum) - (np.matmul(clump_i_, x_i) / clustsum) ** 2) clump_i = data * cl_i out = out + clump_i mask = mask + cl_i * (clump_ii + 1) clump_peak[clump_ii, 0] = clump_i.max() clump_Peak[clump_ii, [2, 1, 0]] = np.argwhere(clump_i == clump_i.max())[0] clump_ii += 1 else: pass clump_Peak = clump_Peak + 1 clump_Cen = clump_Cen + 1 # python坐标原点是从0开始的,在这里整体加1,改为以1为坐标原点 id_clumps = np.array([item + 1 for item in range(n_clump)], np.int).T id_clumps = id_clumps.reshape([n_clump, 1]) LDC_outcat = np.column_stack( (id_clumps, clump_Peak, clump_Cen, clump_size, clump_peak, clump_sum, clump_volume)) LDC_outcat = LDC_outcat[:clump_ii, :] table_title = ['ID', 'Peak1', 'Peak2', 'Peak3', 'Cen1', 'Cen2', 'Cen3', 'Size1', 'Size2', 'Size3', 'Peak', 'Sum', 'Volume'] LDC_outcat = pd.DataFrame(LDC_outcat, columns=table_title) self.outcat = LDC_outcat self.mask = mask self.out = out def get_delta(self, rho_Ind, rho_sorted, xx, maxed, rho, size_z, size_y, size_x, k1, k2, ND, ND_start, ND_end): # print(ND_start, ND_end) print('---开始---', ND_start, '时间', ctime()) for ii in range(ND_start, ND_end, 1): # 密度降序排序后,即密度第ii大的索引(在rho中) ordrho_ii = rho_Ind[ii] rho_ii = rho_sorted[ii] # 第ii大的密度值 delta_ordrho_ii, Gradient_ordrho_ii, IndNearNeigh_ordrho_ii = 0, 0, 0 if rho_ii >= self.rms: delta_ordrho_ii = maxed point_ii_xy = xx[ordrho_ii, :] get_value = True # 判断是否需要在大循环中继续执行,默认需要,一旦在小循环中赋值成功,就不在大循环中运行 idex, bt = kc_coord_3d(point_ii_xy, size_z, size_y, size_x, k1) for ordrho_jj, item in zip(idex, bt): rho_jj = rho[ordrho_jj] # 根据索引在rho里面取值 dist_i_j = np.sqrt(((point_ii_xy - item) ** 2).sum()) # 计算两点间的距离 gradient = (rho_jj - rho_ii) / dist_i_j if dist_i_j <= delta_ordrho_ii and gradient >= 0: delta_ordrho_ii = dist_i_j Gradient_ordrho_ii = gradient IndNearNeigh_ordrho_ii = ordrho_jj get_value = False if get_value: # 表明,在(2 * k1 + 1) * (2 * k1 + 1) * (2 * k1 + 1)的邻域中没有找到比该点高,距离最近的点,则在更大的邻域中搜索 idex, bt = kc_coord_3d(point_ii_xy, size_z, size_y, size_x, k2) for ordrho_jj, item in zip(idex, bt): rho_jj = rho[ordrho_jj] # 根据索引在rho里面取值 dist_i_j = np.sqrt(((point_ii_xy - item) ** 2).sum()) # 计算两点间的距离 gradient = (rho_jj - rho_ii) / dist_i_j if dist_i_j <= delta_ordrho_ii and gradient >= 0: delta_ordrho_ii = dist_i_j Gradient_ordrho_ii = gradient IndNearNeigh_ordrho_ii = ordrho_jj get_value = False if get_value: delta_ordrho_ii = k2 + 0.0001 Gradient_ordrho_ii = -1 IndNearNeigh_ordrho_ii = ND else: IndNearNeigh_ordrho_ii = ND # print(delta_ordrho_ii) self.delta[ordrho_ii] = delta_ordrho_ii self.Gradient[ordrho_ii] = Gradient_ordrho_ii self.IndNearNeigh[ordrho_ii] = IndNearNeigh_ordrho_ii print('***结束***', ND_start, '时间', ctime()) print(self.delta.max()) print(self.Gradient.max()) 
print(self.IndNearNeigh.max()) def densityCluster_3d_multi(self): data = self.Data.data k1 = 1 # 第1次计算点的邻域大小 k2 = np.ceil(self.deltamin).astype(np.int) # 第2次计算点的邻域大小 xx = get_xyz(data) # xx: 3D data coordinates 坐标原点是 1 dim = data.ndim size_x, size_y, size_z = data.shape maxed = size_x + size_y + size_z ND = size_x * size_y * size_z # Initialize the return result: mask and out mask = np.zeros_like(data, dtype=np.int) out = np.zeros_like(data, dtype=np.float) data_filter = filters.gaussian(data, self.dc) rho = data_filter.flatten() rho_Ind = np.argsort(-rho) rho_sorted = rho[rho_Ind] self.delta[rho_Ind[0]] = np.sqrt(size_x ** 2 + size_y ** 2 + size_z ** 2) # delta 记录距离, # IndNearNeigh 记录:两个密度点的联系 % index of nearest neighbor with higher density self.IndNearNeigh[rho_Ind[0]] = rho_Ind[0] t0_ = time.time() # calculating delta and Gradient # p = Pool(count) # for i_count in range(count): # ND_start = 1 + i_count*ittt # ND_end = 1 + (i_count + 1) * ittt # if i_count == count-1: # ND_end = ND # p.apply_async(self.get_delta, args=(rho_Ind, rho_sorted, xx, maxed, rho, size_z, size_y, size_x, k1, k2, ND, ND_start, ND_end)) # detect_single(data_ij_name, para) # p.apply_async(self.get_delta, # args=(rho_Ind, rho_sorted, xx, maxed, rho, size_z, size_y, size_x, k1, k2, ND, 1, ND)) # self.get_delta(rho_Ind, rho_sorted, xx, maxed, rho, size_z, size_y, size_x, k1, k2, ND, 1, ND) # p.close() # p.join() count = 4 ittt = int(ND / count) ts = [] for i_count in range(count): ND_start = 1 + i_count*ittt ND_end = 1 + (i_count + 1) * ittt if i_count == count-1: ND_end = ND t = Thread(target=self.get_delta, args=(rho_Ind, rho_sorted, xx, maxed, rho, size_z, size_y, size_x, k1, k2, ND, ND_start, ND_end)) ts.append(t) [i.start() for i in ts] [i.join() for i in ts] print(self.delta.max()) print(self.Gradient.max()) print(self.IndNearNeigh.max()) # p = Pool(count) # for data_ij_name in data_ij_name_list: # p.apply_async(detect_single, args=(data_ij_name, para)) # # detect_single(data_ij_name, para) # p.close() # p.join() # t.join() # for ii in range(1, ND): # # 密度降序排序后,即密度第ii大的索引(在rho中) # ordrho_ii = rho_Ind[ii] # rho_ii = rho_sorted[ii] # 第ii大的密度值 # t = Thread(target=self.get_delta, args=(ordrho_ii, rho_ii, xx, maxed, rho, size_z, size_y, size_x, k1, k2, ND)) # t.start() # t.join() # ts.append(t) # ts = [] # count = 2 # # for i_count in range(count): # ND_start, ND_end = 1, int(ND/2) # t = Thread(target=self.get_delta, args=(rho_Ind, rho_sorted, xx, maxed, rho, size_z, size_y, size_x, k1, k2, ND, ND_start, ND_end)) # t.start() # t.join() # t1 = Thread(target=self.get_delta, args=(rho_Ind, rho_sorted, xx, maxed, rho, size_z, size_y, size_x, k1, k2, ND, ND_start, 1000000)) # t2 = Thread(target=self.get_delta, args=(rho_Ind, rho_sorted, xx, maxed, rho, size_z, size_y, size_x, k1, k2, ND, 1000000, 2000000)) # t3 = Thread(target=self.get_delta, # args=(rho_Ind, rho_sorted, xx, maxed, rho, size_z, size_y, size_x, k1, k2, ND, 2000000, ND)) # # # 启动线程运行 # t1.start() # t2.start() # t3.start() # # # 等待所有线程执行完毕 # t1.join() # join() 等待线程终止,要不然一直挂起 # t2.join() # t3.join() delta_sorted = np.sort(-self.delta) * -1 self.delta[rho_Ind[0]] = delta_sorted[1] t1_ = time.time() print('delata, rho and Gradient are calculated, using %.2f seconds' % (t1_ - t0_)) # 根据密度和距离来确定类中心 clusterInd = -1 * np.ones(ND + 1) clust_index = np.intersect1d(np.where(rho > self.rhomin), np.where(self.delta > self.deltamin)) clust_num = len(clust_index) # icl是用来记录第i个类中心在xx中的索引值 icl = np.zeros(clust_num, dtype=int) n_clump = 0 for ii in range(clust_num): i = 
clust_index[ii] icl[n_clump] = i n_clump += 1 clusterInd[i] = n_clump # assignation 将其他非类中心分配到离它最近的类中心中去 # clusterInd = -1 表示该点不是类的中心点,属于其他点,等待被分配到某个类中去 # 类的中心点的梯度Gradient被指定为 - 1 if self.is_plot == 1: pass for i in range(ND): ordrho_i = rho_Ind[i] if clusterInd[ordrho_i] == -1: # not centroid clusterInd[ordrho_i] = clusterInd[self.IndNearNeigh[ordrho_i]] else: self.Gradient[ordrho_i] = -1 # 将类中心点的梯度设置为-1 clump_volume = np.zeros(n_clump) for i in range(n_clump): clump_volume[i] = clusterInd.tolist().count(i + 1) # centInd [类中心点在xx坐标下的索引值,类中心在centInd的索引值: 代表类别编号] centInd = [] for i, item in enumerate(clump_volume): if item >= self.v_min: centInd.append([icl[i], i]) centInd = np.array(centInd, np.int) mask_grad = np.where(self.Gradient > self.gradmin)[0] # 通过梯度确定边界后,还需要进一步利用最小体积来排除假核 n_clump = centInd.shape[0] clump_sum, clump_volume, clump_peak = np.zeros([n_clump, 1]), np.zeros([n_clump, 1]), np.zeros([n_clump, 1]) clump_Cen, clump_size = np.zeros([n_clump, dim]), np.zeros([n_clump, dim]) clump_Peak = np.zeros([n_clump, dim], np.int) clump_ii = 0 for i, item in enumerate(centInd): rho_cluster_i = np.zeros(ND) index_cluster_i = np.where(clusterInd == (item[1] + 1))[0] # centInd[i, 1] --> item[1] 表示第i个类中心的编号 index_cc = np.intersect1d(mask_grad, index_cluster_i) rho_cluster_i[index_cluster_i] = rho[index_cluster_i] rho_cc_mean = rho[index_cc].mean() * 0.2 index_cc_rho = np.where(rho_cluster_i > rho_cc_mean)[0] index_cluster_rho = np.union1d(index_cc, index_cc_rho) cl_1_index_ = xx[index_cluster_rho, :] - 1 # -1 是为了在data里面用索引取值(从0开始) # clusterInd 标记的点的编号是从1开始, 没有标记的点的编号为-1 clustNum = cl_1_index_.shape[0] cl_i = np.zeros(data.shape, np.int) for item_ in cl_1_index_: cl_i[item_[2], item_[1], item_[0]] = 1 # 形态学处理 # cl_i = morphology.closing(cl_i) # 做开闭运算会对相邻两个云核的掩膜有影响 L = ndimage.binary_fill_holes(cl_i).astype(int) L = measure.label(L) # Labeled input image. Labels with value 0 are ignored. STATS = measure.regionprops(L) Ar_sum = [] for region in STATS: coords = region.coords # 经过验证,坐标原点为0 temp = 0 for item_coord in coords: temp += data[item_coord[0], item_coord[1], item_coord[2]] Ar_sum.append(temp) Ar = np.array(Ar_sum) ind = np.where(Ar == Ar.max())[0] L[L != ind[0] + 1] = 0 cl_i = L / (ind[0] + 1) coords = STATS[ind[0]].coords # 最大的连通域对应的坐标 if coords.shape[0] > self.v_min: coords = coords[:, [2, 1, 0]] clump_i_ = np.zeros(coords.shape[0]) for j, item_coord in enumerate(coords): clump_i_[j] = data[item_coord[2], item_coord[1], item_coord[0]] clustsum = clump_i_.sum() + 0.0001 # 加一个0.0001 防止分母为0 clump_Cen[clump_ii, :] = np.matmul(clump_i_, coords) / clustsum clump_volume[clump_ii, 0] = clustNum clump_sum[clump_ii, 0] = clustsum x_i = coords - clump_Cen[clump_ii, :] clump_size[clump_ii, :] = 2.3548 * np.sqrt((np.matmul(clump_i_, x_i ** 2) / clustsum) - (np.matmul(clump_i_, x_i) / clustsum) ** 2) clump_i = data * cl_i out = out + clump_i mask = mask + cl_i * (clump_ii + 1) clump_peak[clump_ii, 0] = clump_i.max() clump_Peak[clump_ii, [2, 1, 0]] = np.argwhere(clump_i == clump_i.max())[0] clump_ii += 1 else: pass clump_Peak = clump_Peak + 1 clump_Cen = clump_Cen + 1 # python坐标原点是从0开始的,在这里整体加1,改为以1为坐标原点 id_clumps = np.array([item + 1 for item in range(n_clump)], np.int).T id_clumps = id_clumps.reshape([n_clump, 1]) LDC_outcat = np.column_stack( (id_clumps, clump_Peak, clump_Cen, clump_size, clump_peak, clump_sum, clump_volume)) LDC_outcat = LDC_outcat[:clump_ii, :] table_title = ['ID', 'Peak1', 'Peak2', 'Peak3', 'Cen1', 'Cen2', 'Cen3', 'Size1', 'Size2', 'Size3', 'Peak', 'Sum', 'Volume'] LDC_outcat =
pd.DataFrame(LDC_outcat, columns=table_title)
pandas.DataFrame
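A minimal sketch of the pd.DataFrame call completed above; the array and the column names are invented stand-ins for the clump catalogue.

import numpy as np
import pandas as pd

table_title = ["ID", "Peak", "Sum", "Volume"]            # hypothetical subset of the catalogue columns
rows = np.array([[1, 2.5, 10.0, 42], [2, 1.7, 8.0, 30]])
outcat = pd.DataFrame(rows, columns=table_title)
print(outcat.dtypes)  # a mixed numeric ndarray yields float64 columns throughout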
import numpy as np import matplotlib.pyplot as plt import pandas as pd import os import itertools from pandas.core.indexes import interval from scipy import signal import time class Make_dataset(): def __init__(self): self.list_i = [1]+list(range(3,22)) self.df_original = pd.read_csv("data/df_original.csv") self.list_j2 = self.df_original["j2"] self.list_interval = self.df_original["interval"] self.list_N = self.df_original["N"] self.list_freq_idx = self.df_original["freq_idx"] self.sampling_num_surface = int(1023) # 表面粗さや2DFFTを計算したりする点数 self.resolution = 5.85829e-3 # 表面の測定間隔, 2DFFTで使用 self.freq_resolution = 1 / (self.resolution*self.sampling_num_surface) self.df_original_180 = pd.read_csv("data/df_original_180.csv") self.expansion = 9 self.df_new = pd.DataFrame(index=list(range(20*self.expansion)),columns=["X","Y","Z","Amp","Freq","Fx","Fy","Sq","Sku"]) # def make_df_original(self): # 加工パラメータのデータを読み取る # # os.chdir("/data") # self.df_original = pd.read_csv('data/data_new.csv',header=0,index_col=0,usecols=range(1,13),engine="python",encoding='shift-jis') # for f,g in itertools.product(range(20),range(self.expansion)): # self.df_original.loc[f*self.expansion+g] = self.df_original.loc[f*self.expansion] # 20個のデータを720個に拡張 # self.df_original = self.df_original.sort_index() # インデックス順に並び替え def _read_sensor_df(self,i2): # 加工条件i2のセンサデータを読み取る, どこから何点読み取るか DIR_sensor = 'data/sensor_data_new/sensor_data'+str(i2)+'.csv' # num_of_file = sum(os.path.isfile(os.path.join(DIR_sensor, name)) for name in os.listdir(DIR_sensor)) #i2番目のデータのファイル数 # dict_num_of_read = {1:0, 2:0, 4:2} # ファイル数に応じてある番号目のファイル1つだけ読み取る # # sensor_df = pd.DataFrame(index=[], columns=(["ax","ay","az"])) # file_DIR_sensor = DIR_sensor + '/auto$0$'+str(dict_num_of_read[num_of_file])+'.CSV' self.sensor_df = pd.read_csv(DIR_sensor,skiprows=0,nrows=200000,header=0,index_col=0, engine="python",encoding='shift-jis') # j1番目のファイルを順番に読み取り # for j1 in range(num_of_file): # file_DIR_sensor = DIR_sensor + '/auto$0$'+str(j1)+'.CSV' # new_sensor_df = pd.read_csv(file_DIR_sensor,skiprows=57,header=None,names=(["ax","ay","az"]),usecols=[2,3,4],skipfooter=3, engine="python",encoding='shift-jis') # j1番目のファイルを順番に読み取り # sensor_df = sensor_df.append(new_sensor_df,ignore_index=True) # sensor_df["t"] = np.arange(len(sensor_df["ax"])) self.size_sensor_df = len(self.sensor_df["t"]) # sensor_df["ax"] = sensor_df["ax"] /0.102 # sensor_df["ay"] = sensor_df["ay"] /0.102 # sensor_df["az"] = sensor_df["az"] /0.102 # return sensor_df def _divide_sensor_df(self,j,N): #j2はセンサデータのどの辺を何個,抽出するか left = int(self.size_sensor_df*j) self.sensor_df_part = self.sensor_df.loc[left:left+N] self.freq = np.fft.fftfreq(int(N), d=1e-4)[1:int(N/2)] # 周波数 def _do_fft(self,N,freq_idx): # nameにはax,ay,azが入る list_spec_ac = [] list_spec = [] list_peak_spec = [] for direction in ("ax","ay","az"): FFT = np.fft.fft(self.sensor_df_part[direction])[1:int(N/2)] # 変換結果 spec_ac = np.abs(FFT/(N/2))*1e6 # 単位をm/s^2からμm/s^2に変換 omega_matrix = (2*np.pi*self.freq)**2 spec = spec_ac / omega_matrix # 変位のスペクトル(μm) print("強制振動周波数 : {} Hz".format(self.freq[freq_idx])) print(direction+"方向のピークスペクトル : {} nm".format(round(spec[freq_idx],4)*1e3)) list_peak_spec.append(round(spec[freq_idx],4)*1e3) list_spec_ac.append(spec_ac) list_spec.append(spec) print(list_peak_spec) list_peak_XYZ = [np.abs(list_peak_spec[0]-list_peak_spec[2])/np.sqrt(2),list_peak_spec[1],(list_peak_spec[0]+list_peak_spec[2])/np.sqrt(2)] print(list_peak_XYZ) return list_peak_XYZ # ここから表面データ def _read_surface_data(self,i2,k1): # 加工条件i2, 
測定場所k1のデータ読み取り(粗い方,1) df = pd.read_csv('data/surface_data/'+str(i2)+'-1-'+str(k1)+'.csv',usecols=range(1023),skipfooter=1,names=list(range(1023)),dtype=float,engine="python") # 生データ df2 = df.replace(0,np.nan) # 0を欠損値扱いに df3 = df2.interpolate(limit_direction="both") # 欠損値を両側から平均で補完 z_raw = df3.to_numpy() # ここから最小二乗法 N = self.sampling_num_surface x = np.arange(N)*self.resolution self.Y,self.X = np.meshgrid(x,x) # 3次元形状でx,y軸を作るときはこれでよい X = self.X.reshape(-1,1) Y = self.Y.reshape(-1,1) z_raw = z_raw.reshape(-1,1) ones = np.ones(X.shape) Mat = np.hstack([X,Y,ones]) # この行列が最小二乗法の元となる # 以下, Ax = b の連立方程式をxについて解く A = np.dot(Mat.T,Mat) b = np.dot(Mat.T,z_raw) x = np.linalg.solve(A,b) z_new = z_raw - x[0]*X-x[1]*Y-x[2] z_new =z_new.reshape(N,N) self.surface_data = z_new def _caluculate_Sq_Sku(self): A = ((len(self.surface_data)-1)*self.resolution)**2 dA = self.resolution**2 Sq_2 = np.sum(self.surface_data**2*dA)/A Sq = np.sqrt(Sq_2) Sku = np.sum(self.surface_data**4*dA)/(A*Sq**4) return Sq,Sku def _do_2DFFT(self,n1): # n1はスペクトルを計算する領域 FFT = np.fft.fft2(self.surface_data) # 変換 FFT = np.fft.fftshift(FFT) #周波数シフト FFT[508:515,508:515] = 1e-3 # 中心に近い低周波成分を1に spec = np.abs(FFT)/(self.sampling_num_surface/2)**2 # パワースペクトル spec = spec[511:511+n1,511-n1+1:511+n1] # スペクトルの領域を狭める # fx = np.arange(n1)*self.freq_resolution # fy = np.arange(-n1+1,n1)*self.freq_resolution # FY,FX = np.meshgrid(fy,fx) Amp = round(np.max(spec),2) # スペクトルの最大値 idx = np.array(np.unravel_index(np.argmax(spec), spec.shape)) - np.array([0,n1]) #最大値の座標 print("最大スペクトルの点 : {}".format(idx)) Freq = round(np.sqrt(idx[0]**2+idx[1]**2)*self.freq_resolution,2) # 最大値の空間周波数(距離に比例) Fx = round(abs(idx[0])*self.freq_resolution,3) Fy = round(abs(idx[1])*self.freq_resolution,3) # angle = round(np.degrees(np.arctan2(fy,fx))) # 最大座標の角度 return Amp,Freq,Fx,Fy # ここがデータセット作成のメイン def make_df_new(self): # 720回読み取って新しいdf_newを作成 for i1,i2 in enumerate(self.list_i): # 20回繰り返す self._read_sensor_df(i2) j2 = self.list_j2[i1] interval = self.list_interval[i1] N = self.list_N[i1] freq_idx = self.list_freq_idx[i1] # for j3,k1 in itertools.product(range(3),range(1,4)): # 9回繰り返す for j in range(3): # まずはセンサデータの振動部分を取り出し, 3方向の加速度データを取得 self._divide_sensor_df(j2+j*interval,N) list_peak_XYZ = self._do_fft(N,freq_idx) for k1 in range(1,4): self._read_surface_data(i2,k1) # 次に表面データを読み取る Sq,Sku = self._caluculate_Sq_Sku() Amp,Freq,Fx,Fy = self._do_2DFFT(100) self.df_new.iloc[9*i1 + 3*j + (k1 - 1)] = [list_peak_XYZ[0],list_peak_XYZ[1],list_peak_XYZ[2],Amp,Freq,Fx,Fy,Sq,Sku] def main(): time_start = time.perf_counter() runner = Make_dataset() runner.make_df_new() df_complete =
pd.concat([runner.df_original_180, runner.df_new], axis=1)
pandas.concat
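A minimal pd.concat sketch mirroring the axis=1 usage in the completion; the two frames are invented.

import pandas as pd

params = pd.DataFrame({"speed": [1.0, 2.0]})
features = pd.DataFrame({"Sq": [0.3, 0.4], "Sku": [3.1, 2.9]})
# axis=1 concatenates column-wise, aligning rows on the shared index.
combined = pd.concat([params, features], axis=1)
print(combined.columns.tolist())  # ['speed', 'Sq', 'Sku']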
# -*- coding: utf-8 -*- import re import numpy as np import pytest from pandas.core.dtypes.common import ( is_bool_dtype, is_categorical, is_categorical_dtype, is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64_ns_dtype, is_datetime64tz_dtype, is_datetimetz, is_dtype_equal, is_interval_dtype, is_period, is_period_dtype, is_string_dtype) from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, IntervalDtype, PeriodDtype, registry) import pandas as pd from pandas import ( Categorical, CategoricalIndex, IntervalIndex, Series, date_range) from pandas.core.sparse.api import SparseDtype import pandas.util.testing as tm @pytest.fixture(params=[True, False, None]) def ordered(request): return request.param class Base(object): def setup_method(self, method): self.dtype = self.create() def test_hash(self): hash(self.dtype) def test_equality_invalid(self): assert not self.dtype == 'foo' assert not is_dtype_equal(self.dtype, np.int64) def test_numpy_informed(self): pytest.raises(TypeError, np.dtype, self.dtype) assert not self.dtype == np.str_ assert not np.str_ == self.dtype def test_pickle(self): # make sure our cache is NOT pickled # clear the cache type(self.dtype).reset_cache() assert not len(self.dtype._cache) # force back to the cache result = tm.round_trip_pickle(self.dtype) assert not len(self.dtype._cache) assert result == self.dtype class TestCategoricalDtype(Base): def create(self): return CategoricalDtype() def test_pickle(self): # make sure our cache is NOT pickled # clear the cache type(self.dtype).reset_cache() assert not len(self.dtype._cache) # force back to the cache result = tm.round_trip_pickle(self.dtype) assert result == self.dtype def test_hash_vs_equality(self): dtype = self.dtype dtype2 = CategoricalDtype() assert dtype == dtype2 assert dtype2 == dtype assert hash(dtype) == hash(dtype2) def test_equality(self): assert is_dtype_equal(self.dtype, 'category') assert is_dtype_equal(self.dtype, CategoricalDtype()) assert not is_dtype_equal(self.dtype, 'foo') def test_construction_from_string(self): result = CategoricalDtype.construct_from_string('category') assert is_dtype_equal(self.dtype, result) pytest.raises( TypeError, lambda: CategoricalDtype.construct_from_string('foo')) def test_constructor_invalid(self): msg = "Parameter 'categories' must be list-like" with pytest.raises(TypeError, match=msg): CategoricalDtype("category") dtype1 = CategoricalDtype(['a', 'b'], ordered=True) dtype2 = CategoricalDtype(['x', 'y'], ordered=False) c = Categorical([0, 1], dtype=dtype1, fastpath=True) @pytest.mark.parametrize('values, categories, ordered, dtype, expected', [ [None, None, None, None, CategoricalDtype()], [None, ['a', 'b'], True, None, dtype1], [c, None, None, dtype2, dtype2], [c, ['x', 'y'], False, None, dtype2], ]) def test_from_values_or_dtype( self, values, categories, ordered, dtype, expected): result = CategoricalDtype._from_values_or_dtype(values, categories, ordered, dtype) assert result == expected @pytest.mark.parametrize('values, categories, ordered, dtype', [ [None, ['a', 'b'], True, dtype2], [None, ['a', 'b'], None, dtype2], [None, None, True, dtype2], ]) def test_from_values_or_dtype_raises(self, values, categories, ordered, dtype): msg = "Cannot specify `categories` or `ordered` together with `dtype`." 
with pytest.raises(ValueError, match=msg): CategoricalDtype._from_values_or_dtype(values, categories, ordered, dtype) def test_is_dtype(self): assert CategoricalDtype.is_dtype(self.dtype) assert CategoricalDtype.is_dtype('category') assert CategoricalDtype.is_dtype(CategoricalDtype()) assert not CategoricalDtype.is_dtype('foo') assert not CategoricalDtype.is_dtype(np.float64) def test_basic(self): assert
is_categorical_dtype(self.dtype)
pandas.core.dtypes.common.is_categorical_dtype
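A minimal sketch of the categorical-dtype check named above; pandas.api.types re-exports the same function, and note it is deprecated in pandas 2.x in favour of isinstance(dtype, pd.CategoricalDtype).

import pandas as pd
from pandas.api.types import is_categorical_dtype

s = pd.Series(["a", "b", "a"], dtype="category")
print(is_categorical_dtype(s))        # True
print(is_categorical_dtype(s.dtype))  # True, dtypes are accepted as well
print(is_categorical_dtype([1, 2]))   # False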
import datetime import logging import os import random from typing import Dict, List, Optional, Tuple, Union, cast try: import ib_insync except ModuleNotFoundError: print("Can't find ib_insync") import pandas as pd import helpers.dbg as dbg import helpers.printing as hprint import helpers.s3 as hs3 # from tqdm.notebook import tqdm _LOG = logging.getLogger(__name__) def ib_connect(client_id: int = 0, is_notebook: bool = True) -> ib_insync.ib.IB: # TODO(gp): Add check if we are in notebook. if is_notebook: ib_insync.util.startLoop() ib = ib_insync.IB() host = os.environ["IB_GW_CONNECTION_HOST"] port = os.environ["IB_GW_CONNECTION_PORT"] _LOG.debug("Trying to connect to client_id=%s", client_id) ib.connect(host=host, port=port, clientId=client_id) # ib_insync.IB.RaiseRequestErrors = True _LOG.debug("Connected to IB: client_id=%s", client_id) return ib def get_free_client_id(max_attempts: Optional[int]) -> int: """ Find free slot to connect to IB gateway. """ free_client_id = -1 max_attempts = 1 if max_attempts is None else max_attempts for i in random.sample( range(1, max_attempts + 1), max_attempts, ): try: ib_connection = ib_connect(i, is_notebook=False) except TimeoutError: continue free_client_id = i ib_connection.disconnect() break if free_client_id == -1: raise TimeoutError("Couldn't connect to IB") return free_client_id def to_contract_details(ib, contract): print("contract= (%s)\n\t%s" % (type(contract), contract)) contract_details = ib.reqContractDetails(contract) print( "contract_details= (%s)\n\t%s" % (type(contract_details), contract_details) ) dbg.dassert_eq(len(contract_details), 1) return hprint.obj_to_str(contract_details[0]) def get_contract_details( ib: ib_insync.ib.IB, contract: ib_insync.Contract, simplify_df: bool = False ) -> pd.DataFrame: _LOG.debug("contract=%s", contract) cds = ib.reqContractDetails(contract) _LOG.info("num contracts=%s", len(cds)) contracts = [cd.contract for cd in cds] _LOG.debug("contracts[0]=%s", contracts[0]) contracts_df = ib_insync.util.df(contracts) if simplify_df: # TODO(*): remove or avoid since it is only one place where `core` is used. # _LOG.debug(cexplo.print_column_variability(contracts_df)) # Remove exchange. _LOG.debug("exchange=%s", contracts_df["exchange"].unique()) contracts_df.sort_values("lastTradeDateOrContractMonth", inplace=True) contracts_df = contracts_df.drop(columns=["exchange", "comboLegs"]) # Remove duplicates. contracts_df = contracts_df.drop_duplicates() # Remove constant values. # threshold = 1 # TODO(*): remove or avoid since it is only one place where `core` is used. # contracts_df = cexplo.remove_columns_with_low_variability( # contracts_df, threshold # ) return contracts_df # ############################################################################# def get_df_signature(df: pd.DataFrame) -> str: if df is None or df.empty: return "" txt = "len=%d [%s, %s]" % (len(df), df.index[0], df.index[-1]) return txt def to_ET( ts: Union[datetime.datetime, pd.Timestamp, str], as_datetime: bool = True ) -> Union[datetime.datetime, pd.Timestamp, str]: # Handle IB convention that an empty string means now. if ts == "": return "" ts =
pd.Timestamp(ts)
pandas.Timestamp
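A minimal pd.Timestamp sketch; the timezone step is an added illustration and not part of the original helper.

import datetime
import pandas as pd

ts = pd.Timestamp("2021-03-01 09:30:00")                   # string input
print(ts.tz is None)                                       # True, parsed as a naive timestamp
ts_et = ts.tz_localize("America/New_York")                 # attach a timezone afterwards
ts2 = pd.Timestamp(datetime.datetime(2021, 3, 1, 9, 30))   # datetime input is accepted too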
from dataset import Dataset import pandas as pd from utils_dr_pre_word_simi import * import sys import csv import spacy import codecs import os from utils import * from transformers import * from dataset_dr import Dataset_dr import torch import numpy as np import subprocess TRAIN_DR = 'data/parads/train_dr.csv' DEV_DR = 'data/parads/dev_dr.csv' TEST_DR = 'data/parads/test_dr.csv' fw = open('verb_no_simi.txt', 'w') batchsize_dr = 4 device_dr = torch.device("cuda:1" if torch.cuda.is_available() else "cpu") verb2simi = load_word2simi() tokenizer_dr = OpenAIGPTTokenizer.from_pretrained('openai-gpt') token_dict_dr = { 'bos_token': '<start>', 'eos_token': '<end>', 'pad_token': '<pad>', 'cls_token': '<cls>', 'additional_special_tokens': ['<pos>', '<neg>', '<equal>', '<VERB>'] } num_added_token_dr = tokenizer_dr.add_special_tokens(token_dict_dr) print(tokenizer_dr.vocab_size) cats = ['pos', 'neg', 'equal'] def simi_word(verb, descat): ''' at train and gen time, get the simi verb with descat get the infi form of word ''' infi = word_infinitive(verb) row = verb2simi[verb2simi['verb'] == infi] li = row[descat].tolist() if len(li) > 0: return li[0] fw.write(verb+'\n') return verb def extract_args(sen, para, train_time): if para: sen_del = sen['sendel0'] descat = sen['oricat1'] verbs = sen['verbs1'] para_sen = sen['sen1'] else: sen_del = sen['sendel'] descat = sen['oricat'] verbs = sen['verbs'] para_sen = sen['sen'] if not train_time: descat = sen['descat'] return sen_del, descat, verbs, para_sen def sen_in(sen, noi_idx, train_time=True, para=False): sen_idx = sen[0] sen = sen[1] sen_del, descat, verbs, para_sen = extract_args(sen, para, train_time) ori_verbs = verbs.split() add_verbs = '' if sen_idx in noi_idx: for v in ori_verbs: add_verbs += simi_word(v, descat) else: add_verbs = verbs newsen = '<start> ' + sen_del if not train_time: newsen = newsen + '<cls> ' + descat + '<start>' else: newsen += '<cls> ' + descat + '<start> ' + para_sen + ' <end>' tok_li = tokenizer_dr.encode(newsen, add_special_tokens=False) return tok_li, add_verbs def sen_in_retr(sen, df, method): senavg = df[df['sen']==sen]['glove_avg'] df['glove_avg'] = df['glove_avg'] - senavg def parse_file_dr(file, noi_frac=0.1, train_time=True, para=False): path = os.path.abspath(file) with open(path,encoding='UTF-8') as f: df = pd.read_csv(f) noi_df = df.sample(frac=noi_frac) if train_time: tok_li = [sen_in(sen, noi_df.index, train_time=train_time, para=para) for sen in df.iterrows()] tok_li = np.array(tok_li) df['v_supplied'] = tok_li[:, 1] tok_li = tok_li[:, 0] else: cats = ['pos', 'neg', 'equal'] tok_li = [] retdf =
pd.DataFrame()
pandas.DataFrame
import os import sys import sqlalchemy as sa import pandas as pd wrk_dir = '/home/aaron/sp' # wrk_dir = '/home/mike/git/streampulse/server_copy/sp' sys.path.insert(0, wrk_dir) os.chdir(wrk_dir) import config as cfg pw = cfg.MYSQL_PW db = sa.create_engine('mysql://root:{0}@localhost/sp'.format(pw)) #get number of users, observations, and sites to post on SP landing page nusers = pd.read_sql("select count(id) as n from user", db.engine).n[0] nobs = pd.read_sql("select count(id) as n from data", db.engine).n[0] nobs_powell =
pd.read_sql("select count(id) as n from powell", db.engine)
pandas.read_sql
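A minimal pd.read_sql sketch along the lines of the counts above; the in-memory SQLite engine and the tiny table are stand-ins so the example runs without a MySQL server.

import pandas as pd
import sqlalchemy as sa

engine = sa.create_engine("sqlite:///:memory:")
with engine.begin() as conn:  # begin() commits on exit
    conn.execute(sa.text("CREATE TABLE data (id INTEGER)"))
    conn.execute(sa.text("INSERT INTO data VALUES (1), (2), (3)"))
nobs = pd.read_sql("select count(id) as n from data", engine).n[0]
print(nobs)  # 3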
# Import the `pandas` library as `pd` import pandas as pd import numpy as np from sklearn.model_selection import train_test_split from sklearn import tree import sklearn.metrics as metrics import sklearn.preprocessing as preprocessing # Load in the data with `read_csv()` horsesDataSet = pd.read_csv("horse.csv", header=0, delimiter=',') #description of dataSet descriptionHorsesDataSet = horsesDataSet.describe() # --------------------------- Exploratory analysis --------------------------- #first 5 and last 5 entries in dataSet firstRowsDataSet = horsesDataSet.head(5) lastRowsDataSet = horsesDataSet.tail(5) # sampling data # Take a sample of 5 horsesDataSetSample = horsesDataSet.sample(5) result = pd.isnull(horsesDataSet) # --------------------------- Pre processing --------------------------- # iterate through each attribute and define the percentage of missing values # populate array with zeros with column dimensions of dataset qtd_nan = [0 for x in range(horsesDataSet.shape[1])] # populate array with zeros with column dimensions of dataset qtd_total = [0 for x in range(horsesDataSet.shape[1])] i = 0 while i < horsesDataSet.shape[1]: # get array of boolean describing each line as null or not for i attribute attributeLinesIsNA = pd.isna(horsesDataSet.iloc[:, i]) # get current attribute label name currentAttributeLabel = list(horsesDataSet)[i] qtd_nan[i] = horsesDataSet.loc[attributeLinesIsNA, currentAttributeLabel].shape[0] qtd_total[i] = horsesDataSet.loc[:, currentAttributeLabel].shape[0] i = i+1 percentageArray = np.divide(qtd_nan, qtd_total) threshold = 0.5 PreProcessedHorseDataSet = horsesDataSet i = 0 while i < horsesDataSet.shape[1]: if percentageArray[i] > threshold: # get current attribute label name currentAttributeLabel = list(horsesDataSet)[i] # drop attribute column if na values > threshold PreProcessedHorseDataSet = PreProcessedHorseDataSet.drop(columns=currentAttributeLabel) i = i + 1 # fill remaining lines with mean values PreProcessedHorseDataSet = PreProcessedHorseDataSet.fillna(horsesDataSet.mean()) # Show Statistics of DataSet StatisticsPreProcessedHorseDataSet = PreProcessedHorseDataSet.describe(include='all') # Altering Categorical missing values to Mode Value (value that appear the most often) i = 0 while i < PreProcessedHorseDataSet.shape[1]: # return the most frequent value (first index because mode() returns a DataFrame) attributeMode = PreProcessedHorseDataSet.mode().iloc[0, i] currentAttributeLabel = list(PreProcessedHorseDataSet)[i] PreProcessedHorseDataSet[currentAttributeLabel] = PreProcessedHorseDataSet[currentAttributeLabel].fillna(attributeMode) i = i+1 # categorical attribute binarization categoricalHorseDataSet = PreProcessedHorseDataSet.select_dtypes(include='object') categoricalHorseDataSet = categoricalHorseDataSet.drop('outcome', axis=1) categoricalHorseDataSetDummy = pd.get_dummies(categoricalHorseDataSet) PreProcessedHorseDataSet = pd.concat([categoricalHorseDataSetDummy, PreProcessedHorseDataSet.loc[:, 'outcome']], axis=1) # --------------------------- Decision Tree --------------------------- AttributesHorseDataSet = PreProcessedHorseDataSet.drop('outcome', axis=1) TargetHorseDataSet = PreProcessedHorseDataSet.loc[:, 'outcome'] # mapping 'euthanized' values to 'died' to tune fitting TargetHorseDataSet = TargetHorseDataSet.map(lambda x: 'died' if x == 'euthanized' else x) # label encoder labelEncoder = preprocessing.LabelEncoder() labelEncoder.fit(TargetHorseDataSet.values) TargetHorseEncodedArray = labelEncoder.transform(TargetHorseDataSet.values) 
TargetHorseEncodedDataSet =
pd.DataFrame(TargetHorseEncodedArray, columns=['outcome'])
pandas.DataFrame
######################################## IMPORTING ############################ import matplotlib.pyplot as plt import pandas as pd import numpy as np from statsmodels.graphics.tsaplots import plot_acf, plot_pacf from statsmodels.tsa.arima_model import ARIMA from sklearn.metrics import mean_squared_error import pmdarima as pm import seaborn as sns sns.set() ########################## PLOTS ############################################ lnmx = pd.read_csv("lnmx_series.csv",index_col="Year") lnmx = lnmx["40"] lnmx = lnmx[lnmx.index<2011] plt.plot(lnmx,linewidth=1,color="k") plt.xlabel("Time (Years)") plt.ylabel("Mortality Rate") plt.title("LNMX Series") plt.show() split= 1990 Train, Test = lnmx[lnmx.index<=split] ,lnmx[lnmx.index>split] # Train and Test series plt.plot(Train,color="darkblue",linewidth=1,label="Train") plt.plot(Test,color="coral",linewidth=1,label="Test") plt.legend() plt.show() # print(len(lnmx)) # print(len(Train)) # print(len(Test)) # print( len(lnmx) - len(Train) - len(Test) ) lnmx_diff = lnmx.diff().dropna() plt.plot(lnmx_diff,c="k",linewidth=1) plt.xlabel("Time (Years)") plt.title(" LNMX Differentiated ") plt.show() # # # ACF plot # # plot_acf(lnmx, lags=20, c="k") # # plt.show() # # # PACF plot # # plot_pacf(lnmx, lags=20, c= "k") # # plt.show() # ACF plot plot_acf(lnmx_diff, lags=30, c="k") plt.title("ACF (diff)") plt.show() # PACF plot plot_pacf(lnmx_diff, lags=30, c= "k") plt.title("PACF (diff)") plt.show() ############################################ AUTO ARIMA ########################################### # # model = pm.auto_arima(Train, start_p=0, start_q=0, # # test='adf', # use adftest to find optimal 'd' # # max_p=10, max_q=10, # maximum p and q # # m=1, # frequency of series # # d=None, # let model determine 'd' # # seasonal=False, # No Seasonality # # start_P=0, # # D=0, # # trace=True, # # error_action='ignore', # # suppress_warnings=True, # # stepwise=True) # # print(model.summary()) # ################################## ARIMA ########################################################### # model = ARIMA(train, order=(p,d,q)) model = ARIMA(Train, order=(1, 1, 1)) fitted = model.fit(disp=0) # Forecast fc, se, conf = fitted.forecast(len(Test), alpha=0.05) # 95% conf # Make as pandas series fc_series =
pd.Series(fc, index=Test.index)
pandas.Series
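A minimal pd.Series sketch matching the forecast-wrapping pattern above; the values and index are invented.

import numpy as np
import pandas as pd

fc = np.array([0.12, 0.11, 0.10])           # stand-in for forecast values from a fitted model
test_index = pd.Index([1991, 1992, 1993])   # stand-in for the test-split years
fc_series = pd.Series(fc, index=test_index)
print(fc_series.loc[1992])  # 0.11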
""" Plot Tools This script have a sets of tools that handle metrics and plots graphs, matrix and tables. The tools here are implemented basically by Scikit, and adapted as demanded, bellow is possible see the references to this fonts: - A compilation of bests 50 matplotlib visualization, to analysis data: https://www.machinelearningplus.com/plots/top-50-matplotlib-visualizations-the-master-plots-python/ - Implementations of Histograms, Density Plots, Box and Whisker Plots, Correlation Matrix Plot and Scatterplot Matrix: https://machinelearningmastery.com/visualize-machine-learning-data-python-pandas/ - API reference to Scikit metric: https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics - API reference to Scikit plot metrics: https://scikit-plot.readthedocs.io/en/stable/metrics.html This file can also be imported as a module and contains the following functions: * plot_confusion_matrix - plot/save and image of confusion matrix * get_and_plot_metrics - return the metrics of a set of data and show/save/print a table with metrics of a set of data * plot_distribution_data - plot a set of data in a 2D plan, showing the distribution, where axis x and y are 2 diff features * test_this_module - Function that runs a MNIST problem to test this module """ import numpy as np import matplotlib.pyplot as plt import pandas as pd from statistics import mean from sklearn.decomposition import PCA from sklearn.utils.multiclass import unique_labels import sklearn.metrics as skmetrics import scikitplot as skplt import seaborn as sns def plot_confusion_matrix(y_true, y_pred, labels=None, true_labels=None, pred_labels=None, title=None, normalize=False, hide_zeros=False, hide_counts=False, x_tick_rotation=0, ax=None, figsize=None, cmap='Blues', title_fontsize='large', text_fontsize='medium', save_image=False, plot=True, image_path="", image_name=""): """Generates confusion matrix plot from predictions and true labels, and show and/or save in disc. :param y_true: (array-like, shape (n_samples)) – Ground truth (correct) target values. :param y_pred: (array-like, shape (n_samples)) – Estimated targets as returned by a classifier. :param labels: (array-like, shape (n_classes), optional) – List of labels to index the matrix. This may be used to reorder or select a subset of labels. If none is given, those that appear at least once in y_true or y_pred are used in sorted order. :param true_labels: (array-like, optional) – The true labels to display. If none is given, then all of the labels are used. :param pred_labels: (array-like, optional) – The predicted labels to display. If none is given, then all of the labels are used. :param title: (string, optional) – Title of the generated plot. Defaults to “Confusion Matrix” if normalize is True. Else, defaults to “Normalized Confusion Matrix. :param normalize: (bool, optional) – If True, normalizes the confusion matrix before plotting. Defaults to False. :param hide_zeros: (bool, optional) – If True, does not plot cells containing a value of zero. Defaults to False. :param hide_counts: (bool, optional) – If True, doe not overlay counts. Defaults to False. :param x_tick_rotation: (int, optional) – Rotates x-axis tick labels by the specified angle. This is useful in cases where there are numerous categories and the labels overlap each other. :param ax: (matplotlib.axes.Axes, optional) – The axes upon which to plot the curve. If None, the plot is drawn on a new set of axes. :param figsize: (2-tuple, optional) – Tuple denoting figure size of the plot e.g. 
(6, 6). Defaults to None. :param cmap: (string or matplotlib.colors.Colormap instance, optional) – Colormap used for plotting the projection. View Matplotlib Colormap documentation for available options. https://matplotlib.org/users/colormaps.html :param title_fontsize: (string or int, optional) – Matplotlib-style fontsizes. Use e.g. “small”, “medium”, “large” or integer-values. Defaults to “large”. :param text_fontsize: (string or int, optional) – Matplotlib-style fontsizes. Use e.g. “small”, “medium”, “large” or integer-values. Defaults to “medium”. :param save_image: (Boolean, optional) – If True, save image in disc :param plot: (Boolean, optional) – If True, shows the plot :param image_path: (string, optional) – Path to save image, e.g.: '../results' :param image_name: (string, optional) – Image name """ # Plot confusion matrix skplt.metrics.plot_confusion_matrix(y_true, y_pred, labels, true_labels, pred_labels, title, normalize, hide_zeros, hide_counts, x_tick_rotation, ax, figsize, cmap, title_fontsize, text_fontsize) # Save and/or show plotted matrix if save_image: plt.savefig(image_path + '/' + image_name) if plot: plt.show() def get_and_plot_metrics(y_true, y_pred, labels=None, labels_name=None, pos_label=1, average='macro', sample_weight=None, normalize=True, accuracy_score=True, f1_score=True, precision_score=True, recall_score=True, plot_table=False, save_table=False, table_format='latex', table_name="", file_path="", file_name=""): """Get metrics from predictors and true labels, and show/save table in file as latex or plain-text :param y_true: (array-like, shape (n_samples)) – Ground truth (correct) target values. :param y_pred: (array-like, shape (n_samples)) – Estimated targets as returned by a classifier. :param labels: (array-like, shape (n_classes), optional) – List of labels to index the matrix. This may be used to reorder or select a subset of labels. If none is given, those that appear at least once in y_true or y_pred are used in sorted order. :param labels_name: (array-like[string], shape (n_classes), optional) – List of labels name to show in plots :param pos_label: (str or int, 1 by default, optional) – The class to report if average='binary' and the data is binary. If the data are multiclass or multilabel, this will be ignored; setting labels=[pos_label] and average != 'binary' will report scores for that label only. :param average: (string, [None, ‘binary’, ‘micro’, ‘macro’(default), ‘samples’, ‘weighted’], optional) – :param sample_weight: (array-like of shape = [n_samples], optional) – This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. :param normalize: (bool (default=True), optional) – If False, return the number of correctly classified samples. Otherwise, return the fraction of correctly classified samples. 
:param accuracy_score: (bool (default=True), optional) – Calculate and return accuracy score :param f1_score: (bool (default=True), optional) – Calculate and return f1 score :param precision_score: (bool (default=True), optional) – Calculate and return precision score :param recall_score: (bool (default=True), optional) – Calculate and return recall score :param plot_table: (bool (default=True), optional) – Plot and show table :param save_table: (bool (default=False), optional) – Save table in file :param table_format: (string, ['latex', 'plain-text'], optional) – Convert table to file format :param table_name: (string, optional) – Name of table in plots and file caption if latex format :param file_path: (string, optional) – Path to save file, e.g.: '../results' :param file_name: (string, optional) – Image name, e.g.: 'test_table_file' :return: (array-like, shape (n_selected_metrics)) – An array with return of metrics selected: '([accuracy_score,][f1_score,][precision_score,][recall_score])'. The elements inside array-like follow the return of selected metric, ex: f1_score with Average None, return an array with the scores for each class. See this page for more about metrics: https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics """ # return of function ret = [] # Get accuracy rounded for 2 decimals accuracy = np.round(skmetrics.accuracy_score(y_true, y_pred, normalize, sample_weight), 2) # Define the metrics to be calculated, and possibles averages, and define columns name metrics = ('f1_score', 'precision_score', 'recall_score') averages = (None, 'micro', 'macro', 'weighted') columns = ('F1 Score', 'Precision', 'Recall') # Initialize the dictionary containing the metrics data_scores = {metric: {} for metric in metrics} # Calculate every metric for every average rounded for 2 decimals and save in 'data_scores' for metric in metrics: for metric_average in averages: data_scores[metric][metric_average] = np.round( getattr(skmetrics, metric)(y_true, y_pred, labels, pos_label, metric_average, sample_weight) , 2) # Create table, converting all metrics excluding the accuracy to DataFrame and add labels_name metrics_table = pd.DataFrame({metric: data_scores[metric][None] for metric in metrics}, index=np.unique(list(y_true) + list(y_pred)) if labels_name is None else labels_name) # Add an empty row to separate averages and accuracy metrics_table.loc[''] = ['' for _ in range(len(columns))] # Add an row with accuracy metrics_table.loc['accuracy'] = [accuracy] + ['-' for _ in range(len(columns)-1)] # Add an row for which average for index in range(len(averages) - 1): metrics_table.loc[averages[index + 1] + ' avg'] = [data_scores[metric][averages[index+1]] for metric in metrics] # With all metrics calculated, append the selected metrics and average to return if accuracy_score: ret.append(np.asarray(accuracy)) if f1_score: ret.append(np.asarray(data_scores['f1_score'][average])) if precision_score: ret.append(np.asarray(data_scores['precision_score'][average])) if recall_score: ret.append(np.asarray(data_scores['recall_score'][average])) # If plot table was selected, convert to Matplotlit Table and plot table if plot_table: cell = [] for row in range(len(metrics_table)): cell.append(metrics_table.iloc[row]) plt.table(cellText=cell, rowLabels=metrics_table.index, colLabels=metrics_table.columns, colWidths=[0.6/len(metrics_table.columns) for _ in columns], loc='center') plt.axis('off') plt.show() # If save table was selected, save file as format demand, converting to latex if 
necessary if save_table: if table_format == 'latex': f = open(file_path + '/' + file_name + '.tex', "w") f.write(metrics_table.to_latex()) f.close() elif table_format == 'plain-text': f = open(file_path + '/' + file_name + '.txt', 'w') f.write(metrics_table.to_string()) f.close() else: raise Exception("Table format argument invalid!") return ret def plot_distribution_data(X, y, features_name=None, save_image=False, plot=True, image_path="", image_name=""): """Plot distribution/histogram of data by pair of feature :param X: (array-like, shape (n_samples, n_features)) – Feature set to project, where n_samples is the number of samples and n_features is the number of features. :param y: (array-like, shape (n_samples) or (n_samples, n_features)) – Target relative to X for labeling. :param features_name: (array-like[string], shape(n_features)) – Name of each feature :param save_image: (Boolean, optional) – If True, save image in disc :param plot: (Boolean, optional) – If True, shows the plot :param image_path: (string, optional) – Path to save image, e.g.: '../results' :param image_name: (string, optional) – Image name """ d = pd.DataFrame(data=X, columns=features_name) # Add labels to data d['features'] = np.array(['-> '+str(label) for label in y]) # Drop duplicates and single samples d = d.drop_duplicates() d = d[d.groupby('features')['features'].transform('count').ge(3)] # Plot data distribution try: sns.pairplot(d, hue='features') except: plt.clf() sns.pairplot(d, hue='features', diag_kind='hist') # Save and/or show plotted distribution if save_image: plt.savefig(image_path + '/' + image_name) if plot: plt.show() def plot_correlation_matrix(X, features_name=None, save_image=False, plot=True, image_path="", image_name=""): """Plot correlation matrix of features :param X: (array-like, shape (n_samples, n_features)) – Feature set to project, where n_samples is the number of samples and n_features is the number of features. :param features_name: (array-like[string], shape(n_features)) – Name of each feature :param save_image: (Boolean, optional) – If True, save image in disc :param plot: (Boolean, optional) – If True, shows the plot :param image_path: (string, optional) – Path to save image, e.g.: '../results' :param image_name: (string, optional) – Image name """ d =
pd.DataFrame(data=X, columns=features_name)
pandas.DataFrame
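The plotting helpers in the row above delegate directly to scikit-plot and scikit-learn, so their effect can be shown with a short standalone sketch of the same workflow (normalized confusion matrix plus macro-averaged scores). The toy labels and the output file name are invented for illustration only; this is not the project's actual test code.

import matplotlib.pyplot as plt
import scikitplot as skplt
import sklearn.metrics as skmetrics

# Toy ground truth and predictions, for illustration only
y_true = [0, 0, 1, 1, 2, 2, 2, 1]
y_pred = [0, 1, 1, 1, 2, 0, 2, 1]

# Confusion matrix via scikit-plot, which the plot_confusion_matrix wrapper calls internally
skplt.metrics.plot_confusion_matrix(y_true, y_pred, normalize=True)
plt.savefig("confusion_matrix.png")   # rough equivalent of save_image=True
plt.show()

# Macro-averaged scores, rounded to 2 decimals as get_and_plot_metrics does
print(round(skmetrics.accuracy_score(y_true, y_pred), 2))
print(round(skmetrics.f1_score(y_true, y_pred, average='macro'), 2))
print(round(skmetrics.precision_score(y_true, y_pred, average='macro'), 2))
print(round(skmetrics.recall_score(y_true, y_pred, average='macro'), 2))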
import pandas as pd import numpy as np from itertools import combinations from numpy.linalg import svd from dask_ml.preprocessing import OneHotEncoder, DummyEncoder, OrdinalEncoder from sklearn.base import clone, BaseEstimator, TransformerMixin from sklearn.utils.multiclass import type_of_target from robusta.utils import all_subsets from category_encoders import * __all__ = [ 'LabelBinarizer', 'OrdinalEncoder', 'LabelEncoder1D', 'LabelEncoder', 'Categorizer1D', 'Categorizer', 'OneHotEncoder', 'DummyEncoder', 'FrequencyEncoder', 'FeatureCombiner', 'BackwardDifferenceEncoder', 'BinaryEncoder', 'HashingEncoder', 'HelmertEncoder', 'OrdinalEncoder', 'SumEncoder', 'PolynomialEncoder', 'BaseNEncoder', 'SVDEncoder', 'ThermometerEncoder1D', 'ThermometerEncoder', 'GroupByEncoder', ] class LabelEncoder1D(BaseEstimator, TransformerMixin): """Encode categories as integers. """ def __init__(self): pass def fit(self, y): self.cats_ = y.astype('category').cat.categories self.dtype = y.dtype self.mapper = dict(zip(self.cats_, range(len(self.cats_)))) self.inv_mapper = {val: key for key, val in self.mapper.items()} self.mapper[np.nan] = -1 self.inv_mapper[-1] = np.nan return self def transform(self, y): return y.map(self.mapper) def inverse_transform(self, y): return y.map(self.inv_mapper).astype(self.dtype) class LabelEncoder(LabelEncoder1D): def fit(self, X, y=None): self.transformers = {} for col in X.columns: self.transformers[col] = LabelEncoder1D().fit(X[col]) return self def transform(self, X): Xt = pd.DataFrame(index=X.index, columns=X.columns) for col, transformer in self.transformers.items(): Xt[col] = transformer.transform(X[col]) return Xt def inverse_transform(self, X): Xt = pd.DataFrame(index=X.index, columns=X.columns) for col, transformer in self.transformers.items(): Xt[col] = transformer.inverse_transform(X[col]) return Xt class Categorizer1D(BaseEstimator, TransformerMixin): """Convert categories to 'category' dtype of the same range. """ def __init__(self): pass def fit(self, y): """Learn categories Parameters ---------- y : Series Returns ------- self """ self.cats_ = y.astype('category').values.categories return self def transform(self, y): """Convert y to fitted categories Parameters ---------- y : Series Returns ------- yt : Series Transformed input. """ return pd.Categorical(y, categories=self.cats_) class Categorizer(BaseEstimator, TransformerMixin): """Convert categories to 'category' dtype of the same range. """ def __init__(self): pass def fit(self, X, y=None): """Learn categories Parameters ---------- X : DataFrame, shape [n_samples, n_features] The data to determine the categories of each feature. Returns ------- self """ self.transformers = {} for col in X.columns: self.transformers[col] = Categorizer1D().fit(X[col]) return self def transform(self, X): """Convert X to fitted categories Parameters ---------- X : DataFrame, shape [n_samples, n_features] The data to transform. Returns ------- Xt : DataFrame, shape [n_samples, n_features] Transformed input. """ Xt = pd.DataFrame(index=X.index) for col, transformer in self.transformers.items(): Xt[col] = transformer.transform(X[col]) return Xt class FrequencyEncoder(BaseEstimator, TransformerMixin): """Encode categorical features as it's frequencies. """ def __init__(self, normalize=True): self.normalize = normalize def fit(self, X, y=None): """Fit FrequencyEncoder to X. Parameters ---------- X : DataFrame, shape [n_samples, n_features] The data to determine frequencies. 
        Returns
        -------
        self
        """
        norm = self.normalize
        self.value_counts_ = {col: x.value_counts(norm) for col, x in X.items()}
        return self


    def transform(self, X):
        """Transform X using frequency encoding.

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            The data to transform.

        Returns
        -------
        Xt : DataFrame, shape [n_samples, n_features]
            Transformed input.
        """
        Xt = pd.DataFrame(index=X.index)

        for col, vc in self.value_counts_.items():
            Xt[col] = X[col].map(vc)

        return Xt.astype(float)


class FeatureCombiner(BaseEstimator, TransformerMixin):
    """Extract Feature Combinations
    """
    def __init__(self, orders=[2, 3], sep=','):
        self.orders = orders
        self.sep = sep

    def fit(self, X, y=None):
        subsets = all_subsets(X.columns, self.orders)
        self.subsets_ = [list(subset) for subset in subsets]
        self.n_subsets_ = len(self.subsets_)
        return self

    def transform(self, X):
        X = X.astype(str)
        # Join each fitted feature subset into a single combined column,
        # named after the joined column names
        X = pd.concat([X[subset].apply(self.sep.join, axis=1).rename(self.sep.join(subset))
                       for subset in self.subsets_], axis=1)
        return X


class SVDEncoder(BaseEstimator, TransformerMixin):
    """Encode categorical features by pairwise transforming categorical features
    to a count matrix and embedding it with SVD.
    """
    def __init__(self, n_components=0.9):
        self.n_components = n_components

    def fit(self, X, y=None):
        """Fit data

        Parameters
        ----------
        X : DataFrame, shape [n_samples, n_features]
            The data to determine frequencies.

        Returns
        -------
        self
        """
        # Check data
        assert not X.isna().any().any(), 'Missing values are not allowed'

        columns = X.columns
        self.embeddings_ = {col: pd.DataFrame(index=X[col].unique()) for col in columns}
        self.n_components_ = pd.DataFrame(index=columns, columns=columns)
        self.sigmas_ = {}

        for a, b in combinations(columns, 2):
            # Count Matrix
            x = X.groupby([a, b]).size().unstack().fillna(0)

            # SVD
            u, s, v = svd(x, full_matrices=False)
            v = v.T

            # n_components
            if isinstance(self.n_components, int):
                n_components_ = min(self.n_components, len(s))
            elif isinstance(self.n_components, float):
                ratio = s.cumsum()/s.sum()
                n_components_ = (ratio > self.n_components).argmax() + 1
            else:
                raise ValueError('Unknown n_components type:', self.n_components)

            self.n_components_[a, b] = n_components_
            self.n_components_[b, a] = n_components_

            # Truncate
            u_cols, v_cols = [], []
            for i in range(n_components_):
                u_cols.append('({},{})_svd{}'.format(a, b, i+1))
                v_cols.append('({},{})_svd{}'.format(b, a, i+1))

            u =
pd.DataFrame(u[:, :n_components_], columns=u_cols, index=x.index)
pandas.DataFrame
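The SVDEncoder.fit loop above builds, for every pair of categorical columns, a co-occurrence count matrix and keeps only enough singular vectors to cover the chosen share of the spectrum; the left singular vectors then act as an embedding of the first column's categories. A compressed, self-contained illustration of that idea follows; the column names and data are invented for the example and are not part of the library.

import numpy as np
import pandas as pd

# Toy categorical frame: two columns whose co-occurrence we want to embed
X = pd.DataFrame({
    "city": ["NY", "NY", "LA", "LA", "SF", "SF", "NY", "LA"],
    "device": ["ios", "android", "ios", "ios", "android", "ios", "ios", "android"],
})

# Count matrix: rows indexed by 'city' categories, columns by 'device' categories
counts = X.groupby(["city", "device"]).size().unstack(fill_value=0)

# SVD of the count matrix; keep enough components to explain 90% of the spectrum
u, s, vt = np.linalg.svd(counts.to_numpy(), full_matrices=False)
n_components = int((np.cumsum(s) / s.sum() > 0.9).argmax()) + 1

# Left singular vectors give one embedding row per 'city' category
city_embedding = pd.DataFrame(
    u[:, :n_components],
    index=counts.index,
    columns=["(city,device)_svd{}".format(i + 1) for i in range(n_components)],
)
print(city_embedding)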
## 1. Introduction ##
import pandas as pd

happiness2015 =
pd.read_csv("World_Happiness_2015.csv")
pandas.read_csv
import pandas as pd import streamlit as st import numpy as np import seaborn as sns import matplotlib.pyplot as plt from sklearn.model_selection import cross_val_score from sklearn import linear_model from sklearn import neighbors from sklearn.model_selection import GridSearchCV from sklearn.model_selection import KFold import category_encoders as ec from sklearn import preprocessing from sklearn.preprocessing import StandardScaler import plotly.graph_objects as go st.markdown("# DOPP3") st.markdown('## Which characteristics are predictive for countries with large populations living in extreme poverty?') with st.echo(): # READ TRANSFORMED CSV FILE raw = pd.read_csv("../data/transformed.csv") #st.write(raw.head(100)) feature_descriptions = pd.read_csv("../data/feature_descriptions.csv") #st.write(feature_descriptions) # FEATURES WITH LESS THAN 50% MISSING VALUES features = feature_descriptions.where(feature_descriptions['na_percent']<=50.0).dropna(0) # ONLY DEMOGRAFIC FEATURES! cols = features['Unnamed: 0'].tolist() #cols_to_drop = 7:13 + 18:25 cols = cols[0:7]+ cols[13:18] + [cols[25]] #st.write(cols) dataset = raw[cols] st.write(dataset.head(100)) st.markdown('## Exploratory Data Analysis') st.markdown('After selection of only demographic features with less than 50% missing values we are left with the following attributes: ') # PRINTING DESCRIPTIONS OF FEATURES for col in cols: desc = pd.DataFrame(features.where(features['Unnamed: 0'] == col).dropna(0)['descriptions']) st.write(col, ':', desc.to_string(header=False, index=False)) st.write('All attributes except *Location*(**categorical**) are **numeric**') # CORRELATION MATRIX plt.figure(figsize=(35,30)) sns.set(font_scale=3.1) sns.heatmap(dataset.corr(),annot=True) st.pyplot() st.write('From the correlation matrix, we can see that all *population measures correlate* with each other (as excepted). Moreover, we can notice a strong negative correlation between *fertility rate* and *life expectancy at birth* as well as *life expectancy at birth* and *mortality rate*.') #st.write(dataset.drop(labels=[cols[0], cols[-1]], axis=1).head(10)) # PLOT DISTRIBUTION OF THE ATTRIBUTES fig = dataset.drop(labels=[cols[0], cols[-1]], axis=1).hist(figsize=(14,14), xlabelsize=10, ylabelsize=10, bins=20) [x.title.set_size(20) for x in fig.ravel()] plt.tight_layout() st.pyplot() st.markdown('## Missing Values') st.markdown('The dataset contains a lot of missing values. We did **linear interpolation** for *each attribute* in *each country* separately. The values that were **not handled by the interpolation** were set to the *mean* of the column (probably because they are on the beginning/end of the column). Some attributes of the certain countries were without any values, we set those to 0. 
') with st.echo(): by_country = dataset.groupby(by=dataset['LOCATION']) dataset_full = pd.DataFrame(columns=cols) dataset_full2 = pd.DataFrame(columns=cols) for name, group in by_country : tdf = pd.DataFrame(columns=cols) tdf2 = pd.DataFrame(columns=cols) tdf['TIME'] = group['TIME'] tdf['poverty'] = group['poverty'] # cols with all NaN values all_null = group.isna().all() null_cols = all_null.where(all_null == 1).dropna(0).index.tolist() tdf[null_cols] = 0 # cols for interpolation cols_to_int = all_null.where(all_null == 0).dropna(0).index.tolist()[2:] cols_to_int.remove('poverty') #st.write(group[cols_to_int].isnull().values.sum()) tdf[cols_to_int] = group[cols_to_int].interpolate(method='linear', axis=0) tdf['LOCATION'] = name # fill the NaN values that were not interpolated tdf.fillna(tdf.mean(), inplace=True) # Another way to interpolate - take mean for the cols with all NaNs tdf2 = group.interpolate(method ='linear', limit_direction ='forward', axis = 0) tdf2 = tdf2.interpolate(method ='linear', limit_direction ='backward', axis = 0) tdf2['LOCATION'] = name tdf2.fillna(dataset.drop(labels=['LOCATION'], axis=1).mean(), inplace=True) dataset_full2 = pd.concat([dataset_full2,tdf2]) dataset_full = pd.concat([dataset_full,tdf]) dataset_full2.sort_index(inplace=True) dataset_full.sort_index(inplace=True) st.write(dataset_full2.head(100)) st.markdown('## ML Models') with st.echo(): # GROUND TRUTH AS NUMERIC y = dataset_full['poverty'] y = y.apply(lambda x: 1 if x==True else 0) #st.write(y.head(100)) # FEATURE MATRIX UNSCALED AND SCALED X = dataset_full.drop(labels=['poverty'], axis=1) X_2 = dataset_full2.drop(labels=['LOCATION', 'poverty'], axis=1) X_noloc = dataset_full.drop(labels=['poverty'], axis=1).drop(labels=['LOCATION'], axis=1) st.write(X_2) sc = StandardScaler() X_s = sc.fit_transform(X.drop(labels=['LOCATION'], axis=1)) X_s = pd.DataFrame(X_s, columns=cols[1:12]) #ATTRIBUTE 'LOCATION' - BINARY ENCODER encoder = ec.BinaryEncoder(cols=['LOCATION']) L_enc = encoder.fit_transform(X['LOCATION']) X = pd.concat([X.drop(labels=['LOCATION'], axis=1),L_enc], axis=1) X_s =
pd.concat([X_s,L_enc], axis=1)
pandas.concat
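The missing-value step above interpolates each attribute within each country separately and then falls back to the column mean for anything interpolation cannot reach. A minimal sketch of that per-group interpolation is shown below on an invented mini-frame; the column names here are hypothetical stand-ins for the DOPP3 attributes, not the project's real data.

import numpy as np
import pandas as pd

# Hypothetical mini-frame: one row per country and year, with gaps
df = pd.DataFrame({
    "LOCATION": ["AUT", "AUT", "AUT", "BEL", "BEL", "BEL"],
    "TIME": [2000, 2001, 2002, 2000, 2001, 2002],
    "fertility": [1.4, np.nan, 1.5, np.nan, 1.7, np.nan],
})

# Interpolate within each country separately...
filled = df.sort_values(["LOCATION", "TIME"]).copy()
filled["fertility"] = (
    filled.groupby("LOCATION")["fertility"]
          .transform(lambda s: s.interpolate(method="linear", limit_direction="both"))
)

# ...then fall back to the column mean for values interpolation could not reach
filled["fertility"] = filled["fertility"].fillna(filled["fertility"].mean())
print(filled)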
import numpy as np import matplotlib.pyplot as plt import lightkurve as lk from scipy import interpolate from astropy.io import fits from astropy.wcs import WCS from astropy.coordinates import SkyCoord from astropy import units as u from astropy.coordinates import SkyCoord, Angle from copy import deepcopy import pandas as pd from .R_load import R_val def Get_Catalogue(tpf, Catalog = 'gaia'): """ Get the coordinates and mag of all sources in the field of view from a specified catalogue. I/347/gaia2dis Distances to 1.33 billion stars in Gaia DR2 (Bailer-Jones+, 2018) ------- Inputs- ------- tpf class target pixel file lightkurve class Catalogue str Permitted options: 'gaia', 'dist', 'ps1' -------- Outputs- -------- coords array coordinates of sources Gmag array Gmags of sources """ c1 = SkyCoord(tpf.ra, tpf.dec, frame='icrs', unit='deg') # Use pixel scale for query size pix_scale = 4.0 # arcseconds / pixel for Kepler, default if tpf.mission == 'TESS': pix_scale = 21.0 # We are querying with a diameter as the radius, overfilling by 2x. from astroquery.vizier import Vizier Vizier.ROW_LIMIT = -1 if Catalog == 'gaia': catalog = "I/345/gaia2" elif Catalog == 'dist': catalog = "I/347/gaia2dis" elif Catalog == 'ps1': catalog = "II/349/ps1" elif Catalog == 'skymapper': catalog = 'II/358/smss' else: raise ValueError("{} not recognised as a catalog. Available options: 'gaia', 'dist','ps1'") result = Vizier.query_region(c1, catalog=[catalog], radius=Angle(np.max(tpf.shape[1:]) * pix_scale, "arcsec")) no_targets_found_message = ValueError('Either no sources were found in the query region ' 'or Vizier is unavailable') #too_few_found_message = ValueError('No sources found brighter than {:0.1f}'.format(magnitude_limit)) if result is None: raise no_targets_found_message elif len(result) == 0: raise no_targets_found_message result = result[catalog].to_pandas() return result def Get_Gaia(tpf, magnitude_limit = 18, Offset = 10): """ Get the coordinates and mag of all gaia sources in the field of view. ------- Inputs- ------- tpf class target pixel file lightkurve class magnitude_limit float cutoff for Gaia sources Offset int offset for the boundary -------- Outputs- -------- coords array coordinates of sources Gmag array Gmags of sources """ keys = ['objID','RAJ2000','DEJ2000','e_RAJ2000','e_DEJ2000','gmag','e_gmag','gKmag','e_gKmag','rmag', 'e_rmag','rKmag','e_rKmag','imag','e_imag','iKmag','e_iKmag','zmag','e_zmag','zKmag','e_zKmag', 'ymag','e_ymag','yKmag','e_yKmag','tmag','gaiaid','gaiamag','gaiadist','gaiadist_u','gaiadist_l', 'row','col'] result = Get_Catalogue(tpf, Catalog = 'gaia') result = result[result.Gmag < magnitude_limit] if len(result) == 0: raise no_targets_found_message radecs = np.vstack([result['RA_ICRS'], result['DE_ICRS']]).T coords = tpf.wcs.all_world2pix(radecs, 0) ## TODO, is origin supposed to be zero or one? 
Gmag = result['Gmag'].values #Jmag = result['Jmag'] ind = (((coords[:,0] >= -10) & (coords[:,1] >= -10)) & ((coords[:,0] < (tpf.shape[1] + 10)) & (coords[:,1] < (tpf.shape[2] + 10)))) coords = coords[ind] Gmag = Gmag[ind] Tmag = Gmag - 0.5 #Jmag = Jmag[ind] return coords, Tmag def mag2flux(mag,zp): f = 10**(2/5*(zp-mag)) return f def PS1_to_TESS_mag(PS1,ebv = 0): zp = 25 gr = (PS1.gmag - PS1.rmag).values eg, e = R_val('g',gr=gr,ext=ebv); er, e = R_val('r',gr=gr,ext=ebv) ei, e = R_val('i',gr=gr,ext=ebv); ez, e = R_val('z',gr=gr,ext=ebv) ey, e = R_val('y',gr=gr,ext=ebv); et, e = R_val('tess',gr=gr,ext=ebv) eg = eg * ebv; er = er * ebv; ei = ei * ebv; ez = ez * ebv ey = ey * ebv; et = et * ebv g = mag2flux(PS1.gmag.values - eg,zp) r = mag2flux(PS1.rmag.values - er,zp) i = mag2flux(PS1.imag.values - ei,zp) z = mag2flux(PS1.zmag.values - ez,zp) y = mag2flux(PS1.ymag.values - ey,zp) cr = 0.25582823; ci = 0.27609407; cz = 0.35809516 cy = 0.11244277; cp = 0.00049096 t = (cr*r + ci*i + cz*z + cy*y)*(g/i)**cp t = -2.5*np.log10(t) + zp + et PS1['tmag'] = t return PS1 def SM_to_TESS_mag(SM,ebv = 0): zp = 25 gr = (SM.gmag - SM.rmag).values eg, e = R_val('g',gr=gr,ext=ebv,system='skymapper') er, e = R_val('r',gr=gr,ext=ebv,system='skymapper') ei, e = R_val('i',gr=gr,ext=ebv,system='skymapper') ez, e = R_val('z',gr=gr,ext=ebv,system='skymapper') et, e = R_val('tess',gr=gr,ext=ebv) eg = eg * ebv; er = er * ebv; ei = ei * ebv ez = ez * ebv; et = et * ebv g = mag2flux(SM.gmag.values - eg,zp) r = mag2flux(SM.rmag.values - er,zp) i = mag2flux(SM.imag.values - ei,zp) z = mag2flux(SM.zmag.values - ez,zp) cr = 0.25825435; ci = 0.35298213 cz = 0.39388206; cp = -0.00170817 t = (cr*r + ci*i + cz*z)*(g/i)**cp t = -2.5*np.log10(t) + zp + et SM['tmag'] = t return SM def Get_PS1(tpf, magnitude_limit = 18, Offset = 10): """ Get the coordinates and mag of all PS1 sources in the field of view. ------- Inputs- ------- tpf class target pixel file lightkurve class magnitude_limit float cutoff for Gaia sources Offset int offset for the boundary -------- Outputs- -------- coords array coordinates of sources Gmag array Gmags of sources """ result = Get_Catalogue(tpf, Catalog = 'ps1') result = result[np.isfinite(result.rmag) & np.isfinite(result.imag)]# & np.isfinite(result.zmag)& np.isfinite(result.ymag)] result = PS1_to_TESS_mag(result) result = result[result.tmag < magnitude_limit] if len(result) == 0: raise no_targets_found_message radecs = np.vstack([result['RAJ2000'], result['DEJ2000']]).T coords = tpf.wcs.all_world2pix(radecs, 0) ## TODO, is origin supposed to be zero or one? 
Tessmag = result['tmag'].values #Jmag = result['Jmag'] ind = (((coords[:,0] >= -10) & (coords[:,1] >= -10)) & ((coords[:,0] < (tpf.shape[1] + 10)) & (coords[:,1] < (tpf.shape[2] + 10)))) coords = coords[ind] Tessmag = Tessmag[ind] #Jmag = Jmag[ind] return coords, Tessmag def Skymapper_df(sm): a = np.zeros(len(sm['ObjectId']),dtype=np.object) a[:] = 's' b = sm['ObjectId'].values.astype(str).astype(np.object) obj = a+b keep = ['objID','RAJ2000', 'DEJ2000','e_RAJ2000','e_DEJ2000','gmag', 'e_gmag', 'gKmag', 'e_gKmag', 'rmag', 'e_rmag', 'rKmag', 'e_rKmag', 'imag', 'e_imag', 'iKmag', 'e_iKmag', 'zmag', 'e_zmag', 'zKmag', 'e_zKmag', 'ymag', 'e_ymag', 'yKmag', 'e_yKmag', 'tmag'] df = pd.DataFrame(columns=keep) df['objID'] = obj df['RAJ2000'] = sm['RAICRS'].values df['DEJ2000'] = sm['DEICRS'].values df['e_RAJ2000'] = sm['e_RAICRS'].values df['e_DEJ2000'] = sm['e_DEICRS'].values df['gmag'] = sm['gPSF'].values df['rmag'] = sm['rPSF'].values df['imag'] = sm['iPSF'].values df['zmag'] = sm['zPSF'].values df['e_gmag'] = sm['gPSF'].values * np.nan df['e_rmag'] = sm['rPSF'].values * np.nan df['e_imag'] = sm['iPSF'].values * np.nan df['e_zmag'] = sm['zPSF'].values * np.nan df['gKmag'] = sm['gPetro'].values df['rKmag'] = sm['rPetro'].values df['iKmag'] = sm['iPetro'].values df['zKmag'] = sm['zPetro'].values return df def Unified_catalog(tpf,magnitude_limit=18,offset=10): """ Find all sources present in the TESS field from PS!, and Gaia. Catalogs are cross matched through distance, and Gaia distances are assigned from Gaia ID. Returns a pandas dataframe with all relevant catalog information ------ Input- ------ tpf lk.Targetpixelfile target pixel file of the TESS region ------- Output- ------- result pd.DataFrame Combined catalog """ pd.options.mode.chained_assignment = None # need to look at how the icrs coords are offset from J2000 # Get gaia catalogs gaia = Get_Catalogue(tpf, Catalog = 'gaia') gaiadist = Get_Catalogue(tpf, Catalog = 'dist') # Get PS1 and structure it if tpf.dec > -30: ps1 = Get_Catalogue(tpf, Catalog = 'ps1') ps1 = ps1[np.isfinite(ps1.rmag) & np.isfinite(ps1.imag)]# & np.isfinite(result.zmag)& np.isfinite(result.ymag)] ps1 = PS1_to_TESS_mag(ps1) keep = ['objID','RAJ2000', 'DEJ2000','e_RAJ2000','e_DEJ2000','gmag', 'e_gmag', 'gKmag', 'e_gKmag', 'rmag', 'e_rmag', 'rKmag', 'e_rKmag', 'imag', 'e_imag', 'iKmag', 'e_iKmag', 'zmag', 'e_zmag', 'zKmag', 'e_zKmag', 'ymag', 'e_ymag', 'yKmag', 'e_yKmag', 'tmag'] result = ps1[keep] else: sm = Get_Catalogue(tpf, Catalog = 'skymapper') sm = Skymapper_df(sm) sm = sm[np.isfinite(sm.rmag) & np.isfinite(sm.imag)]# & np.isfinite(result.zmag)& np.isfinite(result.ymag)] sm = PS1_to_TESS_mag(sm) result = sm # Define the columns for Gaia information result['gaiaid'] = 0 result['gaiaid'] = result['gaiaid'].astype(int) result['gaiamag'] = np.nan result['gaiadist'] = np.nan result['gaiadist_u'] = np.nan result['gaiadist_l'] = np.nan # Set up arrays to calculate the distance between all PS1 and Gaia sources dra = np.zeros((len(gaia),len(result))) dra = dra + gaia.RA_ICRS.values[:,np.newaxis] dra = dra - result.RAJ2000.values[np.newaxis,:] dde = np.zeros((len(gaia),len(result))) dde = dde + gaia.DE_ICRS.values[:,np.newaxis] dde = dde - result.DEJ2000.values[np.newaxis,:] # Calculate distance dist = np.sqrt(dde**2 + dra**2) if isinstance(dist,np.ndarray): ind = np.argmin(dist,axis=1) far = dist <= (1/60**2) * 1 # difference smaller than 1 arcsec # Get index of all valid matches and add the Gaia info indo = np.nansum(far,axis=1) > 0 ind = ind[indo] 
        result.gaiaid.iloc[ind] = gaia.Source.values[indo]
        result.gaiamag.iloc[ind] = gaia.Gmag.values[indo]
        #result.tmag.iloc[ind] = gaia.Gmag.values[indo] - .5
        # Add Gaia sources without matches to the dataframe
        keys = list(result.keys())
        indo = np.where(~indo)[0]
        for i in indo:
            df =
pd.DataFrame(columns=keys)
pandas.DataFrame
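Unified_catalog above cross-matches the PS1/SkyMapper rows to Gaia by broadcasting the RA/Dec offsets into a full pairwise distance matrix and accepting the nearest neighbour within 1 arcsec. A stripped-down sketch of that matching step follows, with made-up coordinates and, like the original, without the cos(Dec) correction; it is an illustration, not the package's code.

import numpy as np

# Hypothetical coordinate lists (degrees) for two catalogues to cross-match
gaia_ra  = np.array([10.0000, 10.0020, 10.0100])
gaia_dec = np.array([-5.0000, -5.0010, -5.0050])
ps1_ra   = np.array([10.00001, 10.01002])
ps1_dec  = np.array([-5.00002, -5.00501])

# Pairwise offsets: Gaia along axis 0, PS1 along axis 1 (same broadcasting as Unified_catalog)
dra = gaia_ra[:, None] - ps1_ra[None, :]
dde = gaia_dec[:, None] - ps1_dec[None, :]
dist = np.sqrt(dra**2 + dde**2)

# For each Gaia source, index of the closest PS1 source, accepted only if within 1 arcsec
closest = dist.argmin(axis=1)
matched = dist.min(axis=1) <= 1.0 / 3600.0
print(list(zip(np.where(matched)[0], closest[matched])))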
from collections import OrderedDict import datetime from datetime import timedelta from io import StringIO import json import os import numpy as np import pytest from pandas.compat import is_platform_32bit, is_platform_windows import pandas.util._test_decorators as td import pandas as pd from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json import pandas._testing as tm _seriesd = tm.getSeriesData() _tsd = tm.getTimeSeriesData() _frame = DataFrame(_seriesd) _intframe = DataFrame({k: v.astype(np.int64) for k, v in _seriesd.items()}) _tsframe = DataFrame(_tsd) _cat_frame = _frame.copy() cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15) _cat_frame.index = pd.CategoricalIndex(cat, name="E") _cat_frame["E"] = list(reversed(cat)) _cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64") _mixed_frame = _frame.copy() def assert_json_roundtrip_equal(result, expected, orient): if orient == "records" or orient == "values": expected = expected.reset_index(drop=True) if orient == "values": expected.columns = range(len(expected.columns)) tm.assert_frame_equal(result, expected) @pytest.mark.filterwarnings("ignore:the 'numpy' keyword is deprecated:FutureWarning") class TestPandasContainer: @pytest.fixture(autouse=True) def setup(self): self.intframe = _intframe.copy() self.tsframe = _tsframe.copy() self.mixed_frame = _mixed_frame.copy() self.categorical = _cat_frame.copy() yield del self.intframe del self.tsframe del self.mixed_frame def test_frame_double_encoded_labels(self, orient): df = DataFrame( [["a", "b"], ["c", "d"]], index=['index " 1', "index / 2"], columns=["a \\ b", "y / z"], ) result = read_json(df.to_json(orient=orient), orient=orient) expected = df.copy() assert_json_roundtrip_equal(result, expected, orient) @pytest.mark.parametrize("orient", ["split", "records", "values"]) def test_frame_non_unique_index(self, orient): df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"]) result = read_json(df.to_json(orient=orient), orient=orient) expected = df.copy() assert_json_roundtrip_equal(result, expected, orient) @pytest.mark.parametrize("orient", ["index", "columns"]) def test_frame_non_unique_index_raises(self, orient): df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"]) msg = f"DataFrame index must be unique for orient='{orient}'" with pytest.raises(ValueError, match=msg): df.to_json(orient=orient) @pytest.mark.parametrize("orient", ["split", "values"]) @pytest.mark.parametrize( "data", [ [["a", "b"], ["c", "d"]], [[1.5, 2.5], [3.5, 4.5]], [[1, 2.5], [3, 4.5]], [[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]], ], ) def test_frame_non_unique_columns(self, orient, data): df = DataFrame(data, index=[1, 2], columns=["x", "x"]) result = read_json( df.to_json(orient=orient), orient=orient, convert_dates=["x"] ) if orient == "values": expected = pd.DataFrame(data) if expected.iloc[:, 0].dtype == "datetime64[ns]": # orient == "values" by default will write Timestamp objects out # in milliseconds; these are internally stored in nanosecond, # so divide to get where we need # TODO: a to_epoch method would also solve; see GH 14772 expected.iloc[:, 0] = expected.iloc[:, 0].astype(np.int64) // 1000000 elif orient == "split": expected = df tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("orient", ["index", "columns", "records"]) def test_frame_non_unique_columns_raises(self, orient): df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"]) msg = f"DataFrame columns 
must be unique for orient='{orient}'" with pytest.raises(ValueError, match=msg): df.to_json(orient=orient) def test_frame_default_orient(self, float_frame): assert float_frame.to_json() == float_frame.to_json(orient="columns") @pytest.mark.parametrize("dtype", [False, float]) @pytest.mark.parametrize("convert_axes", [True, False]) @pytest.mark.parametrize("numpy", [True, False]) def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype, float_frame): data = float_frame.to_json(orient=orient) result = pd.read_json( data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype ) expected = float_frame assert_json_roundtrip_equal(result, expected, orient) @pytest.mark.parametrize("dtype", [False, np.int64]) @pytest.mark.parametrize("convert_axes", [True, False]) @pytest.mark.parametrize("numpy", [True, False]) def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype): data = self.intframe.to_json(orient=orient) result = pd.read_json( data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype ) expected = self.intframe.copy() if ( numpy and (is_platform_32bit() or is_platform_windows()) and not dtype and orient != "split" ): # TODO: see what is causing roundtrip dtype loss expected = expected.astype(np.int32) assert_json_roundtrip_equal(result, expected, orient) @pytest.mark.parametrize("dtype", [None, np.float64, np.int, "U3"]) @pytest.mark.parametrize("convert_axes", [True, False]) @pytest.mark.parametrize("numpy", [True, False]) def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype): df = DataFrame( np.zeros((200, 4)), columns=[str(i) for i in range(4)], index=[str(i) for i in range(200)], dtype=dtype, ) # TODO: do we even need to support U3 dtypes? if numpy and dtype == "U3" and orient != "split": pytest.xfail("Can't decode directly to array") data = df.to_json(orient=orient) result = pd.read_json( data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype ) expected = df.copy() if not dtype: expected = expected.astype(np.int64) # index columns, and records orients cannot fully preserve the string # dtype for axes as the index and column labels are used as keys in # JSON objects. JSON keys are by definition strings, so there's no way # to disambiguate whether those keys actually were strings or numeric # beforehand and numeric wins out. 
# TODO: Split should be able to support this if convert_axes and (orient in ("split", "index", "columns")): expected.columns = expected.columns.astype(np.int64) expected.index = expected.index.astype(np.int64) elif orient == "records" and convert_axes: expected.columns = expected.columns.astype(np.int64) assert_json_roundtrip_equal(result, expected, orient) @pytest.mark.parametrize("convert_axes", [True, False]) @pytest.mark.parametrize("numpy", [True, False]) def test_roundtrip_categorical(self, orient, convert_axes, numpy): # TODO: create a better frame to test with and improve coverage if orient in ("index", "columns"): pytest.xfail(f"Can't have duplicate index values for orient '{orient}')") data = self.categorical.to_json(orient=orient) if numpy and orient in ("records", "values"): pytest.xfail(f"Orient {orient} is broken with numpy=True") result = pd.read_json( data, orient=orient, convert_axes=convert_axes, numpy=numpy ) expected = self.categorical.copy() expected.index = expected.index.astype(str) # Categorical not preserved expected.index.name = None # index names aren't preserved in JSON if not numpy and orient == "index": expected = expected.sort_index() assert_json_roundtrip_equal(result, expected, orient) @pytest.mark.parametrize("convert_axes", [True, False]) @pytest.mark.parametrize("numpy", [True, False]) def test_roundtrip_empty(self, orient, convert_axes, numpy, empty_frame): data = empty_frame.to_json(orient=orient) result = pd.read_json( data, orient=orient, convert_axes=convert_axes, numpy=numpy ) expected = empty_frame.copy() # TODO: both conditions below are probably bugs if convert_axes: expected.index = expected.index.astype(float) expected.columns = expected.columns.astype(float) if numpy and orient == "values": expected = expected.reindex([0], axis=1).reset_index(drop=True) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("convert_axes", [True, False]) @pytest.mark.parametrize("numpy", [True, False]) def test_roundtrip_timestamp(self, orient, convert_axes, numpy): # TODO: improve coverage with date_format parameter data = self.tsframe.to_json(orient=orient) result = pd.read_json( data, orient=orient, convert_axes=convert_axes, numpy=numpy ) expected = self.tsframe.copy() if not convert_axes: # one off for ts handling # DTI gets converted to epoch values idx = expected.index.astype(np.int64) // 1000000 if orient != "split": # TODO: handle consistently across orients idx = idx.astype(str) expected.index = idx assert_json_roundtrip_equal(result, expected, orient) @pytest.mark.parametrize("convert_axes", [True, False]) @pytest.mark.parametrize("numpy", [True, False]) def test_roundtrip_mixed(self, orient, convert_axes, numpy): if numpy and orient != "split": pytest.xfail("Can't decode directly to array") index = pd.Index(["a", "b", "c", "d", "e"]) values = { "A": [0.0, 1.0, 2.0, 3.0, 4.0], "B": [0.0, 1.0, 0.0, 1.0, 0.0], "C": ["foo1", "foo2", "foo3", "foo4", "foo5"], "D": [True, False, True, False, True], } df = DataFrame(data=values, index=index) data = df.to_json(orient=orient) result = pd.read_json( data, orient=orient, convert_axes=convert_axes, numpy=numpy ) expected = df.copy() expected = expected.assign(**expected.select_dtypes("number").astype(np.int64)) if not numpy and orient == "index": expected = expected.sort_index() assert_json_roundtrip_equal(result, expected, orient) @pytest.mark.parametrize( "data,msg,orient", [ ('{"key":b:a:d}', "Expected object or value", "columns"), # too few indices ( '{"columns":["A","B"],' '"index":["2","3"],' 
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}', r"Shape of passed values is \(3, 2\), indices imply \(2, 2\)", "split", ), # too many columns ( '{"columns":["A","B","C"],' '"index":["1","2","3"],' '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}', "3 columns passed, passed data had 2 columns", "split", ), # bad key ( '{"badkey":["A","B"],' '"index":["2","3"],' '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}', r"unexpected key\(s\): badkey", "split", ), ], ) def test_frame_from_json_bad_data_raises(self, data, msg, orient): with pytest.raises(ValueError, match=msg): read_json(StringIO(data), orient=orient) @pytest.mark.parametrize("dtype", [True, False]) @pytest.mark.parametrize("convert_axes", [True, False]) @pytest.mark.parametrize("numpy", [True, False]) def test_frame_from_json_missing_data(self, orient, convert_axes, numpy, dtype): num_df = DataFrame([[1, 2], [4, 5, 6]]) result = read_json( num_df.to_json(orient=orient), orient=orient, convert_axes=convert_axes, dtype=dtype, ) assert np.isnan(result.iloc[0, 2]) obj_df = DataFrame([["1", "2"], ["4", "5", "6"]]) result = read_json( obj_df.to_json(orient=orient), orient=orient, convert_axes=convert_axes, dtype=dtype, ) if not dtype: # TODO: Special case for object data; maybe a bug? assert result.iloc[0, 2] is None else: assert np.isnan(result.iloc[0, 2]) @pytest.mark.parametrize("inf", [np.inf, np.NINF]) @pytest.mark.parametrize("dtype", [True, False]) def test_frame_infinity(self, orient, inf, dtype): # infinities get mapped to nulls which get mapped to NaNs during # deserialisation df = DataFrame([[1, 2], [4, 5, 6]]) df.loc[0, 2] = inf result = read_json(df.to_json(), dtype=dtype) assert np.isnan(result.iloc[0, 2]) @pytest.mark.skipif( is_platform_32bit(), reason="not compliant on 32-bit, xref #15865" ) @pytest.mark.parametrize( "value,precision,expected_val", [ (0.95, 1, 1.0), (1.95, 1, 2.0), (-1.95, 1, -2.0), (0.995, 2, 1.0), (0.9995, 3, 1.0), (0.99999999999999944, 15, 1.0), ], ) def test_frame_to_json_float_precision(self, value, precision, expected_val): df = pd.DataFrame([dict(a_float=value)]) encoded = df.to_json(double_precision=precision) assert encoded == f'{{"a_float":{{"0":{expected_val}}}}}' def test_frame_to_json_except(self): df = DataFrame([1, 2, 3]) msg = "Invalid value 'garbage' for option 'orient'" with pytest.raises(ValueError, match=msg): df.to_json(orient="garbage") def test_frame_empty(self): df = DataFrame(columns=["jim", "joe"]) assert not df._is_mixed_type tm.assert_frame_equal( read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False ) # GH 7445 result = pd.DataFrame({"test": []}, index=[]).to_json(orient="columns") expected = '{"test":{}}' assert result == expected def test_frame_empty_mixedtype(self): # mixed type df = DataFrame(columns=["jim", "joe"]) df["joe"] = df["joe"].astype("i8") assert df._is_mixed_type tm.assert_frame_equal( read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False ) def test_frame_mixedtype_orient(self): # GH10289 vals = [ [10, 1, "foo", 0.1, 0.01], [20, 2, "bar", 0.2, 0.02], [30, 3, "baz", 0.3, 0.03], [40, 4, "qux", 0.4, 0.04], ] df = DataFrame( vals, index=list("abcd"), columns=["1st", "2nd", "3rd", "4th", "5th"] ) assert df._is_mixed_type right = df.copy() for orient in ["split", "index", "columns"]: inp = df.to_json(orient=orient) left = read_json(inp, orient=orient, convert_axes=False) tm.assert_frame_equal(left, right) right.index = np.arange(len(df)) inp = df.to_json(orient="records") left = read_json(inp, orient="records", convert_axes=False) 
tm.assert_frame_equal(left, right) right.columns = np.arange(df.shape[1]) inp = df.to_json(orient="values") left = read_json(inp, orient="values", convert_axes=False) tm.assert_frame_equal(left, right) def test_v12_compat(self, datapath): df = DataFrame( [ [1.56808523, 0.65727391, 1.81021139, -0.17251653], [-0.2550111, -0.08072427, -0.03202878, -0.17581665], [1.51493992, 0.11805825, 1.629455, -1.31506612], [-0.02765498, 0.44679743, 0.33192641, -0.27885413], [0.05951614, -2.69652057, 1.28163262, 0.34703478], ], columns=["A", "B", "C", "D"], index=pd.date_range("2000-01-03", "2000-01-07"), ) df["date"] = pd.Timestamp("19920106 18:21:32.12") df.iloc[3, df.columns.get_loc("date")] = pd.Timestamp("20130101") df["modified"] = df["date"] df.iloc[1, df.columns.get_loc("modified")] = pd.NaT dirpath = datapath("io", "json", "data") v12_json = os.path.join(dirpath, "tsframe_v012.json") df_unser = pd.read_json(v12_json) tm.assert_frame_equal(df, df_unser) df_iso = df.drop(["modified"], axis=1) v12_iso_json = os.path.join(dirpath, "tsframe_iso_v012.json") df_unser_iso = pd.read_json(v12_iso_json) tm.assert_frame_equal(df_iso, df_unser_iso) def test_blocks_compat_GH9037(self): index = pd.date_range("20000101", periods=10, freq="H") df_mixed = DataFrame( OrderedDict( float_1=[ -0.92077639, 0.77434435, 1.25234727, 0.61485564, -0.60316077, 0.24653374, 0.28668979, -2.51969012, 0.95748401, -1.02970536, ], int_1=[ 19680418, 75337055, 99973684, 65103179, 79373900, 40314334, 21290235, 4991321, 41903419, 16008365, ], str_1=[ "78c608f1", "64a99743", "13d2ff52", "ca7f4af2", "97236474", "bde7e214", "1a6bde47", "b1190be5", "7a669144", "8d64d068", ], float_2=[ -0.0428278, -1.80872357, 3.36042349, -0.7573685, -0.48217572, 0.86229683, 1.08935819, 0.93898739, -0.03030452, 1.43366348, ], str_2=[ "14f04af9", "d085da90", "4bcfac83", "81504caf", "2ffef4a9", "08e2f5c4", "07e1af03", "addbd4a7", "1f6a09ba", "4bfc4d87", ], int_2=[ 86967717, 98098830, 51927505, 20372254, 12601730, 20884027, 34193846, 10561746, 24867120, 76131025, ], ), index=index, ) # JSON deserialisation always creates unicode strings df_mixed.columns = df_mixed.columns.astype("unicode") df_roundtrip = pd.read_json(df_mixed.to_json(orient="split"), orient="split") tm.assert_frame_equal( df_mixed, df_roundtrip, check_index_type=True, check_column_type=True, by_blocks=True, check_exact=True, ) def test_frame_nonprintable_bytes(self): # GH14256: failing column caused segfaults, if it is not the last one class BinaryThing: def __init__(self, hexed): self.hexed = hexed self.binary = bytes.fromhex(hexed) def __str__(self) -> str: return self.hexed hexed = "574b4454ba8c5eb4f98a8f45" binthing = BinaryThing(hexed) # verify the proper conversion of printable content df_printable = DataFrame({"A": [binthing.hexed]}) assert df_printable.to_json() == f'{{"A":{{"0":"{hexed}"}}}}' # check if non-printable content throws appropriate Exception df_nonprintable = DataFrame({"A": [binthing]}) msg = "Unsupported UTF-8 sequence length when encoding string" with pytest.raises(OverflowError, match=msg): df_nonprintable.to_json() # the same with multiple columns threw segfaults df_mixed = DataFrame({"A": [binthing], "B": [1]}, columns=["A", "B"]) with pytest.raises(OverflowError): df_mixed.to_json() # default_handler should resolve exceptions for non-string types result = df_nonprintable.to_json(default_handler=str) expected = f'{{"A":{{"0":"{hexed}"}}}}' assert result == expected assert ( df_mixed.to_json(default_handler=str) == f'{{"A":{{"0":"{hexed}"}},"B":{{"0":1}}}}' ) def 
test_label_overflow(self): # GH14256: buffer length not checked when writing label result = pd.DataFrame({"bar" * 100000: [1], "foo": [1337]}).to_json() expected = f'{{"{"bar" * 100000}":{{"0":1}},"foo":{{"0":1337}}}}' assert result == expected def test_series_non_unique_index(self): s = Series(["a", "b"], index=[1, 1]) msg = "Series index must be unique for orient='index'" with pytest.raises(ValueError, match=msg): s.to_json(orient="index") tm.assert_series_equal( s, read_json(s.to_json(orient="split"), orient="split", typ="series") ) unser = read_json(s.to_json(orient="records"), orient="records", typ="series") tm.assert_numpy_array_equal(s.values, unser.values) def test_series_default_orient(self, string_series): assert string_series.to_json() == string_series.to_json(orient="index") @pytest.mark.parametrize("numpy", [True, False]) def test_series_roundtrip_simple(self, orient, numpy, string_series): data = string_series.to_json(orient=orient) result = pd.read_json(data, typ="series", orient=orient, numpy=numpy) expected = string_series if orient in ("values", "records"): expected = expected.reset_index(drop=True) if orient != "split": expected.name = None tm.assert_series_equal(result, expected) @pytest.mark.parametrize("dtype", [False, None]) @pytest.mark.parametrize("numpy", [True, False]) def test_series_roundtrip_object(self, orient, numpy, dtype, object_series): data = object_series.to_json(orient=orient) result = pd.read_json( data, typ="series", orient=orient, numpy=numpy, dtype=dtype ) expected = object_series if orient in ("values", "records"): expected = expected.reset_index(drop=True) if orient != "split": expected.name = None tm.assert_series_equal(result, expected) @pytest.mark.parametrize("numpy", [True, False]) def test_series_roundtrip_empty(self, orient, numpy, empty_series): data = empty_series.to_json(orient=orient) result = pd.read_json(data, typ="series", orient=orient, numpy=numpy) expected = empty_series if orient in ("values", "records"): expected = expected.reset_index(drop=True) else: expected.index = expected.index.astype(float) tm.assert_series_equal(result, expected) @pytest.mark.parametrize("numpy", [True, False]) def test_series_roundtrip_timeseries(self, orient, numpy, datetime_series): data = datetime_series.to_json(orient=orient) result = pd.read_json(data, typ="series", orient=orient, numpy=numpy) expected = datetime_series if orient in ("values", "records"): expected = expected.reset_index(drop=True) if orient != "split": expected.name = None tm.assert_series_equal(result, expected) @pytest.mark.parametrize("dtype", [np.float64, np.int]) @pytest.mark.parametrize("numpy", [True, False]) def test_series_roundtrip_numeric(self, orient, numpy, dtype): s = Series(range(6), index=["a", "b", "c", "d", "e", "f"]) data = s.to_json(orient=orient) result = pd.read_json(data, typ="series", orient=orient, numpy=numpy) expected = s.copy() if orient in ("values", "records"): expected = expected.reset_index(drop=True) tm.assert_series_equal(result, expected) def test_series_to_json_except(self): s = Series([1, 2, 3]) msg = "Invalid value 'garbage' for option 'orient'" with pytest.raises(ValueError, match=msg): s.to_json(orient="garbage") def test_series_from_json_precise_float(self): s = Series([4.56, 4.56, 4.56]) result = read_json(s.to_json(), typ="series", precise_float=True) tm.assert_series_equal(result, s, check_index_type=False) def test_series_with_dtype(self): # GH 21986 s = Series([4.56, 4.56, 4.56]) result = read_json(s.to_json(), typ="series", 
dtype=np.int64) expected = Series([4] * 3) tm.assert_series_equal(result, expected) @pytest.mark.parametrize( "dtype,expected", [ (True, Series(["2000-01-01"], dtype="datetime64[ns]")), (False, Series([946684800000])), ], ) def test_series_with_dtype_datetime(self, dtype, expected): s = Series(["2000-01-01"], dtype="datetime64[ns]") data = s.to_json() result = pd.read_json(data, typ="series", dtype=dtype) tm.assert_series_equal(result, expected) def test_frame_from_json_precise_float(self): df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]]) result = read_json(df.to_json(), precise_float=True) tm.assert_frame_equal( result, df, check_index_type=False, check_column_type=False ) def test_typ(self): s = Series(range(6), index=["a", "b", "c", "d", "e", "f"], dtype="int64") result = read_json(s.to_json(), typ=None) tm.assert_series_equal(result, s) def test_reconstruction_index(self): df = DataFrame([[1, 2, 3], [4, 5, 6]]) result = read_json(df.to_json()) tm.assert_frame_equal(result, df) df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["A", "B", "C"]) result = read_json(df.to_json()) tm.assert_frame_equal(result, df) def test_path(self, float_frame): with tm.ensure_clean("test.json") as path: for df in [ float_frame, self.intframe, self.tsframe, self.mixed_frame, ]: df.to_json(path) read_json(path) def test_axis_dates(self, datetime_series): # frame json = self.tsframe.to_json() result = read_json(json) tm.assert_frame_equal(result, self.tsframe) # series json = datetime_series.to_json() result = read_json(json, typ="series") tm.assert_series_equal(result, datetime_series, check_names=False) assert result.name is None def test_convert_dates(self, datetime_series): # frame df = self.tsframe.copy() df["date"] = Timestamp("20130101") json = df.to_json() result = read_json(json) tm.assert_frame_equal(result, df) df["foo"] = 1.0 json = df.to_json(date_unit="ns") result = read_json(json, convert_dates=False) expected = df.copy() expected["date"] = expected["date"].values.view("i8") expected["foo"] = expected["foo"].astype("int64") tm.assert_frame_equal(result, expected) # series ts = Series(Timestamp("20130101"), index=datetime_series.index) json = ts.to_json() result = read_json(json, typ="series") tm.assert_series_equal(result, ts) @pytest.mark.parametrize("date_format", ["epoch", "iso"]) @pytest.mark.parametrize("as_object", [True, False]) @pytest.mark.parametrize( "date_typ", [datetime.date, datetime.datetime, pd.Timestamp] ) def test_date_index_and_values(self, date_format, as_object, date_typ): data = [date_typ(year=2020, month=1, day=1), pd.NaT] if as_object: data.append("a") ser = pd.Series(data, index=data) result = ser.to_json(date_format=date_format) if date_format == "epoch": expected = '{"1577836800000":1577836800000,"null":null}' else: expected = ( '{"2020-01-01T00:00:00.000Z":"2020-01-01T00:00:00.000Z","null":null}' ) if as_object: expected = expected.replace("}", ',"a":"a"}') assert result == expected @pytest.mark.parametrize( "infer_word", [ "trade_time", "date", "datetime", "sold_at", "modified", "timestamp", "timestamps", ], ) def test_convert_dates_infer(self, infer_word): # GH10747 from pandas.io.json import dumps data = [{"id": 1, infer_word: 1036713600000}, {"id": 2}] expected = DataFrame( [[1, Timestamp("2002-11-08")], [2, pd.NaT]], columns=["id", infer_word] ) result = read_json(dumps(data))[["id", infer_word]] tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "date,date_unit", [ ("20130101 20:43:42.123", None), ("20130101 20:43:42", "s"), 
("20130101 20:43:42.123", "ms"), ("20130101 20:43:42.123456", "us"), ("20130101 20:43:42.123456789", "ns"), ], ) def test_date_format_frame(self, date, date_unit): df = self.tsframe.copy() df["date"] = Timestamp(date) df.iloc[1, df.columns.get_loc("date")] = pd.NaT df.iloc[5, df.columns.get_loc("date")] = pd.NaT if date_unit: json = df.to_json(date_format="iso", date_unit=date_unit) else: json = df.to_json(date_format="iso") result = read_json(json) expected = df.copy() expected.index = expected.index.tz_localize("UTC") expected["date"] = expected["date"].dt.tz_localize("UTC") tm.assert_frame_equal(result, expected) def test_date_format_frame_raises(self): df = self.tsframe.copy() msg = "Invalid value 'foo' for option 'date_unit'" with pytest.raises(ValueError, match=msg): df.to_json(date_format="iso", date_unit="foo") @pytest.mark.parametrize( "date,date_unit", [ ("20130101 20:43:42.123", None), ("20130101 20:43:42", "s"), ("20130101 20:43:42.123", "ms"), ("20130101 20:43:42.123456", "us"), ("20130101 20:43:42.123456789", "ns"), ], ) def test_date_format_series(self, date, date_unit, datetime_series): ts = Series(Timestamp(date), index=datetime_series.index) ts.iloc[1] = pd.NaT ts.iloc[5] = pd.NaT if date_unit: json = ts.to_json(date_format="iso", date_unit=date_unit) else: json = ts.to_json(date_format="iso") result = read_json(json, typ="series") expected = ts.copy() expected.index = expected.index.tz_localize("UTC") expected = expected.dt.tz_localize("UTC") tm.assert_series_equal(result, expected) def test_date_format_series_raises(self, datetime_series): ts = Series(Timestamp("20130101 20:43:42.123"), index=datetime_series.index) msg = "Invalid value 'foo' for option 'date_unit'" with pytest.raises(ValueError, match=msg): ts.to_json(date_format="iso", date_unit="foo") @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"]) def test_date_unit(self, unit): df = self.tsframe.copy() df["date"] = Timestamp("20130101 20:43:42") dl = df.columns.get_loc("date") df.iloc[1, dl] = Timestamp("19710101 20:43:42") df.iloc[2, dl] = Timestamp("21460101 20:43:42") df.iloc[4, dl] = pd.NaT json = df.to_json(date_format="epoch", date_unit=unit) # force date unit result = read_json(json, date_unit=unit) tm.assert_frame_equal(result, df) # detect date unit result = read_json(json, date_unit=None) tm.assert_frame_equal(result, df) def test_weird_nested_json(self): # this used to core dump the parser s = r"""{ "status": "success", "data": { "posts": [ { "id": 1, "title": "A blog post", "body": "Some useful content" }, { "id": 2, "title": "Another blog post", "body": "More content" } ] } }""" read_json(s) def test_doc_example(self): dfj2 = DataFrame(np.random.randn(5, 2), columns=list("AB")) dfj2["date"] = Timestamp("20130101") dfj2["ints"] = range(5) dfj2["bools"] = True dfj2.index = pd.date_range("20130101", periods=5) json = dfj2.to_json() result = read_json(json, dtype={"ints": np.int64, "bools": np.bool_}) tm.assert_frame_equal(result, result) def test_misc_example(self): # parsing unordered input fails result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]', numpy=True) expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) error_msg = """DataFrame\\.index are different DataFrame\\.index values are different \\(100\\.0 %\\) \\[left\\]: Index\\(\\['a', 'b'\\], dtype='object'\\) \\[right\\]: RangeIndex\\(start=0, stop=2, step=1\\)""" with pytest.raises(AssertionError, match=error_msg): tm.assert_frame_equal(result, expected, check_index_type=False) result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" 
:1}]') expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) tm.assert_frame_equal(result, expected) @tm.network @pytest.mark.single def test_round_trip_exception_(self): # GH 3867 csv = "https://raw.github.com/hayd/lahman2012/master/csvs/Teams.csv" df = pd.read_csv(csv) s = df.to_json() result = pd.read_json(s) tm.assert_frame_equal(result.reindex(index=df.index, columns=df.columns), df) @tm.network @pytest.mark.single @pytest.mark.parametrize( "field,dtype", [ ["created_at", pd.DatetimeTZDtype(tz="UTC")], ["closed_at", "datetime64[ns]"], ["updated_at", pd.DatetimeTZDtype(tz="UTC")], ], ) def test_url(self, field, dtype): url = "https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5" # noqa result = read_json(url, convert_dates=True) assert result[field].dtype == dtype def test_timedelta(self): converter = lambda x: pd.to_timedelta(x, unit="ms") s = Series([timedelta(23), timedelta(seconds=5)]) assert s.dtype == "timedelta64[ns]" result = pd.read_json(s.to_json(), typ="series").apply(converter) tm.assert_series_equal(result, s) s = Series([timedelta(23), timedelta(seconds=5)], index=pd.Index([0, 1])) assert s.dtype == "timedelta64[ns]" result = pd.read_json(s.to_json(), typ="series").apply(converter) tm.assert_series_equal(result, s) frame = DataFrame([timedelta(23), timedelta(seconds=5)]) assert frame[0].dtype == "timedelta64[ns]" tm.assert_frame_equal(frame, pd.read_json(frame.to_json()).apply(converter)) frame = DataFrame( { "a": [timedelta(days=23), timedelta(seconds=5)], "b": [1, 2], "c": pd.date_range(start="20130101", periods=2), } ) result = pd.read_json(frame.to_json(date_unit="ns")) result["a"] = pd.to_timedelta(result.a, unit="ns") result["c"] = pd.to_datetime(result.c) tm.assert_frame_equal(frame, result) def test_mixed_timedelta_datetime(self): frame = DataFrame( {"a": [timedelta(23), pd.Timestamp("20130101")]}, dtype=object ) expected = DataFrame( {"a": [pd.Timedelta(frame.a[0]).value, pd.Timestamp(frame.a[1]).value]} ) result = pd.read_json(frame.to_json(date_unit="ns"), dtype={"a": "int64"}) tm.assert_frame_equal(result, expected, check_index_type=False) @pytest.mark.parametrize("as_object", [True, False]) @pytest.mark.parametrize("date_format", ["iso", "epoch"]) @pytest.mark.parametrize("timedelta_typ", [pd.Timedelta, timedelta]) def test_timedelta_to_json(self, as_object, date_format, timedelta_typ): # GH28156: to_json not correctly formatting Timedelta data = [timedelta_typ(days=1), timedelta_typ(days=2), pd.NaT] if as_object: data.append("a") ser = pd.Series(data, index=data) if date_format == "iso": expected = ( '{"P1DT0H0M0S":"P1DT0H0M0S","P2DT0H0M0S":"P2DT0H0M0S","null":null}' ) else: expected = '{"86400000":86400000,"172800000":172800000,"null":null}' if as_object: expected = expected.replace("}", ',"a":"a"}') result = ser.to_json(date_format=date_format) assert result == expected def test_default_handler(self): value = object() frame = DataFrame({"a": [7, value]}) expected = DataFrame({"a": [7, str(value)]}) result = pd.read_json(frame.to_json(default_handler=str)) tm.assert_frame_equal(expected, result, check_index_type=False) def test_default_handler_indirect(self): from pandas.io.json import dumps def default(obj): if isinstance(obj, complex): return [("mathjs", "Complex"), ("re", obj.real), ("im", obj.imag)] return str(obj) df_list = [ 9, DataFrame( {"a": [1, "STR", complex(4, -5)], "b": [float("nan"), None, "N/A"]}, columns=["a", "b"], ), ] expected = ( '[9,[[1,null],["STR",null],[[["mathjs","Complex"],' '["re",4.0],["im",-5.0]],"N\\/A"]]]' ) 
assert dumps(df_list, default_handler=default, orient="values") == expected def test_default_handler_numpy_unsupported_dtype(self): # GH12554 to_json raises 'Unhandled numpy dtype 15' df = DataFrame( {"a": [1, 2.3, complex(4, -5)], "b": [float("nan"), None, complex(1.2, 0)]}, columns=["a", "b"], ) expected = ( '[["(1+0j)","(nan+0j)"],' '["(2.3+0j)","(nan+0j)"],' '["(4-5j)","(1.2+0j)"]]' ) assert df.to_json(default_handler=str, orient="values") == expected def test_default_handler_raises(self): msg = "raisin" def my_handler_raises(obj): raise TypeError(msg) with pytest.raises(TypeError, match=msg): DataFrame({"a": [1, 2, object()]}).to_json( default_handler=my_handler_raises ) with pytest.raises(TypeError, match=msg): DataFrame({"a": [1, 2, complex(4, -5)]}).to_json( default_handler=my_handler_raises ) def test_categorical(self): # GH4377 df.to_json segfaults with non-ndarray blocks df = DataFrame({"A": ["a", "b", "c", "a", "b", "b", "a"]}) df["B"] = df["A"] expected = df.to_json() df["B"] = df["A"].astype("category") assert expected == df.to_json() s = df["A"] sc = df["B"] assert s.to_json() == sc.to_json() def test_datetime_tz(self): # GH4377 df.to_json segfaults with non-ndarray blocks tz_range = pd.date_range("20130101", periods=3, tz="US/Eastern") tz_naive = tz_range.tz_convert("utc").tz_localize(None) df = DataFrame({"A": tz_range, "B": pd.date_range("20130101", periods=3)}) df_naive = df.copy() df_naive["A"] = tz_naive expected = df_naive.to_json() assert expected == df.to_json() stz = Series(tz_range) s_naive = Series(tz_naive) assert stz.to_json() == s_naive.to_json() def test_sparse(self): # GH4377 df.to_json segfaults with non-ndarray blocks df = pd.DataFrame(np.random.randn(10, 4)) df.loc[:8] = np.nan sdf = df.astype("Sparse") expected = df.to_json() assert expected == sdf.to_json() s = pd.Series(np.random.randn(10)) s.loc[:8] = np.nan ss = s.astype("Sparse") expected = s.to_json() assert expected == ss.to_json() @pytest.mark.parametrize( "ts", [ Timestamp("2013-01-10 05:00:00Z"), Timestamp("2013-01-10 00:00:00", tz="US/Eastern"), Timestamp("2013-01-10 00:00:00-0500"), ], ) def test_tz_is_utc(self, ts): from pandas.io.json import dumps exp = '"2013-01-10T05:00:00.000Z"' assert dumps(ts, iso_dates=True) == exp dt = ts.to_pydatetime() assert dumps(dt, iso_dates=True) == exp @pytest.mark.parametrize( "tz_range", [ pd.date_range("2013-01-01 05:00:00Z", periods=2), pd.date_range("2013-01-01 00:00:00", periods=2, tz="US/Eastern"), pd.date_range("2013-01-01 00:00:00-0500", periods=2), ], ) def test_tz_range_is_utc(self, tz_range): from pandas.io.json import dumps exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]' dfexp = ( '{"DT":{' '"0":"2013-01-01T05:00:00.000Z",' '"1":"2013-01-02T05:00:00.000Z"}}' ) assert dumps(tz_range, iso_dates=True) == exp dti = pd.DatetimeIndex(tz_range) assert dumps(dti, iso_dates=True) == exp df = DataFrame({"DT": dti}) result = dumps(df, iso_dates=True) assert result == dfexp def test_read_inline_jsonl(self): # GH9180 result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True) expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) tm.assert_frame_equal(result, expected) @td.skip_if_not_us_locale def test_read_s3_jsonl(self, s3_resource): # GH17200 result = read_json("s3n://pandas-test/items.jsonl", lines=True) expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) tm.assert_frame_equal(result, expected) def test_read_local_jsonl(self): # GH17200 with tm.ensure_clean("tmp_items.json") as path: with open(path, "w") as infile: 
infile.write('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n') result = read_json(path, lines=True) expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) tm.assert_frame_equal(result, expected) def test_read_jsonl_unicode_chars(self): # GH15132: non-ascii unicode characters # \u201d == RIGHT DOUBLE QUOTATION MARK # simulate file handle json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n' json = StringIO(json) result = read_json(json, lines=True) expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"]) tm.assert_frame_equal(result, expected) # simulate string json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n' result = read_json(json, lines=True) expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"]) tm.assert_frame_equal(result, expected) def test_read_json_large_numbers(self): # GH18842 json = '{"articleId": "1404366058080022500245"}' json = StringIO(json) result = read_json(json, typ="series") expected = Series(1.404366e21, index=["articleId"]) tm.assert_series_equal(result, expected) json = '{"0": {"articleId": "1404366058080022500245"}}' json = StringIO(json) result = read_json(json) expected = DataFrame(1.404366e21, index=["articleId"], columns=[0]) tm.assert_frame_equal(result, expected) def test_to_jsonl(self): # GH9180 df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) result = df.to_json(orient="records", lines=True) expected = '{"a":1,"b":2}\n{"a":1,"b":2}' assert result == expected df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=["a", "b"]) result = df.to_json(orient="records", lines=True) expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}' assert result == expected tm.assert_frame_equal(pd.read_json(result, lines=True), df) # GH15096: escaped characters in columns and data df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"]) result = df.to_json(orient="records", lines=True) expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}' assert result == expected tm.assert_frame_equal(pd.read_json(result, lines=True), df) # TODO: there is a near-identical test for pytables; can we share? 
def test_latin_encoding(self): # GH 13774 pytest.skip("encoding not implemented in .to_json(), xref #13774") values = [ [b"E\xc9, 17", b"", b"a", b"b", b"c"], [b"E\xc9, 17", b"a", b"b", b"c"], [b"EE, 17", b"", b"a", b"b", b"c"], [b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"], [b"", b"a", b"b", b"c"], [b"\xf8\xfc", b"a", b"b", b"c"], [b"A\xf8\xfc", b"", b"a", b"b", b"c"], [np.nan, b"", b"b", b"c"], [b"A\xf8\xfc", np.nan, b"", b"b", b"c"], ] values = [ [x.decode("latin-1") if isinstance(x, bytes) else x for x in y] for y in values ] examples = [] for dtype in ["category", object]: for val in values: examples.append(Series(val, dtype=dtype)) def roundtrip(s, encoding="latin-1"): with tm.ensure_clean("test.json") as path: s.to_json(path, encoding=encoding) retr = read_json(path, encoding=encoding) tm.assert_series_equal(s, retr, check_categorical=False) for s in examples: roundtrip(s) def test_data_frame_size_after_to_json(self): # GH15344 df = DataFrame({"a": [str(1)]}) size_before = df.memory_usage(index=True, deep=True).sum() df.to_json() size_after = df.memory_usage(index=True, deep=True).sum() assert size_before == size_after @pytest.mark.parametrize( "index", [None, [1, 2], [1.0, 2.0], ["a", "b"], ["1", "2"], ["1.", "2."]] ) @pytest.mark.parametrize("columns", [["a", "b"], ["1", "2"], ["1.", "2."]]) def test_from_json_to_json_table_index_and_columns(self, index, columns): # GH25433 GH25435 expected = DataFrame([[1, 2], [3, 4]], index=index, columns=columns) dfjson = expected.to_json(orient="table") result = pd.read_json(dfjson, orient="table") tm.assert_frame_equal(result, expected) def test_from_json_to_json_table_dtypes(self): # GH21345 expected = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]}) dfjson = expected.to_json(orient="table") result = pd.read_json(dfjson, orient="table") tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("dtype", [True, {"b": int, "c": int}]) def test_read_json_table_dtype_raises(self, dtype): # GH21345 df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]}) dfjson = df.to_json(orient="table") msg = "cannot pass both dtype and orient='table'" with pytest.raises(ValueError, match=msg): pd.read_json(dfjson, orient="table", dtype=dtype) def test_read_json_table_convert_axes_raises(self): # GH25433 GH25435 df = DataFrame([[1, 2], [3, 4]], index=[1.0, 2.0], columns=["1.", "2."]) dfjson = df.to_json(orient="table") msg = "cannot pass both convert_axes and orient='table'" with pytest.raises(ValueError, match=msg): pd.read_json(dfjson, orient="table", convert_axes=True) @pytest.mark.parametrize( "data, expected", [ ( DataFrame([[1, 2], [4, 5]], columns=["a", "b"]), {"columns": ["a", "b"], "data": [[1, 2], [4, 5]]}, ), ( DataFrame([[1, 2], [4, 5]], columns=["a", "b"]).rename_axis("foo"), {"columns": ["a", "b"], "data": [[1, 2], [4, 5]]}, ), ( DataFrame( [[1, 2], [4, 5]], columns=["a", "b"], index=[["a", "b"], ["c", "d"]] ), {"columns": ["a", "b"], "data": [[1, 2], [4, 5]]}, ), (Series([1, 2, 3], name="A"), {"name": "A", "data": [1, 2, 3]}), ( Series([1, 2, 3], name="A").rename_axis("foo"), {"name": "A", "data": [1, 2, 3]}, ), ( Series([1, 2], name="A", index=[["a", "b"], ["c", "d"]]), {"name": "A", "data": [1, 2]}, ), ], ) def test_index_false_to_json_split(self, data, expected): # GH 17394 # Testing index=False in to_json with orient='split' result = data.to_json(orient="split", index=False) result = json.loads(result) assert result == expected @pytest.mark.parametrize( "data", [ (DataFrame([[1, 2], [4, 5]], columns=["a", "b"])), 
            (DataFrame([[1, 2], [4, 5]], columns=["a", "b"]).rename_axis("foo")),
            (
                DataFrame(
                    [[1, 2], [4, 5]], columns=["a", "b"], index=[["a", "b"], ["c", "d"]]
                )
            ),
            (Series([1, 2, 3], name="A")),
            (Series([1, 2, 3], name="A").rename_axis("foo")),
            (Series([1, 2], name="A", index=[["a", "b"], ["c", "d"]])),
        ],
    )
    def test_index_false_to_json_table(self, data):
        # GH 17394
        # Testing index=False in to_json with orient='table'
        result = data.to_json(orient="table", index=False)
        result = json.loads(result)
        expected = {
            "schema":
pd.io.json.build_table_schema(data, index=False)
pandas.io.json.build_table_schema
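# --- Hedged illustration (not part of the dataset row above) ----------------
# The completion uses pandas.io.json.build_table_schema, the helper that builds
# the Table Schema dict embedded under "schema" by to_json(orient="table").
# A minimal sketch, assuming a pandas version where the helper is reachable as
# pd.io.json.build_table_schema; the frame below is illustrative only.
import pandas as pd

df = pd.DataFrame([[1, 2], [4, 5]], columns=["a", "b"])

# index=False mirrors the test above: the index is left out of the field list
# and no primaryKey entry is emitted.
schema = pd.io.json.build_table_schema(df, index=False)
print(schema["fields"])  # e.g. [{'name': 'a', 'type': 'integer'}, {'name': 'b', 'type': 'integer'}]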
import numpy as np
import pandas as pd
import os.path

from pytest import approx, raises, mark, skip

import lenskit.metrics.predict as pm
import lk_test_utils as lktu


def test_check_missing_empty():
    pm._check_missing(pd.Series([]), 'error')
    # should pass
    assert True


def test_check_missing_has_values():
    pm._check_missing(pd.Series([1, 3, 2]), 'error')
    # should pass
    assert True


def test_check_missing_nan_raises():
    with raises(ValueError):
        pm._check_missing(pd.Series([1, np.nan, 3]), 'error')


def test_check_missing_raises():
    data = pd.Series([1, 7, 3], ['a', 'b', 'd'])
    ref = pd.Series([3, 2, 4], ['b', 'c', 'd'])
    ref, data = ref.align(data, join='left')
    with raises(ValueError):
        pm._check_missing(data, 'error')


def test_check_joined_ok():
    data = pd.Series([1, 7, 3], ['a', 'b', 'd'])
    ref = pd.Series([3, 2, 4], ['b', 'c', 'd'])
    ref, data = ref.align(data, join='inner')
    pm._check_missing(ref, 'error')
    # should get here
    assert True


def test_check_missing_ignore():
    data = pd.Series([1, 7, 3], ['a', 'b', 'd'])
    ref = pd.Series([3, 2, 4], ['b', 'c', 'd'])
    ref, data = ref.align(data, join='left')
    pm._check_missing(data, 'ignore')
    # should get here
    assert True


def test_rmse_one():
    rmse = pm.rmse([1], [1])
    assert isinstance(rmse, float)
    assert rmse == approx(0)
    rmse = pm.rmse([1], [2])
    assert rmse == approx(1)
    rmse = pm.rmse([1], [0.5])
    assert rmse == approx(0.5)


def test_rmse_two():
    rmse = pm.rmse([1, 2], [1, 2])
    assert isinstance(rmse, float)
    assert rmse == approx(0)
    rmse = pm.rmse([1, 1], [2, 2])
    assert rmse == approx(1)
    rmse = pm.rmse([1, 3], [3, 1])
    assert rmse == approx(2)
    rmse = pm.rmse([1, 3], [3, 2])
    assert rmse == approx(np.sqrt(5 / 2))


def test_rmse_array_two():
    rmse = pm.rmse(np.array([1, 2]), np.array([1, 2]))
    assert isinstance(rmse, float)
    assert rmse == approx(0)
    rmse = pm.rmse(np.array([1, 1]), np.array([2, 2]))
    assert rmse == approx(1)
    rmse = pm.rmse(np.array([1, 3]), np.array([3, 1]))
    assert rmse == approx(2)


def test_rmse_series_two():
    rmse = pm.rmse(pd.Series([1, 2]), pd.Series([1, 2]))
    assert isinstance(rmse, float)
    assert rmse == approx(0)
    rmse = pm.rmse(pd.Series([1, 1]), pd.Series([2, 2]))
    assert rmse == approx(1)
    rmse = pm.rmse(pd.Series([1, 3]), pd.Series([3, 1]))
    assert rmse == approx(2)


def test_rmse_series_subset_axis():
    rmse = pm.rmse(pd.Series([1, 3], ['a', 'c']), pd.Series([3, 4, 1], ['a', 'b', 'c']))
    assert rmse == approx(2)


def test_rmse_series_missing_value_error():
    with raises(ValueError):
        pm.rmse(pd.Series([1, 3], ['a', 'd']), pd.Series([3, 4, 1], ['a', 'b', 'c']))


def test_rmse_series_missing_value_ignore():
    rmse = pm.rmse(pd.Series([1, 3], ['a', 'd']), pd.Series([3, 4, 1], ['a', 'b', 'c']), missing='ignore')
    assert rmse == approx(2)


def test_mae_two():
    mae = pm.mae([1, 2], [1, 2])
    assert isinstance(mae, float)
    assert mae == approx(0)
    mae = pm.mae([1, 1], [2, 2])
    assert mae == approx(1)
    mae = pm.mae([1, 3], [3, 1])
    assert mae == approx(2)
    mae = pm.mae([1, 3], [3, 2])
    assert mae == approx(1.5)


def test_mae_array_two():
    mae = pm.mae(np.array([1, 2]), np.array([1, 2]))
    assert isinstance(mae, float)
    assert mae == approx(0)
    mae = pm.mae(np.array([1, 1]), np.array([2, 2]))
    assert mae == approx(1)
    mae = pm.mae(np.array([1, 3]), np.array([3, 1]))
    assert mae == approx(2)


def test_mae_series_two():
    mae = pm.mae(pd.Series([1, 2]), pd.Series([1, 2]))
    assert isinstance(mae, float)
    assert mae == approx(0)
    mae = pm.mae(pd.Series([1, 1]),
pd.Series([2, 2])
pandas.Series
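# --- Hedged illustration (not part of the dataset row above) ----------------
# The tests in this row lean on pandas.Series index alignment: Series.align
# introduces NaN for labels present in only one of the two indexes, which is
# what pm._check_missing then flags. A small self-contained sketch:
import pandas as pd

pred = pd.Series([1, 7, 3], index=['a', 'b', 'd'])
truth = pd.Series([3, 2, 4], index=['b', 'c', 'd'])

# join='left' keeps truth's labels; 'c' has no prediction, so it aligns to NaN.
truth_l, pred_l = truth.align(pred, join='left')
print(pred_l.isna().sum())  # 1 -> would raise under missing='error'

# join='inner' keeps only the shared labels, so nothing is missing.
truth_i, pred_i = truth.align(pred, join='inner')
print(list(pred_i.index))  # ['b', 'd']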
from flask import Flask, render_template, request
import csv
import pandas as pd
import numpy as np
from rank_bm25 import BM25Okapi

###########


def data_process(train_data, test_data, whole_data):
    #### READ Train, test, and all replies data ####
    file = open(train_data)
    read_csv = csv.reader(file)
    train = []
    for row in read_csv:
        train.append(row)
    train = train[1:]
    train = pd.DataFrame(train, columns=['message_id', 'response_id', 'rating'])

    file = open(test_data)
    read_csv = csv.reader(file)
    test = []
    for row in read_csv:
        test.append(row)
    test = test[1:]
    test =
pd.DataFrame(test,columns=['message_id','response_id'])
pandas.DataFrame
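# --- Hedged illustration (not part of the dataset row above) ----------------
# data_process builds each frame by looping over csv.reader and then calling
# pandas.DataFrame. A sketch of the equivalent one-step load with pd.read_csv;
# the file names here are hypothetical, not taken from the original app.
import pandas as pd

# header=0 drops the file's own header row; names= supplies the column labels;
# dtype=str keeps the IDs as strings, matching the csv.reader behaviour.
train = pd.read_csv("train.csv", header=0, dtype=str,
                    names=['message_id', 'response_id', 'rating'])
test = pd.read_csv("test.csv", header=0, dtype=str,
                   names=['message_id', 'response_id'])
print(train.shape, test.shape)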