| prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90) |
|---|---|---|
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from functools import reduce
import math
import numpy as np
import pandas as pd
from ctypes import *
# from .talib_series import LINEARREG_SLOPE
from easyquant.easydealutils.easymongo import MongoIo
import datetime
try:
import talib
except ImportError:
print('PLEASE install TALIB to call these methods')
import os
# lib = cdll.LoadLibrary("%s/%s" % (os.path.abspath("."), "talib_ext.so"))
lib = cdll.LoadLibrary("/usr/share/talib/%s" % ("talib_ext.so"))
"""
Series 类
这个是下面以DataFrame为输入的基础函数
return pd.Series format
"""
__STOCK_INFOS = pd.DataFrame()
__REALTIME_DATAS = {}
def __INITDATAS(dateStr = None):
mongo = MongoIo()
global __STOCK_INFOS, __REALTIME_DATAS
if len(__STOCK_INFOS) == 0:
__STOCK_INFOS = mongo.get_stock_info()
# STOCK_INFOS =
    if dateStr is None:
dateObj = datetime.datetime.now()
else:
# datetime.datetime.strptime(st, "%Y-%m-%d %H:%M:%S"))
dateObj = datetime.datetime.strptime(dateStr, "%Y-%m-%d")
weekDay = dateObj.weekday()
if weekDay > 4:
dateObj = dateObj - datetime.timedelta(weekDay - 4)
dateStr = dateObj.strftime('%Y-%m-%d')
if dateStr not in __REALTIME_DATAS.keys():
__REALTIME_DATAS[dateStr] = mongo.get_realtime(dateStr=dateStr)
return dateStr
def __STOCK_INFO(code):
__INITDATAS()
return __STOCK_INFOS.query("code=='%s'" % code)
def __REALTIME_DATA(code, dateStr):
global __REALTIME_DATAS
dateStr = __INITDATAS(dateStr)
try:
return __REALTIME_DATAS[dateStr].query("code=='%s'" % code)
except Exception as e:
# print("__REALTIME_DATA", code, dateStr, e)
return pd.DataFrame()
def EMA(Series, N):
# return pd.Series.ewm(Series, span=N, min_periods=N - 1, adjust=True).mean()
Series = Series.fillna(0)
res = talib.EMA(Series.values, N)
return pd.Series(res, index=Series.index)
def EXPMA(Series, N):
# return pd.Series.ewm(Series, span=N, min_periods=N - 1, adjust=True).mean()
Series = Series.fillna(0)
res = talib.EMA(Series.values, N)
return pd.Series(res, index=Series.index)
def MA(Series, N):
# return pd.Series.rolling(Series, N).mean()
Series = Series.fillna(0)
res = talib.MA(Series.values, N)
return pd.Series(res, index=Series.index)
# Williams SMA, see https://www.joinquant.com/post/867
def SMA(Series, N, M=1):
"""
威廉SMA算法
本次修正主要是对于返回值的优化,现在的返回值会带上原先输入的索引index
2018/5/3
@yutiansut
"""
ret = []
i = 1
length = len(Series)
    # skip the leading NaN values in the input Series
while i < length:
if np.isnan(Series.iloc[i]):
ret.append(0)
i += 1
else:
break
if i < length:
preY = Series.iloc[i] # Y'
else:
preY = None
ret.append(preY)
while i < length:
Y = (M * Series.iloc[i] + (N - M) * preY) / float(N)
ret.append(Y)
preY = Y
i += 1
return pd.Series(ret, index=Series.tail(len(ret)).index)
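# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A quick look at the moving-average helpers above on a toy close-price series.
# It assumes TA-Lib is installed, since MA/EMA delegate to talib.
def _demo_moving_averages():
    close = pd.Series([10.0, 10.5, 10.2, 10.8, 11.0, 11.3])
    print(MA(close, 3))      # simple moving average; NaN for the first 2 bars
    print(EMA(close, 3))     # exponential moving average via talib.EMA
    print(SMA(close, 3, 1))  # Williams SMA with weight M=1, aligned to the input index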
def DIFF(Series, N=1):
return pd.Series(Series).diff(N)
def HHV(Series, NS):
if isinstance(NS, pd.Series):
ncount = len(NS)
tf_p = c_float * ncount
np_OUT = tf_p(0)
na_Series = np.asarray(Series).astype(np.float32)
na_NS = np.asarray(NS).astype(np.int32)
np_S = cast(na_Series.ctypes.data, POINTER(c_float))
np_N = cast(na_NS.ctypes.data, POINTER(c_int))
lib.hhv(ncount, np_OUT, np_S, np_N)
return pd.Series(np.asarray(np_OUT), dtype=np.float64, index=Series.index)
if NS == 0:
return Series
return pd.Series(Series).rolling(NS).max()
def LLV(Series, NS):
if isinstance(NS, pd.Series):
ncount = len(NS)
tf_p = c_float * ncount
np_OUT = tf_p(0)
na_Series = np.asarray(Series).astype(np.float32)
na_NS = np.asarray(NS).astype(np.int32)
np_S = cast(na_Series.ctypes.data, POINTER(c_float))
np_N = cast(na_NS.ctypes.data, POINTER(c_int))
lib.llv(ncount, np_OUT, np_S, np_N)
return pd.Series(np.asarray(np_OUT), dtype=np.float64, index=Series.index)
if NS == 0:
return Series
return pd.Series(Series).rolling(NS).min()
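# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# With an integer window, HHV/LLV fall back to pandas rolling max/min; passing a
# Series of window lengths instead dispatches to the compiled talib_ext helpers,
# which require the shared library loaded above.
def _demo_hhv_llv():
    close = pd.Series([10.0, 11.0, 9.5, 12.0, 11.5])
    print(HHV(close, 3))  # rolling 3-bar high
    print(LLV(close, 3))  # rolling 3-bar low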
def SUMS(Series, NS):
    ncount = len(NS)
    tf_p = c_float * ncount
    np_OUT = tf_p(0)
    na_Series = np.asarray(Series).astype(np.float32)
    na_NS = np.asarray(NS).astype(np.int32)
    np_S = cast(na_Series.ctypes.data, POINTER(c_float))
    np_N = cast(na_NS.ctypes.data, POINTER(c_int))
    lib.sum(ncount, np_OUT, np_S, np_N)
    return pd.Series(np.asarray(np_OUT), dtype=np.float64)
def DMA(Series, Weight):
ncount = len(Series)
tf_p = c_float * ncount
np_OUT = tf_p(0)
na_Series = np.asarray(Series).astype(np.float32)
na_Weight = np.asarray(Weight.fillna(1)).astype(np.float32)
np_S = cast(na_Series.ctypes.data, POINTER(c_float))
np_W = cast(na_Weight.ctypes.data, POINTER(c_float))
lib.dma(ncount, np_OUT, np_S, np_W)
return pd.Series(np.asarray(np_OUT), dtype=np.float64, index=Series.index)
def SUM(Series, N):
if N == 0:
return Series.cumsum()
else:
return pd.Series.rolling(Series, N).sum()
def ABS(Series):
return abs(Series)
def MAX(A, B):
var = IF(A > B, A, B)
return var
def MIN(A, B):
var = IF(A < B, A, B)
return var
def SINGLE_CROSS(A, B):
if A.iloc[-2] < B.iloc[-2] and A.iloc[-1] > B.iloc[-1]:
return True
else:
return False
def CROSS(A, B):
"""A<B then A>B A上穿B B下穿A
Arguments:
A {[type]} -- [description]
B {[type]} -- [description]
Returns:
[type] -- [description]
"""
if isinstance(A, int) or isinstance(A, float):
A1 = pd.Series(B).copy()
A1[:] = A
A = A1
var = np.where(A < B, 1, 0)
return (pd.Series(var, index=A.index).diff() < 0).apply(int)
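# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# CROSS flags the bar on which A moves from below B to above B.
def _demo_cross():
    a = pd.Series([1.0, 2.0, 3.0, 4.0])
    b = pd.Series([2.5, 2.5, 2.5, 2.5])
    print(CROSS(a, b))  # 0, 0, 1, 0 -- the upward cross happens on the third bar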
def COUNT(COND, N):
"""
2018/05/23 修改
参考https://github.com/QUANTAXIS/QUANTAXIS/issues/429
现在返回的是series
"""
return pd.Series(np.where(COND, 1, 0), index=COND.index).rolling(N).sum()
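# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# COUNT returns, for each bar, how many of the last N bars satisfied the condition.
def _demo_count():
    cond = pd.Series([True, False, True, True])
    print(COUNT(cond, 2))  # NaN, 1.0, 1.0, 2.0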
def IF(COND, V1, V2):
# if isinstance(V1, np.int64) or isinstance(V1, np.int):
if isinstance(COND, np.bool_) or isinstance(COND, bool):
if COND:
return V1
else:
return V2
var = np.where(COND, V1, V2)
if isinstance(V1, pd.Series):
return pd.Series(var, index=V1.index)
else:
return pd.Series(var, index=COND.index)
def IFAND(COND1, COND2, V1, V2):
    if isinstance(COND1, (np.bool_, bool)) and isinstance(COND2, (np.bool_, bool)):
if COND1 and COND2:
return V1
else:
return V2
if isinstance(COND1, np.bool_) or isinstance(COND1, bool):
temp = COND1
COND1 = COND2
COND2 = temp
elif isinstance(COND2, np.bool_) or isinstance(COND2, bool):
pass
else:
if len(COND1) < len(COND2):
COND2=COND2[COND2.index>=COND1.index[0]]
elif len(COND1) > len(COND2):
COND1 = COND1[COND1.index >= COND2.index[0]]
var = np.where(np.logical_and(COND1,COND2), V1, V2)
return pd.Series(var, index=COND1.index)
def IFAND3(COND1, COND2, COND3, V1, V2):
# if len(COND1) < len(COND2):
# COND2=COND2[COND2.index>=COND1.index[0]]
# elif len(COND1) > len(COND2):
# COND1 = COND1[COND1.index >= COND2.index[0]]
var1 = np.where(np.logical_and(COND1,COND2), True, False)
var = np.where(np.logical_and(var1, COND3), V1, V2)
return pd.Series(var, index=COND1.index)
# if isinstance(V1, pd.Series):
# return pd.Series(var, index=V1.index)
# else:
# return pd.Series(var, index=COND1.index)
def IFAND4(COND1, COND2, COND3, COND4, V1, V2):
# if len(COND1) < len(COND2):
# COND2=COND2[COND2.index>=COND1.index[0]]
# elif len(COND1) > len(COND2):
# COND1 = COND1[COND1.index >= COND2.index[0]]
var1 = np.where(np.logical_and(COND1,COND2), True, False)
var2 = np.where(np.logical_and(var1, COND3), True, False)
var = np.where(np.logical_and(var2, COND4), V1, V2)
return pd.Series(var, index=COND1.index)
# if isinstance(V1, pd.Series):
# return pd.Series(var, index=V1.index)
# else:
# return pd.Series(var, index=COND1.index)
def IFAND5(COND1, COND2, COND3, COND4, COND5, V1, V2):
# if len(COND1) < len(COND2):
# COND2=COND2[COND2.index>=COND1.index[0]]
# elif len(COND1) > len(COND2):
# COND1 = COND1[COND1.index >= COND2.index[0]]
var1 = np.where(np.logical_and(COND1,COND2), True, False)
var2 = np.where(np.logical_and(var1, COND3), True, False)
var3 = np.where(np.logical_and(var2, COND4), True, False)
var = np.where(np.logical_and(var3, COND5), V1, V2)
    return pd.Series(var, index=COND1.index)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Reconstruct SKOS
Script which analyses and reconstructs a SKOS hierarchy.
"""
__author__ = "<NAME>"
__version__ = "1.0.0"
__license__ = "cc0-1.0"
import os
import csv
import pandas as pd
import pickle
from xml.dom.minidom import parse
from datetime import datetime
from analyse import *
import os.path
def main():
start = datetime.now()
"""
print("Please provide the name of the input file located in the 'data' folder (e.g. example.rdf):")
source_file = os.path.abspath('..\data\\') + input()
print("Please provide a name for the output files (e.g. example_transformed.rdf) (only 'example' is replaced by the input and placed in the 'out folder')")
output_name = input()
"""
targeted_input_files = ["rma-skos-lib"]
input_file = targeted_input_files[0]
source_file = os.path.join(os.path.abspath('..\\xslt_mapping\\output'), input_file) + '.rdf'
output_name = input_file
transformed_file = 'output/{}_transformed.rdf'.format(output_name)
issue_file = 'output/{}_differences.csv'.format(output_name)
typeless_file = 'output/{}_typeless.csv'.format(output_name)
analyse_file = 'output/{}_analyse.xlsx'.format(output_name)
dict_file = 'output/{}_dictionary.pkl'.format(output_name)
print('{} started analysis'.format(time(start)))
dom = parse(source_file)
print('{} parsed {}'.format(time(start), source_file))
concepts = list_concepts(dom)
print('{} analyzing {} concepts'
.format(time(start), len(concepts)))
concept_schemes = referenced_concept_schemes(dom)
print('{} identified {} concept schemes'
.format(time(start), len(concept_schemes)))
# Add unknown scheme, for concepts without a type
concept_schemes.append('http://hdl.handle.net/10934/RM0001.SCHEME.UNKOWN')
schemeless_concepts = list_schemeless_concepts(dom)
print('{} {} concepts without a concept scheme'
.format(time(start), len(schemeless_concepts)))
missing_references = missing_outward_references(dom)
missing_references = restructure_missing_references(missing_references)
print('{} found {} hierarchical inconsistencies'
.format(time(start), len(missing_references)))
undefined_concepts = undefined_concept_references(dom)
print('{} found {} references to undefined concepts'
.format(time(start), len(undefined_concepts)))
new_dom = dom.cloneNode(dom)
new_dom = add_concept_schemes(new_dom, concept_schemes)
print('{} added {} concept schemes to dom'
.format(time(start), len(concept_schemes)))
new_dom = fix_loose_references(new_dom, missing_references)
    print('{} added the {} missing references to file {}'
.format(time(start), len(missing_references), transformed_file))
new_dom = remove_undefined_references(new_dom, undefined_concepts)
print('{} removed the {} undefined references from file {}'
.format(time(start), len(undefined_concepts), transformed_file))
topconcepts = find_top_concepts(new_dom)
print('{} found {} concepts without broader concepts'
.format(time(start), len(topconcepts)))
schemes_dict = find_all_schemes(new_dom, 'no')
print('{} created a dictionary of schemes'
.format(time(start)))
new_dom = add_top_concepts(new_dom, topconcepts, schemes_dict)
print('{} added topconcept nodes to file {}'
.format(time(start), transformed_file))
the_properties = all_properties(new_dom, 'yes')
print('{} created property dictionary for each concept'
.format(time(start)))
write_dom_to_file(new_dom, transformed_file)
print('{} wrote new dom to file {}'
.format(time(start), transformed_file))
save_schemeless(schemeless_concepts, typeless_file)
print('{} wrote concepts without scheme to file {}'
.format(time(start), typeless_file))
save_differences(missing_references, undefined_concepts, issue_file)
print('{} wrote hierarchical differences to file {}'
.format(time(start), issue_file))
write_analyse_file(the_properties, analyse_file)
    print('{} wrote analysis results to file {}'
.format(time(start), analyse_file))
output = open(dict_file, 'wb')
properties_dict = {}
for concept in the_properties:
the_id = concept['id']
properties_dict[the_id] = concept
pickle.dump(properties_dict, output)
output.close()
print('{} Saved the properties of each concept to file {}'
.format(time(start), dict_file))
def create_output_dir():
if not os.path.exists('output'):
os.mkdir('output')
def add_concept_schemes(dom, concept_schemes):
# Add missing skos:ConceptScheme nodes to the root
root = dom.childNodes.item(0)
for scheme in concept_schemes:
scheme_node = dom.createElement('skos:ConceptScheme')
root.appendChild(scheme_node)
scheme_node.setAttribute('rdf:about', scheme)
concept_node = dom.createElement('dct:title')
scheme_node.appendChild(concept_node)
concept_node.setAttribute('xml:lang', 'nl')
if scheme == 'http://hdl.handle.net/10934/RM0001.SCHEME.UNKOWN':
text_node = dom.createTextNode('Scheme Unknown')
else:
text_node = dom.createTextNode(scheme[42:])
concept_node.appendChild(text_node)
return dom
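# --- Hedged usage sketch (added for illustration; not part of the original script) ---
# Minimal demonstration of add_concept_schemes on a tiny in-memory RDF document;
# the namespace declarations below are only what the helper needs to run.
def _demo_add_concept_schemes():
    from xml.dom.minidom import parseString
    doc = parseString(
        '<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" '
        'xmlns:skos="http://www.w3.org/2004/02/skos/core#" '
        'xmlns:dct="http://purl.org/dc/terms/"/>'
    )
    doc = add_concept_schemes(doc, ['http://hdl.handle.net/10934/RM0001.SCHEME.UNKOWN'])
    print(doc.toprettyxml())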
def remove_reference(dom, reference):
# Remove a reference from a concept
c1 = reference[2]
c2 = reference[0]
if c1 == c2:
relation = inverse_property(reference[1])
else:
c1 = reference[0]
c2 = reference[2]
relation = reference[1]
c1 = get_concept(dom, c1)
if c1 is not None:
property_node = get_relation_property(c1, relation, c2)
c1.removeChild(property_node)
return dom
def remove_undefined_references(dom, references):
# remove all undefined references
for reference in references:
dom = remove_reference(dom, reference)
return dom
def fix_loose_references(dom, references):
# A fix of the loose references
for reference in references:
c1 = reference[0]
relation = reference[1]
c2 = reference[2]
if c1 == c2:
dom = remove_reference(dom, reference)
else:
c1 = get_concept(dom, c1)
if c1 is not None:
new_node = dom.createElement(relation)
c1.appendChild(new_node)
new_node.setAttribute('rdf:resource', c2)
return dom
def add_top_concepts(dom, concepts, schemes):
# Add the topconcept nodes to the concepts without broader concepts and to the conceptscheme nodes
for concept in concepts:
concept_id = concept
the_schemes = schemes[concept_id]
concept = get_concept(dom, concept)
if the_schemes == []:
the_schemes.append('http://hdl.handle.net/10934/RM0001.SCHEME.UNKOWN')
for scheme in the_schemes:
new_node = dom.createElement('skos:topConceptOf')
concept.appendChild(new_node)
new_node.setAttribute('rdf:resource', scheme)
scheme = get_concept_scheme(dom, scheme)
extra_node = dom.createElement('skos:hasTopConcept')
scheme.appendChild(extra_node)
extra_node.setAttribute('rdf:resource', concept_id)
return dom
def save_schemeless(schemeless_concepts, typeless_file):
# Each typeless concept is written to a csv file
a_file = open(typeless_file, "w", encoding='utf-8')
the_writer = csv.writer(a_file)
for schemeless in schemeless_concepts:
the_writer.writerow([schemeless])
a_file.close()
def save_differences(list1, list2, issue_file):
# Each difference is written to a csv file
header_list = ['concept 1', 'type of relation', 'concept 2']
a_file = open(issue_file, "w", newline='')
writer = csv.writer(a_file)
writer.writerow(header_list)
for difference in list1:
writer.writerow(difference)
writer.writerow(['-','-','-'])
for difference in list2:
writer.writerow(difference)
a_file.close()
def write_dom_to_file(dom, file):
# Write a dom to a XML file
xml_file = open(file, "w", encoding='utf-8')
xml_file.write(dom.toprettyxml())
xml_file.close()
def write_analyse_file(list, file):
# Write all analyses to a file
#writer = pd.ExcelWriter(file, engine='xlsxwriter')
with pd.ExcelWriter(file) as writer:
reference_dict, reference_list = reference_analyse(list)
df_full = pd.DataFrame.from_dict(list)
df_full.to_excel(writer, sheet_name='Full')
reference_df = pd.DataFrame(reference_list, index=['Broader', 'Narrower', 'Related'])
reference_df.to_excel(writer, sheet_name='Reference1')
reference_df2 = pd.DataFrame(reference_dict, columns=['B-N-R', '#'])
reference_df2 = reference_df2.sort_values(by=['#'], ascending=False)
reference_df2.to_excel(writer, sheet_name='Reference2')
dict1, dict2, dict3 = label_analyse(list)
label_df = pd.DataFrame.from_dict(dict1, orient='index')
label_df.to_excel(writer, sheet_name='Labels')
label_df2 = pd.DataFrame.from_dict(dict2, orient='index')
label_df2.to_excel(writer, sheet_name='Labels2')
label_df3 = pd.DataFrame.from_dict(dict3, orient='index')
label_df3.to_excel(writer, sheet_name='Labels3')
matches_dict = matches_analyse(list)
matches_df = pd.DataFrame(matches_dict, columns=['Matches', '#'])
matches_df.to_excel(writer, sheet_name='Matches')
type_dict = type_analyse(list)
        type_df = pd.DataFrame.from_dict(type_dict, orient='index')
from flask import render_template, flash, redirect, url_for, request, send_file, send_from_directory
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from werkzeug.urls import url_parse
from app import app
from app.forms import LoginForm
import boto3
from flask_login import current_user, login_user, logout_user, login_required
from app.models import User
import csv
import shutil
import requests
import json
import os
import pandas as pd
import numpy as np
import exifread
s3_client = boto3.client('s3')
bucket_name = 'w210-img-upload'
s3_resource = boto3.resource('s3')
my_bucket = s3_resource.Bucket(bucket_name)
db_string = "postgres://dbmaster:dbpa$$w0rd!@w210postgres01.c8siy60gz3hg.us-east-1.rds.amazonaws.com:5432/w210results"
def df_to_geojson(df, properties, lat='Lat', lon='Long'):
geojson = {'type':'FeatureCollection', 'features':[]}
for _, row in df.iterrows():
feature = {'type':'Feature',
'properties':{},
'geometry':{'type':'Point',
'coordinates':[]}}
feature['geometry']['coordinates'] = [row[lon],row[lat]]
for prop in properties:
feature['properties'][prop] = row[prop]
geojson['features'].append(feature)
return geojson
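# --- Hedged usage sketch (added for illustration; not part of the original app) ---
# Shows the FeatureCollection shape df_to_geojson produces; the file name, label
# and coordinates below are made-up sample values.
def _demo_df_to_geojson():
    df = pd.DataFrame({
        'fileName': ['img_0001.jpg'],
        'label': ['Black bear'],
        'Lat': [35.1234],
        'Long': [-106.5678],
    })
    geojson = df_to_geojson(df, properties=['fileName', 'label'])
    print(json.dumps(geojson, indent=2))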
def gpsParser(x):
if x == 0.0:
return 0.0
else:
degrees = int(x[1:-1].split(',')[0])
try:
minNumerator = int(x[1:-1].split(',')[1].split('/')[0])
minDenominator = int(x[1:-1].split(',')[1].split('/')[1])
except IndexError:
minNumerator = int(x[1:-1].split(',')[1].split('/')[0])
minDenominator = 1.0
try:
secNumerator = int(x[1:-1].split(',')[2].split('/')[0])
secDenominator = int(x[1:-1].split(',')[2].split('/')[1])
except IndexError:
secNumerator = int(x[1:-1].split(',')[2].split('/')[0])
secDenominator = 1.0
deciMinutes = minNumerator/minDenominator/60
deciSeconds = secNumerator/secDenominator/3600
return(np.round(degrees+deciMinutes+deciSeconds,6))
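# --- Hedged usage sketch (added for illustration; not part of the original app) ---
# gpsParser converts an exifread-style "[degrees, minutes, seconds]" string, with
# optional rational components, into decimal degrees.
def _demo_gps_parser():
    print(gpsParser("[35, 7, 1234/100]"))  # 35 deg 7 min 12.34 sec -> ~35.120094
    print(gpsParser(0.0))                  # missing-tag sentinel -> 0.0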
def exifExtractor(file):
image = open(file, 'rb')
tags = exifread.process_file(image)
gpsInfo = {'fileName': image.name.lower().split('/')[-1]}
for k in ['GPS GPSLatitudeRef', 'GPS GPSLatitude', 'GPS GPSLongitudeRef', 'GPS GPSLongitude']:
try:
gpsInfo[k] = str(tags[k])
except KeyError:
gpsInfo[k] = 0.0
return gpsInfo
def formatLabel(x):
if x == 'american_black_bear':
return 'Black bear'
elif x == 'domestic_cow':
return 'Cow'
elif x == 'domestic_dog':
return 'Dog'
elif x == 'gray_fox':
return 'Gray fox'
elif x == 'red_fox':
return 'Red fox'
elif x == 'white_tailed_deer':
return 'White-tailed deer'
elif x == 'mule_deer':
return 'Mule deer'
elif x == 'wild_turkey':
return 'Wild turkey'
elif x == 'red_deer':
return 'Elk'
else:
return x.capitalize()
def purge_local(dir):
excepts = []
prefix = current_user.username+'/'
my_bucket.objects.filter(Prefix=prefix).delete()
engine = create_engine(db_string, echo=True)
connection = engine.connect()
connection.execute("DROP TABLE IF EXISTS {}".format('test_upload'))
connection.close()
engine.dispose()
for file in os.listdir(dir):
file_path = os.path.join(dir,file)
try:
if os.path.isfile(file_path):
os.remove(file_path)
except Exception as e:
excepts.append(e)
return excepts
@app.route('/')
@app.route('/index')
def index():
return render_template('index.html', title='Home')
@app.route('/about')
def about():
return render_template('about.html', title='About Us')
@app.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('upload'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user is None or not user.check_password(form.password.data):
flash('Invalid username or password')
return redirect(url_for('login'))
login_user(user, remember=form.remember_me.data)
next_page = request.args.get('next')
if not next_page or url_parse(next_page).netloc != '':
next_page = url_for('upload')
return redirect(next_page)
return render_template('login_page.html', title='Sign In', form=form)
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('index'))
@app.route('/upload', methods=['GET', 'POST'])
@login_required
def upload():
check_file = app.config['DOWNLOAD_FOLDER']+current_user.username+'/'+current_user.username+'_results.csv'
if os.path.isfile(check_file):
purge_local(os.path.join(app.config['DOWNLOAD_FOLDER'],current_user.username))
if request.method == 'POST':
data_files = request.files.getlist('file[]')
for data_file in data_files:
name, ext = os.path.splitext(data_file.filename)
if ext.lower() in [".jpg",".jpeg",".png"]:
filename_old = current_user.username+'/upload/'+data_file.filename
filename_new = filename_old.lower()
s3_client.upload_fileobj(data_file, bucket_name, filename_new)
print("Uploading "+data_file.filename+" to "+bucket_name+".")
else:
pass
upload_dir = '/home/ubuntu/s3bucket/'+current_user.username+'/upload/'
dfGPSRaw = pd.DataFrame()
for file in os.listdir(upload_dir):
df_tmp = pd.DataFrame.from_dict([exifExtractor(os.path.join(upload_dir,file))],orient='columns')
dfGPSRaw = dfGPSRaw.append(df_tmp)
dfGPSRaw['LatRef'] = dfGPSRaw['GPS GPSLatitudeRef'].apply(lambda x: 1 if x == 'N' else -1)
dfGPSRaw['LonRef'] = dfGPSRaw['GPS GPSLongitudeRef'].apply(lambda x: 1 if x == 'E' else -1)
dfGPSRaw['Lat'] = dfGPSRaw['GPS GPSLatitude'].apply(gpsParser)*dfGPSRaw['LatRef']
dfGPSRaw['Long'] = dfGPSRaw['GPS GPSLongitude'].apply(gpsParser)*dfGPSRaw['LonRef']
dfGPStmp = dfGPSRaw[['fileName','Lat', 'Long']]
dfGPStmp = dfGPStmp.set_index('fileName')
geotags_file = app.config['DOWNLOAD_FOLDER']+current_user.username+'/geotags.csv'
if os.path.isfile(geotags_file):
dfGPSold = pd.read_csv(geotags_file)
dfGPSnew = pd.concat([dfGPSold, dfGPStmp])
dfGPSnew.to_csv(geotags_file)
else:
dfGPStmp.to_csv(geotags_file)
return redirect(url_for('complete'))
else:
username = current_user.username
return render_template('upload.html', title='File Upload', username = username)
@app.route('/complete', methods=['GET', 'POST'])
@login_required
def complete():
if request.method == "POST":
if 'upload_again' in request.form:
return redirect(url_for('upload'))
elif 'launcher' in request.form:
return redirect(url_for('classify'))
else:
return render_template('complete.html', title='Thank You!')
@app.route('/output', methods=['GET', 'POST'])
@login_required
def output():
engine = create_engine(db_string, echo=True)
Base = declarative_base(engine)
output_file = app.config['DOWNLOAD_FOLDER']+current_user.username+'/'+current_user.username+'_results.csv'
# os.remove(output_file)
class Results(Base):
__tablename__ = 'test_upload'
# __tablename__ = 'dummy_table'
# __tablename__ = str(current_user.username + '_results')
__table_args__ = {'autoload':True}
metadata = Base.metadata
Session = sessionmaker(bind=engine)
session = Session()
qry = session.query(Results)
with open(output_file, 'w') as csvfile:
outcsv = csv.writer(csvfile, delimiter=',',quotechar='"', quoting = csv.QUOTE_MINIMAL)
header = Results.__table__.columns.keys()
outcsv.writerow(header)
for record in qry.all():
outcsv.writerow([getattr(record, c) for c in header ])
df_results = pd.read_csv(output_file)
df_resTransform = df_results.loc[df_results.groupby(['fileName'])['probability'].idxmax()]
    dfGPS = pd.read_csv(app.config['DOWNLOAD_FOLDER'] + current_user.username + '/geotags.csv')
import logging
import pandas as pd
from pandas import DataFrame
from autogluon.common.features.types import R_DATETIME, S_DATETIME_AS_OBJECT
from .abstract import AbstractFeatureGenerator
logger = logging.getLogger(__name__)
class DatetimeFeatureGenerator(AbstractFeatureGenerator):
"""Transforms datetime features into numeric features.
Parameters
----------
features : list, optional
A list of datetime features to parse out of dates.
For a full list of options see the methods inside pandas.Series.dt at https://pandas.pydata.org/docs/reference/api/pandas.Series.html
"""
def __init__(self,
features: list = ['year', 'month', 'day', 'dayofweek'],
**kwargs
):
super().__init__(**kwargs)
self.features = features
def _fit_transform(self, X: DataFrame, **kwargs) -> (DataFrame, dict):
self._fillna_map = self._compute_fillna_map(X)
X_out = self._transform(X)
type_family_groups_special = dict(
datetime_as_int=list(X_out.columns)
)
return X_out, type_family_groups_special
def _transform(self, X: DataFrame) -> DataFrame:
return self._generate_features_datetime(X)
@staticmethod
def get_default_infer_features_in_args() -> dict:
return dict(required_raw_special_pairs=[
(R_DATETIME, None),
(None, [S_DATETIME_AS_OBJECT])
])
def _compute_fillna_map(self, X: DataFrame):
fillna_map = dict()
for datetime_feature in self.features_in:
datetime_series = pd.to_datetime(X[datetime_feature], errors='coerce')
# Best guess is currently to fill by the mean.
fillna_datetime = datetime_series.mean()
fillna_map[datetime_feature] = fillna_datetime
return fillna_map
# TODO: Improve handling of missing datetimes
def _generate_features_datetime(self, X: DataFrame) -> DataFrame:
X_datetime = DataFrame(index=X.index)
for datetime_feature in self.features_in:
# TODO: Be aware: When converted to float32 by downstream models, the seconds value will be up to 3 seconds off the true time due to rounding error. If seconds matter, find a separate way to generate (Possibly subtract smallest datetime from all values).
# TODO: could also return an extra boolean column is_nan which could provide predictive signal.
            X_datetime[datetime_feature] = pd.to_datetime(X[datetime_feature], errors='coerce')
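    # --- Hedged sketch (added for illustration; not the verified upstream code) ---
    # The method above is cut off at the pd.to_datetime call. A plausible
    # continuation, assuming each requested attribute is read from the .dt
    # accessor and missing values are filled from self._fillna_map:
    def _generate_features_datetime_sketch(self, X: DataFrame) -> DataFrame:
        X_datetime = DataFrame(index=X.index)
        for datetime_feature in self.features_in:
            parsed = pd.to_datetime(X[datetime_feature], errors='coerce')
            parsed = parsed.fillna(self._fillna_map[datetime_feature])
            for feature in self.features:
                # e.g. 'year', 'month', 'day', 'dayofweek'
                X_datetime[f'{datetime_feature}.{feature}'] = getattr(parsed.dt, feature)
        return X_datetime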
#
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Testing out the datacompy functionality
"""
import io
import logging
import sys
from datetime import datetime
from decimal import Decimal
from unittest import mock
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal
from pytest import raises
import datacompy
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def test_numeric_columns_equal_abs():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_numeric_columns_equal_rel():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |False
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |False
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|True
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_date_columns_equal():
data = """a|b|expected
2017-01-01|2017-01-01|True
2017-01-02|2017-01-02|True
2017-10-01|2017-10-10|False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_unequal():
"""I want datetime fields to match with dates stored as strings
"""
df = pd.DataFrame([{"a": "2017-01-01", "b": "2017-01-02"}, {"a": "2017-01-01"}])
df["a_dt"] = pd.to_datetime(df["a"])
df["b_dt"] = pd.to_datetime(df["b"])
assert datacompy.columns_equal(df.a, df.a_dt).all()
assert datacompy.columns_equal(df.b, df.b_dt).all()
assert datacompy.columns_equal(df.a_dt, df.a).all()
assert datacompy.columns_equal(df.b_dt, df.b).all()
assert not datacompy.columns_equal(df.b_dt, df.a).any()
assert not datacompy.columns_equal(df.a_dt, df.b).any()
assert not datacompy.columns_equal(df.a, df.b_dt).any()
assert not datacompy.columns_equal(df.b, df.a_dt).any()
def test_bad_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[{"a": "2017-01-01", "b": "2017-01-01"}, {"a": "2017-01-01", "b": "217-01-01"}]
)
df["a_dt"] = pd.to_datetime(df["a"])
assert not datacompy.columns_equal(df.a_dt, df.b).any()
def test_rounded_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.000000", "exp": True},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.123456", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:01.000000", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00", "exp": True},
]
)
df["a_dt"] = pd.to_datetime(df["a"])
actual = datacompy.columns_equal(df.a_dt, df.b)
expected = df["exp"]
assert_series_equal(actual, expected, check_names=False)
def test_decimal_float_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": False},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_float_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": True},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": False},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_infinity_and_beyond():
df = pd.DataFrame(
[
{"a": np.inf, "b": np.inf, "expected": True},
{"a": -np.inf, "b": -np.inf, "expected": True},
{"a": -np.inf, "b": np.inf, "expected": False},
{"a": np.inf, "b": -np.inf, "expected": False},
{"a": 1, "b": 1, "expected": True},
{"a": 1, "b": 0, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1", "expected": False},
{"a": 1, "b": "yo", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column_with_ignore_spaces():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi ", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1 ", "expected": False},
{"a": 1, "b": "yo ", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column_with_ignore_spaces_and_case():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi ", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1 ", "expected": False},
{"a": 1, "b": "yo ", "expected": False},
{"a": "Hi", "b": "hI ", "expected": True},
{"a": "HI", "b": "HI ", "expected": True},
{"a": "hi", "b": "hi ", "expected": True},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True, ignore_case=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_compare_df_setter_bad():
df = pd.DataFrame([{"a": 1, "A": 2}, {"a": 2, "A": 2}])
with raises(TypeError, match="df1 must be a pandas DataFrame"):
compare = datacompy.Compare("a", "a", ["a"])
with raises(ValueError, match="df1 must have all columns from join_columns"):
compare = datacompy.Compare(df, df.copy(), ["b"])
with raises(ValueError, match="df1 must have unique column names"):
compare = datacompy.Compare(df, df.copy(), ["a"])
df_dupe = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 3}])
assert datacompy.Compare(df_dupe, df_dupe.copy(), ["a", "b"]).df1.equals(df_dupe)
def test_compare_df_setter_good():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"A": 1, "B": 2}, {"A": 2, "B": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
assert compare.join_columns == ["a"]
compare = datacompy.Compare(df1, df2, ["A", "b"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
assert compare.join_columns == ["a", "b"]
def test_compare_df_setter_different_cases():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"A": 1, "b": 2}, {"A": 2, "b": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
def test_compare_df_setter_bad_index():
df = pd.DataFrame([{"a": 1, "A": 2}, {"a": 2, "A": 2}])
with raises(TypeError, match="df1 must be a pandas DataFrame"):
compare = datacompy.Compare("a", "a", on_index=True)
with raises(ValueError, match="df1 must have unique column names"):
compare = datacompy.Compare(df, df.copy(), on_index=True)
def test_compare_on_index_and_join_columns():
df = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
with raises(Exception, match="Only provide on_index or join_columns"):
compare = datacompy.Compare(df, df.copy(), on_index=True, join_columns=["a"])
def test_compare_df_setter_good_index():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 3}])
compare = datacompy.Compare(df1, df2, on_index=True)
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
def test_columns_overlap():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1_unq_columns() == set()
assert compare.df2_unq_columns() == set()
assert compare.intersect_columns() == {"a", "b"}
def test_columns_no_overlap():
df1 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "yo"}])
df2 = pd.DataFrame([{"a": 1, "b": 2, "d": "oh"}, {"a": 2, "b": 3, "d": "ya"}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1_unq_columns() == {"c"}
assert compare.df2_unq_columns() == {"d"}
assert compare.intersect_columns() == {"a", "b"}
def test_10k_rows():
df1 = pd.DataFrame(np.random.randint(0, 100, size=(10000, 2)), columns=["b", "c"])
df1.reset_index(inplace=True)
df1.columns = ["a", "b", "c"]
df2 = df1.copy()
df2["b"] = df2["b"] + 0.1
compare_tol = datacompy.Compare(df1, df2, ["a"], abs_tol=0.2)
assert compare_tol.matches()
assert len(compare_tol.df1_unq_rows) == 0
assert len(compare_tol.df2_unq_rows) == 0
assert compare_tol.intersect_columns() == {"a", "b", "c"}
assert compare_tol.all_columns_match()
assert compare_tol.all_rows_overlap()
assert compare_tol.intersect_rows_match()
compare_no_tol = datacompy.Compare(df1, df2, ["a"])
assert not compare_no_tol.matches()
assert len(compare_no_tol.df1_unq_rows) == 0
assert len(compare_no_tol.df2_unq_rows) == 0
assert compare_no_tol.intersect_columns() == {"a", "b", "c"}
assert compare_no_tol.all_columns_match()
assert compare_no_tol.all_rows_overlap()
assert not compare_no_tol.intersect_rows_match()
@mock.patch("datacompy.logging.debug")
def test_subset(mock_debug):
df1 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "yo"}])
df2 = pd.DataFrame([{"a": 1, "c": "hi"}])
comp = datacompy.Compare(df1, df2, ["a"])
assert comp.subset()
assert mock_debug.called_with("Checking equality")
@mock.patch("datacompy.logging.info")
def test_not_subset(mock_info):
df1 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "yo"}])
df2 = pd.DataFrame([{"a": 1, "b": 2, "c": "hi"}, {"a": 2, "b": 2, "c": "great"}])
comp = datacompy.Compare(df1, df2, ["a"])
assert not comp.subset()
assert mock_info.called_with("Sample c mismatch: a: 2, df1: yo, df2: great")
def test_large_subset():
df1 = pd.DataFrame(np.random.randint(0, 100, size=(10000, 2)), columns=["b", "c"])
df1.reset_index(inplace=True)
df1.columns = ["a", "b", "c"]
df2 = df1[["a", "b"]].sample(50).copy()
comp = datacompy.Compare(df1, df2, ["a"])
assert not comp.matches()
assert comp.subset()
def test_string_joiner():
df1 = pd.DataFrame([{"ab": 1, "bc": 2}, {"ab": 2, "bc": 2}])
df2 = pd.DataFrame([{"ab": 1, "bc": 2}, {"ab": 2, "bc": 2}])
compare = datacompy.Compare(df1, df2, "ab")
assert compare.matches()
def test_decimal_with_joins():
df1 = pd.DataFrame([{"a": Decimal("1"), "b": 2}, {"a": Decimal("2"), "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
compare = datacompy.Compare(df1, df2, "a")
assert compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert compare.intersect_rows_match()
def test_decimal_with_nulls():
df1 = pd.DataFrame([{"a": 1, "b": Decimal("2")}, {"a": 2, "b": Decimal("2")}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}, {"a": 3, "b": 2}])
compare = datacompy.Compare(df1, df2, "a")
assert not compare.matches()
assert compare.all_columns_match()
assert not compare.all_rows_overlap()
assert compare.intersect_rows_match()
def test_strings_with_joins():
df1 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
df2 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
compare = datacompy.Compare(df1, df2, "a")
assert compare.matches()
assert compare.all_columns_match()
assert compare.all_rows_overlap()
assert compare.intersect_rows_match()
def test_index_joining():
df1 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
df2 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
compare = datacompy.Compare(df1, df2, on_index=True)
assert compare.matches()
def test_index_joining_strings_i_guess():
df1 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
df2 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
df1.index = df1["a"]
df2.index = df2["a"]
df1.index.name = df2.index.name = None
compare = datacompy.Compare(df1, df2, on_index=True)
assert compare.matches()
def test_index_joining_non_overlapping():
df1 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
df2 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}, {"a": "back fo mo", "b": 3}])
compare = datacompy.Compare(df1, df2, on_index=True)
assert not compare.matches()
assert compare.all_columns_match()
assert compare.intersect_rows_match()
assert len(compare.df1_unq_rows) == 0
assert len(compare.df2_unq_rows) == 1
assert list(compare.df2_unq_rows["a"]) == ["back fo mo"]
def test_temp_column_name():
df1 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}])
df2 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}, {"a": "back fo mo", "b": 3}])
actual = datacompy.temp_column_name(df1, df2)
assert actual == "_temp_0"
def test_temp_column_name_one_has():
df1 = pd.DataFrame([{"_temp_0": "hi", "b": 2}, {"_temp_0": "bye", "b": 2}])
df2 = pd.DataFrame([{"a": "hi", "b": 2}, {"a": "bye", "b": 2}, {"a": "back fo mo", "b": 3}])
actual = datacompy.temp_column_name(df1, df2)
assert actual == "_temp_1"
def test_temp_column_name_both_have_temp_1():
df1 = pd.DataFrame([{"_temp_0": "hi", "b": 2}, {"_temp_0": "bye", "b": 2}])
df2 = pd.DataFrame(
[{"_temp_0": "hi", "b": 2}, {"_temp_0": "bye", "b": 2}, {"a": "back fo mo", "b": 3}]
)
actual = datacompy.temp_column_name(df1, df2)
assert actual == "_temp_1"
def test_temp_column_name_both_have_temp_2():
df1 = pd.DataFrame([{"_temp_0": "hi", "b": 2}, {"_temp_0": "bye", "b": 2}])
df2 = pd.DataFrame(
[{"_temp_0": "hi", "b": 2}, {"_temp_1": "bye", "b": 2}, {"a": "back fo mo", "b": 3}]
)
actual = datacompy.temp_column_name(df1, df2)
assert actual == "_temp_2"
def test_temp_column_name_one_already():
df1 = pd.DataFrame([{"_temp_1": "hi", "b": 2}, {"_temp_1": "bye", "b": 2}])
df2 = pd.DataFrame(
[{"_temp_1": "hi", "b": 2}, {"_temp_1": "bye", "b": 2}, {"a": "back fo mo", "b": 3}]
)
actual = datacompy.temp_column_name(df1, df2)
assert actual == "_temp_0"
### Duplicate testing!
def test_simple_dupes_one_field():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2}])
compare = datacompy.Compare(df1, df2, join_columns=["a"])
assert compare.matches()
# Just render the report to make sure it renders.
t = compare.report()
def test_simple_dupes_two_fields():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2, "c": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2, "c": 2}])
compare = datacompy.Compare(df1, df2, join_columns=["a", "b"])
assert compare.matches()
# Just render the report to make sure it renders.
t = compare.report()
def test_simple_dupes_index():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2}])
df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2}])
df1.index = df1["a"]
df2.index = df2["a"]
df1.index.name = df2.index.name = None
compare = datacompy.Compare(df1, df2, on_index=True)
assert compare.matches()
# Just render the report to make sure it renders.
t = compare.report()
def test_simple_dupes_one_field_two_vals():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 0}])
    df2 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 0}])
import pytest
import numpy as np
import pandas as pd
from pandas import Categorical, Series, CategoricalIndex
from pandas.core.dtypes.concat import union_categoricals
from pandas.util import testing as tm
class TestUnionCategoricals(object):
def test_union_categorical(self):
# GH 13361
data = [
(list('abc'), list('abd'), list('abcabd')),
([0, 1, 2], [2, 3, 4], [0, 1, 2, 2, 3, 4]),
([0, 1.2, 2], [2, 3.4, 4], [0, 1.2, 2, 2, 3.4, 4]),
(['b', 'b', np.nan, 'a'], ['a', np.nan, 'c'],
['b', 'b', np.nan, 'a', 'a', np.nan, 'c']),
(pd.date_range('2014-01-01', '2014-01-05'),
pd.date_range('2014-01-06', '2014-01-07'),
pd.date_range('2014-01-01', '2014-01-07')),
(pd.date_range('2014-01-01', '2014-01-05', tz='US/Central'),
pd.date_range('2014-01-06', '2014-01-07', tz='US/Central'),
pd.date_range('2014-01-01', '2014-01-07', tz='US/Central')),
(pd.period_range('2014-01-01', '2014-01-05'),
pd.period_range('2014-01-06', '2014-01-07'),
pd.period_range('2014-01-01', '2014-01-07')),
]
for a, b, combined in data:
for box in [Categorical, CategoricalIndex, Series]:
result = union_categoricals([box(Categorical(a)),
box(Categorical(b))])
expected = Categorical(combined)
tm.assert_categorical_equal(result, expected,
check_category_order=True)
# new categories ordered by appearance
s = Categorical(['x', 'y', 'z'])
s2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([s, s2])
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['x', 'y', 'z', 'a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
s = Categorical([0, 1.2, 2], ordered=True)
s2 = Categorical([0, 1.2, 2], ordered=True)
result = union_categoricals([s, s2])
expected = Categorical([0, 1.2, 2, 0, 1.2, 2], ordered=True)
tm.assert_categorical_equal(result, expected)
# must exactly match types
s = Categorical([0, 1.2, 2])
s2 = Categorical([2, 3, 4])
msg = 'dtype of categories must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([s, s2])
msg = 'No Categoricals to union'
with tm.assert_raises_regex(ValueError, msg):
union_categoricals([])
def test_union_categoricals_nan(self):
# GH 13759
res = union_categoricals([pd.Categorical([1, 2, np.nan]),
pd.Categorical([3, 2, np.nan])])
exp = Categorical([1, 2, np.nan, 3, 2, np.nan])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical(['A', 'B']),
pd.Categorical(['B', 'B', np.nan])])
exp = Categorical(['A', 'B', 'B', 'B', np.nan])
tm.assert_categorical_equal(res, exp)
val1 = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-03-01'),
pd.NaT]
val2 = [pd.NaT, pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-02-01')]
res = union_categoricals([pd.Categorical(val1), pd.Categorical(val2)])
exp = Categorical(val1 + val2,
categories=[pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-03-01'),
pd.Timestamp('2011-02-01')])
tm.assert_categorical_equal(res, exp)
# all NaN
res = union_categoricals([pd.Categorical([np.nan, np.nan]),
pd.Categorical(['X'])])
exp = Categorical([np.nan, np.nan, 'X'])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical([np.nan, np.nan]),
pd.Categorical([np.nan, np.nan])])
exp = Categorical([np.nan, np.nan, np.nan, np.nan])
tm.assert_categorical_equal(res, exp)
def test_union_categoricals_empty(self):
# GH 13759
res = union_categoricals([pd.Categorical([]),
pd.Categorical([])])
exp = Categorical([])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical([]),
pd.Categorical([1.0])])
exp = Categorical([1.0])
tm.assert_categorical_equal(res, exp)
# to make dtype equal
nanc = pd.Categorical(np.array([np.nan], dtype=np.float64))
res = union_categoricals([nanc,
pd.Categorical([])])
tm.assert_categorical_equal(res, nanc)
def test_union_categorical_same_category(self):
# check fastpath
c1 = Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4])
c2 = Categorical([3, 2, 1, np.nan], categories=[1, 2, 3, 4])
res = union_categoricals([c1, c2])
exp = Categorical([1, 2, 3, 4, 3, 2, 1, np.nan],
categories=[1, 2, 3, 4])
tm.assert_categorical_equal(res, exp)
c1 = Categorical(['z', 'z', 'z'], categories=['x', 'y', 'z'])
c2 = Categorical(['x', 'x', 'x'], categories=['x', 'y', 'z'])
res = union_categoricals([c1, c2])
exp = Categorical(['z', 'z', 'z', 'x', 'x', 'x'],
categories=['x', 'y', 'z'])
tm.assert_categorical_equal(res, exp)
def test_union_categoricals_ordered(self):
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], ordered=False)
msg = 'Categorical.ordered must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
res = union_categoricals([c1, c1])
exp = Categorical([1, 2, 3, 1, 2, 3], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3, np.nan], ordered=True)
c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True)
res = union_categoricals([c1, c2])
exp = Categorical([1, 2, 3, np.nan, 3, 2], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True)
msg = "to union ordered Categoricals, all categories must be the same"
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
def test_union_categoricals_ignore_order(self):
# GH 15219
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], ordered=False)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
msg = 'Categorical.ordered must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2], ignore_order=False)
res = union_categoricals([c1, c1], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([c1, c1], ignore_order=False)
exp = Categorical([1, 2, 3, 1, 2, 3],
categories=[1, 2, 3], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3, np.nan], ordered=True)
c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, np.nan, 3, 2])
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([c2, c1], ignore_order=True,
sort_categories=True)
exp = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([4, 5, 6], ordered=True)
result = union_categoricals([c1, c2], ignore_order=True)
expected = Categorical([1, 2, 3, 4, 5, 6])
tm.assert_categorical_equal(result, expected)
msg = "to union ordered Categoricals, all categories must be the same"
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2], ignore_order=False)
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
def test_union_categoricals_sort(self):
# GH 13846
c1 = Categorical(['x', 'y', 'z'])
c2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['a', 'b', 'c', 'x', 'y', 'z'])
tm.assert_categorical_equal(result, expected)
# fastpath
c1 = Categorical(['a', 'b'], categories=['b', 'a', 'c'])
c2 = Categorical(['b', 'c'], categories=['b', 'a', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['a', 'b'], categories=['c', 'a', 'b'])
c2 = Categorical(['b', 'c'], categories=['c', 'a', 'b'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
# fastpath - skip resort
c1 = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
c2 = Categorical(['b', 'c'], categories=['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['x', np.nan])
c2 = Categorical([np.nan, 'b'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['x', np.nan, np.nan, 'b'],
categories=['b', 'x'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical([np.nan])
c2 = Categorical([np.nan])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical([np.nan, np.nan], categories=[])
tm.assert_categorical_equal(result, expected)
c1 = Categorical([])
c2 = Categorical([])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical([])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['b', 'a'], categories=['b', 'a', 'c'], ordered=True)
c2 = Categorical(['a', 'c'], categories=['b', 'a', 'c'], ordered=True)
with pytest.raises(TypeError):
union_categoricals([c1, c2], sort_categories=True)
def test_union_categoricals_sort_false(self):
# GH 13846
c1 = Categorical(['x', 'y', 'z'])
c2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['x', 'y', 'z', 'a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
# fastpath
c1 = Categorical(['a', 'b'], categories=['b', 'a', 'c'])
c2 = Categorical(['b', 'c'], categories=['b', 'a', 'c'])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['b', 'a', 'c'])
tm.assert_categorical_equal(result, expected)
# fastpath - skip resort
c1 = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
c2 = Categorical(['b', 'c'], categories=['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['x', np.nan])
c2 = Categorical([np.nan, 'b'])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['x', np.nan, np.nan, 'b'],
categories=['x', 'b'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical([np.nan])
c2 = Categorical([np.nan])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical([np.nan, np.nan], categories=[])
tm.assert_categorical_equal(result, expected)
c1 = | Categorical([]) | pandas.Categorical |
# -*- coding: utf-8 -*
########## file path ##########
##### input file
# training set keys uic-label with k_means clusters' label
path_df_part_1_uic_label_cluster = "df_part_1_uic_label_cluster.csv"
path_df_part_2_uic_label_cluster = "df_part_2_uic_label_cluster.csv"
path_df_part_3_uic = "df_part_3_uic.csv"
# data_set features
path_df_part_1_U = "df_part_1_U.csv"
path_df_part_1_I = "df_part_1_I.csv"
path_df_part_1_C = "df_part_1_C.csv"
path_df_part_1_IC = "df_part_1_IC.csv"
path_df_part_1_UI = "df_part_1_UI.csv"
path_df_part_1_UC = "df_part_1_UC.csv"
path_df_part_2_U = "df_part_2_U.csv"
path_df_part_2_I = "df_part_2_I.csv"
path_df_part_2_C = "df_part_2_C.csv"
path_df_part_2_IC = "df_part_2_IC.csv"
path_df_part_2_UI = "df_part_2_UI.csv"
path_df_part_2_UC = "df_part_2_UC.csv"
path_df_part_3_U = "df_part_3_U.csv"
path_df_part_3_I = "df_part_3_I.csv"
path_df_part_3_C = "df_part_3_C.csv"
path_df_part_3_IC = "df_part_3_IC.csv"
path_df_part_3_UI = "df_part_3_UI.csv"
path_df_part_3_UC = "df_part_3_UC.csv"
# item_sub_set P
path_df_P = "tianchi_fresh_comp_train_item.csv"
##### output file
path_df_result = "res_gbdt_k_means_subsample.csv"
path_df_result_tmp = "df_result_tmp.csv"
# depending package
import pandas as pd
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import metrics
import matplotlib.pyplot as plt
import seaborn as sns
import time
# some functions
def df_read(path, mode='r'):
'''the definition of dataframe loading function'''
data_file = open(path, mode)
try:
df = pd.read_csv(data_file, index_col=False)
finally:
data_file.close()
return df
def subsample(df, sub_size):
'''the definition of sub-sample function
@param df: dataframe
@param sub_size: sub_sample set size
@return sub-dataframe with the same formation of df'''
if sub_size >= len(df):
return df
else:
return df.sample(n=sub_size)
##### loading data of part 1 & 2
df_part_1_uic_label_cluster = df_read(path_df_part_1_uic_label_cluster)
df_part_2_uic_label_cluster = df_read(path_df_part_2_uic_label_cluster)
df_part_1_U = df_read(path_df_part_1_U)
df_part_1_I = df_read(path_df_part_1_I)
df_part_1_C = df_read(path_df_part_1_C)
df_part_1_IC = df_read(path_df_part_1_IC)
df_part_1_UI = df_read(path_df_part_1_UI)
df_part_1_UC = df_read(path_df_part_1_UC)
df_part_2_U = df_read(path_df_part_2_U)
df_part_2_I = df_read(path_df_part_2_I)
df_part_2_C = df_read(path_df_part_2_C)
df_part_2_IC = df_read(path_df_part_2_IC)
df_part_2_UI = df_read(path_df_part_2_UI)
df_part_2_UC = df_read(path_df_part_2_UC)
##### generation of training set & valid set
def train_set_construct(np_ratio=1, sub_ratio=1):
'''
# generation of train set
@param np_ratio: int, the sub-sample rate of training set for N/P balanced.
@param sub_ratio: float ~ (0~1], the further sub-sample rate of training set after N/P balanced.
'''
train_part_1_uic_label = df_part_1_uic_label_cluster[df_part_1_uic_label_cluster['class'] == 0].sample(
frac=sub_ratio)
train_part_2_uic_label = df_part_2_uic_label_cluster[df_part_2_uic_label_cluster['class'] == 0].sample(
frac=sub_ratio)
    frac_ratio = sub_ratio * np_ratio / 1200
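    # Arithmetic note (illustrative): with np_ratio=1 and sub_ratio=1, frac_ratio = 1/1200,
    # so each of the 1000 clusters sampled in the loop below contributes well under 0.1% of
    # its rows, while the 'class' == 0 keys above are sampled at frac=sub_ratio.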
for i in range(1, 1001, 1):
train_part_1_uic_label_0_i = df_part_1_uic_label_cluster[df_part_1_uic_label_cluster['class'] == i]
train_part_1_uic_label_0_i = train_part_1_uic_label_0_i.sample(frac=frac_ratio)
train_part_1_uic_label = pd.concat([train_part_1_uic_label, train_part_1_uic_label_0_i])
train_part_2_uic_label_0_i = df_part_2_uic_label_cluster[df_part_2_uic_label_cluster['class'] == i]
train_part_2_uic_label_0_i = train_part_2_uic_label_0_i.sample(frac=frac_ratio)
train_part_2_uic_label = pd.concat([train_part_2_uic_label, train_part_2_uic_label_0_i])
print("training subset uic_label keys is selected.")
# constructing training set
train_part_1_df = pd.merge(train_part_1_uic_label, df_part_1_U, how='left', on=['user_id'])
train_part_1_df = pd.merge(train_part_1_df, df_part_1_I, how='left', on=['item_id'])
train_part_1_df = pd.merge(train_part_1_df, df_part_1_C, how='left', on=['item_category'])
train_part_1_df = pd.merge(train_part_1_df, df_part_1_IC, how='left', on=['item_id', 'item_category'])
train_part_1_df = pd.merge(train_part_1_df, df_part_1_UI, how='left',
on=['user_id', 'item_id', 'item_category', 'label'])
train_part_1_df = pd.merge(train_part_1_df, df_part_1_UC, how='left', on=['user_id', 'item_category'])
train_part_2_df = pd.merge(train_part_2_uic_label, df_part_2_U, how='left', on=['user_id'])
train_part_2_df = pd.merge(train_part_2_df, df_part_2_I, how='left', on=['item_id'])
train_part_2_df = pd.merge(train_part_2_df, df_part_2_C, how='left', on=['item_category'])
train_part_2_df = pd.merge(train_part_2_df, df_part_2_IC, how='left', on=['item_id', 'item_category'])
train_part_2_df = pd.merge(train_part_2_df, df_part_2_UI, how='left',
on=['user_id', 'item_id', 'item_category', 'label'])
train_part_2_df = pd.merge(train_part_2_df, df_part_2_UC, how='left', on=['user_id', 'item_category'])
train_df = pd.concat([train_part_1_df, train_part_2_df])
    # fill the missing values as -1 (the missing values are time features)
train_df.fillna(-1, inplace=True)
# using all the features for training gbdt model
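    # note: DataFrame.as_matrix() was removed in newer pandas releases; selecting the same
    # columns and calling .values (or .to_numpy()) gives the equivalent array there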
train_X = train_df.as_matrix(
['u_b1_count_in_6', 'u_b2_count_in_6', 'u_b3_count_in_6', 'u_b4_count_in_6', 'u_b_count_in_6',
'u_b1_count_in_3', 'u_b2_count_in_3', 'u_b3_count_in_3', 'u_b4_count_in_3', 'u_b_count_in_3',
'u_b1_count_in_1', 'u_b2_count_in_1', 'u_b3_count_in_1', 'u_b4_count_in_1', 'u_b_count_in_1',
'u_b4_rate', 'u_b4_diff_hours',
'i_u_count_in_6', 'i_u_count_in_3', 'i_u_count_in_1',
'i_b1_count_in_6', 'i_b2_count_in_6', 'i_b3_count_in_6', 'i_b4_count_in_6', 'i_b_count_in_6',
'i_b1_count_in_3', 'i_b2_count_in_3', 'i_b3_count_in_3', 'i_b4_count_in_3', 'i_b_count_in_3',
'i_b1_count_in_1', 'i_b2_count_in_1', 'i_b3_count_in_1', 'i_b4_count_in_1', 'i_b_count_in_1',
'i_b4_rate', 'i_b4_diff_hours',
'c_u_count_in_6', 'c_u_count_in_3', 'c_u_count_in_1',
'c_b1_count_in_6', 'c_b2_count_in_6', 'c_b3_count_in_6', 'c_b4_count_in_6', 'c_b_count_in_6',
'c_b1_count_in_3', 'c_b2_count_in_3', 'c_b3_count_in_3', 'c_b4_count_in_3', 'c_b_count_in_3',
'c_b1_count_in_1', 'c_b2_count_in_1', 'c_b3_count_in_1', 'c_b4_count_in_1', 'c_b_count_in_1',
'c_b4_rate', 'c_b4_diff_hours',
'ic_u_rank_in_c', 'ic_b_rank_in_c', 'ic_b4_rank_in_c',
'ui_b1_count_in_6', 'ui_b2_count_in_6', 'ui_b3_count_in_6', 'ui_b4_count_in_6', 'ui_b_count_in_6',
'ui_b1_count_in_3', 'ui_b2_count_in_3', 'ui_b3_count_in_3', 'ui_b4_count_in_3', 'ui_b_count_in_3',
'ui_b1_count_in_1', 'ui_b2_count_in_1', 'ui_b3_count_in_1', 'ui_b4_count_in_1', 'ui_b_count_in_1',
'ui_b_count_rank_in_u', 'ui_b_count_rank_in_uc',
'ui_b1_last_hours', 'ui_b2_last_hours', 'ui_b3_last_hours', 'ui_b4_last_hours',
'uc_b1_count_in_6', 'uc_b2_count_in_6', 'uc_b3_count_in_6', 'uc_b4_count_in_6', 'uc_b_count_in_6',
'uc_b1_count_in_3', 'uc_b2_count_in_3', 'uc_b3_count_in_3', 'uc_b4_count_in_3', 'uc_b_count_in_3',
'uc_b1_count_in_1', 'uc_b2_count_in_1', 'uc_b3_count_in_1', 'uc_b4_count_in_1', 'uc_b_count_in_1',
'uc_b_count_rank_in_u',
'uc_b1_last_hours', 'uc_b2_last_hours', 'uc_b3_last_hours', 'uc_b4_last_hours'])
train_y = train_df['label'].values
print("train subset is generated.")
return train_X, train_y
def valid_set_construct(sub_ratio=0.1):
'''
# generation of valid set
@param sub_ratio: float ~ (0~1], the sub-sample rate of original valid set
'''
valid_part_1_uic_label = df_part_1_uic_label_cluster[df_part_1_uic_label_cluster['class'] == 0].sample(
frac=sub_ratio)
valid_part_2_uic_label = df_part_2_uic_label_cluster[df_part_2_uic_label_cluster['class'] == 0].sample(
frac=sub_ratio)
for i in range(1, 1001, 1):
valid_part_1_uic_label_0_i = df_part_1_uic_label_cluster[df_part_1_uic_label_cluster['class'] == i]
valid_part_1_uic_label_0_i = valid_part_1_uic_label_0_i.sample(frac=sub_ratio)
valid_part_1_uic_label = pd.concat([valid_part_1_uic_label, valid_part_1_uic_label_0_i])
valid_part_2_uic_label_0_i = df_part_2_uic_label_cluster[df_part_2_uic_label_cluster['class'] == i]
valid_part_2_uic_label_0_i = valid_part_2_uic_label_0_i.sample(frac=sub_ratio)
valid_part_2_uic_label = pd.concat([valid_part_2_uic_label, valid_part_2_uic_label_0_i])
# constructing valid set
valid_part_1_df = pd.merge(valid_part_1_uic_label, df_part_1_U, how='left', on=['user_id'])
valid_part_1_df = pd.merge(valid_part_1_df, df_part_1_I, how='left', on=['item_id'])
valid_part_1_df = pd.merge(valid_part_1_df, df_part_1_C, how='left', on=['item_category'])
valid_part_1_df = | pd.merge(valid_part_1_df, df_part_1_IC, how='left', on=['item_id', 'item_category']) | pandas.merge |
import pandas as pd
import requests
import lxml.html as lh
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
from datetime import datetime as dt
import time
url='http://pokemondb.net/pokedex/all'
#Create a handle, page, to handle the contents of the website
page = requests.get(url)
#Store the contents of the website under doc
doc = lh.fromstring(page.content)
#Parse data that are stored between <tr>..</tr> of HTML
tr_elements = doc.xpath('//tr')
#Create empty list
col=[]
i=0
#For each row, store each first element (header) and an empty list
for t in tr_elements[0]:
i+=1
name=t.text_content()
col.append((name,[]))
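#col now holds one (header, []) pair per table column of the pokedex table
#(that table has 10 columns, which is why rows of any other length are skipped below)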
#Since our first row is the header, data is stored on the second row onwards
for j in range(1,len(tr_elements)):
#T is our j'th row
T=tr_elements[j]
#If row is not of size 10, the //tr data is not from our table
if len(T)!=10:
break
#i is the index of our column
i=0
#Iterate through each element of the row
for t in T.iterchildren():
data=t.text_content()
        #Skip the first column; try to convert the remaining values to integers
if i>0:
#Convert any numerical value to integers
try:
data=int(data)
except:
pass
#Append the data to the empty list of the i'th column
col[i][1].append(data)
#Increment i for the next column
i+=1
Dict={title:column for (title,column) in col}
df= | pd.DataFrame(Dict) | pandas.DataFrame |
import pandas as pd
from time import time
import logging
import os
import json
import numpy as np
from statsmodels.tsa.ar_model import AR
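# note: recent statsmodels releases deprecate AR in favor of statsmodels.tsa.ar_model.AutoReg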
logger = logging.getLogger(__name__)
# Set of variables to predict
TARGETS = ['ETH.TO.SAU', 'ETH.TO.ITA', 'ETH.TO.GBR',
'ETH.TO.DNK', 'ETH.TO.SWE', 'ETH.TO.ZAF']
# Sub-Saharan countries codes
SSA = ['BDI', 'COM', 'DJI', 'ERI', 'ETH', 'ATF', 'KEN', 'MDG', 'MWI',
'MUS', 'MYT', 'MOZ', 'REU', 'RWA', 'SYC', 'SOM', 'SSD', 'TZA',
'UGA', 'ZMB', 'ZWE']
# User facing labels
LABELS = ['worse', 'poor', 'average', 'good', 'best']
# Maximum number of lag variables to consider for projections
MAX_LAG = 3
class Generator(object):
""" Feature generation routine for scoring """
def __init__(self, config, groupings, baseyear):
""" Initialize with a configuration object and the indicator groupings """
start_time = time()
sources = [os.path.join(os.getcwd(), config['paths']['output'],
d['name'],
'data.csv') for d in config['sources']]
# Generate a data frame with all indicators
self.df = pd.concat((pd.read_csv(f)
for f in sources), sort=False, ignore_index=True)
#self.df.year = self.df.year.fillna(method="ffill")
#self.df.year = self.df.year.astype(int)
# Summary stats
logger.info("Sources : {}".format(len(sources)))
logger.info("Row count : {}".format(len(self.df)))
logger.info("Geographies : {}".format(len(self.df['Country Name'].unique())))
logger.info("Indicators : {}".format(len(self.df['Indicator Code'].unique())))
logger.info("Temporal coverage : {} -> {}".format(self.df.year.min(), self.df.year.max()))
logger.info("Null values : {}".format(sum(self.df['value'].isnull())))
logger.info("Loaded data in {:3.2f} sec.".format(time() - start_time))
# Do the projections for indicators of interest
logger.info("Projecting indicators to {}.".format(baseyear))
inds = list(set(self.df['Indicator Code'].unique()) - set(TARGETS + ['Country Code', 'year']))
#proj = self.__projections(inds, baseyear)
# persist projections
# self.df['type'] = "raw"
# proj['type'] = 'projected'
# Include projections with the raw data
# self.df = pd.concat([self.df, proj], sort=False)
# self.df.to_csv("projections.csv", index=False)
        # Now pivot the data into wide form (one column per indicator)
self.data = pd.pivot_table(self.df, index=['Country Code', 'year'],
columns='Indicator Code', values='value')
# Consider country/year as features (and not an index)
self.data.reset_index(inplace=True)
# These are the set of indicators to consider
self.indicators = sorted(list(set(self.data.columns) - set(TARGETS + ['Country Code', 'year'])))
# Get the set of indicators/code mappings
self.labels = {i[0]: i[1]
for i in self.df[['Indicator Code', 'Indicator Name']].drop_duplicates().values.tolist()}
# Filter by Sub-Saharan African countries for generating quantile cuts
ssa_data = self.data[self.data['Country Code'].isin(SSA)]
# Indicators that we use for Scenarios and their relative improvements
INDICATORS = {i['code']: i['direction-improvement'] for grp in groupings['clusters'] for i in grp['indicators']}
# Get the label <-> numeric transformers set up
self.category_lookup = {}
self.indicator_bins = {}
for ind, order in INDICATORS.items():
if order == "higher":
lbl = LABELS
else:
lbl = list(reversed(LABELS))
tmp, bins = self.__generate_lookup(ssa_data[ind], lbl)
self.category_lookup.update(tmp)
self.indicator_bins[ind] = (bins, lbl)
def __projections(self, indicators, baseyear):
"""
Generates indicator level projections till current year.
This treats each indicator for each country as a time series. The
projections are made using an AR(n) model, where n is determined by
a heuristic approach (n here is the number of lag variables).
For cases where data is insufficient, we simply treat it as missing
which is better than projecting incorrectly.
indicators: all indicators to project
baseyear: year to project to.
returns: a dataframe
"""
start_time = time()
pdf = self.df.copy(deep=True)
#print(pdf.year)
pdf['year_idx'] = pd.to_datetime(pdf.year, format='%Y')
pdf = pdf.set_index('year_idx').to_period(freq='Y')
cnt = 0
ign = 0
# The resulting dataframe
proj_df = pd.DataFrame()
ts = pdf.groupby(['Country Code', 'Indicator Code'])
for (country, ind), grp in ts:
if (country in SSA) & (ind in indicators):
# Years for which projection is needed
years = np.arange(grp.year.max() + 1, baseyear + 1)
# observations available in this time series
obs = len(grp)
# Maximum lag to consider for the AR model
lag = min(len(grp) - 1, MAX_LAG)
logger.debug("Country: {}, Indicator: {}, observations: {}, maxlag: {}, num years to project: {}".
format(country, ind, obs, lag, len(years)))
if (years.size > 0) & (years.size <= 5) & (obs > 5):
# Do some interpolation if needed
X = grp.value.copy(deep=True)
X = X.resample('Y').sum()
X = X.interpolate()
# Fit and score an AR(n) model
model = AR(X, missing='raise')
model_fit = model.fit(maxlag=lag, trend='nc')
pred = model_fit.predict(start=str(years.min())[:4], end=str(years.max())[:4])
cnt += 1
# Conform to the overall dataframe
curr_df = pd.DataFrame()
curr_df['value'] = pred
curr_df['Country Code'] = country
curr_df['Indicator Code'] = ind
curr_df['Country Name'] = grp['Country Name'][0]
curr_df['Indicator Name'] = grp['Indicator Name'][0]
curr_df.reset_index(inplace=True)
curr_df.rename(columns={'index': "year"}, inplace=True)
curr_df = curr_df[['Country Name', 'Country Code',
'Indicator Name', 'Indicator Code', 'year', 'value']]
proj_df = | pd.concat([proj_df, curr_df], ignore_index=True) | pandas.concat |
from pathlib import Path
import pandas as pd
import typer
from jinja2 import Environment, FileSystemLoader
from reki.data_finder import find_local_file
from reki_data_tool.postprocess.grid.gfs.ne.config import OUTPUT_DIRECTORY
from reki_data_tool.postprocess.grid.gfs.util import get_random_start_time, get_random_forecast_time
app = typer.Typer()
@app.command("serial")
def create_serial_task(
output_script_path: Path = typer.Option(Path(OUTPUT_DIRECTORY, "03-serial", "gfs_ne_grib2_serial_case_1.sh"))
):
start_time = get_random_start_time()
start_time_label = start_time.strftime("%Y%m%d%H")
forecast_time = get_random_forecast_time()
forecast_time_label = f"{int(forecast_time / pd.Timedelta(hours=1)):03}"
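    # e.g. a pd.Timedelta of 36 hours becomes the zero-padded label "036"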
print(start_time_label, forecast_time_label)
output_directory = OUTPUT_DIRECTORY
output_file_path = Path(
output_directory,
f'ne_{start_time_label}_{forecast_time_label}.grb2'
)
file_loader = FileSystemLoader(Path(__file__).parent)
env = Environment(loader=file_loader)
template = env.get_template("slurm_job.sh")
job_params = dict(
job_name=output_script_path.stem,
is_parallel=False,
partition="serial",
model_path="reki_data_tool.postprocess.grid.gfs.ne",
options=f"""serial \\
--start-time={start_time_label} \\
--forecast-time={forecast_time_label}h \\
--output-file-path={output_file_path}"""
)
task_script_content = template.render(**job_params)
with open(output_script_path, "w") as f:
f.write(task_script_content)
return output_script_path
@app.command("dask-v1")
def create_dask_v1_task(
output_script_path: Path = typer.Option(Path(OUTPUT_DIRECTORY, "11-dask-v1", "dask_v1_case_1.sh")),
work_directory: Path = typer.Option(Path(OUTPUT_DIRECTORY)),
start_time: str = typer.Option(None),
forecast_time: str = typer.Option(None),
nodes: int = 1,
ntasks_per_node: int = 32,
partition: str = "normal"
):
if start_time is None:
start_time = get_random_start_time()
else:
start_time = pd.to_datetime(start_time, format="%Y%m%d%H")
start_time_label = start_time.strftime("%Y%m%d%H")
if forecast_time is None:
forecast_time = get_random_forecast_time()
else:
forecast_time = pd.to_timedelta(forecast_time)
forecast_time_label = f"{int(forecast_time / pd.Timedelta(hours=1)):03}"
print(start_time_label, forecast_time_label)
output_file_path = Path(
work_directory,
f'ne_{start_time_label}_{forecast_time_label}.grb2'
)
file_loader = FileSystemLoader(Path(__file__).parent)
env = Environment(loader=file_loader)
template = env.get_template("slurm_job.sh")
job_params = dict(
job_name=output_script_path.stem,
is_parallel=True,
partition=partition,
nodes=nodes,
ntasks_per_node=ntasks_per_node,
model_path="reki_data_tool.postprocess.grid.gfs.ne",
work_directory=work_directory.absolute(),
options=f"""dask-v1 \\
--start-time={start_time_label} \\
--forecast-time={forecast_time_label}h \\
--output-file-path={output_file_path} \\
--engine=mpi"""
)
task_script_content = template.render(**job_params)
with open(output_script_path, "w") as f:
f.write(task_script_content)
return output_script_path
@app.command("dask-v2")
def create_dask_v2_task(
output_script_path: Path = typer.Option(Path(OUTPUT_DIRECTORY, "12-dask-v2", "dask_v2_case_1.sh")),
work_directory: Path = typer.Option(Path(OUTPUT_DIRECTORY)),
start_time: str = typer.Option(None),
forecast_time: str = typer.Option(None),
nodes: int = 4,
partition: str = "normal"
):
if start_time is None:
start_time = get_random_start_time()
else:
start_time = pd.to_datetime(start_time, format="%Y%m%d%H")
start_time_label = start_time.strftime("%Y%m%d%H")
if forecast_time is None:
forecast_time = get_random_forecast_time()
else:
forecast_time = | pd.to_timedelta(forecast_time) | pandas.to_timedelta |
'''
Extracting the cluster labels
In the previous exercise, you saw that the intermediate clustering of the grain samples at height 6 has 3 clusters. Now, use the fcluster() function to extract the cluster labels for this intermediate clustering, and compare the labels with the grain varieties using a cross-tabulation.
The hierarchical clustering has already been performed and mergings is the result of the linkage() function. The list varieties gives the variety of each grain sample.
INSTRUCTIONS
100XP
Import:
pandas as pd.
fcluster from scipy.cluster.hierarchy.
Perform a flat hierarchical clustering by using the fcluster() function on mergings. Specify a maximum height of 6 and the keyword argument criterion='distance'.
Create a DataFrame df with two columns named 'labels' and 'varieties', using labels and varieties, respectively, for the column values. This has been done for you.
Create a cross-tabulation ct between df['labels'] and df['varieties'] to count the number of times each grain variety coincides with each cluster label.
'''
# Perform the necessary imports
import pandas as pd
from scipy.cluster.hierarchy import fcluster
# Use fcluster to extract labels: labels
labels = fcluster(mergings, 6, criterion='distance')
# Create a DataFrame with labels and varieties as columns: df
df = | pd.DataFrame({'labels': labels, 'varieties': varieties}) | pandas.DataFrame |
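# Illustrative continuation (not shown in the original exercise code): the cross-tabulation
# step described in the instructions, assuming df has been built from labels and varieties
ct = pd.crosstab(df['labels'], df['varieties'])
print(ct)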
# -*- coding: utf-8 -*-
import json
import re
from io import StringIO
from typing import List
import numpy as np
import pandas as pd
import requests
class APIHandlerTCGA:
# TODO:
# - Finish docstrings
# - Check imports
# - Test
def __init__(
self,
API_filter,
url: str = "https://api.gdc.cancer.gov/",
format: str = "TSV",
size: int = 16000,
):
"""
Args:
filter:
url:
format:
size:
"""
self.filter = API_filter
self.url = url
self.format = format
self.size = size
def get_manifest(self) -> pd.DataFrame:
"""Get manifest file applying a filter
Returns:
pd.DataFrame
"""
params = {
"filters": json.dumps(self.filter),
"return_type": "manifest",
"format": self.format,
"size": self.size,
}
response = requests.get(f"{self.url}/files", params=params)
content = StringIO(response.content.decode("utf-8"))
manifest_df = pd.read_csv(content, sep="\t")
return manifest_df
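    # Illustrative usage (hypothetical filter, not from the original source):
    #   api_filter = {"op": "in",
    #                 "content": {"field": "cases.project.project_id", "value": ["TCGA-BRCA"]}}
    #   manifest = APIHandlerTCGA(api_filter).get_manifest()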
def _get_fields_for_endpoint(
self, fields: List[str], endpoint: str
) -> pd.DataFrame:
"""Get fields for endpoint from GDC API
Args:
fields: List of fields to request from API
endpoint: API endpoint
Returns:
pd.DataFrame containing data from API response
"""
params = {
"filters": json.dumps(self.filter),
"fields": ",".join(fields),
"format": self.format,
"size": self.size,
}
response = requests.get(f"{self.url}/{endpoint}", params=params)
content = StringIO(response.content.decode("utf-8"))
df = | pd.read_csv(content, sep="\t") | pandas.read_csv |
import numpy as np
import os.path
import pandas as pd
import unittest
from forecast import (
importDataFrame,
makeEmptyPredictionDF,
makeTrainDF,
makeValidationDF,
)
from prediction_models import (
PredictionModel,
LastValueModel,
ProphetModel,
)
from loss_functions import LossFunction, MAE, RMSE
class Target(unittest.TestCase):
def setUp(self):
self.data_location = 'data/daily-min-temperatures.csv'
self.my_test_ts = importDataFrame(self.data_location)
self.column_names = self.my_test_ts.columns
def test_df_contains_only_two_columns(self):
"""
the dataframe contains only two columns
"""
self.assertEqual(self.my_test_ts.shape[1], 2)
    def test_timeseries_contains_at_least_12_entries(self):
self.assertGreaterEqual(self.my_test_ts.shape[0], 12)
def test_df_columns_have_correct_names(self):
# we want to ensure that the first column is named "dt"
# and the second column is named "val"
self.assertEqual(self.column_names[0], 'dt')
self.assertEqual(self.column_names[1], 'val')
def test_df_only_has_two_columns(self):
self.assertEqual(len(self.column_names), 2)
def test_columns_contain_expected_data_types(self):
self.assertEqual(self.my_test_ts['dt'].dtype, 'datetime64[ns]')
self.assertEqual(self.my_test_ts['val'].dtype, 'float')
class TrainDF(unittest.TestCase):
def setUp(self):
self.data_location = 'data/daily-min-temperatures.csv'
self.target_df = importDataFrame(self.data_location)
self.split_date = pd.to_datetime('1986-01-01 00:00:00')
self.train_df = makeTrainDF(self.target_df, self.split_date)
def test_df_contains_only_data_before_split(self):
self.assertTrue(self.train_df['dt'].max() == self.split_date)
def test_df_has_min_date_like_target_df(self):
self.assertTrue(
self.train_df['dt'].min() == self.target_df['dt'].min())
def test_train_df_contains_column_dt_and_val(self):
self.assertEqual(self.train_df.columns[0], 'dt', self.train_df.columns)
self.assertEqual(self.train_df.columns[1], 'val')
class EmptyPredictionDF(unittest.TestCase):
def setUp(self):
self.data_location = 'data/daily-min-temperatures.csv'
self.target_df = importDataFrame(self.data_location)
self.split_date = pd.to_datetime('1986-01-01 00:00:00')
self.empty_pred_df = makeEmptyPredictionDF(self.target_df,
self.split_date)
def test_df_columns_have_correct_names(self):
# the df contains a 'dt' column and a 'prediction'
self.column_names = self.empty_pred_df.columns
self.assertEqual(self.column_names[0], 'dt')
self.assertEqual(self.column_names[1], 'val')
def test_empty_pred_df_only_contains_dates_after_split(self):
self.assertGreater(self.empty_pred_df['dt'].min(), self.split_date)
def test_empty_pred_df_has_null_values_in_val_column(self):
for value in self.empty_pred_df['val']:
self.assertIsNone(value)
class TestPredictionModelClass(unittest.TestCase):
def setUp(self):
self.data_location = 'data/daily-min-temperatures.csv'
self.plot_save_location = '/home/boats/Desktop/'
self.target_df = importDataFrame(self.data_location)
self.split_date = pd.to_datetime('1986-01-01 00:00:00')
self.train_df = makeTrainDF(self.target_df, self.split_date)
self.valid_df = makeValidationDF(self.target_df, self.split_date)
self.empty_pred_df = makeEmptyPredictionDF(self.target_df,
self.split_date)
self.pred_df = LastValueModel.create_prediction(
self.train_df, self.empty_pred_df)
self.test_model = PredictionModel('test name')
def dummy_pred_fn(train_df, empty_pred_df):
last_value = (train_df[train_df['dt'] == train_df['dt']
.max()]['val'])
pred_df = empty_pred_df.copy(deep=True)
pred_df['val'] = last_value
return pred_df
self.test_model.predict = dummy_pred_fn
self.pred_df_from_model = self.test_model.predict(self.train_df,
self.empty_pred_df)
self.plot_df = self.test_model.present_results(
self.train_df, self.valid_df, self.pred_df, self.plot_save_location
)
def test_model_has_predict_method(self):
self.assertTrue(hasattr(self.test_model, 'create_prediction'))
def test_model_predict_output_columns_are_named_as_expected(self):
self.column_names = self.pred_df_from_model.columns
self.assertEqual(self.column_names[0], 'dt')
self.assertEqual(self.column_names[1], 'val')
def test_model_predict_output_columns_have_values(self):
for value in self.pred_df_from_model['val']:
self.assertIsNotNone(value)
class TestLossFunctionClass(unittest.TestCase):
def setUp(self):
self.data_location = 'data/daily-min-temperatures.csv'
self.target_df = importDataFrame(self.data_location)
self.split_date = | pd.to_datetime('1987-01-01 00:00:00') | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 3 18:12:36 2020
@author: Akash1313
"""
#Question 1
###############################################################################
print("---------------------------Question1----------------------------------------")
import numpy as np
import pandas as pd
import scipy as sp
import sympy
import statsmodels.api as stats
import warnings
warnings.filterwarnings('ignore')
def create_interaction (inDF1, inDF2):
name1 = inDF1.columns
name2 = inDF2.columns
outDF = pd.DataFrame()
for col1 in name1:
for col2 in name2:
outName = col1 + " * " + col2
outDF[outName] = inDF1[col1] * inDF2[col2]
return(outDF)
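# For example: if inDF1 holds dummy columns group_size_1..group_size_4 and inDF2 holds
# homeowner_0/homeowner_1, the output columns are named like 'group_size_1 * homeowner_0'.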
# A function that find the non-aliased columns, fit a logistic model, and return the full parameter estimates
def build_mnlogit (fullX, y, debug = 'N'):
# Number of all parameters
nFullParam = fullX.shape[1]
# Number of target categories
y_category = y.cat.categories
nYCat = len(y_category)
# Find the non-redundant columns in the design matrix fullX
reduced_form, inds = sympy.Matrix(fullX.values).rref()
# Extract only the non-redundant columns for modeling
X = fullX.iloc[:, list(inds)]
# These are the column numbers of the non-redundant columns
if (debug == 'Y'):
print('Column Numbers of the Non-redundant Columns:')
print(inds)
print("-------------------------------ans 1a------------------------------------------")
aliased_indices = [x for x in range(nFullParam) if (x not in inds)]
aliased_params = [fullX.columns[x] for x in aliased_indices]
print("the aliased columns in our model matrix are:\n ")
for i in aliased_params:
print(i)
# The number of free parameters
thisDF = len(inds) * (nYCat - 1)
    # Build a multinomial logistic model
logit = stats.MNLogit(y, X)
thisFit = logit.fit(method='newton', full_output = True, maxiter = 100, tol = 1e-8)
thisParameter = thisFit.params
thisLLK = logit.loglike(thisParameter.values)
#if (debug == 'Y'):
print(thisFit.summary())
print("Model Parameter Estimates:\n", thisParameter)
print("Model Log-Likelihood Value =", thisLLK)
print("Number of Free Parameters =", thisDF)
    # Recreate the estimates of the full parameters
workParams = pd.DataFrame(np.zeros(shape = (nFullParam, (nYCat - 1))))
workParams = workParams.set_index(keys = fullX.columns)
fullParams = pd.merge(workParams, thisParameter, how = "left", left_index = True, right_index = True)
fullParams = fullParams.drop(columns = '0_x').fillna(0.0)
# Return model statistics
return (thisLLK, thisDF, fullParams)
#Que 1
dataframe = pd.read_csv(r'C:/Users/Akash1313/Desktop/CS584_ML/Assignment4/Purchase_Likelihood.csv', delimiter=',')
dataframe = dataframe.dropna()
y = dataframe['insurance'].astype('category')
xGS = pd.get_dummies(dataframe[['group_size']].astype('category'))
xH = pd.get_dummies(dataframe[['homeowner']].astype('category'))
xMC = pd.get_dummies(dataframe[['married_couple']].astype('category'))
# Intercept only model
designX = pd.DataFrame(y.where(y.isnull(), 1))
LLK0, DF0, fullParams0 = build_mnlogit (designX, y, debug = 'N')
# Intercept + GS
print("\n\n---------------------GS------------------------------------")
designX = stats.add_constant(xGS, prepend=True)
LLK_1GS, DF_1GS, fullParams_1GS = build_mnlogit (designX, y, debug = 'N')
testDev = 2 * (LLK_1GS - LLK0)
testDF = DF_1GS - DF0
testPValue = sp.stats.chi2.sf(testDev, testDF)
print('Deviance Chi-Square Test')
print('Chi-Square Statistic = ', testDev)
print('Degree of Freedom = ', testDF)
print(' Significance = ', testPValue)
print('Feature Importance Index = ', -np.log10(testPValue))
# Intercept + GS + H
print("\n\n--------------Intercept + GS + H----------------------------")
designX = xGS
designX = designX.join(xH)
designX = stats.add_constant(designX, prepend=True)
LLK_1GS_1H, DF_1GS_1H, fullParams_1GS_HJ = build_mnlogit (designX, y, debug = 'N')
testDev = 2 * (LLK_1GS_1H - LLK_1GS)
testDF = DF_1GS_1H - DF_1GS
testPValue = sp.stats.chi2.sf(testDev, testDF)
print('Deviance Chi-Square Test')
print('Chi-Square Statistic = ', testDev)
print(' Degree of Freedom = ', testDF)
print(' Significance = ', testPValue)
print('Feature Importance Index = ', -np.log10(testPValue))
# Intercept + GS + H + MC
print("\n\n--------------Intercept + GS + H + MC----------------------------")
designX = xGS
designX = designX.join(xH)
designX = designX.join(xMC)
designX = stats.add_constant(designX, prepend=True)
LLK_1GS_1H_1MC, DF_1GS_1H_1MC, fullParams_1GS_1H_1MC = build_mnlogit (designX, y, debug = 'N')
testDev = 2 * (LLK_1GS_1H_1MC - LLK_1GS_1H)
testDF = DF_1GS_1H_1MC - DF_1GS_1H
testPValue = sp.stats.chi2.sf(testDev, testDF)
print('Deviance Chi-Square Test')
print('Chi-Square Statistic = ', testDev)
print(' Degree of Freedom = ', testDF)
print(' Significance = ', testPValue)
print('Feature Importance Index = ', -np.log10(testPValue))
# Create the columns for the group_size * homeowner interaction effect
xGS_H = create_interaction(xGS, xH)
# Intercept + GS + H + MC + GS * H
print("\n\n--------------Intercept + GS + H + MC + GS * H----------------------------")
designX = xGS
designX = designX.join(xH)
designX = designX.join(xMC)
designX = designX.join(xGS_H)
designX = stats.add_constant(designX, prepend=True)
LLK_2GS_H, DF_2GS_H, fullParams_2GS_H = build_mnlogit (designX, y, debug = 'N')
testDev = 2 * (LLK_2GS_H - LLK_1GS_1H_1MC)
testDF = DF_2GS_H - DF_1GS_1H_1MC
testPValue = sp.stats.chi2.sf(testDev, testDF)
print('Deviance Chi-Square Test')
print('Chi-Square Statistic = ', testDev)
print(' Degree of Freedom = ', testDF)
print(' Significance = ', testPValue)
print('Feature Importance Index = ', -np.log10(testPValue))
# Create the columns for the e. group_size * married_couple interaction effect
xGS_MC = create_interaction(xGS, xMC)
# Intercept + GS + H + MC + GS * H + GS * MC
print("\n\n--------------Intercept + GS + H + MC + GS * H + GS * MC----------------------------")
designX = xGS
designX = designX.join(xH)
designX = designX.join(xMC)
designX = designX.join(xGS_H)
designX = designX.join(xGS_MC)
designX = stats.add_constant(designX, prepend=True)
LLK_2GS_MC, DF_2GS_MC, fullParams_2GS_MC = build_mnlogit (designX, y, debug = 'N')
testDev = 2 * (LLK_2GS_MC - LLK_2GS_H)
testDF = DF_2GS_MC - DF_2GS_H
testPValue = sp.stats.chi2.sf(testDev, testDF)
print('Deviance Chi-Square Test')
print('Chi-Square Statistic = ', testDev)
print(' Degree of Freedom = ', testDF)
print(' Significance = ', testPValue)
print('Feature Importance Index = ', -np.log10(testPValue))
# Create the columns for the e. homeowner * married_couple interaction effect
xH_MC = create_interaction(xH, xMC)
# Intercept + GS + H + MC + GS * H + GS * MC + H * MC
print("\n\n--------------Intercept + GS + H + MC + GS * H + GS * MC + H * MC----------------------------")
designX = xGS
designX = designX.join(xH)
designX = designX.join(xMC)
designX = designX.join(xGS_H)
designX = designX.join(xGS_MC)
designX = designX.join(xH_MC)
designX = stats.add_constant(designX, prepend=True)
LLK_2H_MC, DF_2H_MC, fullParams_2H_MC = build_mnlogit (designX, y, debug = 'Y')
testDev = 2 * (LLK_2H_MC - LLK_2GS_MC)
testDF = DF_2H_MC - DF_2GS_MC
testPValue = sp.stats.chi2.sf(testDev, testDF)
print('Deviance Chi-Square Test')
print('Chi-Square Statistic = ', testDev)
print(' Degree of Freedom = ', testDF)
print(' Significance = ', testPValue)
print('Feature Importance Index = ', -np.log10(testPValue))
print("-------------------------------ans 1b------------------------------------------")
print("degrees of freedom for our model is",testDF)
print()
#Question 2
###############################################################################
print("---------------------------Question2----------------------------------------")
# Build a multinomial logistic model
logit = stats.MNLogit(y, designX)
this_fit = logit.fit(method='newton', full_output=True, maxiter=100, tol=1e-8)
data = []
for i in range(1, 5):
for j in range(2):
for k in range(2):
data.append([i, j, k])
dataset = pd.DataFrame(data, columns=['group_size', 'homeowner', 'married_couple'])
xGS = pd.get_dummies( dataset[['group_size']].astype('category'))
xH = pd.get_dummies(dataset[['homeowner']].astype('category'))
xMC = pd.get_dummies(dataset[['married_couple']].astype('category'))
xGS_H = create_interaction(xGS, xH)
xGS_MC = create_interaction(xGS, xMC)
xH_MC = create_interaction(xH, xMC)
designX = xGS
designX = designX.join(xH)
designX = designX.join(xMC)
designX = designX.join(xGS_H)
designX = designX.join(xGS_MC)
designX = designX.join(xH_MC)
designX = stats.add_constant(designX, prepend=True)
insurance_pred = this_fit.predict(exog = designX)
insurance_result=pd.concat([dataset, insurance_pred],axis=1)
print("-------------------------------ans 2a------------------------------------------")
print(insurance_pred)
print("-------------------------------ans 2b------------------------------------------")
insurance_result['oddVal(prob_I1/prob_I0)'] = insurance_result[1] / insurance_result[0]
print(insurance_result[['group_size','homeowner','married_couple','oddVal(prob_I1/prob_I0)']])
max_row = insurance_result.loc[insurance_result['oddVal(prob_I1/prob_I0)'].idxmax()]
print("The maximum odd value is obtained when \ngroup_size =",max_row['group_size'],", homeowner = ",max_row['homeowner'],", married_couple = ",max_row['married_couple'])
print('The maximum odd value is: ',max_row['oddVal(prob_I1/prob_I0)'])
print("-------------------------------ans 2c------------------------------------------")
prob_In2_GS3 = (dataframe[dataframe['group_size']==3].groupby('insurance').size()[2]/dataframe[dataframe['group_size']==3].shape[0])
prob_In0_GS3 = (dataframe[dataframe['group_size']==3].groupby('insurance').size()[0]/dataframe[dataframe['group_size']==3].shape[0])
odds1 = prob_In2_GS3/prob_In0_GS3
prob_In2_GS1 = (dataframe[dataframe['group_size']==1].groupby('insurance').size()[2]/dataframe[dataframe['group_size']==1].shape[0])
prob_In0_GS1 = (dataframe[dataframe['group_size']==1].groupby('insurance').size()[0]/dataframe[dataframe['group_size']==1].shape[0])
odds2 = prob_In2_GS1/prob_In0_GS1
oddsRatio = odds1/odds2
print(oddsRatio)
print("-------------------------------ans 2d------------------------------------------")
prob_In0_H1 = (dataframe[dataframe['homeowner']==1].groupby('insurance').size()[0]/dataframe[dataframe['homeowner']==1].shape[0])
prob_In1_H1 = (dataframe[dataframe['homeowner']==1].groupby('insurance').size()[1]/dataframe[dataframe['homeowner']==1].shape[0])
odds1 = prob_In0_H1/prob_In1_H1
prob_In0_H0 = (dataframe[dataframe['homeowner']==0].groupby('insurance').size()[0]/dataframe[dataframe['homeowner']==0].shape[0])
prob_In1_H0 = (dataframe[dataframe['homeowner']==0].groupby('insurance').size()[1]/dataframe[dataframe['homeowner']==0].shape[0])
odds2 = prob_In0_H0/prob_In1_H0
oddsRatio = odds1/odds2
print(oddsRatio)
######################################################################################
#Question3
print("---------------------------Question3----------------------------------------")
import pandas as pd
import numpy as np
import scipy.stats
import warnings
from sklearn import naive_bayes
warnings.filterwarnings('ignore')
pd.options.display.max_columns = None
def RowWithColumn (rowVar, columnVar, show = 'ROW'):
countTable = pd.crosstab(index = rowVar, columns = columnVar, margins = False, dropna = True)
print("Frequency Table: \n", countTable)
print()
return
dataset = pd.read_csv(r'C:/Users/Akash1313/Desktop/CS584_ML/Assignment4/Purchase_Likelihood.csv', delimiter=',')
#Que 3a
print("-------------------------ans 3a-------------------------------")
cTable = pd.crosstab(index = dataset['insurance'], columns = ["Count"], margins = True, dropna = False)
cTable['Class Prob'] = cTable['Count'] / len(dataset)
cTable = cTable.drop(columns = ['All'])
print(cTable)
#Que 3b
print("-------------------------ans 3b-------------------------------")
RowWithColumn(dataset['insurance'], dataset['group_size'])
#Que 3c
print("-------------------------ans 3c-------------------------------")
RowWithColumn(dataset['insurance'], dataset['homeowner'])
#Que 3d
print("-------------------------ans 3d-------------------------------")
RowWithColumn(dataset['insurance'], dataset['married_couple'])
#Que 3e
print("-------------------------ans 3e-------------------------------")
def ChiSquareTest (xCat, yCat, debug = 'N'):
obsCount = pd.crosstab(index = xCat, columns = yCat, margins = False, dropna = True)
cTotal = obsCount.sum(axis = 1)
rTotal = obsCount.sum(axis = 0)
nTotal = np.sum(rTotal)
expCount = np.outer(cTotal, (rTotal / nTotal))
if (debug == 'Y'):
print('Observed Count:\n', obsCount)
print('Column Total:\n', cTotal)
print('Row Total:\n', rTotal)
print('Overall Total:\n', nTotal)
print('Expected Count:\n', expCount)
print('\n')
chiSqStat = ((obsCount - expCount)**2 / expCount).to_numpy().sum()
chiSqDf = (obsCount.shape[0] - 1.0) * (obsCount.shape[1] - 1.0)
chiSqSig = scipy.stats.chi2.sf(chiSqStat, chiSqDf)
cramerV = chiSqStat / nTotal
if (cTotal.size > rTotal.size):
cramerV = cramerV / (rTotal.size - 1.0)
else:
cramerV = cramerV / (cTotal.size - 1.0)
cramerV = np.sqrt(cramerV)
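    # i.e. Cramer's V = sqrt( chiSqStat / (nTotal * (min(#rows, #cols) - 1)) )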
return(chiSqStat, chiSqDf, chiSqSig, cramerV)
catPred = ['group_size', 'homeowner', 'married_couple']
testResult = | pd.DataFrame(index = catPred, columns = ['Statistic', 'DF', 'Significance', 'Association', 'Measure']) | pandas.DataFrame |
import numpy as np
import numpy.testing as npt
import pandas as pd
from stumpy import maamped, config
import pytest
from dask.distributed import Client, LocalCluster
import naive
@pytest.fixture(scope="module")
def dask_cluster():
cluster = LocalCluster(n_workers=2, threads_per_worker=2)
yield cluster
cluster.close()
test_data = [
(np.array([[584, -11, 23, 79, 1001, 0, -19]], dtype=np.float64), 3),
(np.random.uniform(-1000, 1000, [5, 20]).astype(np.float64), 5),
]
substitution_locations = [slice(0, 0), 0, -1, slice(1, 3), [0, 3]]
def test_maamped_int_input(dask_cluster):
with pytest.raises(TypeError):
with Client(dask_cluster) as dask_client:
maamped(dask_client, np.arange(20).reshape(2, 10), 5)
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
@pytest.mark.parametrize("T, m", test_data)
def test_maamped(T, m, dask_cluster):
with Client(dask_cluster) as dask_client:
excl_zone = int(np.ceil(m / 4))
ref_P, ref_I = naive.maamp(T, m, excl_zone)
comp_P, comp_I = maamped(dask_client, T, m)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
@pytest.mark.parametrize("T, m", test_data)
def test_maamped_include(T, m, dask_cluster):
with Client(dask_cluster) as dask_client:
for width in range(T.shape[0]):
for i in range(T.shape[0] - width):
include = np.asarray(range(i, i + width + 1))
excl_zone = int(np.ceil(m / 4))
ref_P, ref_I = naive.maamp(T, m, excl_zone, include)
comp_P, comp_I = maamped(dask_client, T, m, include)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
@pytest.mark.parametrize("T, m", test_data)
def test_maamped_discords(T, m, dask_cluster):
with Client(dask_cluster) as dask_client:
excl_zone = int(np.ceil(m / 4))
ref_P, ref_I = naive.maamp(T, m, excl_zone, discords=True)
comp_P, comp_I = maamped(dask_client, T, m, discords=True)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
@pytest.mark.parametrize("T, m", test_data)
def test_maamped_include_discords(T, m, dask_cluster):
with Client(dask_cluster) as dask_client:
for width in range(T.shape[0]):
for i in range(T.shape[0] - width):
include = np.asarray(range(i, i + width + 1))
excl_zone = int(np.ceil(m / 4))
ref_P, ref_I = naive.maamp(T, m, excl_zone, include, discords=True)
comp_P, comp_I = maamped(dask_client, T, m, include, discords=True)
npt.assert_almost_equal(ref_P, comp_P)
npt.assert_almost_equal(ref_I, comp_I)
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
@pytest.mark.parametrize("T, m", test_data)
def test_maamped_df(T, m, dask_cluster):
with Client(dask_cluster) as dask_client:
excl_zone = int(np.ceil(m / 4))
ref_P, ref_I = naive.maamp(T, m, excl_zone)
df = | pd.DataFrame(T.T) | pandas.DataFrame |
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.tree import DecisionTreeClassifier
from pprint import pprint
import pandas as pd
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler, StandardScaler, PowerTransformer
from sklearn.cluster import KMeans
import preprocessing_permits as pr
import numpy as np
############################################################################################################
# Cross Validation Modeling #
############################################################################################################
def run_decision_tree_cv(X_train, y_train):
'''
Function to run a decision tree model. The function creates the model, then uses
cross-validation grid search to figure out what the best parameters are. Returns a grid (object
used to find best hyperparameters), df_result (holds the accuracy score for all hyperparameter values)
and model (holds the model with the best hyperparameters, used to create predictions)
'''
# keys are names of hyperparams, values are a list of values to try for that hyper parameter
params = {
'max_depth': range(1, 11),
'criterion': ['gini', 'entropy']
}
dtree = DecisionTreeClassifier()
    # cv=3 means 3-fold cross-validation, i.e. k = 3
grid = GridSearchCV(dtree, params, cv=3, scoring= "recall")
grid.fit(X_train, y_train)
model = grid.best_estimator_
results = grid.cv_results_
for score, p in zip(results['mean_test_score'], results['params']):
p['score'] = score
df_result = pd.DataFrame(results['params'])
print(grid.best_params_)
return grid, df_result, model
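# Illustrative usage (assumes X_train, y_train and a validation split already exist):
#   grid, cv_results, best_tree = run_decision_tree_cv(X_train, y_train)
#   y_pred = create_prediction(X_validate, best_tree)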
def run_random_forest_cv(X_train, y_train):
'''
Function to run a random forest model. The function creates the model, then uses
cross-validation grid search to figure out what the best parameters are. Returns a grid (object
used to find best hyperparameters), df_result (holds the accuracy score for all hyperparameter values)
and model (holds the model with the best hyperparameters, used to create predictions)
'''
params = {
'max_depth': range(1, 10),
"min_samples_leaf": range(1,10)
}
rf = RandomForestClassifier(random_state = 123)
    # cv=3 means 3-fold cross-validation, i.e. k = 3
grid = GridSearchCV(rf, params, cv=3, scoring= "recall")
grid.fit(X_train, y_train)
model = grid.best_estimator_
results = grid.cv_results_
for score, p in zip(results['mean_test_score'], results['params']):
p['score'] = score
df_result = pd.DataFrame(results['params'])
print(grid.best_params_)
return grid, df_result, model
def run_knn_cv(X_train, y_train):
'''
Function to run a knn model. The function creates the model, then uses
cross-validation grid search to figure out what the best parameters are. Returns a grid (object
used to find best hyperparameters), df_result (holds the accuracy score for all hyperparameter values)
and model (holds the model with the best hyperparameters, used to create predictions)
'''
knn = KNeighborsClassifier()
params = {
'weights': ["uniform", "distance"],
"n_neighbors": range(1,20)
}
    # cv=3 means 3-fold cross-validation, i.e. k = 3
grid = GridSearchCV(knn, params, cv=3, scoring= "recall")
grid.fit(X_train, y_train)
model = grid.best_estimator_
results = grid.cv_results_
for score, p in zip(results['mean_test_score'], results['params']):
p['score'] = score
df_result = pd.DataFrame(results['params'])
print(grid.best_params_)
return grid, df_result, model
def evaluate_on_test_data(X_test, y_test, model):
    return model.score(X_test, y_test)
def create_prediction(df, model):
y_pred = model.predict(df)
return y_pred
############################################################################################################
# Evaluations #
############################################################################################################
def create_report(y_train, y_pred):
'''
Helper function used to create a classification evaluation report, and return it as df
'''
report = classification_report(y_train, y_pred, output_dict = True)
report = | pd.DataFrame.from_dict(report) | pandas.DataFrame.from_dict |
import numpy as np
import pandas as pd
from pytest import approx
from evidently.analyzers.stattests import z_stat_test
from evidently.analyzers.stattests.chisquare_stattest import chi_stat_test
def test_freq_obs_eq_freq_exp() -> None:
    # observed and expected frequencies are the same
reference = pd.Series([1, 2, 3, 4, 5, 6]).repeat([16, 18, 16, 14, 12, 12])
current = pd.Series([1, 2, 3, 4, 5, 6]).repeat([16, 16, 16, 16, 16, 8])
assert chi_stat_test.func(reference, current, 0.5) == (approx(0.62338, abs=1e-5), False)
def test_chi_stat_test_cat_feature() -> None:
reference = | pd.Series(["a", "b", "c"]) | pandas.Series |
#!/usr/bin/env python
""" Barebones BGAPI scanner script for Bluegiga BLE modules
This script is designed to be used with a BGAPI-enabled Bluetooth Smart device
from Bluegiga, probably a BLED112 but anything that is connected to a serial
port (real or virtual) and can "speak" the BGAPI protocol. It is tuned for
usable defaults when used on a Raspberry Pi, but can easily be used on other
platforms, including Windows or OS X.
Note that the command functions do *not* incorporate the extra preceding length
byte required when using "packet" mode (only available on USART peripheral ports
on the BLE112/BLE113 module, not applicable to the BLED112). It is built so you
can simply plug in a BLED112 and go, but other kinds of usage may require small
modifications.
Changelog:
2013-04-07 - Fixed 128-bit UUID filters
- Added more verbose output on startup
- Added "friendly mode" output argument
- Added "quiet mode" output argument
- Improved comments in code
2013-03-30 - Initial release
2020-11-xx - Massive upgrade
- Fixed to work with python >=3.6
- Now works on both MS WinXX and Apple OSX x86 using ActiveState Python ( Not regular Python from Python.org)
               - dump to Excel .csv file function added so that it actually works properly
- real time plotting of RSSI to live graph function added
- real time plotting of user selected packet payload against time in milliseconds
- bglib.py code merged into this file so the whole thing is in one file
"""
__author__ = "<NAME> & others"
__license__ = "MIT"
__version__ = "2020-11-xx"
__email__ = "<EMAIL>"
#-------------------------------------------------------------------------------------------------------------------
#
# Things that need to be installed for this script to run
#
#
#------------------------------------------------------------------------------------------------------------------
#
# 1) Install activestate ActivePython from https://platform.activestate.com/ActiveState/ActivePython-3.8
#
# This is the only Python version which works straight out of the box with all the features of this script.
#
#
# 2) pip3 install pyserial future pandas matplotlib
#
# 3) Know the serial port that your BLE112 device is mounted on.
#
# USB BLE112 device when installed correctly shows up as a virtual serial port .
# This is the case for Apple Mac OSX and Microsoft windows
#
# REF : https://www.silabs.com/wireless/bluetooth/bluegiga-low-energy-legacy-modules/device.bled112
# REF : https://www.mouser.dk/ProductDetail/Silicon-Labs/BLED112-V1/?qs=2bnkQPT%252BPaf%252BBQ6Sw8mJqQ%3D%3D
#
#------------------------------------------------------------------------------------------------------------------
import sys, optparse, serial, struct, time, datetime, re, signal, string
import matplotlib.pyplot as plt
import pandas as pd
import tkinter as tk
options = []
filter_uuid = []
filter_mac = []
filter_rssi = 0
# create an empty dataframe that will store streaming data
df = None
ax = None
df_byte = None
bx = None
instr_search = None
byte_switch = False
byte_position_ = None
start_time=datetime.datetime.now()
def remove(string):
pattern = re.compile(r'\s+')
return re.sub(pattern, '', string)
#return strip(string)
def main():
global options, filter_uuid, filter_mac, filter_rssi, df, ax, instr_search, byte_switch, byte_position_, df_byte, bx
class IndentedHelpFormatterWithNL(optparse.IndentedHelpFormatter):
def format_description(self, description):
if not description: return ""
desc_width = self.width - self.current_indent
indent = " "*self.current_indent
bits = description.split('\n')
formatted_bits = [
optparse.textwrap.fill(bit,
desc_width,
initial_indent=indent,
subsequent_indent=indent)
for bit in bits]
result = "\n".join(formatted_bits) + "\n"
return result
def format_option(self, option):
result = []
opts = self.option_strings[option]
opt_width = self.help_position - self.current_indent - 2
if len(opts) > opt_width:
opts = "%*s%s\n" % (self.current_indent, "", opts)
indent_first = self.help_position
else: # start help on same line as opts
opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
indent_first = 0
result.append(opts)
if option.help:
help_text = self.expand_default(option)
help_lines = []
for para in help_text.split("\n"):
help_lines.extend(optparse.textwrap.wrap(para, self.help_width))
result.append("%*s%s\n" % (
indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (self.help_position, "", line)
for line in help_lines[1:]])
elif opts[-1] != "\n":
result.append("\n")
return "".join(result)
class MyParser(optparse.OptionParser):
def format_epilog(self, formatter=None):
return self.epilog
def format_option_help(self, formatter=None):
formatter = IndentedHelpFormatterWithNL()
if formatter is None:
formatter = self.formatter
formatter.store_option_strings(self)
result = []
result.append(formatter.format_heading(optparse._("Options")))
formatter.indent()
if self.option_list:
result.append(optparse.OptionContainer.format_option_help(self, formatter))
result.append("\n")
for group in self.option_groups:
result.append(group.format_help(formatter))
result.append("\n")
formatter.dedent()
# Drop the last "\n", or the header if no options or option groups:
print
return "".join(result[:-1])
# process script arguments
p = MyParser(description='Bluetooth Smart Scanner Updated script for Bluegiga BLED112 v2020-11-30', epilog=
"""
Examples:
bled112_scanner_upgrade_V5.py
\tDefault options, passive scan, display all devices
bled112_scanner_upgrade_V5.py -p /dev/ttyUSB0 -d sd
\tUse ttyUSB0, display only sender MAC address and ad data payload
bled112_scanner_upgrade_V5.py -u 1809 -u 180D
\tDisplay only devices advertising Health Thermometer service (0x1809)
\tor the Heart Rate service (0x180D)
bled112_scanner_upgrade_V5.py -m 00:07:80 -m 08:57:82:bb:27:37
\tDisplay only devices with a Bluetooth address (MAC) starting with the
\tBluegiga OUI (00:07:80), or exactly matching 08:57:82:bb:27:37
Sample Output Explanation:
1364699494.574 -57 0 000780814494 0 255 02010603030918
't' (Unix time):\t1364699464.574, 1364699591.128, etc.
'r' (RSSI value):\t-57, -80, -92, etc.
'p' (Packet type):\t0 (advertisement), 4 (scan response)
's' (Sender MAC):\t000780535BB4, 000780814494, etc. (search in the actual MAC field, not the payload status data)
'a' (Address type):\t0 (public), 1 (random)
'b' (Bond status):\t255 (no bond), 0 to 15 if bonded
'd' (Data payload):\t02010603030918, etc.
See BT4.0 Core Spec for details about ad packet format
======================================================================================================
Examples graph plot functions running on Microsoft Window WINxx or Apple OSX
( !! NOTE : Must be using Activestate ActivePython and not regular Python from www.python.org !!!! )
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
=======================================================================================================
Use for plotting rssi to graph plot function in MS WinXX
python ./bled112_scanner_upgrade_V5.py -q -f -c -x --time_in_ms -d tr -p com27
Use for plotting rssi to dos CMD function in MS WinXX
python ./bled112_scanner_upgrade_V5.py -q -f -c --time_in_ms -d tr -p com27
Use for plotting rssi and other data to dos CMD function in MS WinXX onwards to MS EXCEL .CSV file
python ./bled112_scanner_upgrade_V5.py -q -f -k --time_in_ms -p com27 1>>./dump.csv
Use live RSSI plot function to screen live graph
( !! NOTE : Must be using Activestate ActivePython and not regular Python from www.python.org !!!! )
python ./bled112_scanner_upgrade_V5.py -q -f -c -x --time_in_ms -d tr -p com27
Use live RSSI plot function to screen live graph and dump to excel CSV file also
(!! See Warning NOTE above !!)
For MS WinXx : python ./bled112_scanner_upgrade_V5.py -q -f -k -x --time_in_ms -p com27 1>>./dump.csv
For Apple OSX : python ./bled112_scanner_upgrade_V5.py -q -f -k -x --time_in_ms -p /dev/cu.usbmodem11 1>>./dump.csv
Using live search in the Payload_status message with live RSSI plot function to screen
live graph and dump to excel CSV file also (!! See Warning NOTE above !!)
For MS WinXx : python3 ./bled112_scanner_upgrade_V5.py -q -f -x -k --time_in_ms -p com27 --instr=FFFE96B6E511 1>>./dump.csv
For Apple OSX : python3 ./bled112_scanner_upgrade_V5.py -q -f -x -k --time_in_ms -p /dev/cu.usbmodem11 --instr=FFFE96B6E511 1>>./dump.csv
Using live search in the Payload_status message with live RSSI plot function to screen
live graph and dump to excel CSV file also (!! See Warning NOTE above !!)
plus search for the value in byte position 09 and swap the found byte value
(converted to decimal) into the RSSI column.
For MS WinXx : python3 ./bled112_scanner_upgrade_V5.py -q -f -k -x --time_in_ms -p com27 --instr=FFFE96B6E511 --byte=09 --switch
For Apple OSX : python3 ./bled112_scanner_upgrade_V5.py -q -f -k -x --time_in_ms -p /dev/cu.usbmodem11 --instr=FFFE96B6E511 --byte=09 --switch
Using live search in the Payload_status message with live RSSI plot function to screen live graph (graph 1)
and Selected Byte from Payload data live to plot to graph (graph 2) and dump to excel CSV file
Selected Byte from Payload data = search payload data and find the value in byte position 09 .
Also (!! See Warning NOTE above !!)
For MS WinXx : python3 ./bled112_scanner_upgrade_V5.py -q -f -k -x --time_in_ms -p com27 --instr=FFFE96B6E511 --byte=09 -X
For Apple OSX : python3 ./bled112_scanner_upgrade_V5.py -q -f -k -x --time_in_ms -p /dev/cu.usbmodem11 --instr=FFFE96B6E511 --byte=09 -X
"""
)
# set all defaults for options
p.set_defaults(port="COM5", baud=115200, interval=0xC8, window=0xC8, display="trpsabd", uuid=[], mac=[], rssi=0, active=False, quiet=False, friendly=False)
# create serial port options argument group
group = optparse.OptionGroup(p, "Serial Port Options")
group.add_option('--port', '-p', type="string", help="Serial port device name (default COM5)", metavar="PORT")
group.add_option('--baud', '-b', type="int", help="Serial port baud rate (default 115200)", metavar="BAUD")
p.add_option_group(group)
# create scan options argument group
group = optparse.OptionGroup(p, "Scan Options")
group.add_option('--interval', '-i', type="int", help="Scan interval width in units of 0.625ms (default 200)", metavar="INTERVAL")
group.add_option('--window', '-w', type="int", help="Scan window width in units of 0.625ms (default 200)", metavar="WINDOW")
group.add_option('--active', '-a', action="store_true", help="Perform active scan (default passive)\nNOTE: active scans result "
"in a 'scan response' request being sent to the slave device, which "
"should send a follow-up scan response packet. This will result in "
"increased power consumption on the slave device.")
p.add_option_group(group)
# create filter options argument group
group = optparse.OptionGroup(p, "Filter Options")
group.add_option('--uuid', '-u', type="string", action="append", help="Service UUID(s) to match", metavar="UUID")
group.add_option('--mac', '-m', type="string", action="append", help="MAC address(es) or prefix(es) to match, format xx:xx:xx[:xx:xx:xx]", metavar="ADDRESS")
group.add_option('--instr', '-S', type="string", action="append", help="Search the 'Payload data' field for a matching hex string, format xxXXxxXXxx", metavar="payload_instr")
group.add_option('--byte', '-y', type="string", action="append", help="Select a single byte from 'Payload data' when the '--instr' search matches; '--byte=byte_position' outputs its value in the byte column (!! first character position is zero !!)", metavar="byte_position")
group.add_option('--rssi', '-R', type="int", help="RSSI minimum filter (-110 to -20), omit to disable", metavar="RSSI")
group.add_option('--install', '-I', action="store_true", help="Show a guide on how to install the required packages")
p.add_option_group(group)
# create output options argument group
group = optparse.OptionGroup(p, "Output Options")
group.add_option('--switch', '-Z', action="store_true", help="If '--instr' and '--byte=byte_position' are selected, put the selected byte value in the RSSI column")
group.add_option('--quiet', '-q', action="store_true", help="Quiet mode (suppress initial scan parameter display)")
group.add_option('--time_in_ms', '-z', action="store_true", help="time_in_ms (Display time in milliseconds)")
group.add_option('--csv', '-k', action="store_true", help="CSV mode (if options -q and -f are set, output directly in Excel-friendly CSV format)")
group.add_option('--comma', '-c', action="store_true", help="Comma mode (if options -q and -f are set, output in basic comma-separated format, not Excel-friendly)")
group.add_option('--plot', '-x', action="store_true", help="Plot mode (if options '-q -f -c -x --time_in_ms -d tr' are set, show a live plot of RSSI versus time)")
group.add_option('--plotbyte', '-X', action="store_true", help="Plot mode (if options '-q -f -c -x --time_in_ms -d tr' are set, show a live plot of the selected payload byte versus time)")
group.add_option('--friendly', '-f', action="store_true", help="Friendly mode (output in human-readable format)")
group.add_option('--display', '-d', type="string", help="Display fields and order (default '%default')\n"
" t = Unix time, with milliseconds\n"
" r = RSSI measurement (signed integer)\n"
" p = Packet type (0 = normal, 4 = scan response)\n"
" s = Sender MAC address (hexadecimal)\n"
" a = Address type (0 = public, 1 = random)\n"
" b = Bonding status (255 = no bond, else bond handle)\n"
" d = Advertisement data payload (hexadecimal)", metavar="FIELDS")
p.add_option_group(group)
# actually parse all of the arguments
options, arguments = p.parse_args()
# validate any supplied MAC address filters
for arg in options.mac:
if re.search('[^a-fA-F0-9:]', arg):
p.print_help()
print("\n================================================================")
print("Invalid MAC filter argument '%s'\n-->must be in the form AA:BB:CC:DD:EE:FF" % arg)
print("================================================================")
exit(1)
arg2 = arg.replace(":", "").upper()
if (len(arg2) % 2) == 1:
p.print_help()
print("\n================================================================")
print("Invalid MAC filter argument '%s'\n--> must be 1-6 full bytes in 0-padded hex form (00:01:02:03:04:05)" % arg)
print("================================================================")
exit(1)
mac = []
for i in range(0, len(arg2), 2):
mac.append(int(arg2[i : i + 2], 16))
filter_mac.append(mac)
# validate any supplied UUID filters
for arg in options.uuid:
if re.search('[^a-fA-F0-9:]', arg):
p.print_help()
print("\n================================================================")
print("Invalid UUID filter argument '%s'\n--> must be 2 or 16 full bytes in 0-padded hex form (180B or 0123456789abcdef0123456789abcdef)" % arg)
print("================================================================")
exit(1)
arg2 = arg.replace(":", "").upper()
if len(arg2) != 4 and len(arg2) != 32:
p.print_help()
print("\n================================================================")
print("Invalid UUID filter argument '%s'\n--> must be 2 or 16 full bytes in 0-padded hex form (180B or 0123456789abcdef0123456789abcdef)" % arg)
print("================================================================")
exit(1)
uuid = []
for i in range(0, len(arg2), 2):
uuid.append(int(arg2[i : i + 2], 16))
filter_uuid.append(uuid)
# validate RSSI filter argument
filter_rssi = abs(int(options.rssi))
if filter_rssi > 0 and (filter_rssi < 20 or filter_rssi > 110):
p.print_help()
print("\n================================================================")
print("Invalid RSSI filter argument '%s'\n--> must be between 20 and 110" % filter_rssi)
print("================================================================")
exit(1)
# validate field output options
options.display = options.display.lower()
if re.search('[^trpsabd]', options.display):
p.print_help()
print("\n================================================================")
print("Invalid display options '%s'\n--> must be some combination of 't', 'r', 'p', 's', 'a', 'b', 'd'" % options.display)
print("================================================================")
exit(1)
if options.install :
print("================================================================")
print("Install for BLED112 Scanner for Python v%s" % __version__)
print("================================================================")
print(" ")
Print("Program is designed to use Activestate ActivePython and not regular Python from www.python.org ")
print(" ")
print(" Go to https://www.activestate.com and download and install the latest version of activepython for your operating system ")
print(" ")
print(" Once ActivePython is install in a command window shell type the follow")
print(" ")
print(" pip3 install pyserial future pandas matplotlib ")
print(" ")
exit(2)
# display scan parameter summary, if not in quiet mode
if not(options.quiet) :
print("================================================================")
print("BLED112 Scanner for Python v%s" % __version__)
print("================================================================")
#p.set_defaults(port="/dev/ttyACM0", baud=115200, interval=0xC8, window=0xC8, display="trpsabd", uuid=[], mac=[], rssi=0, active=False, quiet=False, friendly=False)
print("Serial port:\t%s" % options.port)
print("Baud rate:\t%s" % options.baud)
print("Scan interval:\t%d (%.02f ms)" % (options.interval, options.interval * 1.25))
print("Scan window:\t%d (%.02f ms)" % (options.window, options.window * 1.25))
print("Scan type:\t%s" % ['Passive', 'Active'][options.active])
print("UUID filters:\t",)
if len(filter_uuid) > 0:
print("0x%s" % ", 0x".join([''.join(['%02X' % b for b in uuid]) for uuid in filter_uuid]))
else:
print("None")
print("MAC filter(s):\t",)
if len(filter_mac) > 0:
print(", ".join([':'.join(['%02X' % b for b in mac]) for mac in filter_mac]))
else:
print("None")
print("RSSI filter:\t",)
if filter_rssi > 0:
print("-%d dBm minimum"% filter_rssi)
else:
print("None")
print("Display fields:\t-",)
field_dict = { 't':'Time', 'r':'RSSI', 'p':'Packet type', 's':'Sender MAC', 'a':'Address type', 'b':'Bond status', 'd':'Payload data' }
print("\n\t\t- ".join([field_dict[c] for c in options.display]))
print("Friendly mode:\t%s" % ['Disabled', 'Enabled'][options.friendly])
print("----------------------------------------------------------------")
print("Starting scan for BLE advertisements...")
# open serial port for BGAPI access
try:
ser = serial.Serial(port=options.port, baudrate=options.baud, timeout=1)
except serial.SerialException as e:
print("\n================================================================")
print("Port error (name='%s', baud='%ld'): %s" % (options.port, options.baud, e))
print("================================================================")
exit(2)
#=========================================================================================================
#
# Make initial communications with the BLE112 USB device and set up the comms process
#
#=========================================================================================================
# flush buffers
#print "Flushing serial I/O buffers..."
ser.flushInput()
ser.flushOutput()
# disconnect if we are connected already
#print "Disconnecting if connected..."
ble_cmd_connection_disconnect(ser, 0)
response = ser.read(7) # 7-byte response
#for b in response: print '%02X' % ord(b),
# stop advertising if we are advertising already
#print "Exiting advertising mode if advertising..."
ble_cmd_gap_set_mode(ser, 0, 0)
response = ser.read(6) # 6-byte response
#for b in response: print '%02X' % ord(b),
# stop scanning if we are scanning already
#print "Exiting scanning mode if scanning..."
ble_cmd_gap_end_procedure(ser)
response = ser.read(6) # 6-byte response
#for b in response: print '%02X' % ord(b),
# set scan parameters
#print "Setting scanning parameters..."
ble_cmd_gap_set_scan_parameters(ser, options.interval, options.window, options.active)
response = ser.read(6) # 6-byte response
#for b in response: print '%02X' % ord(b),
# start scanning now
#print "Entering scanning mode for general discoverable..."
ble_cmd_gap_discover(ser, 1)
#=========================================================================================================
#=========================================================================================================
if not(options.byte) :
    if options.quiet and options.friendly and options.csv and options.time_in_ms :
        print("\"Time_in_Milliseconds\";\"RSSI\";\"Packet_type\";\"Sender_MAC\";\"Address_type\";\"Bond_status\";\"Payload_status\"")
    if options.quiet and options.friendly and options.csv and not(options.time_in_ms) :
        print("\"Time\";\"RSSI\";\"Packet_type\";\"Sender_MAC\";\"Address_type\";\"Bond_status\";\"Payload_status\"")
else :
    if options.quiet and options.friendly and options.csv and options.time_in_ms and options.instr :
        print("\"Time_in_Milliseconds\";\"RSSI\";\"Packet_type\";\"Sender_MAC\";\"Address_type\";\"Bond_status\";\"Payload_status\";\"Selected_Byte(Dec)\"")
    if options.quiet and options.friendly and options.csv and not(options.time_in_ms) and options.instr :
        print("\"Time\";\"RSSI\";\"Packet_type\";\"Sender_MAC\";\"Address_type\";\"Bond_status\";\"Payload_status\";\"Selected_Byte(Dec)\"")
if options.instr :
instr_search = str(options.instr)
instr_search=instr_search[2:len(instr_search)] # Original "['FFFE96B6E511']" strip and remove [' '] bits
instr_search=instr_search[0:(len(instr_search)-2)]
else :
instr_search = ""
if options.byte and (len(str(options.byte)) > 4) :
byte_str=(str(options.byte))[2:len((str(options.byte)))]
byte_str=byte_str[0:(len(byte_str)-2)]
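    # The requested byte position is mapped to a character offset into the hex
    # payload string (two hex characters per byte) and clamped at zero below.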
byte_position_=abs(int(byte_str))
byte_position_=(byte_position_ -1 ) *2
if (byte_position_ < 0) : byte_position_ = 0
# print("byte to pick up from payload_status is :: " + str(byte_position_)) #Debug
else :
byte_position_ = -1
if options.instr and options.byte and options.switch : byte_switch = True
#-------------------------------------------------------------------------------------------------------------------
#
# Real time graph plotting routine setup section
#
#
#-------------------------------------------------------------------------------------------------------------------
if options.plot :
# create plot
plt.ion() # <-- work in "interactive mode"
fig, ax = plt.subplots()
fig.canvas.set_window_title('Live BLE RSSI level Chart')
ax.set_title("Primary RSSI level in dB verse time in Milliseconds")
# create an empty pandas dataframe that will store streaming data
df = pd.DataFrame()
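    # Created empty here; bgapi_parse_plot builds (x = elapsed time, y = RSSI)
    # rows for this dataframe so the chart can be updated live as packets arrive.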
if options.instr and options.byte and options.plotbyte :
# create plot
plt.ion() # <-- work in "interactive mode"
fig, bx = plt.subplots()
fig.canvas.set_window_title('Live BLE Payload data selected Byte Chart [ Byte position in Payload data = ' + byte_str + ' ] ')
bx.set_title("Selected Byte value (0-255) verse time in Milliseconds")
# create an empty pandas dataframe that will store streaming data
df_byte = pd.DataFrame()
#-------------------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------
while (1):
# catch all incoming data
# if options.quiet and options.friendly and options.plot :
# while (ser.inWaiting()): bgapi_parse_plot(ord(ser.read()));
#else:
# if options.quiet and options.friendly and options.comma :
# while (ser.inWaiting()): bgapi_parse_csv(ord(ser.read()));
# else:
# if options.quiet and options.friendly and options.csv :
# while (ser.inWaiting()): bgapi_parse_csv(ord(ser.read()));
# else:
# while (ser.inWaiting()): bgapi_parse(ord(ser.read()));
while (ser.inWaiting()): bgapi_parse_plot(ord(ser.read()));
# don't burden the CPU
time.sleep(0.0001)
# thanks to <NAME> for Python event handler code
# http://www.emptypage.jp/notes/pyevent.en.html
class BGAPIEvent(object):
def __init__(self, doc=None):
self.__doc__ = doc
def __get__(self, obj, objtype=None):
if obj is None:
return self
return BGAPIEventHandler(self, obj)
def __set__(self, obj, value):
pass
class BGAPIEventHandler(object):
def __init__(self, event, obj):
self.event = event
self.obj = obj
def _getfunctionlist(self):
"""(internal use) """
try:
eventhandler = self.obj.__eventhandler__
except AttributeError:
eventhandler = self.obj.__eventhandler__ = {}
return eventhandler.setdefault(self.event, [])
def add(self, func):
"""Add new event handler function.
Event handler function must be defined like func(sender, earg).
You can add handler also by using '+=' operator.
"""
self._getfunctionlist().append(func)
return self
def remove(self, func):
"""Remove existing event handler function.
You can remove handler also by using '-=' operator.
"""
self._getfunctionlist().remove(func)
return self
def fire(self, earg=None):
"""Fire event and call all handler functions
You can call EventHandler object itself like e(earg) instead of
e.fire(earg).
"""
for func in self._getfunctionlist():
func(self.obj, earg)
__iadd__ = add
__isub__ = remove
__call__ = fire
#==================================================================================================================================================
#==================================================================================================================================================
#==================================================================================================================================================
#=====================================================
#
# define API commands we might use for this script
#
#=====================================================
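# Each helper below writes one raw BGAPI command packet to the serial port:
# [message type, payload length, class ID, command ID, payload bytes]; e.g.
# gap_set_scan_parameters packs the interval and window as little-endian uint16s.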
def ble_cmd_system_reset(p, boot_in_dfu):
p.write(struct.pack('5B', 0, 1, 0, 0, boot_in_dfu))
def ble_cmd_connection_disconnect(p, connection):
p.write(struct.pack('5B', 0, 1, 3, 0, connection))
def ble_cmd_gap_set_mode(p, discover, connect):
p.write(struct.pack('6B', 0, 2, 6, 1, discover, connect))
def ble_cmd_gap_end_procedure(p):
p.write(struct.pack('4B', 0, 0, 6, 4))
def ble_cmd_gap_set_scan_parameters(p, scan_interval, scan_window, active):
p.write(struct.pack('<4BHHB', 0, 5, 6, 7, scan_interval, scan_window, active))
def ble_cmd_gap_discover(p, mode):
p.write(struct.pack('5B', 0, 1, 6, 2, mode))
#=====================================================
#=====================================================
# define basic BGAPI parser
bgapi_rx_buffer = []
bgapi_rx_expected_length = 0
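# The parser below reassembles BGAPI packets byte by byte: byte 0 is the message
# type octet (0x00 = response, 0x80 = event), byte 1 the payload length, and
# bytes 2-3 the class/command IDs. The expected total length is therefore the
# 4 header bytes plus the payload length from byte 1 (and the low bits of byte 0).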
def bgapi_parse_plot(b):
global bgapi_rx_buffer, bgapi_rx_expected_length, df, ax, instr_search, byte_switch, byte_position_ , df_byte, bx
data_packet=None
select_byte_data_as_str=None
    select_byte_data_as_int=None
if len(bgapi_rx_buffer) == 0 and (b == 0x00 or b == 0x80):
bgapi_rx_buffer.append(b)
elif len(bgapi_rx_buffer) == 1:
bgapi_rx_buffer.append(b)
bgapi_rx_expected_length = 4 + (bgapi_rx_buffer[0] & 0x07) + bgapi_rx_buffer[1]
elif len(bgapi_rx_buffer) > 1:
bgapi_rx_buffer.append(b)
#print '%02X: %d, %d' % (b, len(bgapi_rx_buffer), bgapi_rx_expected_length)
if bgapi_rx_expected_length > 0 and len(bgapi_rx_buffer) == bgapi_rx_expected_length:
#print '<=[ ' + ' '.join(['%02X' % b for b in bgapi_rx_buffer ]) + ' ]'
packet_type, payload_length, packet_class, packet_command = bgapi_rx_buffer[:4]
bgapi_rx_payload = bytes(bgapi_rx_buffer[4:])
if packet_type & 0x80 == 0x00: # response
bgapi_filler = 0
else: # event
if packet_class == 0x06: # gap
if packet_command == 0x00: # scan_response
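                        # gap_scan_response payload layout ('<bB6sBBB'): int8 RSSI,
                        # uint8 packet type, 6-byte sender address (little-endian),
                        # uint8 address type, uint8 bond handle, uint8 data length,
                        # followed by the raw advertisement data bytes.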
rssi, packet_type, sender, address_type, bond, data_len = struct.unpack('<bB6sBBB', bgapi_rx_payload[:11])
sender = [b for b in sender]
data_data = [b for b in bgapi_rx_payload[11:]]
display = 1
# parse all ad fields from ad packet
ad_fields = []
this_field = []
ad_flags = 0
ad_services = []
ad_local_name = []
ad_tx_power_level = 0
ad_manufacturer = []
bytes_left = 0
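                        # Advertisement data is a sequence of length-prefixed AD
                        # structures ([length][type][data...]); the loop below
                        # collects one structure at a time and inspects its type byte.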
for b in data_data:
if bytes_left == 0:
bytes_left = b
this_field = []
else:
this_field.append(b)
bytes_left = bytes_left - 1
if bytes_left == 0:
ad_fields.append(this_field)
if this_field[0] == 0x01: # flags
ad_flags = this_field[1]
if this_field[0] == 0x02 or this_field[0] == 0x03: # partial or complete list of 16-bit UUIDs
# for i in xrange((len(this_field) - 1) / 2):
for i in range(int((len(this_field) - 1) / 2)):
ad_services.append(this_field[-1 - i*2 : -3 - i*2 : -1])
if this_field[0] == 0x04 or this_field[0] == 0x05: # partial or complete list of 32-bit UUIDs
# for i in xrange((len(this_field) - 1) / 4):
for i in range(int((len(this_field) - 1) / 4)):
ad_services.append(this_field[-1 - i*4 : -5 - i*4 : -1])
if this_field[0] == 0x06 or this_field[0] == 0x07: # partial or complete list of 128-bit UUIDs
# for i in xrange((len(this_field) - 1) / 16):
for i in range(int((len(this_field) - 1) / 16)):
ad_services.append(this_field[-1 - i*16 : -17 - i*16 : -1])
if this_field[0] == 0x08 or this_field[0] == 0x09: # shortened or complete local name
ad_local_name = this_field[1:]
if this_field[0] == 0x0A: # TX power level
ad_tx_power_level = this_field[1]
# OTHER AD PACKET TYPES NOT HANDLED YET
                                    if this_field[0] == 0xFF: # manufacturer specific data
ad_manufacturer.append(this_field[1:])
if len(filter_mac) > 0:
match = 0
for mac in filter_mac:
if mac == sender[:-len(mac) - 1:-1]:
match = 1
break
if match == 0: display = 0
if display and len(filter_uuid) > 0:
if not [i for i in filter_uuid if i in ad_services]: display = 0
if display and filter_rssi > 0:
if -filter_rssi > rssi: display = 0
data_packet=""
data_packet=str(data_packet.join(['%02X' % b for b in data_data]))
# print ( " Data packet : " + data_packet + " instr search :: " + instr_search) ## debug line
select_byte_data_as_int=None
#-------------------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------
if display and len(instr_search)>0 :
if (data_packet.find(instr_search) > -1) :
#-------------------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------
if (byte_position_ > -1 ) :
select_byte_data_as_str=data_packet[ byte_position_ : ( byte_position_ +2 )]
if (all(c in string.hexdigits for c in select_byte_data_as_str)) :
#check that the selected data at byte_position is valid hex data
select_byte_data_as_int=int(select_byte_data_as_str, 16)
# print("Raw packet data :: " + data_packet) # Debug
# print("byte to pick up from payload_status is :: " + str(byte_position_) + "recovered data :: " + select_byte_data_as_str)
# print("recovered data :: " + str(select_byte_data_as_int))
if byte_switch == True :
#-------------------------------------------------------------------------------------------------------------------
# If there is conversion method to convert the selected byte to a RSSI value it should
# added here
#-------------------------------------------------------------------------------------------------------------------
rssi = select_byte_data_as_int
else :
if byte_switch == True :
byte_switch = False
t = datetime.datetime.now()
t_now= float("%ld.%03ld" % (time.mktime(t.timetuple()), t.microsecond/1000))
t_start=float("%ld.%03ld" % (time.mktime(start_time.timetuple()), start_time.microsecond/1000))
t_run=t_now-t_start
disp_list = []
for c in options.display:
if c == 't':
if options.time_in_ms:
disp_list.append("%f" % t_run )
if not(options.time_in_ms):
disp_list.append("%ld.%03ld" % (time.mktime(t.timetuple()), t.microsecond/1000) )
elif c == 'r':
disp_list.append("%d" % rssi)
elif c == 'p':
disp_list.append("%d" % packet_type)
elif c == 's':
disp_list.append("%s" % ''.join(['%02X' % b for b in sender[::-1]]))
elif c == 'a':
disp_list.append("%d" % address_type)
elif c == 'b':
disp_list.append("%d" % bond)
elif c == 'd':
disp_list.append("%s" % ''.join(['%02X' % b for b in data_data]))
#disp_list.append("%s" % ''.join([data_packet]))
if options.csv :
if options.byte :
op_str= "\"" + remove('\";\"'.join(disp_list)) + "\"" + ';\"' + str(select_byte_data_as_int) + "\""
else :
op_str= "\"" + remove('\";\"'.join(disp_list)) + "\""
print(op_str)
else:
if (options.comma or not(options.comma)) and not(options.csv):
if options.byte :
op_str= " " + remove(', '.join(disp_list)) + "," + str(select_byte_data_as_int)
else :
op_str= " " + remove(', '.join(disp_list)) + ""
print(op_str)
# print "gap_scan_response: rssi: %d, packet_type: %d, sender: %s, address_type: %d, bond: %d, data_len: %d" % \
# (rssi, packet_type, ':'.join(['%02X' % ord(b) for b in sender[::-1]]), address_type, bond, data_len) #
#-------------------------------------------------------------------------------------------------------------------
#
# Real time graph plotting routine
#
#
#-------------------------------------------------------------------------------------------------------------------
# receive python object
if options.plot and options.time_in_ms :
# print ("Raw data :: " , t_run, rssi )
#print(op_str)
row = | pd.DataFrame({'x':[t_run] ,'y':[rssi]}) | pandas.DataFrame |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from google.cloud import bigquery
# %reload_ext google.cloud.bigquery
client = bigquery.Client()
# %load_ext google.cloud.bigquery
# +
from notebooks import parameters
DATASET = parameters.LATEST_DATASET
LOOKUP_TABLES = parameters.LOOKUP_TABLES
print(f"Dataset to use: {DATASET}")
print(f"Lookup tables: {LOOKUP_TABLES}")
# +
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import matplotlib.pyplot as plt
import os
plt.style.use('ggplot')
pd.options.display.max_rows = 999
pd.options.display.max_columns = 999
pd.options.display.max_colwidth = 999
def cstr(s, color='black'):
return "<text style=color:{}>{}</text>".format(color, s)
# -
cwd = os.getcwd()
cwd = str(cwd)
print("Current working directory is: {cwd}".format(cwd=cwd))
# ### Get the list of HPO IDs
#
# ### NOTE: This assumes that all of the relevant HPOs have a person table.
hpo_id_query = f"""
SELECT REPLACE(table_id, '_person', '') AS src_hpo_id
FROM
`{DATASET}.__TABLES__`
WHERE table_id LIKE '%person'
AND table_id
NOT LIKE '%unioned_ehr_%'
AND table_id NOT LIKE '\\\_%'
"""
site_df = | pd.io.gbq.read_gbq(hpo_id_query, dialect='standard') | pandas.io.gbq.read_gbq |
import random
from datetime import datetime
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn import preprocessing
# Reading files
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
train_sample = np.random.choice(train.index.values,40000)
train = train.loc[train_sample]
# Converting date into datetime format
train['Date'] = pd.to_datetime( | pd.Series(train['Original_Quote_Date']) | pandas.Series |
import pandas as pd
from transformers_sklearn import BERTologyRegressor
if __name__ == '__main__':
## 1. preparing X,y
train_df = pd.read_csv('datasets/sts-b/train.tsv',sep='\t')
X_train = pd.concat([train_df['sentence1'],train_df['sentence2']],axis=1)
y_train = train_df['score']
dev_df = pd.read_csv('datasets/sts-b/dev.tsv', sep='\t')
X_dev = pd.concat([dev_df['sentence1'], dev_df['sentence2']], axis=1)
y_dev = dev_df['score']
test_df = | pd.read_csv('datasets/sts-b/test.tsv', sep='\t') | pandas.read_csv |
#####################################################.
# This file stores all the main functions #
#####################################################.
import glob
import sys
import os
import time
import concurrent.futures as futures
import multiprocessing as mp
from pathlib import Path
import pandas as pd
from progress.bar import IncrementalBar
from rdkit.Chem import AllChem as Chem
# from aqme.csearch import (check_charge_smi, clean_args,
# compute_confs,
# mol_from_sdf_or_mol_or_mol2, creation_of_dup_csv)
from aqme.csearch import *
from aqme.filter import geom_rules_output
from aqme.qprep import qprep, get_molecule_list, load_charge_data
# from aqme.grapher import graph
# from aqme.descp import calculate_parameters
# from aqme.nmr import calculate_boltz_and_nmr
# from aqme.energy import calculate_boltz_and_energy,calculate_avg_and_energy
# from aqme.dbstep_conf import calculate_db_parameters,calculate_boltz_and_dbstep
# from aqme.nics_conf import calculate_nics_parameters,calculate_boltz_for_nics,calculate_avg_nics
# from aqme.cclib_conf import calculate_cclib,get_avg_cclib_param,calculate_boltz_for_cclib
from aqme.cmin import mult_min
from aqme.utils import (
Logger,
move_file,
get_filenames,
creation_of_dup_csv_csearch,
creation_of_dup_csv_cmin,
read_energies,
get_name_and_charge,
)
# need to and in energy
def csearch_main(w_dir_initial, args, log_overall):
file_format = os.path.splitext(args.input)[1]
# Checks
if file_format not in SUPPORTED_INPUTS:
log_overall.write("\nx INPUT FILETYPE NOT CURRENTLY SUPPORTED!")
sys.exit()
if not os.path.exists(args.input):
log_overall.write("\nx INPUT FILE NOT FOUND!")
sys.exit()
# if large system increase stack size
# if args.STACKSIZE != '1G':
# os.environ['OMP_STACKSIZE'] = args.STACKSIZE
smi_derivatives = [".smi", ".txt", ".yaml", ".yml", ".rtf"]
Extension2inputgen = dict()
for key in smi_derivatives:
Extension2inputgen[key] = prepare_smiles_files
Extension2inputgen[".csv"] = prepare_csv_files
Extension2inputgen[".cdx"] = prepare_cdx_files
Extension2inputgen[".gjf"] = prepare_gaussian_files
Extension2inputgen[".com"] = prepare_gaussian_files
Extension2inputgen[".xyz"] = prepare_gaussian_files
Extension2inputgen[".sdf"] = prepare_sdf_files
Extension2inputgen[".mol"] = prepare_mol_files
Extension2inputgen[".mol2"] = prepare_mol_files
with futures.ProcessPoolExecutor(
max_workers=args.cpus, mp_context=mp.get_context("fork")
) as executor:
# Submit a set of asynchronous jobs
jobs = []
count_mol = 0
# Prepare the Jobs
prepare_function = Extension2inputgen[file_format]
job_inputs = prepare_function(args, w_dir_initial)
# Submit the Jobs
for job_input in job_inputs:
smi_, name_, dir_, varfile_, charge_default_, constraints_dist_, constraints_angle_, constraints_dihedral_ = job_input
job = executor.submit(
process_csearch, smi_, name_, dir_, varfile_, charge_default_, constraints_dist_, constraints_angle_, constraints_dihedral_
)
jobs.append(job)
count_mol += 1
final_dup_data = creation_of_dup_csv_csearch(args.CSEARCH)
bar = IncrementalBar("o Number of finished jobs from CSEARCH", max=count_mol)
# Process the job results (in submission order) and save the conformers.
for i, job in enumerate(jobs):
total_data = job.result()
frames = [final_dup_data, total_data]
final_dup_data = pd.concat(frames, ignore_index=True, sort=True)
bar.next()
bar.finish()
# removing temporary files
temp_files = [
"gfn2.out",
"xTB_opt.traj",
"ANI1_opt.traj",
"wbo",
"xtbrestart",
"ase.opt",
"xtb.opt",
"gfnff_topo",
]
for file in temp_files:
if os.path.exists(file):
os.remove(file)
return final_dup_data
def cmin_main(w_dir_initial, args, log_overall, dup_data):
bar = IncrementalBar("o Number of finished jobs from CMIN", max=len(dup_data))
final_dup_data = creation_of_dup_csv_cmin(args.CMIN)
for dup_data_idx in range(len(dup_data)):
# update_to_rdkit = dup_data.at[dup_data_idx,'update_to_rdkit']
name = dup_data.at[dup_data_idx, "Molecule"]
charge = dup_data.at[dup_data_idx, "Overall charge"]
if dup_data.at[dup_data_idx, "status"] != -1:
if args.CMIN == "ani":
min_suffix = "ani"
elif args.CMIN == "xtb":
min_suffix = "xtb"
if args.CSEARCH in ["rdkit", "summ", "fullmonte"]:
csearch_folder = Path(w_dir_initial).joinpath(f"CSEARCH/{args.CSEARCH}")
fullname = str(csearch_folder.joinpath(name + "_" + args.CSEARCH))
# fullname = f'{name}_{args.CSEARCH}'
# try:
total_data = mult_min(
fullname, args, min_suffix, charge, log_overall, w_dir_initial
)
# except:
# pass
frames = [final_dup_data, total_data]
final_dup_data = | pd.concat(frames, ignore_index=True, sort=True) | pandas.concat |
import numpy as np
import pytest
from pandas.compat import IS64
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize("ufunc", [np.abs, np.sign])
# np.sign emits a warning with nans, <https://github.com/numpy/numpy/issues/15127>
@pytest.mark.filterwarnings("ignore:invalid value encountered in sign")
def test_ufuncs_single(ufunc):
a = pd.array([1, 2, -3, np.nan], dtype="Float64")
result = ufunc(a)
expected = pd.array(ufunc(a.astype(float)), dtype="Float64")
tm.assert_extension_array_equal(result, expected)
s = pd.Series(a)
result = ufunc(s)
expected = pd.Series(expected)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.log, np.exp, np.sin, np.cos, np.sqrt])
def test_ufuncs_single_float(ufunc):
a = pd.array([1.0, 0.2, 3.0, np.nan], dtype="Float64")
with np.errstate(invalid="ignore"):
result = ufunc(a)
expected = pd.array(ufunc(a.astype(float)), dtype="Float64")
tm.assert_extension_array_equal(result, expected)
s = pd.Series(a)
with np.errstate(invalid="ignore"):
result = ufunc(s)
expected = pd.Series(ufunc(s.astype(float)), dtype="Float64")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.add, np.subtract])
def test_ufuncs_binary_float(ufunc):
# two FloatingArrays
a = pd.array([1, 0.2, -3, np.nan], dtype="Float64")
result = ufunc(a, a)
expected = pd.array(ufunc(a.astype(float), a.astype(float)), dtype="Float64")
tm.assert_extension_array_equal(result, expected)
# FloatingArray with numpy array
arr = np.array([1, 2, 3, 4])
result = ufunc(a, arr)
expected = pd.array(ufunc(a.astype(float), arr), dtype="Float64")
tm.assert_extension_array_equal(result, expected)
result = ufunc(arr, a)
expected = pd.array(ufunc(arr, a.astype(float)), dtype="Float64")
tm.assert_extension_array_equal(result, expected)
# FloatingArray with scalar
result = ufunc(a, 1)
expected = pd.array(ufunc(a.astype(float), 1), dtype="Float64")
tm.assert_extension_array_equal(result, expected)
result = ufunc(1, a)
expected = pd.array(ufunc(1, a.astype(float)), dtype="Float64")
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("values", [[0, 1], [0, None]])
def test_ufunc_reduce_raises(values):
arr = pd.array(values, dtype="Float64")
res = np.add.reduce(arr)
expected = arr.sum(skipna=False)
tm.assert_almost_equal(res, expected)
@pytest.mark.skipif(not IS64, reason="GH 36579: fail on 32-bit system")
@pytest.mark.parametrize(
"pandasmethname, kwargs",
[
("var", {"ddof": 0}),
("var", {"ddof": 1}),
("kurtosis", {}),
("skew", {}),
("sem", {}),
],
)
def test_stat_method(pandasmethname, kwargs):
s = pd.Series(data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, np.nan, np.nan], dtype="Float64")
pandasmeth = getattr(s, pandasmethname)
result = pandasmeth(**kwargs)
s2 = pd.Series(data=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6], dtype="float64")
pandasmeth = getattr(s2, pandasmethname)
expected = pandasmeth(**kwargs)
assert expected == result
def test_value_counts_na():
arr = pd.array([0.1, 0.2, 0.1, pd.NA], dtype="Float64")
result = arr.value_counts(dropna=False)
idx = | pd.Index([0.1, 0.2, pd.NA], dtype=arr.dtype) | pandas.Index |
""" unit testing of write_utils functions """
# pylint: disable=missing-function-docstring
import os
import pytest
import pandas
from metatlas.io import write_utils
def test_make_dir_for01(mocker):
mocker.patch("os.makedirs")
write_utils.make_dir_for("foo/bar")
os.makedirs.assert_called_with("foo", exist_ok=True) # pylint: disable=no-member
def test_make_dir_for02(mocker):
mocker.patch("os.makedirs")
write_utils.make_dir_for("bar")
assert not os.makedirs.called # pylint: disable=no-member
def test_check_existing_file01(mocker):
mocker.patch("os.path.exists", return_value=True)
with pytest.raises(FileExistsError):
write_utils.check_existing_file("exists_file.txt")
def test_check_existing_file02(mocker):
mocker.patch("os.path.exists", return_value=False)
write_utils.check_existing_file("does_not_exist_file.txt")
# Should not raise an error. No assert needed.
def test_export_dataframe01(mocker):
mocker.patch("pandas.DataFrame.to_csv")
mocker.patch("os.path.exists", return_value=False)
dataframe = pandas.DataFrame({1: [10], 2: [20]})
write_utils.export_dataframe(dataframe, "foo/bar", "test")
assert pandas.DataFrame.to_csv.called # pylint: disable=no-member
def test_raise_on_diff01(mocker):
mocker.patch("os.path.exists", return_value=False)
dataframe = pandas.DataFrame({1: [10], 2: [20]})
write_utils.raise_on_diff(dataframe, "foo/bar", "test")
# Should not raise an error. No assert needed.
def test_raise_on_diff02(mocker):
mocker.patch("os.path.exists", return_value=True)
dataframe = pandas.DataFrame({1: [10], 2: [20]})
mocker.patch("filecmp.cmp", return_value=True)
write_utils.raise_on_diff(dataframe, "foo/bar", "test")
# Should not raise an error. No assert needed.
def test_raise_on_diff03(mocker):
mocker.patch("os.path.exists", return_value=True)
mocker.patch("filecmp.cmp", return_value=False)
to_write = pandas.DataFrame({1: [10], 2: [99]})
with pytest.raises(ValueError):
write_utils.raise_on_diff(to_write, "foo/bar", "test")
def test_export_dataframe_die_on_diff01():
dataframe = pandas.DataFrame({1: [10], 2: [20]})
write_utils.export_dataframe_die_on_diff(dataframe, "foo/bar", "test")
# Should not raise an error. No assert needed.
def test_export_dataframe_die_on_diff02():
dataframe = | pandas.DataFrame({1: [10], 2: [20]}) | pandas.DataFrame |
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def mssql_url() -> str:
conn = os.environ["MSSQL_URL"]
return conn
@pytest.mark.xfail
def test_on_non_select(mssql_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
df = read_sql(mssql_url, query)
def test_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_float) as sum FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"sum": pd.Series([10.9, 5.2, -10.0], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_int) AS test_int FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"test_int": pd.Series([4, 5, 1315], dtype="Int64"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_aggregation2(mssql_url: str) -> None:
query = "select DISTINCT(test_bool) from test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation2(mssql_url: str) -> None:
query = "select MAX(test_int) as max, MIN(test_int) as min from test_table"
df = read_sql(mssql_url, query, partition_on="max", partition_num=2)
expected = pd.DataFrame(
index=range(1),
data={
"max": pd.Series([1314], dtype="Int64"),
"min": pd.Series([0], dtype="Int64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_udf(mssql_url: str) -> None:
query = (
"SELECT dbo.increment(test_int) AS test_int FROM test_table ORDER BY test_int"
)
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 3, 4, 5, 1315], dtype="Int64"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
| assert_frame_equal(df, expected, check_names=True) | pandas.testing.assert_frame_equal |
## Copyright 2015-2021 PyPSA Developers
## You can find the list of PyPSA Developers at
## https://pypsa.readthedocs.io/en/latest/developers.html
## PyPSA is released under the open source MIT License, see
## https://github.com/PyPSA/PyPSA/blob/master/LICENSE.txt
"""
Build optimisation problems from PyPSA networks without Pyomo.
Originally retrieved from nomopyomo ( -> 'no more Pyomo').
"""
__author__ = "PyPSA Developers, see https://pypsa.readthedocs.io/en/latest/developers.html"
__copyright__ = ("Copyright 2015-2021 PyPSA Developers, see https://pypsa.readthedocs.io/en/latest/developers.html, "
"MIT License")
from .pf import (_as_snapshots, get_switchable_as_dense as get_as_dense)
from .descriptors import (get_bounds_pu, get_extendable_i, get_non_extendable_i,
expand_series, nominal_attrs, additional_linkports,
Dict, get_active_assets, get_activity_mask)
from .linopt import (linexpr, write_bound, write_constraint, write_objective,
set_conref, set_varref, get_con, get_var, join_exprs,
run_and_read_highs, run_and_read_cbc, run_and_read_gurobi,
run_and_read_glpk, run_and_read_cplex, run_and_read_xpress,
define_constraints, define_variables, define_binaries,
align_with_static_component)
import pandas as pd
import numpy as np
from numpy import inf
from distutils.version import LooseVersion
pd_version = LooseVersion(pd.__version__)
agg_group_kwargs = dict(numeric_only=False) if pd_version >= "1.3" else {}
import gc, time, os, re, shutil
from tempfile import mkstemp
import logging
logger = logging.getLogger(__name__)
lookup = pd.read_csv(os.path.join(os.path.dirname(__file__), 'variables.csv'),
index_col=['component', 'variable'])
def define_nominal_for_extendable_variables(n, c, attr):
"""
Initializes variables for nominal capacities for a given component and a
given attribute.
Parameters
----------
n : pypsa.Network
c : str
network component of which the nominal capacity should be defined
attr : str
name of the variable, e.g. 'p_nom'
"""
ext_i = get_extendable_i(n, c)
if ext_i.empty: return
lower = n.df(c)[attr+'_min'][ext_i]
upper = n.df(c)[attr+'_max'][ext_i]
define_variables(n, lower, upper, c, attr)
def define_dispatch_for_extendable_and_committable_variables(n, sns, c, attr):
"""
Initializes variables for power dispatch for a given component and a
given attribute.
Parameters
----------
n : pypsa.Network
c : str
name of the network component
attr : str
name of the attribute, e.g. 'p'
"""
ext_i = get_extendable_i(n, c)
if c == 'Generator':
ext_i = ext_i.union(n.generators.query('committable').index)
if ext_i.empty:
return
active = get_activity_mask(n, c, sns)[ext_i] if n._multi_invest else None
define_variables(n, -inf, inf, c, attr, axes=[sns, ext_i], spec='ext', mask=active)
def define_dispatch_for_non_extendable_variables(n, sns, c, attr):
"""
Initializes variables for power dispatch for a given component and a
given attribute.
Parameters
----------
n : pypsa.Network
c : str
name of the network component
attr : str
name of the attribute, e.g. 'p'
"""
fix_i = get_non_extendable_i(n, c)
if c == 'Generator':
fix_i = fix_i.difference(n.generators.query('committable').index)
if fix_i.empty: return
nominal_fix = n.df(c)[nominal_attrs[c]][fix_i]
min_pu, max_pu = get_bounds_pu(n, c, sns, fix_i, attr)
lower = min_pu.mul(nominal_fix)
upper = max_pu.mul(nominal_fix)
axes = [sns, fix_i]
active = get_activity_mask(n, c, sns)[fix_i] if n._multi_invest else None
kwargs = dict(spec='non_ext', mask=active)
dispatch = define_variables(n, -inf, inf, c, attr, axes=axes, **kwargs)
dispatch = linexpr((1, dispatch))
define_constraints(n, dispatch, '>=', lower, c, 'mu_lower', **kwargs)
define_constraints(n, dispatch, '<=', upper, c, 'mu_upper', **kwargs)
def define_dispatch_for_extendable_constraints(n, sns, c, attr):
"""
Sets power dispatch constraints for extendable devices for a given
component and a given attribute.
Parameters
----------
n : pypsa.Network
c : str
name of the network component
attr : str
name of the attribute, e.g. 'p'
"""
ext_i = get_extendable_i(n, c)
if ext_i.empty: return
min_pu, max_pu = get_bounds_pu(n, c, sns, ext_i, attr)
operational_ext_v = get_var(n, c, attr)[ext_i]
nominal_v = get_var(n, c, nominal_attrs[c])[ext_i]
rhs = 0
active = get_activity_mask(n, c, sns)[ext_i] if n._multi_invest else None
kwargs = dict(spec=attr, mask=active)
lhs, *axes = linexpr((max_pu, nominal_v), (-1, operational_ext_v), return_axes=True)
define_constraints(n, lhs, '>=', rhs, c, 'mu_upper', axes=axes, **kwargs)
lhs, *axes = linexpr((min_pu, nominal_v), (-1, operational_ext_v), return_axes=True)
define_constraints(n, lhs, '<=', rhs, c, 'mu_lower', axes=axes, **kwargs)
def define_fixed_variable_constraints(n, sns, c, attr, pnl=True):
"""
Sets constraints for fixing variables of a given component and attribute
to the corresponding values in n.df(c)[attr + '_set'] if pnl is True, or
n.pnl(c)[attr + '_set']
Parameters
----------
n : pypsa.Network
c : str
name of the network component
attr : str
name of the attribute, e.g. 'p'
pnl : bool, default True
Whether variable which should be fixed is time-dependent
"""
if pnl:
if attr + '_set' not in n.pnl(c): return
fix = n.pnl(c)[attr + '_set'].loc[sns]
if fix.empty: return
if n._multi_invest:
active = get_activity_mask(n, c, sns)
fix = fix.where(active)
fix = fix.stack()
lhs = linexpr((1, get_var(n, c, attr).stack()[fix.index]),
as_pandas=False)
constraints = write_constraint(n, lhs, '=', fix).unstack().T
else:
if attr + '_set' not in n.df(c): return
fix = n.df(c)[attr + '_set'].dropna()
if fix.empty: return
lhs = linexpr((1, get_var(n, c, attr)[fix.index]), as_pandas=False)
constraints = write_constraint(n, lhs, '=', fix)
set_conref(n, constraints, c, f'mu_{attr}_set')
def define_generator_status_variables(n, sns):
c = 'Generator'
com_i = n.generators.query('committable').index
ext_i = get_extendable_i(n, c)
if not (ext_i.intersection(com_i)).empty:
logger.warning("The following generators have both investment optimisation"
f" and unit commitment:\n\n\t{', '.join((ext_i.intersection(com_i)))}\n\nCurrently PyPSA cannot "
"do both these functions, so PyPSA is choosing investment optimisation "
"for these generators.")
com_i = com_i.difference(ext_i)
if com_i.empty: return
active = get_activity_mask(n, c, sns)[com_i] if n._multi_invest else None
define_binaries(n, (sns, com_i), 'Generator', 'status', mask=active)
def define_committable_generator_constraints(n, sns):
c, attr = 'Generator', 'status'
com_i = n.df(c).query('committable and not p_nom_extendable').index
if com_i.empty: return
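    # Dispatch of a committable generator is boxed by its binary status:
    #   status * p_min_pu * p_nom <= p <= status * p_max_pu * p_nom
    # so p is forced to zero whenever the unit is off.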
nominal = n.df(c)[nominal_attrs[c]][com_i]
min_pu, max_pu = get_bounds_pu(n, c, sns, com_i, 'p')
lower = min_pu.mul(nominal)
upper = max_pu.mul(nominal)
status = get_var(n, c, attr)
p = get_var(n, c, 'p')[com_i]
lhs = linexpr((lower, status), (-1, p))
active = get_activity_mask(n, c, sns)[com_i] if n._multi_invest else None
define_constraints(n, lhs, '<=', 0, 'Generators', 'committable_lb', mask=active)
lhs = linexpr((upper, status), (-1, p))
define_constraints(n, lhs, '>=', 0, 'Generators', 'committable_ub', mask=active)
def define_ramp_limit_constraints(n, sns, c):
"""
    Defines ramp limits for a given component with a valid ramp limit.
"""
rup_i = n.df(c).query('ramp_limit_up == ramp_limit_up').index
rdown_i = n.df(c).query('ramp_limit_down == ramp_limit_down').index
if rup_i.empty & rdown_i.empty:
return
fix_i = get_non_extendable_i(n, c)
ext_i = get_extendable_i(n, c)
if "committable" in n.df(c):
com_i = n.df(c).query('committable').index.difference(ext_i)
else:
com_i = []
# Check if ramping is not at start of n.snapshots
start_i = n.snapshots.get_loc(sns[0]) - 1
pnl = n.pnl(c)
# get dispatch for either one or two ports
attr = ({'p', 'p0'} & set(pnl)).pop()
p_prev_fix = pnl[attr].iloc[start_i]
is_rolling_horizon = (sns[0] != n.snapshots[0]) and not p_prev_fix.empty
if is_rolling_horizon:
active = get_activity_mask(n, c, sns)
p = get_var(n, c, 'p')
p_prev = get_var(n, c, 'p').shift(1, fill_value=-1)
rhs_prev = pd.DataFrame(0, *p.axes)
rhs_prev.loc[sns[0]] = p_prev_fix
else:
active = get_activity_mask(n, c, sns[1:])
p = get_var(n, c, 'p').loc[sns[1:]]
p_prev = get_var(n, c, 'p').shift(1, fill_value=-1).loc[sns[1:]]
rhs_prev = pd.DataFrame(0, *p.axes)
# fix up
gens_i = rup_i.intersection(fix_i)
if not gens_i.empty:
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]))
rhs = rhs_prev[gens_i] + n.df(c).loc[gens_i].eval('ramp_limit_up * p_nom')
kwargs = dict(spec='nonext.', mask=active[gens_i])
define_constraints(n, lhs, '<=', rhs, c, 'mu_ramp_limit_up', **kwargs)
# ext up
gens_i = rup_i.intersection(ext_i)
if not gens_i.empty:
limit_pu = n.df(c)['ramp_limit_up'][gens_i]
p_nom = get_var(n, c, 'p_nom')[gens_i]
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (-limit_pu, p_nom))
rhs = rhs_prev[gens_i]
kwargs = dict(spec='ext.', mask=active[gens_i])
define_constraints(n, lhs, '<=', rhs, c, 'mu_ramp_limit_up', **kwargs)
# com up
gens_i = rup_i.intersection(com_i)
if not gens_i.empty:
limit_start = n.df(c).loc[gens_i].eval('ramp_limit_start_up * p_nom')
limit_up = n.df(c).loc[gens_i].eval('ramp_limit_up * p_nom')
status = get_var(n, c, 'status').loc[p.index, gens_i]
status_prev = get_var(n, c, 'status').shift(1, fill_value=-1).loc[p.index, gens_i]
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]),
(limit_start - limit_up, status_prev),
(- limit_start, status))
rhs = rhs_prev[gens_i]
if is_rolling_horizon:
status_prev_fix = n.pnl(c)['status'][com_i].iloc[start_i]
rhs.loc[sns[0]] += (limit_up - limit_start) * status_prev_fix
kwargs = dict(spec='com.', mask=active[gens_i])
define_constraints(n, lhs, '<=', rhs, c, 'mu_ramp_limit_up', **kwargs)
# fix down
gens_i = rdown_i.intersection(fix_i)
if not gens_i.empty:
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]))
rhs = rhs_prev[gens_i] + n.df(c).loc[gens_i].eval('-1 * ramp_limit_down * p_nom')
kwargs = dict(spec='nonext.', mask=active[gens_i])
define_constraints(n, lhs, '>=', rhs, c, 'mu_ramp_limit_down', **kwargs)
# ext down
gens_i = rdown_i.intersection(ext_i)
if not gens_i.empty:
limit_pu = n.df(c)['ramp_limit_down'][gens_i]
p_nom = get_var(n, c, 'p_nom')[gens_i]
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (limit_pu, p_nom))
rhs = rhs_prev[gens_i]
kwargs = dict(spec='ext.', mask=active[gens_i])
define_constraints(n, lhs, '>=', rhs, c, 'mu_ramp_limit_down', **kwargs)
# com down
gens_i = rdown_i.intersection(com_i)
if not gens_i.empty:
limit_shut = n.df(c).loc[gens_i].eval('ramp_limit_shut_down * p_nom')
limit_down = n.df(c).loc[gens_i].eval('ramp_limit_down * p_nom')
status = get_var(n, c, 'status').loc[p.index, gens_i]
status_prev = get_var(n, c, 'status').shift(1, fill_value=-1).loc[p.index, gens_i]
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]),
(limit_down - limit_shut, status),
(limit_shut, status_prev))
rhs = rhs_prev[gens_i]
if is_rolling_horizon:
status_prev_fix = n.pnl(c)['status'][com_i].iloc[start_i]
rhs.loc[sns[0]] += -limit_shut * status_prev_fix
kwargs = dict(spec='com.', mask=active[gens_i])
define_constraints(n, lhs, '>=', rhs, c, 'mu_ramp_limit_down', **kwargs)
def define_nominal_constraints_per_bus_carrier(n, sns):
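    # For every carrier with a 'nom_max_<carrier>' or 'nom_min_<carrier>' column
    # in n.buses, constrain the summed nominal capacity of all extendable
    # one-port components of that carrier at each bus to the given bound.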
for carrier in n.carriers.index:
for bound, sense in [("max", "<="), ("min", ">=")]:
col = f'nom_{bound}_{carrier}'
if col not in n.buses.columns: continue
rhs = n.buses[col].dropna()
lhs = pd.Series('', rhs.index)
for c, attr in nominal_attrs.items():
if c not in n.one_port_components: continue
attr = nominal_attrs[c]
if (c, attr) not in n.variables.index: continue
nominals = get_var(n, c, attr)[n.df(c).carrier == carrier]
if nominals.empty: continue
per_bus = linexpr((1, nominals)).groupby(n.df(c).bus).sum(**agg_group_kwargs)
lhs += per_bus.reindex(lhs.index, fill_value='')
if bound == 'max':
lhs = lhs[lhs != '']
rhs = rhs.reindex(lhs.index)
else:
assert (lhs != '').all(), (
f'No extendable components of carrier {carrier} on bus '
f'{list(lhs[lhs == ""].index)}')
define_constraints(n, lhs, sense, rhs, 'Bus', 'mu_' + col)
def define_nodal_balance_constraints(n, sns):
"""
Defines nodal balance constraint.
"""
def bus_injection(c, attr, groupcol='bus', sign=1):
# additional sign only necessary for branches in reverse direction
if 'sign' in n.df(c):
sign = sign * n.df(c).sign
expr = linexpr((sign, get_var(n, c, attr))).rename(columns=n.df(c)[groupcol])
# drop empty bus2, bus3 if multiline link
if c == 'Link':
expr.drop(columns='', errors='ignore', inplace=True)
return expr
# one might reduce this a bit by using n.branches and lookup
args = [['Generator', 'p'], ['Store', 'p'], ['StorageUnit', 'p_dispatch'],
['StorageUnit', 'p_store', 'bus', -1], ['Line', 's', 'bus0', -1],
['Line', 's', 'bus1', 1], ['Transformer', 's', 'bus0', -1],
['Transformer', 's', 'bus1', 1], ['Link', 'p', 'bus0', -1],
['Link', 'p', 'bus1', get_as_dense(n, 'Link', 'efficiency', sns)]]
args = [arg for arg in args if not n.df(arg[0]).empty]
if not n.links.empty:
for i in additional_linkports(n):
eff = get_as_dense(n, 'Link', f'efficiency{i}', sns)
args.append(['Link', 'p', f'bus{i}', eff])
lhs = (pd.concat([bus_injection(*arg) for arg in args], axis=1)
.groupby(axis=1, level=0)
.sum(**agg_group_kwargs)
.reindex(columns=n.buses.index, fill_value=''))
if (lhs == '').any().any():
raise ValueError("Empty LHS in nodal balance constraint.")
sense = '='
rhs = ((- get_as_dense(n, 'Load', 'p_set', sns) * n.loads.sign)
.groupby(n.loads.bus, axis=1).sum()
.reindex(columns=n.buses.index, fill_value=0))
define_constraints(n, lhs, sense, rhs, 'Bus', 'marginal_price')
def define_kirchhoff_constraints(n, sns):
"""
Defines Kirchhoff voltage constraints
"""
comps = n.passive_branch_components & set(n.variables.index.levels[0])
if len(comps) == 0: return
branch_vars = pd.concat({c:get_var(n, c, 's') for c in comps}, axis=1)
def cycle_flow(ds, sns):
if sns is None:
sns = slice(None)
ds = ds[lambda ds: ds!=0.].dropna()
vals = linexpr((ds, branch_vars.loc[sns, ds.index]), as_pandas=False)
return vals.sum(1)
constraints = []
periods = sns.unique('period') if n._multi_invest else [None]
for period in periods:
n.determine_network_topology(investment_period=period)
subconstraints = []
for sub in n.sub_networks.obj:
branches = sub.branches()
C = pd.DataFrame(sub.C.todense(), index=branches.index)
if C.empty:
continue
carrier = n.sub_networks.carrier[sub.name]
weightings = branches.x_pu_eff if carrier == 'AC' else branches.r_pu_eff
C_weighted = 1e5 * C.mul(weightings, axis=0)
cycle_sum = C_weighted.apply(cycle_flow, sns=period)
snapshots = sns if period == None else sns[sns.get_loc(period)]
cycle_sum.set_index(snapshots, inplace=True)
con = write_constraint(n, cycle_sum, '=', 0)
subconstraints.append(con)
if len(subconstraints) == 0:
continue
constraints.append(pd.concat(subconstraints, axis=1, ignore_index=True))
if constraints:
constraints = pd.concat(constraints).rename_axis(columns='cycle')
set_conref(n, constraints, 'SubNetwork', 'mu_kirchhoff_voltage_law')
def define_storage_unit_constraints(n, sns):
"""
Defines state of charge (soc) constraints for storage units. In principal
the constraints states:
previous_soc + p_store - p_dispatch + inflow - spill == soc
"""
sus_i = n.storage_units.index
if sus_i.empty: return
c = 'StorageUnit'
# spillage
has_periods = isinstance(sns, pd.MultiIndex)
active = get_activity_mask(n, c, sns)
upper = get_as_dense(n, c, 'inflow', sns).loc[:, lambda df: df.max() > 0]
spill = define_variables(n, 0, upper, 'StorageUnit', 'spill', mask=active[upper.columns])
# elapsed hours
eh = expand_series(n.snapshot_weightings.stores[sns], sus_i)
# efficiencies
eff_stand = expand_series(1-n.df(c).standing_loss, sns).T.pow(eh)
eff_dispatch = expand_series(n.df(c).efficiency_dispatch, sns).T
eff_store = expand_series(n.df(c).efficiency_store, sns).T
soc = get_var(n, c, 'state_of_charge')
if has_periods:
cyclic_i = n.df(c).query('cyclic_state_of_charge & '
'~cyclic_state_of_charge_per_period').index
cyclic_pp_i = n.df(c).query('cyclic_state_of_charge & '
'cyclic_state_of_charge_per_period').index
noncyclic_i = n.df(c).query('~cyclic_state_of_charge & '
'~state_of_charge_initial_per_period').index
noncyclic_pp_i = n.df(c).query("~cyclic_state_of_charge & "
"state_of_charge_initial_per_period").index
else:
cyclic_i = n.df(c).query('cyclic_state_of_charge').index
noncyclic_i = n.df(c).query('~cyclic_state_of_charge ').index
# cyclic constraint for whole optimization horizon
previous_soc_cyclic = soc.where(active).ffill().apply(lambda ds: np.roll(ds, 1)).ffill()
# non cyclic constraint: determine the first active snapshot
first_active_snapshot = active.cumsum()[noncyclic_i] == 1
coeff_var = [(-1, soc),
(-1/eff_dispatch * eh, get_var(n, c, 'p_dispatch')),
(eff_store * eh, get_var(n, c, 'p_store'))]
lhs, *axes = linexpr(*coeff_var, return_axes=True)
def masked_term(coeff, var, cols):
return linexpr((coeff[cols], var[cols]))\
.reindex(index=axes[0], columns=axes[1], fill_value='').values
if (c, 'spill') in n.variables.index:
lhs += masked_term(-eh, get_var(n, c, 'spill'), spill.columns)
lhs += masked_term(eff_stand, previous_soc_cyclic, cyclic_i)
lhs += masked_term(eff_stand[~first_active_snapshot],
soc.shift()[~first_active_snapshot], noncyclic_i)
    # rhs: set the initial state of charge at the beginning of the optimization horizon for non-cyclic units
rhs = -get_as_dense(n, c, 'inflow', sns).mul(eh).astype(float)
rhs[noncyclic_i] = rhs[noncyclic_i].where(~first_active_snapshot,
rhs-n.df(c).state_of_charge_initial, axis=1)
if has_periods:
# cyclic constraint for soc per period - cyclic soc within each period
previous_soc_cyclic_pp = soc.groupby(level=0).transform(lambda ds: np.roll(ds, 1))
lhs += masked_term(eff_stand, previous_soc_cyclic_pp, cyclic_pp_i)
        # set the initial energy at the beginning of each period
first_active_snapshot_pp = (
active[noncyclic_pp_i].groupby(level=0).cumsum() == 1)
lhs += masked_term(eff_stand[~first_active_snapshot_pp],
soc.shift()[~first_active_snapshot_pp],
noncyclic_pp_i)
rhs[noncyclic_pp_i] = (
rhs[noncyclic_pp_i].where(~first_active_snapshot_pp,
rhs - n.df(c).state_of_charge_initial, axis=1))
define_constraints(n, lhs, '==', rhs, c, 'mu_state_of_charge', mask=active)
def define_store_constraints(n, sns):
"""
    Defines energy balance constraints for stores. In principle this states:
previous_e - p == e
"""
stores_i = n.stores.index
if stores_i.empty: return
c = 'Store'
has_periods = isinstance(sns, pd.MultiIndex)
active = get_activity_mask(n, c, sns)
define_variables(n, -inf, inf, axes=[sns, stores_i], name=c, attr='p', mask=active)
# elapsed hours
eh = expand_series(n.snapshot_weightings.stores[sns], stores_i) #elapsed hours
eff_stand = expand_series(1-n.df(c).standing_loss, sns).T.pow(eh)
e = get_var(n, c, 'e')
if has_periods:
cyclic_i = n.df(c).query('e_cyclic & ~e_cyclic_per_period').index
cyclic_pp_i = n.df(c).query('e_cyclic & e_cyclic_per_period').index
noncyclic_i = n.df(c).query('~e_cyclic & ~e_initial_per_period').index
noncyclic_pp_i = n.df(c).query("~e_cyclic & e_initial_per_period").index
else:
cyclic_i = n.df(c).query('e_cyclic').index
noncyclic_i = n.df(c).query('~e_cyclic').index
# cyclic constraint for whole optimization horizon
previous_e_cyclic = e.where(active).ffill().apply(lambda ds: np.roll(ds, 1)).ffill()
# non cyclic constraint: determine the first active snapshot
first_active_snapshot = active.cumsum()[noncyclic_i] == 1
coeff_var = [(-eh, get_var(n, c, 'p')), (-1, e)]
lhs, *axes = linexpr(*coeff_var, return_axes=True)
def masked_term(coeff, var, cols):
return linexpr((coeff[cols], var[cols]))\
.reindex(index=sns, columns=stores_i, fill_value='').values
lhs += masked_term(eff_stand, previous_e_cyclic, cyclic_i)
lhs += masked_term(eff_stand[~first_active_snapshot],
e.shift()[~first_active_snapshot], noncyclic_i)
    # rhs: set e at the beginning of the optimization horizon for non-cyclic stores
rhs = pd.DataFrame(0., sns, stores_i)
rhs[noncyclic_i] = rhs[noncyclic_i].where(~first_active_snapshot, -n.df(c).e_initial, axis=1)
if has_periods:
        # cyclic constraint for e per period - cyclic e within each period
previous_e_cyclic_pp = e.groupby(level=0).transform(lambda ds: np.roll(ds, 1))
lhs += masked_term(eff_stand, previous_e_cyclic_pp, cyclic_pp_i)
        # set the initial energy at the beginning of each period
first_active_snapshot_pp = (
active[noncyclic_pp_i].groupby(level=0).cumsum() == 1)
lhs += masked_term(eff_stand[~first_active_snapshot_pp],
e.shift()[~first_active_snapshot_pp],
noncyclic_pp_i)
rhs[noncyclic_pp_i] = (
rhs[noncyclic_pp_i].where(~first_active_snapshot_pp, -n.df(c).e_initial, axis=1))
define_constraints(n, lhs, '==', rhs, c, 'mu_state_of_charge', mask=active)
def define_growth_limit(n, sns, c, attr):
"""Constraint new installed capacity per investment period.
Parameters
----------
n : pypsa.Network
c : str
network component of which the nominal capacity should be defined
attr : str
name of the variable, e.g. 'p_nom'
"""
if not n._multi_invest: return
ext_i = get_extendable_i(n, c)
if "carrier" not in n.df(c) or n.df(c).empty: return
with_limit = n.carriers.query("max_growth != inf").index
limit_i = n.df(c).query("carrier in @with_limit").index.intersection(ext_i)
if limit_i.empty: return
periods = sns.unique('period')
v = get_var(n, c, attr)
carriers = n.df(c).loc[limit_i, "carrier"]
caps = pd.concat({period: linexpr((1, v)).where(n.get_active_assets(c, period), '')
for period in periods}, axis=1).T[limit_i]
lhs = caps.groupby(carriers, axis=1).sum(**agg_group_kwargs)
rhs = n.carriers.max_growth[with_limit]
define_constraints(n, lhs, '<=', rhs, 'Carrier', 'growth_limit_{}'.format(c))
def define_global_constraints(n, sns):
"""
Defines global constraints for the optimization. Possible types are
1. primary_energy
        Use this to constrain the by-products of primary energy sources, such as
CO2
2. transmission_volume_expansion_limit
Use this to set a limit for line volume expansion. Possible carriers
are 'AC' and 'DC'
3. transmission_expansion_cost_limit
Use this to set a limit for line expansion costs. Possible carriers
are 'AC' and 'DC'
4. tech_capacity_expansion_limit
        Use this to set a limit for the summed capacity of a carrier (e.g.
        'onwind') for each investment period at chosen nodes. This limit
        could e.g. represent land resource/building restrictions for a
        technology in a certain region. Currently, only the capacities of
        extendable generators are subject to this limit.
"""
if n._multi_invest:
period_weighting = n.investment_period_weightings["years"]
weightings = n.snapshot_weightings.mul(period_weighting, level=0, axis=0).loc[sns]
else:
weightings = n.snapshot_weightings.loc[sns]
def get_period(n, glc, sns):
period = slice(None)
if n._multi_invest and not np.isnan(glc["investment_period"]):
period = int(glc["investment_period"])
if period not in sns.unique('period'):
logger.warning("Optimized snapshots do not contain the investment "
f"period required for global constraint `{glc.name}`.")
return period
# (1) primary_energy
glcs = n.global_constraints.query('type == "primary_energy"')
for name, glc in glcs.iterrows():
rhs = glc.constant
lhs = ''
carattr = glc.carrier_attribute
emissions = n.carriers.query(f'{carattr} != 0')[carattr]
period = get_period(n, glc, sns)
if emissions.empty: continue
# generators
gens = n.generators.query('carrier in @emissions.index')
if not gens.empty:
em_pu = gens.carrier.map(emissions)/gens.efficiency
em_pu = (weightings["generators"].to_frame('weightings') @\
em_pu.to_frame('weightings').T).loc[period]
p = get_var(n, 'Generator', 'p').loc[sns, gens.index].loc[period]
vals = linexpr((em_pu, p), as_pandas=False)
lhs += join_exprs(vals)
# storage units
sus = n.storage_units.query('carrier in @emissions.index and '
'not cyclic_state_of_charge')
sus_i = sus.index
if not sus.empty:
em_pu = sus.carrier.map(emissions)
soc = get_var(n, 'StorageUnit', 'state_of_charge').loc[sns, sus_i].loc[period]
soc = soc.where(soc!=-1).ffill().iloc[-1]
vals = linexpr((-em_pu, soc), as_pandas=False)
lhs = lhs + '\n' + join_exprs(vals)
rhs -= em_pu @ sus.state_of_charge_initial
# stores
n.stores['carrier'] = n.stores.bus.map(n.buses.carrier)
stores = n.stores.query('carrier in @emissions.index and not e_cyclic')
if not stores.empty:
em_pu = stores.carrier.map(emissions)
e = get_var(n, 'Store', 'e').loc[sns, stores.index].loc[period]
e = e.where(e!=-1).ffill().iloc[-1]
vals = linexpr((-em_pu, e), as_pandas=False)
lhs = lhs + '\n' + join_exprs(vals)
rhs -= stores.carrier.map(emissions) @ stores.e_initial
define_constraints(n, lhs, glc.sense, rhs, 'GlobalConstraint', 'mu',
axes=pd.Index([name]), spec=name)
# (2) transmission_volume_expansion_limit
glcs = n.global_constraints.query('type == '
'"transmission_volume_expansion_limit"')
substr = lambda s: re.sub(r'[\[\]\(\)]', '', s)
for name, glc in glcs.iterrows():
car = [substr(c.strip()) for c in glc.carrier_attribute.split(',')]
lhs = ''
period = get_period(n, glc, sns)
for c, attr in (('Line', 's_nom'), ('Link', 'p_nom')):
if n.df(c).empty: continue
ext_i = n.df(c).query(f'carrier in @car and {attr}_extendable').index
ext_i = ext_i[get_activity_mask(n, c, sns)[ext_i].loc[period].any()]
if ext_i.empty: continue
v = linexpr((n.df(c).length[ext_i], get_var(n, c, attr)[ext_i]),
as_pandas=False)
lhs += '\n' + join_exprs(v)
if lhs == '': continue
sense = glc.sense
rhs = glc.constant
define_constraints(n, lhs, sense, rhs, 'GlobalConstraint', 'mu',
axes=pd.Index([name]), spec=name)
# (3) transmission_expansion_cost_limit
glcs = n.global_constraints.query('type == '
'"transmission_expansion_cost_limit"')
for name, glc in glcs.iterrows():
car = [substr(c.strip()) for c in glc.carrier_attribute.split(',')]
lhs = ''
period = get_period(n, glc, sns)
for c, attr in (('Line', 's_nom'), ('Link', 'p_nom')):
ext_i = n.df(c).query(f'carrier in @car and {attr}_extendable').index
ext_i = ext_i[get_activity_mask(n, c, sns)[ext_i].loc[period].any()]
if ext_i.empty: continue
v = linexpr((n.df(c).capital_cost[ext_i], get_var(n, c, attr)[ext_i]),
as_pandas=False)
lhs += '\n' + join_exprs(v)
if lhs == '': continue
sense = glc.sense
rhs = glc.constant
define_constraints(n, lhs, sense, rhs, 'GlobalConstraint', 'mu',
axes=pd.Index([name]), spec=name)
# (4) tech_capacity_expansion_limit
# TODO: Generalize to carrier capacity expansion limit (i.e. also for stores etc.)
substr = lambda s: re.sub(r'[\[\]\(\)]', '', s)
glcs = n.global_constraints.query('type == '
'"tech_capacity_expansion_limit"')
c, attr = 'Generator', 'p_nom'
for name, glc in glcs.iterrows():
period = get_period(n, glc, sns)
car = glc["carrier_attribute"]
bus = str(glc.get('bus', "")) # in pypsa buses are always strings
ext_i = n.df(c).query("carrier == @car and p_nom_extendable").index
if bus:
ext_i = n.df(c).loc[ext_i].query("bus == @bus").index
ext_i = ext_i[get_activity_mask(n, c, sns)[ext_i].loc[period].any()]
if ext_i.empty: continue
cap_vars = get_var(n, c, attr)[ext_i]
lhs = join_exprs(linexpr((1, cap_vars)))
rhs = glc.constant
sense = glc.sense
define_constraints(n, lhs, sense, rhs, 'GlobalConstraint', 'mu',
axes=pd.Index([name]), spec=name)
def define_objective(n, sns):
"""
Defines and writes out the objective function
"""
if n._multi_invest:
period_weighting = n.investment_period_weightings.objective[sns.unique('period')]
# constant for already done investment
nom_attr = nominal_attrs.items()
constant = 0
for c, attr in nom_attr:
ext_i = get_extendable_i(n, c)
cost = n.df(c)['capital_cost'][ext_i]
if cost.empty: continue
if n._multi_invest:
active = pd.concat({period: get_active_assets(n, c, period)[ext_i]
for period in sns.unique('period')}, axis=1)
cost = active @ period_weighting * cost
constant += cost @ n.df(c)[attr][ext_i]
object_const = write_bound(n, constant, constant)
write_objective(n, linexpr((-1, object_const), as_pandas=False)[0])
n.objective_constant = constant
# marginal cost
if n._multi_invest:
weighting = n.snapshot_weightings.objective.mul(period_weighting, level=0).loc[sns]
else:
weighting = n.snapshot_weightings.objective.loc[sns]
for c, attr in lookup.query('marginal_cost').index:
cost = (get_as_dense(n, c, 'marginal_cost', sns)
.loc[:, lambda ds: (ds != 0).all()]
.mul(weighting, axis=0))
if cost.empty: continue
terms = linexpr((cost, get_var(n, c, attr).loc[sns, cost.columns]))
write_objective(n, terms)
# investment
for c, attr in nominal_attrs.items():
ext_i = get_extendable_i(n, c)
cost = n.df(c)['capital_cost'][ext_i]
if cost.empty: continue
if n._multi_invest:
active = pd.concat({period: get_active_assets(n, c, period)[ext_i]
for period in sns.unique('period')}, axis=1)
cost = active @ period_weighting * cost
caps = get_var(n, c, attr).loc[ext_i]
terms = linexpr((cost, caps))
write_objective(n, terms)
def prepare_lopf(n, snapshots=None, keep_files=False, skip_objective=False,
extra_functionality=None, solver_dir=None):
"""
Sets up the linear problem and writes it out to a lp file.
Returns
-------
Tuple (fdp, problem_fn) indicating the file descriptor and the file name of
the lp file
"""
n._xCounter, n._cCounter = 1, 1
n.vars, n.cons = Dict(), Dict()
cols = ['component', 'name', 'pnl', 'specification']
n.variables = pd.DataFrame(columns=cols).set_index(cols[:2])
    n.constraints = pd.DataFrame(columns=cols).set_index(cols[:2])
from bs4 import BeautifulSoup as bs
import codecs
import pandas as pd
from os import listdir
page_dir = "../../../data/external/player_pages/"
filelist = listdir(page_dir)
additional_player_rows_info = []
dict_list = []
test_careers = []
sgids = []
career_stats = []
def parse_player_info(soup):
player_dict = {}
#add player name and country to player dict
player_dict.update({'FullName': soup.find_all("div", "scrumPlayerName")[0].contents[0].strip()})
player_dict.update({'Country': soup.find_all("div", "scrumPlayerCountry")[0].contents[0].strip()})
#cycle through scrumPlayerDesc divs and add relevant present facts to player dict
player_info_div = soup.find_all("div", "scrumPlayerDesc")
for tag in player_info_div:
if "Full name" in tag.text:
full_name = tag.find('b').next_sibling.strip()
player_dict.update({'full_name2': full_name,
'forename': full_name.split()[0],
'surname': full_name.split()[-1]
})
elif "Born" in tag.text:
born_string = tag.find('b').next_sibling.strip().strip('?')
print(born_string, flush=True)
player_dict.update({'born': tag.find('b').next_sibling.strip()})
born_spl_comma = born_string.split(',')
born_spl_space = born_string.split(' ')
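            # Hypothetical example for the branches below: a born_string such as
            # "December 25, 1971, Sydney, Australia" yields
            #     born_spl_comma == ['December 25', ' 1971', ' Sydney', ' Australia']
            #     born_spl_space == ['December', '25,', '1971,', 'Sydney,', 'Australia']
            # so the digit checks on born_spl_space[1] and [2] select the
            # "full date and location listed" branch.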
try:
if born_spl_space[0] == "(guess:": #special case
player_dict.update({'born_where': " ".join(born_spl_space[3:])})
elif born_spl_space[0] == "date": #case for "date unknown"
pass
elif born_spl_space[0] == "circa": #change circa years to years
player_dict.update({'born_date': born_spl_space[1]})
elif born_spl_comma[0].isdigit(): # only year is listed, no location
player_dict.update({'born_date': born_spl_comma[0],
'born_where': "".join(born_spl_comma[1:])
})
elif born_spl_space[1].strip(',').isdigit() and born_spl_space[2].strip(',').isdigit(): # full date and location listed
player_dict.update({'born_date': "".join(born_spl_comma[:2]),
'born_where': ",".join(born_spl_comma[2:]).strip()
})
except:
player_dict.update({'born_date': "dateERROR",
'born_where': "whereERROR"
})
elif "Died" in tag.text:
player_dict.update({'died': tag.find('b').next_sibling.strip()})
elif "Major teams" in tag.text:
player_dict.update({'major_teams': tag.find('b').next_sibling.strip().split(", ")})
elif "Position" in tag.text:
player_dict.update({'position': tag.find('b').next_sibling.strip().split(", ")})
elif "Height" in tag.text:
player_dict.update({'height': tag.find('b').next_sibling.strip()})
elif "Weight" in tag.text:
player_dict.update({'weight': tag.find('b').next_sibling.strip()})
#find test career table and process
test_career_tab = soup.find_all(lambda tag:tag.name == "table" and "Test career" in tag.text)
if len(test_career_tab):
table_df = pd.read_html(str(test_career_tab[0]), index_col=0)[0]
test_careers.append(table_df)
#find career stats table and process
career_stat_tab = soup.find_all(lambda tag:tag.name == "table" and "Career statistics" in tag.text)
if len(career_stat_tab):
for row in career_stat_tab[0].find_all("tr", class_="data2"):
if "test debut" in row.text.lower():
player_dict.update({'test_debut': row.find("a").attrs['href'].strip('.html').split('/')[-1]})
#print(row.find("a").attrs['href'].strip('.html').split('/')[-1])
if "last test" in row.text.lower():
player_dict.update({'last_debut': row.find("a").attrs['href'].strip('.html').split('/')[-1]})
#print(row.find("a").attrs['href'].strip('.html').split('/')[-1])
if "only test" in row.text.lower():
player_dict.update({'only_test': row.find("a").attrs['href'].strip('.html').split('/')[-1]})
#print(row.find("a").attrs['href'].strip('.html').split('/')[-1])
#find url to player image and add to dictionary
player_img_info = soup.find_all("img", alt="player portrait")
if len(player_img_info):
player_dict.update({'ImgURL': player_img_info[0]['src']})
return player_dict
for file in filelist[:3000]:
sg_id = int(file.strip(".html"))
#print(f"Parsing player id {str(sg_id)}", flush=True)
sgids.append(sg_id)
page_html = codecs.open(f"{page_dir}{file}", 'r').read()
soup = bs(page_html, 'html.parser')
ele_dict = parse_player_info(soup)
ele_dict.update({'player_sgid': sg_id})
dict_list.append(ele_dict)
career_df = pd.concat(test_careers, keys=sgids)
career_df.to_csv('../../../data/external/raw_scraped_player_careers.csv')
players_df = pd.DataFrame(dict_list)
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Biota Technology.
# www.biota.com
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import division
import numpy as np
from skbio.stats import subsample_counts
import pandas as pd
from functools import partial
def validate_gibbs_input(sources, sinks=None):
'''Validate `gibbs` inputs and coerce/round to type `np.int32`.
Summary
-------
Checks if data contains `nan` or `null` values, and returns data as
type `np.int32`. If both `sources` and `sinks` are passed, columns must
match exactly (including order).
Parameters
----------
sources : pd.DataFrame
A dataframe containing count data. Must be castable to `np.int32`.
sinks : optional, pd.DataFrame or None
If not `None` a dataframe containing count data that is castable to
`np.int32`.
Returns
-------
pd.Dataframe(s)
Raises
------
ValueError
If `nan` or `null` values found in inputs.
ValueError
If any values are smaller than 0.
ValueError
If any columns of an input dataframe are non-numeric.
ValueError
If `sources` and `sinks` passed and columns are not identical.
'''
if sinks is not None:
dfs = [sources, sinks]
else:
dfs = [sources]
for df in dfs:
# Because of this bug (https://github.com/numpy/numpy/issues/6114)
# we can't use e.g. np.isreal(df.dtypes).all(). Instead we use
# applymap. Based on:
# http://stackoverflow.com/questions/21771133/finding-non-numeric-rows-in-dataframe-in-pandas
if not df.applymap(np.isreal).values.all():
raise ValueError('A dataframe contains one or more values which '
'are not numeric. Data must be exclusively '
'positive integers.')
if np.isnan(df.values).any():
raise ValueError('A dataframe has `nan` or `null` values. Data '
'must be exclusively positive integers.')
if (df.values < 0).any():
raise ValueError('A dataframe has a negative count. Data '
'must be exclusively positive integers.')
if sinks is not None:
if not (sinks.columns == sources.columns).all():
raise ValueError('Dataframes do not contain identical (and '
'identically ordered) columns. Columns must '
'match exactly.')
return (sources.astype(np.int32, copy=False),
sinks.astype(np.int32, copy=False))
else:
return sources.astype(np.int32, copy=False)
def validate_gibbs_parameters(alpha1, alpha2, beta, restarts,
draws_per_restart, burnin, delay):
'''Return `True` if params numerically acceptable. See `gibbs` for docs.'''
real_vals = [alpha1, alpha2, beta]
int_vals = [restarts, draws_per_restart, burnin, delay]
# Check everything is real.
if all(np.isreal(val) for val in real_vals + int_vals):
# Check that integer values are some type of int.
int_check = all(isinstance(val, (int, np.int32, np.int64)) for val in
int_vals)
# All integer values must be > 0.
pos_int = all(val > 0 for val in int_vals)
# All real values must be non-negative.
non_neg = all(val >= 0 for val in real_vals)
        return int_check and pos_int and non_neg
    else:  # Failed to be all numeric values.
        return False
def intersect_and_sort_samples(sample_metadata, feature_table):
'''Return input tables retaining only shared samples, row order equivalent.
Parameters
----------
sample_metadata : pd.DataFrame
Contingency table with rows, columns = samples, metadata.
feature_table : pd.DataFrame
Contingency table with rows, columns = samples, features.
Returns
-------
sample_metadata, feature_table : pd.DataFrame, pd.DataFrame
Input tables with unshared samples removed and ordered equivalently.
Raises
------
ValueError
If no shared samples are found.
'''
shared_samples = np.intersect1d(sample_metadata.index, feature_table.index)
if shared_samples.size == 0:
raise ValueError('There are no shared samples between the feature '
'table and the sample metadata. Ensure that you have '
'passed the correct files.')
elif (shared_samples.size == sample_metadata.shape[0] ==
feature_table.shape[0]):
s_metadata = sample_metadata.copy()
s_features = feature_table.copy()
else:
s_metadata = sample_metadata.loc[np.in1d(sample_metadata.index,
shared_samples), :].copy()
s_features = feature_table.loc[np.in1d(feature_table.index,
shared_samples), :].copy()
return s_metadata, s_features.loc[s_metadata.index, :]
def get_samples(sample_metadata, col, value):
'''Return samples which have `value` under `col`.'''
return sample_metadata.index[sample_metadata[col] == value].copy()
def collapse_source_data(sample_metadata, feature_table, source_samples,
category, method):
'''Collapse each set of source samples into an aggregate source.
Parameters
----------
sample_metadata : pd.DataFrame
Contingency table where rows are features and columns are metadata.
feature_table : pd.DataFrame
Contingency table where rows are features and columns are samples.
source_samples : iterable
Samples which should be considered for collapsing (i.e. are sources).
category : str
Column in `sample_metadata` which should be used to group samples.
method : str
One of the available aggregation methods in pd.DataFrame.agg (mean,
median, prod, sum, std, var).
Returns
-------
pd.DataFrame
Collapsed sample data.
Notes
-----
This function calls `validate_gibbs_input` before returning the collapsed
source table to ensure aggregation has not introduced non-integer values.
The order of the collapsed sources is determined by the sort order of their
names. For instance, in the example below, .4 comes before 3.0 so the
collapsed sources will have the 0th row as .4.
Examples
--------
>>> samples = ['sample1', 'sample2', 'sample3', 'sample4']
>>> category = 'pH'
>>> values = [3.0, 0.4, 3.0, 3.0]
>>> stable = pd.DataFrame(values, index=samples, columns = [category])
>>> stable
pH
sample1 3.0
sample2 0.4
sample3 3.0
sample4 3.0
>>> fdata = np.array([[ 10, 50, 10, 70],
[ 0, 25, 10, 5],
[ 0, 25, 10, 5],
[100, 0, 10, 5]])
>>> ftable = pd.DataFrame(fdata, index = stable.index)
>>> ftable
0 1 2 3
sample1 10 50 10 70
sample2 0 25 10 5
sample3 0 25 10 5
sample4 100 0 10 5
>>> source_samples = ['sample1', 'sample2', 'sample3']
>>> method = 'sum'
>>> csources = collapse_source_data(stable, ftable, source_samples,
category, method)
>>> csources
0 1 2 3
collapse_col
0.4 0 25 10 5
3.0 10 75 20 75
'''
sources = sample_metadata.loc[source_samples, :]
table = feature_table.loc[sources.index, :].copy()
table['collapse_col'] = sources[category]
return validate_gibbs_input(table.groupby('collapse_col').agg(method))
def subsample_dataframe(df, depth, replace=False):
'''Subsample (rarify) input dataframe without replacement.
Parameters
----------
df : pd.DataFrame
Feature table where rows are features and columns are samples.
depth : int
Number of sequences to choose per sample.
replace : bool, optional
If ``True``, subsample with replacement. If ``False`` (the default),
subsample without replacement.
Returns
-------
pd.DataFrame
Subsampled dataframe.
'''
def subsample(x):
return pd.Series(subsample_counts(x.values, n=depth, replace=replace),
index=x.index)
return df.apply(subsample, axis=1)
def generate_environment_assignments(n, num_sources):
'''Randomly assign `n` counts to one of `num_sources` environments.
Parameters
----------
n : int
Number of environment assignments to generate.
num_sources : int
Number of possible environment states (this includes the 'Unknown').
Returns
-------
seq_env_assignments : np.array
1D vector of length `n`. The ith entry is the environment assignment of
the ith feature.
envcounts : np.array
1D vector of length `num_sources`. The ith entry is the total number of
entries in `seq_env_assignments` which are equal to i.
'''
seq_env_assignments = np.random.choice(np.arange(num_sources), size=n,
replace=True)
envcounts = np.bincount(seq_env_assignments, minlength=num_sources)
return seq_env_assignments, envcounts
class ConditionalProbability(object):
def __init__(self, alpha1, alpha2, beta, source_data):
r"""Set properties used for calculating the conditional probability.
        Parameters
----------
alpha1 : float
Prior counts of each feature in the training environments.
alpha2 : float
Prior counts of each feature in the Unknown environment. Higher
values make the Unknown environment smoother and less prone to
overfitting given a training sample.
beta : float
Number of prior counts of test sequences from each feature in each
environment
source_data : np.array
Columns are features, rows are collapsed samples. The [i,j]
entry is the sum of the counts of features j in all samples which
were considered part of source i.
Attributes
----------
m_xivs : np.array
This is an exact copy of the source_data passed when the function
is initialized. It is referenced as m_xivs because m_xiv is the
[v, xi] entry of the source data. In other words, the count of the
xith feature in the vth environment.
m_vs : np.array
The row sums of self.m_xivs. This is referenced as m_v in [1]_.
V : int
Number of environments (includes both known sources and the
'unknown' source).
tau : int
Number of features.
joint_probability : np.array
The joint conditional distribution. Until the `precalculate` method
is called, this will be uniformly zero.
n : int
Number of sequences in the sink.
known_p_tv : np.array
An array giving the precomputable parts of the probability of
finding the xith taxon in the vth environment given the known
sources, aka p_tv in the R implementation. Rows are (known)
sources, columns are features, shape is (V-1, tau).
denominator_p_v : float
The denominator of the calculation for finding the probability of
a sequence being in the vth environment given the training data
(source data).
known_source_cp : np.array
All precomputable portions of the conditional probability array.
Dimensions are the same as self.known_p_tv.
Notes
-----
This class exists to calculate the conditional probability given in
reference [1]_ (with modifications based on communications with the
author). Since the calculation of the conditional probability must
occur during each pass of the Gibbs sampler, reducing the number of
computations is of paramount importance. This class precomputes
everything that is static throughout a run of the sampler to reduce the
innermost for-loop computations.
The formula used to calculate the conditional joint probability is
described in the project readme file.
The variables are named in the class, as well as its methods, in
accordance with the variable names used in [1]_.
Examples
--------
The class is written so that it will be created before being passed to
the function which handles the loops of the Gibbs sampling.
>>> cp = ConditionalProbability(alpha1 = .5, alpha2 = .001, beta = 10,
... np.array([[0, 0, 0, 100, 100, 100],
... [100, 100, 100, 0, 0, 0]]))
Once it is passed to the Gibbs sampling function, the number of
sequences in the sink becomes known, and we can update the object with
this information to allow final precomputation.
>>> cp.set_n(367)
>>> cp.precompute()
Now we can compute the 'slice' of the conditional probability depending
on the current state of the test sequences (the ones randomly assigned
and then iteratively reassigned) and which feature (the slice) the
sequence we have removed was from.
>>> xi = 2
Count of the training sequences (that are feature xi) currently
assigned to the unknown environment.
>>> m_xiV = 38
Sum of the training sequences currently assigned to the unknown
environment (over all features).
>>> m_V = 158
Counts of the test sequences in each environment at the current
iteration of the sampler.
>>> n_vnoti = np.array([10, 500, 6])
Calculating the probability slice.
>>> cp.calculate_cp_slice(xi, m_xiV, m_V, n_vnoti)
array([8.55007781e-05, 4.38234238e-01, 9.92823532e-03])
References
----------
.. [1] Knights et al. "Bayesian community-wide culture-independent
source tracking", Nature Methods 2011.
"""
self.alpha1 = alpha1
self.alpha2 = alpha2
self.beta = beta
self.m_xivs = source_data.astype(np.float64)
self.m_vs = np.expand_dims(source_data.sum(1),
axis=1).astype(np.float64)
self.V = source_data.shape[0] + 1
self.tau = source_data.shape[1]
# Create the joint probability vector which will be overwritten each
# time self.calculate_cp_slice is called.
self.joint_probability = np.zeros(self.V, dtype=np.float64)
def set_n(self, n):
"""Set the sum of the sink."""
self.n = n
def precalculate(self):
"""Precompute all static quantities of the probability matrix."""
# Known source.
self.known_p_tv = (self.m_xivs + self.alpha1) / \
(self.m_vs + self.tau * self.alpha1)
self.denominator_p_v = self.n - 1 + (self.beta * self.V)
# We are going to be accessing columns of this array in the innermost
# loop of the Gibbs sampler. By forcing this array into 'F' order -
# 'Fortran-contiguous' - we've set it so that accessing column slices
# is faster. Tests indicate about 2X speed up in this operation from
# 'F' order as opposed to the default 'C' order.
self.known_source_cp = np.array(self.known_p_tv / self.denominator_p_v,
order='F', dtype=np.float64)
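        # At this point known_source_cp[v, xi] holds the static factor
        #     (m_xiv + alpha1) / ((m_v + tau*alpha1) * (n - 1 + beta*V))
        # so the inner sampling loop only has to multiply it by the current
        # (n_vnoti + beta) term in calculate_cp_slice.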
self.alpha2_n = self.alpha2 * self.n
self.alpha2_n_tau = self.alpha2_n * self.tau
def calculate_cp_slice(self, xi, m_xiV, m_V, n_vnoti):
"""Calculate slice of the conditional probability matrix.
Parameters
----------
xi : int
Index of the column (taxon) of the conditional probability matrix
that should be calculated.
m_xiV : float
Count of the training sequences (that are taxon xi) currently
assigned to the unknown environment.
m_V : float
Sum of the training sequences currently assigned to the unknown
environment (over all taxa).
n_vnoti : np.array
Counts of the test sequences in each environment at the current
iteration of the sampler.
Returns
-------
self.joint_probability : np.array
            The joint conditional probability distribution for the current
taxon based on the current state of the sampler.
"""
# Components for known sources, i.e. indices {0,1...V-2}.
self.joint_probability[:-1] = \
self.known_source_cp[:, xi] * (n_vnoti[:-1] + self.beta)
# Component for unknown source, i.e. index V-1.
self.joint_probability[-1] = \
((m_xiV + self.alpha2_n) * (n_vnoti[-1] + self.beta)) / \
((m_V + self.alpha2_n_tau) * self.denominator_p_v)
return self.joint_probability
def gibbs_sampler(sink, cp, restarts, draws_per_restart, burnin, delay):
"""Run Gibbs Sampler to estimate feature contributions from a sink sample.
Parameters
----------
sink : np.array
        A one-dimensional array containing counts of features whose sources are
to be estimated.
cp : ConditionalProbability object
Instantiation of the class handling probability calculations.
restarts : int
Number of independent Markov chains to grow. `draws_per_restart` *
`restarts` gives the number of samplings of the mixing proportions that
will be generated.
draws_per_restart : int
Number of times to sample the state of the Markov chain for each
independent chain grown.
burnin : int
Number of passes (withdrawal and reassignment of every sequence in the
sink) that will be made before a sample (draw) will be taken. Higher
values allow more convergence towards the true distribution before
draws are taken.
delay : int >= 1
Number passes between each sampling (draw) of the Markov chain. Once
the burnin passes have been made, a sample will be taken, and
additional samples will be drawn every `delay` number of passes. This
is also known as 'thinning'. Thinning helps reduce the impact of
correlation between adjacent states of the Markov chain.
Returns
-------
final_envcounts : np.array
2D array of ints. Rows are draws, columns are sources. The [i, j] entry
        is the number of sequences from draw i that were assigned to have come
from environment j.
    final_env_assignments : np.array
        2D array of ints. Rows are draws, columns are conserved but arbitrary
        ordering. The [i, j] entry is the environment assigned to the jth
        sequence in draw i. These orderings are identical for each draw.
    final_taxon_assignments : np.array
        2D array of ints. Rows are draws, columns are conserved but arbitrary
        ordering (same ordering as `final_env_assignments`). The [i, j] entry
        is the feature (taxon) index of the jth sequence, so together with
        `final_env_assignments[i, j]` it records which feature was assigned
        to which environment in draw i.
"""
# Basic bookkeeping information we will use throughout the function.
num_sources = cp.V
num_features = cp.tau
sink = sink.astype(np.int32)
sink_sum = sink.sum()
# Calculate the number of passes that need to be conducted.
total_draws = restarts * draws_per_restart
total_passes = burnin + (draws_per_restart - 1) * delay + 1
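    # Worked example of the pass bookkeeping: with burnin=100, draws_per_restart=2
    # and delay=10 this gives total_passes = 100 + (2 - 1)*10 + 1 = 111, and draws
    # are taken at passes 101 and 111 by the condition further below.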
# Results containers.
final_envcounts = np.zeros((total_draws, num_sources), dtype=np.int32)
final_env_assignments = np.zeros((total_draws, sink_sum), dtype=np.int32)
final_taxon_assignments = np.zeros((total_draws, sink_sum), dtype=np.int32)
# Sequences from the sink will be randomly assigned a source environment
# and then reassigned based on an increasingly accurate set of
# probabilities. The order in which the sequences are selected for
# reassignment must be random to avoid a systematic bias where the
    # sequences occurring later in the taxon_sequence book-keeping vector
# receive more accurate reassignments by virtue of more updates to the
# probability model. 'order' will be shuffled each pass, but can be
# instantiated here to avoid unnecessary duplication.
order = np.arange(sink_sum, dtype=np.int32)
# Create a bookkeeping vector that keeps track of each sequence in the
# sink. Each one will be randomly assigned an environment, and then
    # reassigned based on the increasingly accurate distribution. sink[i] copies
    # of index i are placed in the `taxon_sequence` vector so that each
    # individual count can be removed and reassigned.
taxon_sequence = np.repeat(np.arange(num_features), sink).astype(np.int32)
# Update the conditional probability class now that we have the sink sum.
cp.set_n(sink_sum)
cp.precalculate()
# Several bookkeeping variables that are used within the for loops.
drawcount = 0
unknown_idx = num_sources - 1
for restart in range(restarts):
# Generate random source assignments for each sequence in the sink
# using a uniform distribution.
seq_env_assignments, envcounts = \
generate_environment_assignments(sink_sum, num_sources)
# Initially, the count of each taxon in the 'unknown' source should be
# 0.
unknown_vector = np.zeros(num_features, dtype=np.int32)
unknown_sum = 0
# If a sequence's random environmental assignment is to the 'unknown'
# environment we alter the training data to include those sequences
# in the 'unknown' source.
for e, t in zip(seq_env_assignments, taxon_sequence):
if e == unknown_idx:
unknown_vector[t] += 1
unknown_sum += 1
for rep in range(1, total_passes + 1):
# Iterate through sequences in a random order so that no
# systematic bias is introduced based on position in the taxon
# vector (i.e. taxa appearing at the end of the vector getting
# better estimates of the probability).
np.random.shuffle(order)
for seq_index in order:
e = seq_env_assignments[seq_index]
t = taxon_sequence[seq_index]
# Remove the ith sequence and update the probability
# associated with that environment.
envcounts[e] -= 1
if e == unknown_idx:
unknown_vector[t] -= 1
unknown_sum -= 1
# Calculate the new joint probability vector based on the
# removal of the ith sequence. Reassign the sequence to a new
# source environment and update counts for each environment and
# the unknown source if necessary.
# This is the fastest way I've currently found to draw from
# `jp`. By stacking (cumsum) the probability of `jp`, we can
# draw x from uniform variable in [0, total sum), and then find
# which interval that value lies in with searchsorted. Visual
# representation below
# e1 e2 e3 e4 e5 unk
# jp: | | | | | | |
# x: x
# new_e_idx == 4 (zero indexed)
# This is in contrast to the more intuitive, but much slower
# call it replaced:
                # np.random.choice(num_sources, p=jp/jp.sum())
jp = cp.calculate_cp_slice(t, unknown_vector[t], unknown_sum,
envcounts)
cs = jp.cumsum()
new_e_idx = np.searchsorted(cs, np.random.uniform(0, cs[-1]))
seq_env_assignments[seq_index] = new_e_idx
envcounts[new_e_idx] += 1
if new_e_idx == unknown_idx:
unknown_vector[t] += 1
unknown_sum += 1
if rep > burnin and ((rep - (burnin + 1)) % delay) == 0:
# Update envcounts array with the assigned envs.
final_envcounts[drawcount] = envcounts
# Assign vectors necessary for feature table reconstruction.
final_env_assignments[drawcount] = seq_env_assignments
final_taxon_assignments[drawcount] = taxon_sequence
# We've made a draw, update this index so that the next
# iteration will be placed in the correct index of results.
drawcount += 1
return (final_envcounts, final_env_assignments, final_taxon_assignments)
def gibbs(sources, sinks=None, alpha1=.001, alpha2=.1, beta=10, restarts=10,
draws_per_restart=1, burnin=100, delay=1, cluster=None,
create_feature_tables=True):
    '''Gibbs sampling API.
Notes
-----
This function exists to allow API calls to source/sink prediction and
leave-one-out (LOO) source prediction.
Input validation is done on the sources and sinks (if not None). They must
    be dataframes with integer data (or castable to such). If both
sources and sinks are provided, their columns must agree exactly.
    Input validation is done on the Gibbs parameters, to make sure they are
numerically acceptable (all must be non-negative, some must be positive
integers - see below).
Warnings
--------
This function does _not_ perform rarefaction, the user should perform
rarefaction prior to calling this function.
This function does not collapse sources or sinks, it expects each row of
the `sources` dataframe to represent a unique source, and each row of the
`sinks` dataframe to represent a unique sink.
Parameters
----------
sources : DataFrame
A dataframe containing source data (rows are sources, columns are
features). The index must be the names of the sources.
sinks : DataFrame or None
A dataframe containing sink data (rows are sinks, columns are
features). The index must be the names of the sinks. If `None`,
leave-one-out (LOO) prediction will be done.
alpha1 : float
Prior counts of each feature in the training environments. Higher
values decrease the trust in the training environments, and make
the source environment distributions over taxa smoother. A value of
0.001 indicates reasonably high trust in all source environments, even
those with few training sequences. A more conservative value would be
0.01.
alpha2 : float
Prior counts of each feature in the Unknown environment. Higher
values make the Unknown environment smoother and less prone to
overfitting given a training sample.
beta : int
Number of prior counts of test sequences from each feature in each
environment.
restarts : int
Number of independent Markov chains to grow. `draws_per_restart` *
`restarts` gives the number of samplings of the mixing proportions that
will be generated.
draws_per_restart : int
Number of times to sample the state of the Markov chain for each
independent chain grown.
burnin : int
        Number of passes (withdrawal and reassignment of every sequence in the
sink) that will be made before a sample (draw) will be taken. Higher
        values allow more convergence towards the true distribution before draws
are taken.
delay : int >= 1
Number passes between each sampling (draw) of the Markov chain. Once
the burnin passes have been made, a sample will be taken, and
additional samples will be drawn every `delay` number of passes. This
is also known as 'thinning'. Thinning helps reduce the impact of
correlation between adjacent states of the Markov chain.
cluster : ipyparallel.client.client.Client or None
An ipyparallel Client object, e.g. a started cluster.
create_feature_tables : boolean
If `True` create a feature table for each sink. The feature table
records the average count of each feature from each source for this
sink. This option can consume large amounts of memory if there are many
source, sinks, and features. If `False`, feature tables are not
created.
Returns
-------
mpm : DataFrame
Mixing proportion means. A dataframe containing the mixing proportions
(rows are sinks, columns are sources).
mps : DataFrame
Mixing proportion standard deviations. A dataframe containing the
mixing proportions standard deviations (rows are sinks, columns are
sources).
fas : list
ith item is a pd.DataFrame of the average feature assignments from each
source for the ith sink (in the same order as rows of `mpm` and `mps`).
Examples
--------
# An example of using the normal prediction.
>>> import pandas as pd
>>> import numpy as np
>>> from ipyparallel import Client
>>> import subprocess
>>> import time
>>> from sourcetracker import gibbs
# Prepare some source data.
>>> otus = np.array(['o%s' % i for i in range(50)])
>>> source1 = np.random.randint(0, 1000, size=50)
>>> source2 = np.random.randint(0, 1000, size=50)
>>> source3 = np.random.randint(0, 1000, size=50)
>>> source_df = pd.DataFrame([source1, source2, source3],
index=['source1', 'source2', 'source3'],
columns=otus, dtype=np.int32)
# Prepare some sink data.
>>> sink1 = np.ceil(.5*source1+.5*source2)
>>> sink2 = np.ceil(.5*source2+.5*source3)
>>> sink3 = np.ceil(.5*source1+.5*source3)
>>> sink4 = source1
>>> sink5 = source2
>>> sink6 = np.random.randint(0, 1000, size=50)
>>> sink_df = pd.DataFrame([sink1, sink2, sink3, sink4, sink5, sink6],
index=np.array(['sink%s' % i for i in
range(1,7)]),
columns=otus, dtype=np.int32)
    # Set parameters
>>> alpha1 = .01
>>> alpha2 = .001
>>> beta = 10
>>> restarts = 5
>>> draws_per_restart = 1
>>> burnin = 2
>>> delay = 2
# Call without a cluster
>>> mpm, mps, fas = gibbs(source_df, sink_df, alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay,
cluster=None, create_feature_tables=True)
# Start a cluster and call the function
>>> jobs = 4
>>> subprocess.Popen('ipcluster start -n %s --quiet' % jobs, shell=True)
>>> time.sleep(25)
>>> c = Client()
>>> mpm, mps, fas = gibbs(source_df, sink_df, alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay,
cluster=c, create_feature_tables=True)
# LOO prediction.
    >>> mpm, mps, fas = gibbs(source_df, None, alpha1, alpha2, beta,
restarts, draws_per_restart, burnin, delay,
cluster=c, create_feature_tables=True)
'''
if not validate_gibbs_parameters(alpha1, alpha2, beta, restarts,
draws_per_restart, burnin, delay):
raise ValueError('The supplied Gibbs parameters are not acceptable. '
'Please review the `gibbs` doc string or call the '
'help function in the CLI.')
# Validate the input source and sink data. Error if the data do not meet
# the critical assumptions or cannot be cast to the proper type.
if sinks is not None:
sources, sinks = validate_gibbs_input(sources, sinks)
else:
sources = validate_gibbs_input(sources)
# Run LOO predictions on `sources`.
if sinks is None:
def f(cp_and_sink):
# The import is here to ensure that the engines of the cluster can
# access the gibbs_sampler function.
from sourcetracker._sourcetracker import gibbs_sampler
return gibbs_sampler(cp_and_sink[1], cp_and_sink[0], restarts,
draws_per_restart, burnin, delay)
cps_and_sinks = []
for source in sources.index:
            # DataFrame.select is gone in newer pandas; boolean indexing on the
            # index is the equivalent way to drop the held-out source.
            _sources = sources.loc[sources.index != source]
cp = ConditionalProbability(alpha1, alpha2, beta, _sources.values)
sink = sources.loc[source, :].values
cps_and_sinks.append((cp, sink))
if cluster is not None:
results = cluster[:].map(f, cps_and_sinks, block=True)
else:
results = list(map(f, cps_and_sinks))
mpm, mps, fas = collate_gibbs_results([i[0] for i in results],
[i[1] for i in results],
[i[2] for i in results],
sources.index, sources.index,
sources.columns,
create_feature_tables, loo=True)
return mpm, mps, fas
# Run normal prediction on `sinks`.
else:
cp = ConditionalProbability(alpha1, alpha2, beta, sources.values)
f = partial(gibbs_sampler, cp=cp, restarts=restarts,
draws_per_restart=draws_per_restart, burnin=burnin,
delay=delay)
if cluster is not None:
results = cluster[:].map(f, sinks.values, block=True)
else:
results = list(map(f, sinks.values))
mpm, mps, fas = collate_gibbs_results([i[0] for i in results],
[i[1] for i in results],
[i[2] for i in results],
sinks.index, sources.index,
sources.columns,
create_feature_tables, loo=False)
return mpm, mps, fas
def cumulative_proportions(all_envcounts, sink_ids, source_ids):
'''Calculate contributions of each source for each sink in `sink_ids`.
Parameters
----------
all_envcounts : list
Each entry is 2D array of ints. The ith entry must correspond to the
ith sink ID. The [j, k] entry of the ith table is the count of
sequences assigned to the sink from kth environment during the jth
draw.
sink_ids : np.array
ID's of the sinks. Must be in the same order as data in
`all_envcounts`.
source_ids : np.array
ID's of the sources. Must be in the same order as the columns of the
tables in `all_envcounts`.
Returns
-------
proportions : pd.DataFrame
A dataframe of floats, containing the mixing proportions of each source
in each sink. The [i, j] entry is the contribution from the jth source
to the ith sink.
proportions_std : pd.DataFrame
A dataframe of floats, identical to `proportions` except the entries
are the standard deviation of each entry in `proportions`.
Notes
-----
This script is designed to be used by `collate_gibbs_results` after
completion of multiple `gibbs_sampler` calls (for different sinks). This
function does _not_ check that the assumptions of ordering described above
are met. It is the user's responsibility to check these if using this
function independently.
'''
num_sinks = len(sink_ids)
num_sources = len(source_ids) + 1
proportions = np.zeros((num_sinks, num_sources), dtype=np.float64)
proportions_std = np.zeros((num_sinks, num_sources), dtype=np.float64)
for i, envcounts in enumerate(all_envcounts):
proportions[i] = envcounts.sum(0) / envcounts.sum()
proportions_std[i] = (envcounts / envcounts.sum()).std(0)
cols = list(source_ids) + ['Unknown']
return (pd.DataFrame(proportions, index=sink_ids, columns=cols),
            pd.DataFrame(proportions_std, index=sink_ids, columns=cols))
# ===========================================
#
# mian Analysis Data Mining/ML Library
# @author: tbj128
#
# ===========================================
#
# Imports
#
from gene.model.otu_table import OTUTable
from gene.core.statistics import Statistics
import logging
from skbio.stats.composition import ancom
import pandas as pd
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
logger = logging.getLogger(__name__)
class DifferentialSelection(object):
def run(self, user_request):
table = OTUTable(user_request.user_id, user_request.pid)
differential_type = user_request.get_custom_attr("type")
otu_table, headers, sample_labels = table.get_table_after_filtering_and_aggregation(
user_request)
sample_ids_to_metadata_map = table.get_sample_metadata().get_sample_id_to_metadata_map(user_request.catvar)
taxonomy_map = table.get_otu_metadata().get_taxonomy_map()
if differential_type == "ANCOM":
logger.info("Running ANCOM")
return self.analyse_with_ancom(user_request, otu_table, headers, sample_labels, sample_ids_to_metadata_map, taxonomy_map)
else:
return self.analyse(user_request, otu_table, headers, sample_labels, sample_ids_to_metadata_map, taxonomy_map)
def analyse(self, user_request, base, headers, sample_labels, sample_ids_to_metadata_map, taxonomy_map):
otu_to_genus = {}
if int(user_request.level) == -1:
# We want to display a short hint for the OTU using the genus (column 5)
for header in headers:
if header in taxonomy_map and len(taxonomy_map[header]) > 5:
otu_to_genus[header] = taxonomy_map[header][5]
else:
otu_to_genus[header] = ""
pvalthreshold = float(user_request.get_custom_attr("pvalthreshold"))
catVar1 = user_request.get_custom_attr("pwVar1")
catVar2 = user_request.get_custom_attr("pwVar2")
statistical_test = user_request.get_custom_attr("type")
logger.info("Running statistical test " + statistical_test)
# Perform differential analysis between two groups
logger.info("Starting differential analysis")
otu_pvals = []
j = 0
while j < len(base[0]):
group1_arr = []
group2_arr = []
# Go through each sample for this OTU
i = 0
while i < len(base):
sample_id = sample_labels[i]
metadata_val = sample_ids_to_metadata_map[sample_id]
if metadata_val == catVar1:
group1_arr.append(float(base[i][j]))
if metadata_val == catVar2:
group2_arr.append(float(base[i][j]))
i += 1
groups_abundance = {catVar1: group1_arr, catVar2: group2_arr}
# Calculate the statistical p-value
statistics = Statistics.getTtest(groups_abundance, statistical_test)
otu_pvals.append(statistics[0]["pval"])
j += 1
otu_qvals = Statistics.getFDRCorrection(otu_pvals)
otus = []
j = 0
while j < len(base[0]):
otu_id = headers[j]
pval = otu_pvals[j]
qval = otu_qvals[j]
if float(pval) < pvalthreshold:
if int(user_request.level) == -1 and otu_id in otu_to_genus:
otus.append({"otu": otu_id, "pval": pval, "qval": qval, "hint": otu_to_genus[otu_id]})
else:
otus.append({"otu": otu_id, "pval": pval, "qval": qval})
j += 1
return {"differentials": otus}
def analyse_with_ancom(self, user_request, base, headers, sample_labels, sample_ids_to_metadata_map, taxonomy_map):
otu_to_genus = {}
if int(user_request.level) == -1:
# We want to display a short hint for the OTU using the genus (column 5)
for header in headers:
if header in taxonomy_map and len(taxonomy_map[header]) > 5:
otu_to_genus[header] = taxonomy_map[header][5]
else:
otu_to_genus[header] = ""
pvalthreshold = float(user_request.get_custom_attr("pvalthreshold"))
catVar1 = user_request.get_custom_attr("pwVar1")
catVar2 = user_request.get_custom_attr("pwVar2")
# Remove non-relevant base rows
relevant_rows = {}
new_sample_labels = []
row_groups = []
i = 0
while i < len(sample_labels):
sample_id = sample_labels[i]
if sample_ids_to_metadata_map[sample_id] == catVar1 or sample_ids_to_metadata_map[sample_id] == catVar2:
relevant_rows[i] = True
new_sample_labels.append(sample_id)
row_groups.append(sample_ids_to_metadata_map[sample_id])
i += 1
new_base = []
i = 0
while i < len(base):
if relevant_rows[i]:
new_row = []
j = 0
while j < len(base[i]):
if float(base[i][j]) > 0:
new_row.append(float(base[i][j]))
else:
# Use pseudocount as ANCOM does not accept zeros or negatives
new_row.append(0.001)
j += 1
new_base.append(new_row)
i += 1
        table = pd.DataFrame(new_base, index=new_sample_labels, columns=headers)
import pandas as pd
import pypika as pk
from dateutil.relativedelta import relativedelta
from pypika import CustomFunction as cfn
from pypika import MSSQLQuery as Query
from pypika import Table as T
from pypika import functions as fn
from smseventlog import delta, dt
from smseventlog import functions as f
from smseventlog import getlog
from smseventlog import styles as st
from smseventlog.database import db
from smseventlog.queries import QueryBase
log = getlog(__name__)
class UnitSMR(QueryBase):
"""Return all SMR values for single unit"""
def __init__(self, unit, d_rng=None, **kw):
super().__init__(**kw)
a = T('UnitSMR')
if d_rng is None:
d_upper = dt.now()
d_lower = d_upper + delta(days=-60)
d_rng = (d_lower, d_upper)
cols = ['Unit', 'DateSMR', 'SMR']
q = Query.from_(a) \
.where(a.Unit == unit) \
.where(a.DateSMR.between(d_rng[0], d_rng[1]))
f.set_self(vars())
def process_df(self, df):
"""Add index of all dates in d_rng"""
return pd.period_range(*self.d_rng, freq='d') \
.to_timestamp() \
.to_frame(name='DateSMR') \
.reset_index(drop=True) \
.assign(Unit=self.unit) \
.merge(right=df[['DateSMR', 'SMR']], on='DateSMR', how='left')[['Unit', 'DateSMR', 'SMR']]
class UnitSMRMonthly(QueryBase):
"""Return max smr per month per unit, grouped monthly"""
def __init__(self, unit=None, **kw):
super().__init__(**kw)
a, b = pk.Tables('UnitSMR', 'UnitID')
_year = cfn('YEAR', ['date'])
_month = cfn('MONTH', ['date'])
year = _year(a.DateSMR)
month = _month(a.DateSMR)
_period = fn.Concat(year, '-', month)
cols = [a.Unit, _period.as_('Period'), fn.Max(a.SMR).as_('SMR')]
q = Query.from_(a) \
.left_join(b).on_field('Unit') \
.where(a.Unit == unit) \
.groupby(a.Unit, _period)
f.set_self(vars())
def process_df(self, df):
return df \
.assign(
                Period=lambda x: pd.to_datetime(x.Period, format='%Y-%m'))
#!/usr/bin/env python3
import os
import time
import functools
import subprocess
import numpy as np
import pandas as pd
import bottleneck as bn
import tensorflow as tf
import multiprocessing as mp
from scipy.special import gamma
from multiprocessing import Pool
from tensorflow.keras.models import load_model
import deepmp.utils as ut
from deepmp.model import *
epsilon = 0.05
gamma_val = 0.8
#<NAME>
beta_a = 1
beta_b = 22
beta_c = 14.5
# Human
# beta_a = 1
# beta_b = 6.5
# beta_c = 10.43
EPSILON = np.finfo(np.float64).resolution
log_EPSILON = np.log(EPSILON)
read_names = ['chrom', 'pos', 'strand', 'pos_in_strand', 'readname', 'pred_prob',
'inferred_label']
queen_size_border = 2000
time_wait = 5
# ------------------------------------------------------------------------------
# READ PREDICTION MULTIPROCESSING
# ------------------------------------------------------------------------------
def _write_predictions_to_file(write_fp, predictions_q):
while True:
if predictions_q.empty():
time.sleep(time_wait)
continue
predictions_to_file = predictions_q.get()
try:
predictions_to_file.to_csv(
write_fp, sep='\t', mode='a', index=None, header=None
)
except:
break
def _fill_files_queue(h5s_q, h5s_files, batch_size):
for i in np.arange(0, len(h5s_files), batch_size):
h5s_q.put(h5s_files[i:(i+batch_size)])
return
def do_multiprocessing_main(h5s_q, predictions_q, errornum_q, model_type,
trained_model, kmer, err_feat):
#Obtain predictions from every h5
while not h5s_q.empty():
try:
h5s = h5s_q.get()
except Exception:
break
model = load_model_or_weights(trained_model, model_type, kmer)
predictions, error_num = do_read_calling_multiprocessing(
h5s, model_type, model, kmer, err_feat
)
errornum_q.put(error_num)
predictions_q.put(predictions)
while predictions_q.qsize() > queen_size_border:
time.sleep(time_wait)
def do_read_calling_multiprocessing(h5s, model_type, trained_model, kmer, err_feat):
predictions = pd.DataFrame()
error = 0
for h5_fp in h5s:
try:
if model_type == 'seq':
pred, inferred, data_id = seq_read_calling(
h5_fp, kmer, err_feat, trained_model, model_type
)
elif model_type == 'err':
pred, inferred, data_id = err_read_calling(
h5_fp, kmer, trained_model, model_type
)
elif model_type == 'joint':
pred, inferred, data_id = joint_read_calling(
h5_fp, kmer, trained_model, model_type
)
test = build_test_df(data_id, pred, inferred, model_type)
predictions = pd.concat([predictions, test])
except Exception:
error += 1
continue
return predictions, error
# ------------------------------------------------------------------------------
# READ PREDICTION SINGLE
# ------------------------------------------------------------------------------
def do_read_calling(test_file, model_type, trained_model, kmer, err_feat,
out_file, tmp_folder='', flag='multi'):
model = load_model_or_weights(trained_model, model_type, kmer)
if model_type == 'seq':
pred, inferred, data_id = seq_read_calling(
test_file, kmer, err_feat, model, model_type
)
elif model_type == 'err':
pred, inferred, data_id = err_read_calling(
test_file, kmer, model, model_type
)
elif model_type == 'joint':
pred, inferred, data_id = joint_read_calling(
test_file, kmer, model, model_type
)
test = build_test_df(data_id, pred, inferred, model_type)
save_test(test, out_file, tmp_folder, test_file, flag)
def seq_read_calling(test_file, kmer, err_feat, trained_model, model_type):
data_seq, labels, data_id = ut.get_data_sequence(
test_file, kmer, err_feat, get_id=True
)
pred, inferred = test_single_read(data_seq, trained_model, model_type, kmer)
return pred, inferred, data_id
def err_read_calling(test_file, kmer, trained_model, model_type):
data_err, labels, data_id = ut.get_data_errors(test_file, kmer, get_id=True)
pred, inferred = test_single_read(data_err, trained_model, model_type, kmer)
return pred, inferred, data_id
def joint_read_calling(test_file, kmer, trained_model, model_type):
data_seq, data_err, labels, data_id = ut.get_data_jm(
test_file, kmer, get_id=True
)
pred, inferred = test_single_read(
[data_seq, data_err], trained_model, model_type, kmer
)
return pred, inferred, data_id
def load_model_or_weights(model_file, model_type, kmer):
try:
return load_model(model_file)
except:
return load_model_weights(model_file, model_type, kmer)
def test_single_read(data, model, model_type, kmer):
pred = model.predict(data).flatten()
inferred = np.zeros(len(pred), dtype=int)
inferred[np.argwhere(pred >= 0.5)] = 1
return pred, inferred
def load_model_weights(trained_model, model_type, kmer):
if model_type == 'seq':
return load_sequence_weights(trained_model, kmer)
elif model_type == 'err':
return load_error_weights(trained_model, kmer)
else:
return load_joint_weights(trained_model, kmer)
def load_sequence_weights(trained_model, kmer):
model = SequenceCNN('conv', 6, 256, 4)
input_shape = (None, kmer, 9)
model.compile(loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(),
metrics=['accuracy'])
model.build(input_shape)
model.load_weights(trained_model)
return model
def load_error_weights(trained_model, kmer):
model = BCErrorCNN(3, 3, 128, 3)
model.compile(loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(),
metrics=['accuracy'])
input_shape = (None, kmer, 9)
model.build(input_shape)
model.load_weights(trained_model)
return model
def load_joint_weights(trained_model, kmer):
model = JointNN()
model.compile(loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(learning_rate=0.00125),
metrics=['accuracy'])
input_shape = ([(None, kmer, 9), (None, kmer, 9)])
model.build(input_shape)
model.load_weights(trained_model)
return model
def build_test_df(data, pred_vec, inferred_vec, model_type):
df = pd.DataFrame()
df['chrom'] = data[0].astype(str)
df['pos'] = data[2]
if model_type != 'err':
df['strand'] = data[3].astype(str)
df['pos_in_strand'] = data[4]
df['readname'] = data[1].astype(str)
df['pred_prob'] = pred_vec
df['inferred_label'] = inferred_vec
return df
def save_test(test, out_file, tmp_folder, test_file, flag):
if flag == 'multi':
tmp_file = os.path.join(
tmp_folder, test_file.rsplit('.', 1)[0].rsplit('/', 1)[1] + '.tsv'
)
test.to_csv(tmp_file, sep='\t', index=None, header=None)
else:
test.to_csv(out_file, sep='\t', index=None, header=None)
# ------------------------------------------------------------------------------
# POSITION CALLING FUNCTIONS
# ------------------------------------------------------------------------------
## beta model prediction
def beta_fct(a, b):
return gamma(a) * gamma(b) / gamma(a + b)
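# beta_fct above is the Beta function B(a, b) = Gamma(a) * Gamma(b) / Gamma(a + b).
# The two log-likelihoods below treat the per-read probabilities as draws from a
# two-component Beta mixture, with epsilon and gamma_val (defined at the top of
# the file) acting as the mixing weights for the unmodified and modified cases.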
def log_likelihood_nomod_beta(obs_reads):
return np.sum(np.log(obs_reads ** (beta_a - 1) * (1 - obs_reads) ** (beta_b - 1) \
* (1 - epsilon) / beta_fct(beta_a, beta_b) \
+ obs_reads ** (beta_c - 1) * (1 - obs_reads) ** (beta_a - 1) \
* epsilon / beta_fct(beta_c, beta_a)))
def log_likelihood_mod_beta(obs_reads):
return np.sum(np.log(obs_reads ** (beta_a - 1) * (1 - obs_reads) ** (beta_b - 1) \
* gamma_val / beta_fct(beta_a, beta_b) \
+ obs_reads ** (beta_c - 1) * (1 - obs_reads) ** (beta_a - 1) \
* (1 - gamma_val) / beta_fct(beta_c, beta_a)))
def _normalize_log_probs(probs):
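    # Log-sum-exp style normalisation: subtract the largest log-probability
    # before exponentiating so the sum cannot overflow, then clip to
    # log_EPSILON so the normalised probabilities never underflow to zero.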
max_i = bn.nanargmax(probs)
try:
exp_probs = np.exp(probs[np.arange(probs.size) != max_i] \
- probs[max_i])
except FloatingPointError:
exp_probs = np.exp(
np.clip(probs[np.arange(probs.size) != max_i] - probs[max_i],
log_EPSILON, 0)
)
probs_norm = probs - probs[max_i] - np.log1p(bn.nansum(exp_probs))
return np.exp(np.clip(probs_norm, log_EPSILON, 0))
#Assuming prior to be 0.5
def beta_stats(obs_reads, pred_beta, prob_beta_mod, prob_beta_unmod):
log_prob_pos_0 = log_likelihood_nomod_beta(obs_reads)
log_prob_pos_1 = log_likelihood_mod_beta(obs_reads)
norm_log_probs = _normalize_log_probs(
np.array([log_prob_pos_0, log_prob_pos_1])
)
prob_beta_mod.append(norm_log_probs[1])
prob_beta_unmod.append(norm_log_probs[0])
if prob_beta_mod[-1] >= prob_beta_unmod[-1]:
pred_beta.append(1)
else:
pred_beta.append(0)
return pred_beta, prob_beta_mod, prob_beta_unmod
def do_per_position_beta(df):
df['id'] = df['chrom'] + '_' + df['pos'].astype(str) + '_' + df['strand']
cov, meth_label, ids, pred_beta = [], [], [], []
prob_beta_mod, prob_beta_unmod, chromosome, pos, strand = [], [], [], [], []
meth_freq = []
for i, j in df.groupby('id'):
pred_beta, prob_beta_mod, prob_beta_unmod = beta_stats(
j['pred_prob'].values, pred_beta, prob_beta_mod, prob_beta_unmod
)
meth_freq.append(round(j['inferred_label'].sum() / j.shape[0], 5))
cov.append(len(j)); ids.append(i)
chromosome.append(i.split('_')[0])
strand.append(i.split('_')[2])
pos.append(i.split('_')[1])
preds = pd.DataFrame()
preds['chrom'] = chromosome
preds['pos'] = pos
preds['strand'] = strand
preds['id'] = ids
preds['cov'] = cov
preds['pred_beta'] = pred_beta
preds['prob_beta_mod'] = prob_beta_mod
preds['prob_beta_unmod'] = prob_beta_unmod
preds['meth_freq'] = meth_freq
return preds
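# The threshold caller below is a simpler alternative to the beta mixture above:
# a position is flagged as modified when the mean of the per-read values handed
# to pred_site_threshold reaches the chosen threshold, with no per-read
# probability model involved.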
## threshold prediction
def pred_site_threshold(inferred, pred_threshold, threshold):
if np.sum(inferred) / len(inferred) >= threshold:
pred_threshold.append(1)
else:
pred_threshold.append(0)
return pred_threshold
def do_per_position_theshold(df, threshold):
df['id'] = df['chrom'] + '_' + df['pos'].astype(str) + '_' + df['strand']
cov, meth_label, ids, pred_threshold = [], [], [], []
chromosome, pos, strand = [], [], []
meth_freq = []
for i, j in df.groupby('id'):
pred_threshold = pred_site_threshold(
j['pred_prob'].values, pred_threshold, threshold
)
meth_freq.append(round(j['inferred_label'].sum() / j.shape[0], 5))
cov.append(len(j)); ids.append(i)
strand.append(i.split('_')[2])
chromosome.append(i.split('_')[0])
pos.append(i.split('_')[1])
    preds = pd.DataFrame()
# coding: utf-8
# In[ ]:
# Add volume to list
import pandas, numpy
from scipy import spatial
from multiprocessing import Pool
'''-----'''
# Initial setup
'''-----'''
# Creating dictionary of TOP form Excel list
df = pandas.read_excel('D:\\Keyword Clustering\\Feed_Yandex.xlsx' , usecols = 'A,H:R')
dictionary = df.set_index('Feed').T.to_dict('list')
print('Dictionary len: ' + str(len(dictionary))) # test
print('--------------')
# Creating dictionary of volumes from Excel list
df = pandas.read_excel('D:\\Keyword Clustering\\Feed_Yandex.xlsx' , usecols = 'A:G')
volume_dict = df.set_index('Feed').T.to_dict('list')
print(volume_dict) # test
print('--------------')
# Creating immutable initial dictionary
ini_dictionary = dictionary.copy()
# print(ini_dictionary)
# Creating dimensions list (vector space) (not cleaned yet)
dimensions = []
for key in dictionary:
for url_list in dictionary[key]:
dimensions.append(url_list)
# Cleaning nan in dimensions
dimensions = [url for url in dimensions if url is not numpy.nan]
# Loop to clean outliers
counter = 0
outliers = [] # list of outliers
while True:
counter += 1
# print('round ' + str(counter)) # test
# Finding URLs that are repeated across collection
multiple_urls = []
for url in dimensions:
if dimensions.count(url) > 1:
multiple_urls.append(url)
else:
continue
multiple_urls = list(set(multiple_urls))
# Finding keys from dictionary that have less than 3 repeated urls
keys_to_delete = []
for key in dictionary:
i = 0
for url in multiple_urls:
if url in dictionary[key]:
i += 1
if i < 3:
# print(i) # test
# print('key is:') # test
# print(key) # test
keys_to_delete.append(key)
outliers.append(key)
# print('keys_to_delete are:') # test
# print(keys_to_delete) # test
# Removing keys from list keys_to_delete from dictionary
for key in keys_to_delete:
del dictionary[key]
# Creating dimensions list (vector space)
dimensions = []
for key in dictionary:
for url_list in dictionary[key]:
dimensions.append(url_list)
# Cleaning nan in dimensions
dimensions = [url for url in dimensions if url is not numpy.nan]
# Exiting loop if no more outliers left
if keys_to_delete == []:
# print('Outliers are:') # test
# print('--------------') # test
# print(outliers) # test
break
'''
print('Cleaned dictionary')
print(dictionary) # test
print('--------------') # test
'''
# Cleaning duplicates in dimensions
dimensions = list(set(dimensions))
'''
print('Dimensions')
print(dimensions) #test
'''
# One hot encoding (creating vectors in vector space) for keys in dictionary according to dimensions (vector space)
vectors = {}
for key in dictionary:
vector = []
for dimension in dimensions:
if dimension in dictionary[key]:
vector.append(1)
else:
vector.append(0)
vectors.update({key: vector})
'''
print('Vectors')
print(vectors) # test
'''
# Creating immutable initial vectors
ini_vectors = vectors.copy()
'''-----'''
# Functions that are needed
'''-----'''
# Calculating cosine similarities for keys vectors
def cosine_similarities(vectors):
similarities = {}
for key1 in vectors:
key_similarities = {} # creates dictionary with similarities for one key with other keys
for key2 in vectors:
if key1 != key2:
key_similarities.update({key2: 1 - spatial.distance.cosine(vectors[key1], vectors[key2])})
else:
continue
# print(key_similarities) # test
similarities.update({key1: key_similarities})
# print(similarities) # test
return similarities
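# Illustrative example (made-up data): with dimensions ['u1', 'u2', 'u3'] and
# vectors {'a': [1, 1, 0], 'b': [1, 0, 1]}, cosine_similarities returns
# {'a': {'b': 0.5}, 'b': {'a': 0.5}}, because
# 1 - spatial.distance.cosine([1, 1, 0], [1, 0, 1]) == 0.5.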
# Updating similarities with using initial cosine similarities in order not to recalculate them
def cosine_similarities_update(similarities, new_vectors):
new_similarities = {}
keys_to_delete = []
key_similarities = {}
for key in similarities:
if key in new_vectors:
new_similarities[key] = similarities[key]
if key not in new_vectors:
keys_to_delete.append(key)
for key in new_similarities:
for delete_key in keys_to_delete:
try:
del new_similarities[key][delete_key]
except:
continue
for key1 in new_vectors:
if key1 not in new_similarities:
# print(key1)
for key2 in new_vectors:
# print(key2)
if key2 == key1:
continue
key_similarities.update({key2: 1 - spatial.distance.cosine(new_vectors[key1], new_vectors[key2])})
if key1 != key2:
new_similarities[key2].update({key1: key_similarities[key2]})
new_similarities[key1] = key_similarities
break
else:
continue
# print(new_similarities)
return new_similarities
'''
The idea is to combine multiple values at the same step as long as those values do not intersect.
We find the MAX similarity across the matrix; if the two queries with MAX similarity are at or above the
similarity threshold (0.3 in the loop below), we treat them as a cluster and mark them as unclusterable at
this step. We then take the next MAX value: if it does not reuse queries from the previous step and is also
above the threshold, it becomes the next cluster; if it reuses unclusterable queries, we CONTINUE.
Once none are left, we break, recalculate the clusters, and repeat the step with the new clusters.
'''
# Finding maximum value in similarities
def max_similar(similarities):
    sim_df = pandas.DataFrame(similarities) # Converting to dataframe in order to find the max similarity value
    maximum_rows = sim_df.max()
    absolute_maximum = maximum_rows.max() # numpy scalar
    absolute_maximum = float(absolute_maximum) # converting the numpy scalar to a plain Python float (numpy.asscalar is deprecated)
    # print(absolute_maximum) # test
    return absolute_maximum
# Finding maximum indices in vectors
def max_keys(similarities, absolute_maximum):
for key1 in similarities:
for key2 in similarities[key1]:
if similarities[key1][key2] == absolute_maximum:
absolute_maximum_keys = [key1, key2]
return absolute_maximum_keys
else:
continue
# Adding new cluster to clusters (keys that passed the similarity threshold)
def add_cluster(clusters, key1, key2):
if key1 in clusters and key2 in clusters:
for index in range(len(clusters[key2])):
clusters[key1].append(clusters[key2][index])
del clusters[key2]
cluster_index = key1
new_cluster = [clusters, cluster_index]
# print('New Cluster')
# print(new_cluster) # test
return new_cluster
elif key1 in clusters:
clusters[key1].append(key2)
cluster_index = key1
new_cluster = [clusters, cluster_index]
# print('New Cluster')
# print(new_cluster) # test
return new_cluster
elif key2 in clusters:
clusters[key2].append(key1)
cluster_index = key2
new_cluster = [clusters, cluster_index]
# print('New Cluster')
# print(new_cluster) # test
return new_cluster
else:
clusters.update({key1 + ' ' + str(len(key1) + len(key2)) : [key1, key2]})
cluster_index = key1 + ' ' + str(len(key1) + len(key2))
new_cluster = [clusters, cluster_index]
# print('New Cluster')
# print(new_cluster) # test
return new_cluster
# Function that calculates centroid of vectors
def center(cluster_index):
centroid = []
for index in range(len(dimensions)):
listi = []
sumi = 0
for key in clusters[cluster_index]:
# print('key in ' + str(cluster_index) + ' ' + str(key)) # test
listi.append(ini_vectors[key][index])
for item in listi:
sumi += item
centroid.append(sumi / len(clusters[cluster_index]))
# print(centroid) # test
return centroid
# Updating vectors
def vectors_update(vectors, cluster_index, centroid, key1, key2):
del vectors[key1]
del vectors[key2]
vectors.update({cluster_index : centroid})
return vectors
'''-----'''
# Building loop with clusters creation
'''-----'''
# Creating dictionary of clusters
clusters = {}
# Iterations counter
round_counter = 1
# Giving base absolute_maximum to start loop
absolute_maximum = 0.5
# Clustering Loop
similarities = cosine_similarities(vectors) # initial cosine similarities
'''
print('Initial Similarities') # test
print(similarities) # test
'''
while True:
print('--------------')
print('Round ' + str(round_counter)) # test
loop_must_break = False
absolute_maximum = max_similar(similarities)
if absolute_maximum >= 0.3:
print(absolute_maximum) #test
# Finding keys of absolute_maximum
absolute_maximum_keys = max_keys(similarities, absolute_maximum)
# print(absolute_maximum_keys) #test
key1 = absolute_maximum_keys[0]
print('key1') #test
print(key1) #test
key2 = absolute_maximum_keys[1]
print('key2') #test
print(key2) #test
# Checking if keys are clusters already
if key1 in clusters and key2 in clusters:
print('If both in clusters')
# Calculating similarities between keys in clusters
count_similarities = []
for key_a in clusters[key1]:
for key_b in clusters[key2]:
i = 0
for url in dictionary[key_a]:
if url in dictionary[key_b]:
i += 1
count_similarities.append(i)
print('count_similarities')
print(count_similarities)
# Checking if >= 3 similarities between keys in clusters
for i in count_similarities:
if i >= 3:
continue
else:
print('Similarities < 3 for each key')
while True:
similarities_to_loop_and_strip = similarities
print('Similarities in loop before strip')
print(similarities_to_loop_and_strip)
del similarities_to_loop_and_strip[key1][key2]
del similarities_to_loop_and_strip[key2][key1]
print('Similarities in loop after strip')
print(similarities_to_loop_and_strip)
absolute_maximum = max_similar(similarities_to_loop_and_strip)
print('Absolute Maximum')
print(absolute_maximum)
if absolute_maximum >= 0.3:
# Finding keys of absolute_maximum
absolute_maximum_keys = max_keys(similarities, absolute_maximum)
# print(absolute_maximum_keys) #test
key1 = absolute_maximum_keys[0]
print('key1') #test
print(key1) #test
key2 = absolute_maximum_keys[1]
print('key2') #test
print(key2) #test
# Checking if keys are clusters already
if key1 in clusters and key2 in clusters:
print('If both in clusters')
# Calculating similarities between keys in clusters
count_similarities = []
for key_a in clusters[key1]:
for key_b in clusters[key2]:
i = 0
for url in dictionary[key_a]:
if url in dictionary[key_b]:
i += 1
count_similarities.append(i)
print('Similarities')
print(count_similarities)
# Checking if only first key is in clusters already
elif key1 in clusters:
print('If key1 in clusters')
# Calculating similarities between keys in clusters
count_similarities = []
for key_a in clusters[key1]:
i = 0
for url in dictionary[key_a]:
if url in dictionary[key2]:
i += 1
count_similarities.append(i)
print('Similarities')
print(count_similarities)
# Checking if only second key is in clusters already
elif key2 in clusters:
print('If key2 in clusters')
# Calculating similarities between keys in clusters
count_similarities = []
for key_a in clusters[key2]:
i = 0
for url in dictionary[key_a]:
if url in dictionary[key1]:
i += 1
count_similarities.append(i)
print('Similarities')
print(count_similarities)
# Checking if both keys are not in clusters
else:
print('If both not in clusters')
# Calculating similarities between keys in clusters
count_similarities = []
i = 0
for url in dictionary[key2]:
if url in dictionary[key1]:
i += 1
count_similarities.append(i)
print('Similarities')
print(count_similarities)
else:
print('Absolute Maximum < 0.3')
loop_must_break = True
break
# Checking if >= 3 similarities between keys in clusters
three_similarities = 0
for i in count_similarities:
if i < 3:
break
else:
three_similarities += 1
print('Three Similarities')
print(three_similarities)
if len(count_similarities) == three_similarities:
print('Similarities >= 3 for each key')
# Adding cluster
new_cluster = add_cluster(clusters, key1, key2)
clusters = new_cluster[0]
print('Clusters')
print(clusters) # test
cluster_index = new_cluster[1]
print('Cluster Index')
print(cluster_index) # test
# print(clusters[cluster_index]) # test
# Calculating Centroid
centroid = center(cluster_index)
# Updating vector space
new_vectors = vectors_update(vectors, cluster_index, centroid, key1, key2)
# Recalculating new similarities for updated vector space
similarities = cosine_similarities_update(similarities, new_vectors)
# print(similarities) # test
# print(vectors) # test
# print('--------------') # test
round_counter += 1
loop_must_break = True
break
else:
print('Similarities < 3 for each key')
if loop_must_break == True:
break
if loop_must_break == True:
continue
print('MAX similarity OK')
# Adding cluster
new_cluster = add_cluster(clusters, key1, key2)
clusters = new_cluster[0]
# print(clusters) # test
cluster_index = new_cluster[1]
print('Cluster Index')
print(cluster_index) # test
print('Clusters')
print(clusters) # test
# print(clusters[cluster_index]) # test
# Calculating Centroid
centroid = center(cluster_index)
# Updating vector space
new_vectors = vectors_update(vectors, cluster_index, centroid, key1, key2)
# Recalculating new similarities for updated vector space
similarities = cosine_similarities_update(similarities, new_vectors)
# print(similarities) # test
# print(vectors) # test
# print('--------------') # test
round_counter += 1
elif key1 in clusters:
print('If key1 in clusters')
# Calculating similarities between keys in clusters
count_similarities = []
for key_a in clusters[key1]:
i = 0
for url in dictionary[key_a]:
if url in dictionary[key2]:
                        i += 1
count_similarities.append(i)
print('count_similarities')
print(count_similarities)
# Checking if >= 3 similarities between keys in clusters
for i in count_similarities:
if i >= 3:
continue
else:
print('Similarities < 3 for each key')
while True:
similarities_to_loop_and_strip = similarities
print('Similarities in loop before strip')
print(similarities_to_loop_and_strip)
del similarities_to_loop_and_strip[key1][key2]
del similarities_to_loop_and_strip[key2][key1]
print('Similarities in loop after strip')
print(similarities_to_loop_and_strip)
absolute_maximum = max_similar(similarities_to_loop_and_strip)
print('Absolute Maximum')
print(absolute_maximum)
if absolute_maximum >= 0.3:
# Finding keys of absolute_maximum
absolute_maximum_keys = max_keys(similarities, absolute_maximum)
# print(absolute_maximum_keys) #test
key1 = absolute_maximum_keys[0]
print('key1') #test
print(key1) #test
key2 = absolute_maximum_keys[1]
print('key2') #test
print(key2) #test
# Checking if keys are clusters already
if key1 in clusters and key2 in clusters:
print('If both in clusters')
# Calculating similarities between keys in clusters
count_similarities = []
for key_a in clusters[key1]:
for key_b in clusters[key2]:
i = 0
for url in dictionary[key_a]:
if url in dictionary[key_b]:
i += 1
count_similarities.append(i)
print('Similarities')
print(count_similarities)
# Checking if only first key is in clusters already
elif key1 in clusters:
print('If key1 in clusters')
# Calculating similarities between keys in clusters
count_similarities = []
for key_a in clusters[key1]:
i = 0
for url in dictionary[key_a]:
if url in dictionary[key2]:
i += 1
count_similarities.append(i)
print('Similarities')
print(count_similarities)
# Checking if only second key is in clusters already
elif key2 in clusters:
print('If key2 in clusters')
# Calculating similarities between keys in clusters
count_similarities = []
for key_a in clusters[key2]:
i = 0
for url in dictionary[key_a]:
if url in dictionary[key1]:
i += 1
count_similarities.append(i)
print('Similarities')
print(count_similarities)
# Checking if both keys are not in clusters
else:
print('If both not in clusters')
# Calculating similarities between keys in clusters
count_similarities = []
i = 0
for url in dictionary[key2]:
if url in dictionary[key1]:
i += 1
count_similarities.append(i)
print('Similarities')
print(count_similarities)
else:
print('Absolute Maximum < 0.3')
loop_must_break = True
break
# Checking if >= 3 similarities between keys in clusters
three_similarities = 0
for i in count_similarities:
if i < 3:
break
else:
three_similarities += 1
print('Three Similarities')
print(three_similarities)
if len(count_similarities) == three_similarities:
print('Similarities >= 3 for each key')
# Adding cluster
new_cluster = add_cluster(clusters, key1, key2)
clusters = new_cluster[0]
print('Clusters')
print(clusters) # test
cluster_index = new_cluster[1]
print('Cluster Index')
print(cluster_index) # test
# print(clusters[cluster_index]) # test
# Calculating Centroid
centroid = center(cluster_index)
# Updating vector space
new_vectors = vectors_update(vectors, cluster_index, centroid, key1, key2)
# Recalculating new similarities for updated vector space
similarities = cosine_similarities_update(similarities, new_vectors)
# print(similarities) # test
# print(vectors) # test
# print('--------------') # test
round_counter += 1
loop_must_break = True
break
else:
print('Similarities < 3 for each key')
if loop_must_break == True:
break
if loop_must_break == True:
continue
print('MAX similarity OK')
# Adding cluster
new_cluster = add_cluster(clusters, key1, key2)
clusters = new_cluster[0]
# print(clusters) # test
cluster_index = new_cluster[1]
print('Cluster Index')
print(cluster_index) # test
print('Clusters')
print(clusters) # test
# print(clusters[cluster_index]) # test
# Calculating Centroid
centroid = center(cluster_index)
# Updating vector space
new_vectors = vectors_update(vectors, cluster_index, centroid, key1, key2)
# Recalculating new similarities for updated vector space
similarities = cosine_similarities_update(similarities, new_vectors)
# print(similarities) # test
# print(vectors) # test
# print('--------------') # test
round_counter += 1
elif key2 in clusters:
print('If key2 in clusters')
# Calculating similarities between keys in clusters
count_similarities = []
for key_a in clusters[key2]:
i = 0
for url in dictionary[key_a]:
if url in dictionary[key1]:
i += 1
count_similarities.append(i)
print('count_similarities')
print(count_similarities)
# Checking if >= 3 similarities between keys in clusters
for i in count_similarities:
if i >= 3:
continue
else:
print('Similarities < 3 for each key')
while True:
similarities_to_loop_and_strip = similarities
print('Similarities in loop before strip')
print(similarities_to_loop_and_strip)
del similarities_to_loop_and_strip[key1][key2]
del similarities_to_loop_and_strip[key2][key1]
print('Similarities in loop after strip')
print(similarities_to_loop_and_strip)
absolute_maximum = max_similar(similarities_to_loop_and_strip)
print('Absolute Maximum')
print(absolute_maximum)
if absolute_maximum >= 0.3:
# Finding keys of absolute_maximum
absolute_maximum_keys = max_keys(similarities, absolute_maximum)
# print(absolute_maximum_keys) #test
key1 = absolute_maximum_keys[0]
print('key1') #test
print(key1) #test
key2 = absolute_maximum_keys[1]
print('key2') #test
print(key2) #test
# Checking if keys are clusters already
if key1 in clusters and key2 in clusters:
print('If both in clusters')
# Calculating similarities between keys in clusters
count_similarities = []
for key_a in clusters[key1]:
for key_b in clusters[key2]:
i = 0
for url in dictionary[key_a]:
if url in dictionary[key_b]:
i += 1
count_similarities.append(i)
print('Similarities')
print(count_similarities)
# Checking if only first key is in clusters already
elif key1 in clusters:
print('If key1 in clusters')
# Calculating similarities between keys in clusters
count_similarities = []
for key_a in clusters[key1]:
i = 0
for url in dictionary[key_a]:
if url in dictionary[key2]:
i += 1
count_similarities.append(i)
print('Similarities')
print(count_similarities)
# Checking if only second key is in clusters already
elif key2 in clusters:
print('If key2 in clusters')
# Calculating similarities between keys in clusters
count_similarities = []
for key_a in clusters[key2]:
i = 0
for url in dictionary[key_a]:
if url in dictionary[key1]:
i += 1
count_similarities.append(i)
print('Similarities')
print(count_similarities)
# Checking if both keys are not in clusters
else:
print('If both not in clusters')
# Calculating similarities between keys in clusters
count_similarities = []
i = 0
for url in dictionary[key2]:
if url in dictionary[key1]:
i += 1
count_similarities.append(i)
print('Similarities')
print(count_similarities)
else:
print('Absolute Maximum < 0.3')
loop_must_break = True
break
# Checking if >= 3 similarities between keys in clusters
three_similarities = 0
for i in count_similarities:
if i < 3:
break
else:
three_similarities += 1
print('Three Similarities')
print(three_similarities)
if len(count_similarities) == three_similarities:
print('Similarities >= 3 for each key')
# Adding cluster
new_cluster = add_cluster(clusters, key1, key2)
clusters = new_cluster[0]
print('Clusters')
print(clusters) # test
cluster_index = new_cluster[1]
print('Cluster Index')
print(cluster_index) # test
# print(clusters[cluster_index]) # test
# Calculating Centroid
centroid = center(cluster_index)
# Updating vector space
new_vectors = vectors_update(vectors, cluster_index, centroid, key1, key2)
# Recalculating new similarities for updated vector space
similarities = cosine_similarities_update(similarities, new_vectors)
# print(similarities) # test
# print(vectors) # test
# print('--------------') # test
round_counter += 1
loop_must_break = True
break
else:
print('Similarities < 3 for each key')
if loop_must_break == True:
break
if loop_must_break == True:
continue
print('MAX similarity OK')
# Adding cluster
new_cluster = add_cluster(clusters, key1, key2)
clusters = new_cluster[0]
# print(clusters) # test
cluster_index = new_cluster[1]
print('Cluster Index')
print(cluster_index) # test
print('Clusters')
print(clusters) # test
# print(clusters[cluster_index]) # test
# Calculating Centroid
centroid = center(cluster_index)
# Updating vector space
new_vectors = vectors_update(vectors, cluster_index, centroid, key1, key2)
# Recalculating new similarities for updated vector space
similarities = cosine_similarities_update(similarities, new_vectors)
# print(similarities) # test
# print(vectors) # test
# print('--------------') # test
round_counter += 1
else:
print('If both not in clusters')
# Calculating similarities between keys in clusters
count_similarities = []
i = 0
for url in dictionary[key2]:
if url in dictionary[key1]:
i += 1
count_similarities.append(i)
print('count_similarities')
print(count_similarities)
# Checking if >= 3 similarities between keys in clusters
for i in count_similarities:
if i >= 3:
continue
else:
print('Similarities < 3 for each key')
while True:
similarities_to_loop_and_strip = similarities
print('Similarities in loop before strip')
print(similarities_to_loop_and_strip)
del similarities_to_loop_and_strip[key1][key2]
del similarities_to_loop_and_strip[key2][key1]
print('Similarities in loop after strip')
print(similarities_to_loop_and_strip)
absolute_maximum = max_similar(similarities_to_loop_and_strip)
print('Absolute Maximum')
print(absolute_maximum)
if absolute_maximum >= 0.3:
# Finding keys of absolute_maximum
absolute_maximum_keys = max_keys(similarities, absolute_maximum)
# print(absolute_maximum_keys) #test
key1 = absolute_maximum_keys[0]
print('key1') #test
print(key1) #test
key2 = absolute_maximum_keys[1]
print('key2') #test
print(key2) #test
# Checking if keys are clusters already
if key1 in clusters and key2 in clusters:
print('If both in clusters')
# Calculating similarities between keys in clusters
count_similarities = []
for key_a in clusters[key1]:
for key_b in clusters[key2]:
i = 0
for url in dictionary[key_a]:
if url in dictionary[key_b]:
i += 1
count_similarities.append(i)
print('Similarities')
print(count_similarities)
# Checking if only first key is in clusters already
elif key1 in clusters:
print('If key1 in clusters')
# Calculating similarities between keys in clusters
count_similarities = []
for key_a in clusters[key1]:
i = 0
for url in dictionary[key_a]:
if url in dictionary[key2]:
i += 1
count_similarities.append(i)
print('Similarities')
print(count_similarities)
# Checking if only second key is in clusters already
elif key2 in clusters:
print('If key2 in clusters')
# Calculating similarities between keys in clusters
count_similarities = []
for key_a in clusters[key2]:
i = 0
for url in dictionary[key_a]:
if url in dictionary[key1]:
i += 1
count_similarities.append(i)
print('Similarities')
print(count_similarities)
# Checking if both keys are not in clusters
else:
print('If both not in clusters')
# Calculating similarities between keys in clusters
count_similarities = []
i = 0
for url in dictionary[key2]:
if url in dictionary[key1]:
i += 1
count_similarities.append(i)
print('Similarities')
print(count_similarities)
else:
print('Absolute Maximum < 0.3')
loop_must_break = True
break
# Checking if >= 3 similarities between keys in clusters
three_similarities = 0
for i in count_similarities:
if i < 3:
break
else:
three_similarities += 1
print('Three Similarities')
print(three_similarities)
if len(count_similarities) == three_similarities:
print('Similarities >= 3 for each key')
# Adding cluster
new_cluster = add_cluster(clusters, key1, key2)
clusters = new_cluster[0]
print('Clusters')
print(clusters) # test
cluster_index = new_cluster[1]
print('Cluster Index')
print(cluster_index) # test
# print(clusters[cluster_index]) # test
# Calculating Centroid
centroid = center(cluster_index)
# Updating vector space
new_vectors = vectors_update(vectors, cluster_index, centroid, key1, key2)
# Recalculating new similarities for updated vector space
similarities = cosine_similarities_update(similarities, new_vectors)
# print(similarities) # test
# print(vectors) # test
# print('--------------') # test
round_counter += 1
loop_must_break = True
break
else:
print('Similarities < 3 for each key')
if loop_must_break == True:
break
if loop_must_break == True:
continue
print('MAX similarity OK')
# Adding cluster
new_cluster = add_cluster(clusters, key1, key2)
clusters = new_cluster[0]
# print(clusters) # test
cluster_index = new_cluster[1]
print('Cluster Index')
print(cluster_index) # test
print('Clusters')
print(clusters) # test
# print(clusters[cluster_index]) # test
# Calculating Centroid
centroid = center(cluster_index)
# Updating vector space
new_vectors = vectors_update(vectors, cluster_index, centroid, key1, key2)
# Recalculating new similarities for updated vector space
similarities = cosine_similarities_update(similarities, new_vectors)
# print(similarities) # test
# print(vectors) # test
# print('--------------') # test
round_counter += 1
else:
print('Clustering Done!')
break
# print(clusters) # test
'''-----'''
# Writing results to new list in Excel
'''-----'''
# Creating new dictionary that will combine ini_dictionary and clusters
cluster_dictionary = {}
for cluster in clusters:
keys_dictionary = {}
for key in clusters[cluster]:
keys_dictionary.update({key : ini_dictionary[key]})
cluster_dictionary.update({cluster : keys_dictionary})
print(cluster_dictionary)
# Creating dictionary of unclustered queries
clustered_queries_nested = list(clusters.values())
clustered_queries = []
# print(clustered_queries_nested) # test
for i in clustered_queries_nested:
for query in i:
clustered_queries.append(query)
# print(clustered_queries) # test
unclustered_dictionary = {query : top for query, top in ini_dictionary.items() if query not in clustered_queries and query not in outliers}
# print('--------------') # test
# print(unclustered_dictionary.keys()) # test
# Creating dictionary of outliers
outliers_dictionary = {query : top for query, top in ini_dictionary.items() if query in outliers}
# print('--------------') # test
# print(outliers_dictionary.keys()) # test
# Writing clusters from cluster_dictionary as dataframes into excel
row = 0
writer = pandas.ExcelWriter('D:\\Keyword Clustering\\Clusters_Yandex.xlsx', engine = 'xlsxwriter')
"""数据处理器"""
import datetime
import socket
import QUANTAXIS as QA
import pandas as pd
from QUANTAXIS.QAFetch.QAQuery import QA_fetch_stock_to_market_date
from QUANTAXIS.QAFetch.QAQuery_Advance import QA_fetch_stock_block_adv
from QUANTAXIS.QAUtil import QA_util_datetime_to_strdate as datetostr
from LSTM_for_Stock import indicators
class Wrapper(object):
"""數據包裝器"""
def build(self, df): # pylint: disable=C0103
"""數據處理
Args:
df (pd.DataFrame): 待處理的數據
Returns:
pd.DataFrame : 處理后的數據
"""
return df
class Wrapper_fillna(Wrapper):
def build(self, df):
"""數據處理。
1. **向前**填充 stock 的数据。
2. 删除 stock 中依然为空的数据。(上市时间在benchmark之后的情况。)
Args:
df (pd.DataFrame): 待處理的數據
Returns:
pd.DataFrame : 處理后的數據
"""
result = df.copy()
result = result.fillna(method='ffill')
return result.dropna()
class Wrapper_default(Wrapper):
def build(self, df):
result = df.copy()
result = result.fillna(method='ffill')
result = result.drop(columns=['up_count', 'down_count'])
return result.dropna()
class Wrapper_remove_benchmark(Wrapper_default):
def build(self, df):
result = super(Wrapper_remove_benchmark, self).build(df)
result = result.drop(
columns=[f for f in result.columns if 'bench' in f])
return result.dropna()
class Wrapper_append_features(Wrapper):
def build(self, df):
result = df.copy()
result = result.fillna(method='ffill')
result = result.drop(columns=['up_count', 'down_count'])
result['EMA_5'] = QA.QA_indicator_EMA(result, 5)
result['CCI_5'] = QA.talib_indicators.CCI(result, 5)
result['RSI_5'] = QA.QA_indicator_RSI(result, 5, 5, 5)['RSI1']
result['MOM_5'] = indicators.talib_MOM(result, 5)
result[[
'BB_SMA_LOWER_5', 'BB_SMA_MIDDLE_5',
'BB_SMA_UPPER_5']] = indicators.talib_BBANDS(
result, 5)
# result[['AROON_DOWN_5', 'AROON_UP_5']] = QA.talib_indicators.AROON(
# result, 5)
# result['AROONOSC_5'] = QA.talib_indicators.AROONOSC(result,5)
return result.dropna()
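# A Wrapper subclass only needs to override build(); Wrapper_append_features
# above, for example, forward-fills, drops the up/down count columns and appends
# a set of 5-period indicator columns before dropping any remaining NaN rows.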
class DataLoader(object):
"""數據提供器"""
@staticmethod
def today():
return datetime.datetime.today().strftime('%Y-%m-%d')
class DataLoaderStock(DataLoader):
"""股票數據提供器
Args:
stock_code (str): 股票代碼
benchmark_code (str): 指數代碼
fq (str, optional): Defaults to 'qfq'. 是否取復權數據。
* `qfq` - 前復權
* `hfq` - 后復權
* `bfq` or `None` - 不復權
online (bool, optional): Defaults to False. 是否獲取在線數據
start (str, optional): Defaults to '1990-01-01'. 開始日期
end (str, optional): Defaults to DataLoader.today(). 結束日期
"""
def __init__(self,
stock_code,
benchmark_code='399300',
fq='qfq',
online=False,
start='1990-01-01',
end=DataLoader.today(),
wrapper=Wrapper(),
*args, **kwargs):
"""股票數據提供器
Args:
stock_code (str): 股票代碼
benchmark_code (str, optional): Defaults to '399300'. 指數代碼
fq (str, optional): Defaults to 'qfq'. 是否取復權數據。
* `qfq` - 前復權
* `hfq` - 后復權
* `bfq` or `None` - 不復權
online (bool, optional): Defaults to False. 是否獲取在線數據
start (str, optional): Defaults to '1990-01-01'. 開始日期
end (str, optional): Defaults to DataLoader.today(). 結束日期
wrapper (Wrapper, optional): Defaults to Wrapper(). `DataFrame`包装器。
appends (str): 待附加的股票代码列表。默认为空。
dtype : 读取数据时转换数据类型的定义。
类型参考 :py:func:`DataFrame.astype(dtype, copy=True, errors='raise', **kwargs)` 中 `dtype` 的定义。
默认为 `float32`。
"""
self.__stock_code = stock_code
self.__benchmark_code = benchmark_code
self.__online = online
self.__start = start
self.__end = end
self.__fq = fq
self.__wrapper = wrapper
        self.__data_raw = pd.DataFrame()
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random
import os
print('Welcome to the game!')
input('Press enter to continue: ')
print('')
print('- This is a population simulator for a fictional town.')
print('- The town starts with 20 people. With each year that passes, babies will be born and people will die.')
print('- Females can have a baby at any age between 18 and 40, but are most likely to give birth a in their late 20s.')
print('- People can die at any age, but more likely as they get older.')
print('- Names will be randomly assigned based on names in USA datasets.')
print('- Surnames are inherited from the mother.')
print('')
print('loading...')
# read in surnames from data source
# snlist = pd.read_csv('https://raw.githubusercontent.com/fivethirtyeight/data/master/most-common-name/surnames.csv', nrows=20)['name'].tolist()
snlist = ['SMITH', 'JOHNSON', 'WILLIAMS', 'BROWN', 'JONES', 'MILLER', 'DAVIS', 'GARCIA', 'RODRIGUEZ', 'WILSON', 'MARTINEZ', 'ANDERSON', 'TAYLOR', 'THOMAS', 'HERNANDEZ', 'MOORE', 'MARTIN', 'JACKSON', 'THOMPSON', 'WHITE']
# read in first names from data source
fnlistcsv = pd.read_csv('https://raw.githubusercontent.com/MatthiasWinkelmann/firstname-database/master/firstnames.csv', delimiter=';')
# select only relevent data
fnlist = fnlistcsv
fnlist = fnlist[fnlist['U.S.A.'].notnull()][['name', 'gender', 'U.S.A.']].rename(columns={'U.S.A.':'Freq'})
fnlist['Freq'] = fnlist['Freq'].astype(int)
# clean gender values
fnlist.replace({'1F': 'F', '?F': 'F', '1M': 'M', '?M': 'M'}, inplace=True)
fnlist = fnlist[fnlist['gender'].isin(['F', 'M'])]
# apply factors to 'Freq' column to represent popularity
fnlist['Freq'] = (10-(fnlist[['Freq']]*-1+1))**3
fnlistm = fnlist[fnlist['gender'] == 'M'].sort_values('Freq', ascending=False).reset_index(drop=True)
fnlistf = fnlist[fnlist['gender'] == 'F'].sort_values('Freq', ascending=False).reset_index(drop=True)
fnlistm = fnlistm.reindex(fnlistm.index.repeat(fnlistm['Freq']))['name'].tolist()
fnlistf = fnlistf.reindex(fnlistf.index.repeat(fnlistf['Freq']))['name'].tolist()
town = input('Enter the name of your town: ')
FirstName = []
for i in range(20):
FirstName.append(random.choice(fnlistf))
MiddleName = []
for i in range(20):
MiddleName.append(random.choice(fnlistf))
# create dataframe
data = {'FirstName':FirstName, 'MiddleName':MiddleName, 'Surname':snlist, 'Sex':'F', 'YearBorn':list(range(0,20))}
df = pd.DataFrame(data)
# add columns
year = 19
df['YearDeceased'] = np.nan
df['CurrentYear'] = year
df['Age'] = (df[['CurrentYear','YearDeceased']].min(axis=1) - df['YearBorn']).astype(int)
df['ParentID'] = np.nan
df['Generation'] = 1
df['NoOfChildren'] = 0
# probability of dying at age
# manually enter probablities
prob = [0.001] * 40 + [0.002] * 10 + [0.008] * 10 + [0.012] * 10 + [0.025] * 10 + [0.05] * 5 + [0.1] * 5 + [0.2] * 5 + [0.25] * 15 + [0.35] * 6 + [0.5] * 3 + [1] * 1
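# The pieces above concatenate to 40+10+10+10+10+5+5+5+15+6+3+1 = 120 entries,
# i.e. one death probability per age from 1 to 120 (death is certain at 120).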
data = {'Age':list(range(1,121)), 'Prob':prob}
probdeath = pd.DataFrame(data)
# probability of having a baby at age
# min age=18, max age=40. manually enter probablities
# rapid growth
data = {'Age':list(range(18,40)), 'Prob':[0.02, 0.04, 0.06, 0.08, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.4, 0.35, 0.3, 0.25, 0.2, 0.15, 0.1, 0.08, 0.06, 0.04, 0.02]}
probbabyRG = pd.DataFrame(data)
# neutral growth
data = {'Age':list(range(18,40)), 'Prob':[0.01, 0.02, 0.03, 0.04, 0.05, 0.075, 0.1, 0.13, 0.155, 0.19, 0.215, 0.215, 0.19, 0.155, 0.13, 0.1, 0.075, 0.05, 0.04, 0.03, 0.02, 0.01]}
probbabyNU = pd.DataFrame(data)
# moderate decline
data = {'Age':list(range(18,40)), 'Prob':[0.007, 0.015, 0.02, 0.03, 0.04, 0.05, 0.07, 0.1, 0.12, 0.16, 0.21, 0.21, 0.16, 0.12, 0.1, 0.07, 0.05, 0.04, 0.03, 0.02, 0.015, 0.007]}
probbabyMD = pd.DataFrame(data)
# productivity output by age
r1 = list(range(1,16))
l1 = [0.0625]*15
lr1 = [r1*l1 for r1,l1 in zip(r1,l1)]
r2 = list(reversed(range(1,35)))
l2 = [0.0286]*34
lr2 = [r2*l2 for r2,l2 in zip(r2,l2)]
prodout = [0]*14 + lr1 + [1]*11 + lr2 + [0]*46
data = {'Age':list(range(1,121)), 'ProdOutput':prodout}
prodout = pd.DataFrame(data)
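# Productivity output forms a trapezoid over ages 1-120: zero for ages 1-14, a
# linear ramp up over the next 15 years, full output (1.0) for 11 years, a slow
# linear decline over the following 34 years, then zero for the remaining 46
# years (14 + 15 + 11 + 34 + 46 = 120 entries).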
# productivity used by age
produse = [0.25]*120
data = {'Age':list(range(1,121)), 'ProdUsed':produse}
produse = pd.DataFrame(data)
playing = 'y'
while playing == 'y':
# add years
yearsadd = int(input('Run for how many more years? (enter between 1-50 years): '))
probbabyint = int(input('At what population growth rate? (1=rapid growth, 2=neutral, 3=moderate decline): '))
if probbabyint == 1:
probbaby = probbabyRG
elif probbabyint == 2:
probbaby = probbabyNU
elif probbabyint == 3:
probbaby = probbabyMD
else:
print('incorrect input!')
endyear = year + yearsadd
while year < endyear:
year += 1
alive = df[df['YearDeceased'].isnull()]
df['CurrentYear'] = year
df['Age'] = np.where(df['YearDeceased'].isnull(), (df['CurrentYear'] - df['YearBorn']), (df['YearDeceased'] - df['YearBorn'])).astype(int)
# did anyone die? if so enter in YearDeceased
temp1 = df[df['YearDeceased'].isnull()].reset_index().merge(probdeath).set_index('index')[['Prob']]
temp1['rand'] = [random.random() for i in temp1.index]
temp1['YearDeceased1'] = np.where(temp1['rand'] < temp1['Prob'], year, np.nan)
temp1.drop(columns={'Prob', 'rand'}, inplace=True)
df = pd.concat([df, temp1], axis=1)
df['YearDeceased'] = np.where(df['YearDeceased'].isnull() == True, df['YearDeceased1'], df['YearDeceased'])
df.drop(columns={'YearDeceased1'}, inplace=True)
# did anyone have a baby? if so enter new row for each
babies = df[(df['YearDeceased'].isnull()) & (df['Sex'] == 'F')].reset_index().merge(probbaby, on='Age').set_index('index')
lst = []
for i in range(babies.shape[0]):
lst.append(random.random())
babies['rand'] = lst
babies['baby?'] = babies['Prob'] > babies['rand']
babies = babies[babies['baby?']][['Surname', 'Generation']]
babies['Generation'] += 1
babies = babies.reset_index().rename(columns={'index':'ParentID'})
if len(babies) > 0:
Sex = []
for i in range(babies.shape[0]):
Sex.append(random.choice(['F', 'M']))
babies['Sex'] = Sex
MFirstName = []
for i in range(babies.shape[0]):
MFirstName.append(random.choice(fnlistm))
babies['MFirstName'] = MFirstName
MMiddleName = []
for i in range(babies.shape[0]):
MMiddleName.append(random.choice(fnlistm))
babies['MMiddleName'] = MMiddleName
FFirstName = []
for i in range(babies.shape[0]):
FFirstName.append(random.choice(fnlistf))
babies['FFirstName'] = FFirstName
FMiddleName = []
for i in range(babies.shape[0]):
FMiddleName.append(random.choice(fnlistf))
babies['FMiddleName'] = FMiddleName
babies['FirstName'] = np.where(babies['Sex'] == 'F', babies['FFirstName'], babies['MFirstName'])
babies['MiddleName'] = np.where(babies['Sex'] == 'F', babies['FMiddleName'], babies['MMiddleName'])
babies.drop(columns={'MFirstName', 'MMiddleName', 'FFirstName', 'FMiddleName'}, inplace=True)
babies['YearBorn'] = year
babies['YearDeceased'] = np.nan
babies['CurrentYear'] = year
babies['Age'] = 0
babies['NoOfChildren'] = 0
babies = babies[['FirstName', 'MiddleName', 'Surname', 'Sex', 'YearBorn', 'YearDeceased', 'CurrentYear', 'Age', 'ParentID', 'Generation', 'NoOfChildren']]
            df = pd.concat([df, babies])
# Query Jupyter server for the rows of a data frame
import json as _VSCODE_json
import builtins
import pandas as _VSCODE_pd
import pandas.io.json as _VSCODE_pd_json
import builtins as _VSCODE_builtins
# In IJupyterVariables.getValue this '_VSCode_JupyterTestValue' will be replaced with the json stringified value of the target variable
# Indexes off of _VSCODE_targetVariable need to index types that are part of IJupyterVariable
_VSCODE_targetVariable = _VSCODE_json.loads("""_VSCode_JupyterTestValue""")
_VSCODE_evalResult = _VSCODE_builtins.eval(_VSCODE_targetVariable["name"])
# _VSCode_JupyterStartRow and _VSCode_JupyterEndRow should be replaced dynamically with the literals
# for our start and end rows
_VSCODE_startRow = _VSCODE_builtins.max(_VSCode_JupyterStartRow, 0)
_VSCODE_endRow = _VSCODE_builtins.min(
_VSCode_JupyterEndRow, _VSCODE_targetVariable["rowCount"]
)
# Assume we have a dataframe. If not, turn our eval result into a dataframe
_VSCODE_df = _VSCODE_evalResult
if isinstance(_VSCODE_evalResult, list):
_VSCODE_df = _VSCODE_pd.DataFrame(_VSCODE_evalResult)
elif isinstance(_VSCODE_evalResult, _VSCODE_pd.Series):
_VSCODE_df = _VSCODE_pd.Series.to_frame(_VSCODE_evalResult)
elif isinstance(_VSCODE_evalResult, dict):
_VSCODE_evalResult = _VSCODE_pd.Series(_VSCODE_evalResult)
_VSCODE_df = _VSCODE_pd.Series.to_frame(_VSCODE_evalResult)
elif _VSCODE_targetVariable["type"] == "ndarray":
    _VSCODE_df = _VSCODE_pd.DataFrame(_VSCODE_evalResult)
"""
Tree-like multi-level dictionary with advanced indexing options.
"""
from functools import reduce
from future.utils import viewitems as viewitems_, viewvalues as viewvalues_
from . import funcargparse, general, strdump
import re
import collections
import pandas as pd
_depends_local=["..utils.strdump"]
def split_path(path, omit_empty=True, sep=None):
"""
Split generic path into individual path entries.
Args:
path: Generic path. Lists and tuples (possible nested) are flattened;
strings are split according to separators; non-strings are converted into strings first.
omit_empty (bool): Determines if empty entries are skipped.
sep (str): If not ``None``, defines regex for path separators; default separator is ``'/'``.
Returns:
list: A list of individual entries.
"""
if not (isinstance(path, list) or isinstance(path, tuple)):
path=[path]
else:
path=general.flatten_list(path)
if sep is None:
path=[e for t in path for e in str(t).split("/")]
else:
path=[e for t in path for e in re.split(sep,t)]
if omit_empty:
path=[p for p in path if p!=""]
return path
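# Illustrative examples: split_path("a/b//c") -> ['a', 'b', 'c'] (empty entries
# are omitted by default), and split_path(["a", "b/c", 1]) -> ['a', 'b', 'c', '1']
# (list entries are flattened and non-strings are stringified before splitting).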
def normalize_path_entry(entry, case_sensitive=True, case_normalization="lower"):
"""Normalize the case of the entry if it's not case-sensitive. Normalization is either either ``'lower'`` or ``'upper'``."""
funcargparse.check_parameter_range(case_normalization,"case_normalization",{"lower","upper"})
if not case_sensitive:
if case_normalization=="lower":
return entry.lower()
else:
return entry.upper()
else:
return entry
def normalize_path(path, omit_empty=True, case_sensitive=True, case_normalization="lower", sep=None, force=False):
"""
Split and normalize generic path into individual path entries.
Args:
path: Generic path. Lists and tuples (possible nested) are flattened;
strings are split according to separators; non-strings are converted into strings first.
omit_empty (bool): Determines if empty entries are skipped.
case_sensitive (bool): If ``False``, entries case is normalized according to case_normalization.
case_normalization (str): Normalization rules; either ``'lower'`` or ``'upper'``.
sep (str): If not None, defines regex for path separators; default separator is ``'/'``.
force (bool): If ``False``, treat lists as if they're already normalized.
Returns:
list: A list of individual normalized entries.
"""
if isinstance(path,list) and not force:
return path
funcargparse.check_parameter_range(case_normalization,"case_normalization",{"lower","upper"})
path=split_path(path,omit_empty,sep=sep)
if not case_sensitive:
if case_normalization=="lower":
path=[p.lower() for p in path]
else:
path=[p.upper() for p in path]
return path
def is_dictionary(obj, generic=False):
"""
Determine if the object is a dictionary.
Args:
obj: object
generic (bool): if ``False``, passes only :class:`Dictionary` (or subclasses) objects;
otherwise, passes any dictionary-like object.
Returns:
bool
"""
return Dictionary.is_dictionary(obj,generic=generic)
def as_dictionary(obj, case_sensitive=True, case_normalization="lower"):
"""
Convert object into :class:`Dictionary` with the given parameters.
If object is already a :class:`Dictionary` (or its subclass), return unchanged, even if its parameters are different.
"""
return Dictionary.as_dictionary(obj,case_sensitive=case_sensitive,case_normalization=case_normalization)
def as_dict(obj, style="nested", copy=True):
"""
Convert object into standard `dict` with the given parameters.
If object is already a `dict`, return unchanged, even if the parameters are different.
"""
if isinstance(obj,dict):
return obj
return Dictionary.as_dictionary(obj).as_dict(style=style,copy=copy)
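# Minimal usage sketch (illustrative key names):
#   d = Dictionary()
#   d['scan/rate'] = 10            # intermediate branches are created on demand
#   d[('scan', 'points')] = 100    # tuple paths and '/'-joined strings are equivalent
#   d['scan/rate']                 # -> 10
#   'scan' in d                    # -> True (has_entry accepts branches and leaves)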
class Dictionary(object):
"""
Multi-level dictionary.
Access is done by path (all path elements are converted into strings and concatenated to form a single string path).
If dictionary is not case-sensitive, all inserted and accessed paths are normalized to lower or upper case.
Args:
root (dict or Dictionary): Initial value.
case_sensitive (bool): If ``False``, entries case is normalized according to `case_normalization`.
case_normalization (str): Normalization rules; either ``'lower'`` or ``'upper'``.
copy (bool): If ``True``, make copy of the supplied data; otherwise, just make it the root.
Warning:
If ``copy==False``, the root data is already assumed to be normalized. If it isn't, the behavior might be incorrect.
"""
def __init__(self, root=None, case_sensitive=True, case_normalization="lower", copy=True):
object.__init__(self)
self._case_sensitive=case_sensitive
self._case_normalization=case_normalization
if root is not None:
if isinstance(root,pd.Series):
root=dict(zip(root.index,root))
elif isinstance(root,pd.DataFrame):
if root.shape[1]==1:
root=dict(zip(root.index,root.iloc(axis=1)[0]))
elif root.shape[1]==2:
root=dict(zip(root.iloc(axis=1)[0],root.iloc(axis=1)[1]))
else:
raise ValueError("only accept 1- and 2-column arrays")
root=Dictionary._get_root(root)
if copy:
self._data={}
self.merge_branch(root) # automatically normalizes source
else:
self._data=root
else:
self._data={}
def _make_similar_dict(self, root=None, copy=True):
return Dictionary(root=root,copy=copy,case_sensitive=self._case_sensitive,case_normalization=self._case_normalization)
def _normalize_path_entry(self, entry):
return normalize_path_entry(entry,case_sensitive=self._case_sensitive,case_normalization=self._case_normalization)
def _normalize_path(self, path):
return normalize_path(path,omit_empty=True,case_sensitive=self._case_sensitive,case_normalization=self._case_normalization)
@staticmethod
def _is_branch(v):
return isinstance(v,dict)
@staticmethod
def _get_root(source):
if isinstance(source, Dictionary):
return source._data
elif Dictionary._is_branch(source):
return source
else:
raise ValueError("source isn't a tree")
@staticmethod
def _is_empty(source):
if isinstance(source, Dictionary):
return not source._data
elif Dictionary._is_branch(source):
return not source
else:
return False
@staticmethod
def is_dictionary(obj, generic=True):
"""
Determine if the object is a dictionary.
Args:
obj
generic (bool): if False, passes only :class:`Dictionary` (or subclasses) objects;
otherwise, passes any dictionary-like object.
Returns:
bool
"""
if generic:
return isinstance(obj, Dictionary) or Dictionary._is_branch(obj)
else:
return isinstance(obj, Dictionary)
@staticmethod
def as_dictionary(obj, case_sensitive=True, case_normalization="lower"):
"""
Convert object into :class:`Dictionary` with the given parameters.
If object is already a :class:`Dictionary` (or its subclass), return unchanged, even if its parameters are different.
"""
if isinstance(obj,DictionaryPointer):
return Dictionary(obj,copy=False)
if isinstance(obj, Dictionary):
return obj
else:
return Dictionary(obj,case_sensitive=case_sensitive,case_normalization=case_normalization)
def _get_valid_subpath(self, s_path):
branch=self._data
for i,p in enumerate(s_path):
if self._is_branch(branch) and p in branch:
branch=branch[p]
else:
break
return s_path[:i]
def _get_branch(self, s_path, append=False, overwrite_leaves=False):
branch=self._data
for p in s_path:
if append:
new_branch=branch.setdefault(p,{})
if not self._is_branch(new_branch):
if overwrite_leaves:
new_branch=branch[p]={}
else:
return None
branch=new_branch
elif p in branch:
branch=branch[p]
if not self._is_branch(branch):
return None
else:
return None
return branch
def _attach_node(self, dest, key, value, branch_option="normalize"):
"""
Attach a node.
branch_option decides what to do if the value is dictionary-like: just attach root, copy, or normalize all the keys
attaching empty dictionary does nothing.
"""
try:
value=Dictionary._get_root(value)
if value: # adding empty dictionary doesn't change anything
if branch_option=="attach":
dest[key]=value
else:
branch={}
self._insert_branch(value,branch,overwrite=True,normalize_paths=(branch_option=="normalize"))
dest[key]=branch
except ValueError:
dest[key]=value
def add_entry(self, path, value, force=False, branch_option="normalize"):
"""
Add value to a given path (overwrite leaf value if necessary).
        Doesn't replace leaves with branches and vice versa if ``force==False``.
Args:
path
value
force (bool): If ``True``, change leaf into a branch and vice-versa; otherwise, raises :exc:`ValueError` if the conversion is necessary.
branch_option (str):
Decides what to do if the value is dictionary-like:
- ``'attach'`` -- just attach the root,
- ``'copy'`` -- copy and attach,
- ``'normalize'`` -- copy while normalizing all the keys according to the current rules.
"""
funcargparse.check_parameter_range(branch_option,"branch_option",{"attach","copy","normalize"})
if self._is_empty(value):
if force:
self.del_entry(path)
return self
path=self._normalize_path(path)
if path==[]:
raise KeyError("can't reassign root")
if force:
branch=self._get_branch(path[:-1],append=True,overwrite_leaves=True)
else:
branch=self._get_branch(path[:-1],append=True,overwrite_leaves=False)
if branch is None:
wrong_path="/".join(self._get_valid_subpath(path))
raise KeyError("can't replace the leaf '{0}' with a subtree; delete the leaf explicitly first, or use force=True".format(wrong_path))
if self._is_branch(branch.get(path[-1],None)):
wrong_path="/".join(path)
raise KeyError("can't replace the subtree '{0}' with a leaf; delete the subtree explicitly first, or use force=True".format(wrong_path))
self._attach_node(branch,path[-1],value,branch_option=branch_option)
return self
def _get_entry(self, path):
path=self._normalize_path(path)
if path==[]:
return self._data
branch=self._get_branch(path[:-1],append=False)
if branch and (path[-1] in branch):
return branch[path[-1]]
else:
raise KeyError("unaccessible entry with path {0}".format(path))
def get_entry(self, path, as_pointer=False):
"""
Get entry at a given path
Args:
path
as_pointer (bool): If ``True`` and entry is not a leaf, return :class:`Dictionary` or :class:`DictionaryPointer`
"""
value=self._get_entry(path)
if self._is_branch(value):
if as_pointer:
return DictionaryPointer(self,path,case_sensitive=self._case_sensitive,case_normalization=self._case_normalization,copy=False)
else:
return self._make_similar_dict(value,copy=False)
else:
return value
def has_entry(self, path, kind="all"):
"""
Determine if the path is in the dictionary.
`kind` determines which kind of path to consider and can be ``'leaf'``, ``'branch'`` or ``'all'``.
"""
funcargparse.check_parameter_range(kind,"kind",{"leaf","branch","all"})
try:
v=self._get_entry(path)
return (kind=="all") or (kind=="branch" and self._is_branch(v)) or (kind=="leaf" and not self._is_branch(v))
except KeyError:
return False
def get_max_prefix(self, path, kind="all"):
"""
Find the longest prefix of `path` contained in the dictionary.
Return tuple ``(prefix, rest)``, where both path entries are normalized according to the dictionary rules.
`kind` determines which kind of path to consider and can be ``'leaf'``, ``'branch'`` or ``'all'``. If the longest prefix is of a different kind, return ``(None,None)``.
"""
funcargparse.check_parameter_range(kind,"kind",{"leaf","branch","all"})
s_path=self._normalize_path(path)
if s_path==[]:
if not self._data and kind!="branch":
return ([],[])
if self._data and kind!="leaf":
return ([],[])
return (None,None)
branch=self._data
for i,p in enumerate(s_path):
if p in branch:
branch=branch[p]
if not self._is_branch(branch):
return (None,None) if kind=="branch" else (s_path[:i+1],s_path[i+1:])
else:
return (None,None) if kind=="leaf" else (s_path[:i],s_path[i:])
return (None,None) if kind=="leaf" else (s_path,[])
def del_entry(self, path):
""" Delete entry from the dictionary. Return ``True`` if the path was present."""
path=self._normalize_path(path)
if path==[]:
return False
branch=self._get_branch(path[:-1],append=False)
if branch:
try:
del branch[path[-1]]
return True
except KeyError:
pass
return False
__getitem__=get_entry
__setitem__=add_entry
__contains__=has_entry
__delitem__=del_entry
def __len__(self): return len(self._data)
def size(self):
"""Return the total size of the dictionary (number of nodes)."""
def _branch_size(branch):
if self._is_branch(branch):
return sum(_branch_size(v) for v in viewvalues_(branch))
else:
return 1
return _branch_size(self._data)
def get(self, path, default=None):
"""
Analog of ``dict.get()``: ``D.get(k,d) -> D[k] if k in D else d``.
"""
try:
return self.__getitem__(path)
except KeyError:
return default
def setdefault(self, path, default=None):
"""
        Analog of ``dict.setdefault()``: ``D.setdefault(k,d) -> D.get(k,d)``, also sets ``D[k]=d`` if ``k not in D``.
"""
try:
return self.__getitem__(path)
except KeyError:
self.__setitem__(path, default)
return default
def viewitems(self, ordered=False, leafs=False, path_kind="split", wrap_branches=True):
"""
Analog of ``dict.viewitems()``, by default iterating only over the immediate children of the root.
Args:
ordered (bool): If ``True``, loop over keys in alphabetic order.
leafs (bool): If ``True``, loop over leaf nodes (i.e., behave as 'flat' dictionary);
otherwise, loop over immediate children (i.e., behave as 'nested' dictionary)
path_kind (str): either ``"split"`` (each path is a tuple of individual keys), or ``"joined"`` (each path is a single string)
wrap_branches (bool): if ``True``, wrap sub-branches into :class:`DictionaryPointer` objects; otherwise, return them as nested built-in dictionaries
"""
if leafs:
funcargparse.check_parameter_range(path_kind,"path_kind",{"split","joined"})
makep=tuple if path_kind=="split" else "/".join
for p,v in self.iternodes(to_visit="leafs",ordered=ordered,include_path=True):
yield makep(p),v
else:
items_=sorted(viewitems_(self._data)) if ordered else viewitems_(self._data)
if wrap_branches:
makev=lambda p,v: (self._fast_build_branch_pointer([p],v) if self._is_branch(v) else v)
else:
makev=lambda p,v: v
for p,v in items_:
yield p,makev(p,v)
iteritems=viewitems # for compatibility
items=viewitems
def viewvalues(self, ordered=False, leafs=False, wrap_branches=True):
"""
Analog of ``dict.viewvalues()``, iterating only over the immediate children of the root.
Args:
ordered (bool): If ``True``, loop over keys in alphabetic order.
leafs (bool): If ``True``, loop over leaf nodes (i.e., behave as 'flat' dictionary);
otherwise, loop over immediate children (i.e., behave as 'nested' dictionary)
wrap_branches (bool): if ``True``, wrap sub-branches into :class:`DictionaryPointer` objects; otherwise, return them as nested built-in dictionaries
"""
for _,v in self.items(ordered=ordered,leafs=leafs,wrap_branches=wrap_branches):
yield v
itervalues=viewvalues
values=viewvalues
def viewkeys(self, ordered=False):
"""
Analog of ``dict.viewkeys()``, iterating only over the immediate children of the root.
Args:
ordered (bool): If ``True``, loop over keys in alphabetic order.
"""
return sorted(self._data) if ordered else list(self._data)
iterkeys=viewkeys # for compatibility
def __iter__(self):
return self._data.__iter__()
keys=viewkeys
def paths(self, ordered=False, topdown=False, path_kind="split"):
"""
Return list of all leaf paths.
Args:
ordered (bool): If ``True``, loop over paths in alphabetic order.
            topdown (bool): If ``True``, return a node's leaves before its subtrees' leaves.
path_kind (str): either ``"split"`` (each path is a tuple of individual keys), or ``"joined"`` (each path is a single string)
"""
ps=[]
funcargparse.check_parameter_range(path_kind,"path_kind",{"split","joined"})
makep=tuple if path_kind=="split" else "/".join
for p,_ in self.iternodes(to_visit="leafs",ordered=ordered,topdown=topdown,include_path=True):
ps.append(makep(p))
return ps
def _iterbranches(self, ordered=False, topdown=False):
if topdown:
yield self
source=self._data
path=self.get_path()
if ordered:
iter_range=sorted(viewitems_(source))
else:
iter_range=viewitems_(source)
for k,v in iter_range:
if self._is_branch(v):
ptr=self._fast_build_branch_pointer(path+[k],v)
for b in ptr._iterbranches(ordered=ordered,topdown=topdown):
yield b
if not topdown:
yield self
def iternodes(self, to_visit="leafs", ordered=False, include_path=False, topdown=False):
"""
Iterate over nodes.
Args:
to_visit (str): Can be ``'leafs'``, ``'branches'`` or ``'all'`` and determines which parts of the dictionary are visited.
ordered (bool): If ``True``, loop over paths in alphabetic order.
            include_path (bool): Include the path in the return value.
            topdown (bool): If ``True``, visit a node and its leaves before its subtrees' leaves.
Yield:
Values for leafs and :class:`DictionaryPointer` for branches.
If ``include_path==True``, yields tuple ``(path, value)``, where `path` is in the form of a normalized list.
"""
funcargparse.check_parameter_range(to_visit,"to_visit",{"branches","leafs","all"})
for br in self._iterbranches(ordered=ordered,topdown=topdown):
path=br.get_path()
if topdown and (to_visit in {"branches","all"}):
yield (path,br) if include_path else br
if to_visit in {"leafs","all"}:
for k,v in br.viewitems(ordered=ordered,wrap_branches=False):
if not self._is_branch(v):
yield (path+[k],v) if include_path else v
if (not topdown) and (to_visit in {"branches","all"}):
yield (path,br) if include_path else br
nodes=iternodes
def __str__(self):
iterleafs=self.iternodes(ordered=True,to_visit="leafs",include_path=True)
content="\n".join("'{0}': {1}".format("/".join(k),str(v)) for k,v in iterleafs)
return "{0}({1})".format(type(self).__name__,content)
__repr__=__str__
def _insert_branch(self, source, dest, overwrite=True, normalize_paths=True):
for k,v in viewitems_(source):
if normalize_paths:
k=self._normalize_path(k)
if len(k)>1:
v=reduce((lambda d,sk: {sk:d}), [v]+k[:0:-1]) # build dict corresponding to {"k[1]/k[2]/.../k[-1]":v}
k=k[0]
else:
k=self._normalize_path_entry(str(k))
try:
v=self._get_root(v)
is_branch=True
except ValueError:
is_branch=False
if is_branch:
if k in dest and not (self._is_branch(dest[k])):
if overwrite:
dest[k]={}
else:
continue
dest.setdefault(k,{})
self._insert_branch(v,dest[k],overwrite=overwrite,normalize_paths=normalize_paths)
else:
if overwrite:
dest[k]=v
else:
dest.setdefault(k,v)
def merge_branch(self, source, branch="", overwrite=True, normalize_paths=True):
"""
Attach source (:class:`dict` or other :class:`Dictionary`) to a given branch; source is automatically deep-copied.
Args:
source (dict or Dictionary)
branch (tuple or str): Destination path.
overwrite (bool): If ``True``, replaces the old entries with the new ones (it only matters for leaf assignments).
            normalize_paths (bool): If ``True`` and the dictionary isn't case sensitive, perform normalization of the `source` keys.
"""
source=Dictionary._get_root(source)
if not source:
return self
path=self._normalize_path(branch)
dest=self._get_branch(path,append=True,overwrite_leaves=overwrite)
if dest is None:
raise KeyError("can't replace the leaf '{0}' with a subtree; delete the leaf explicitly first, or use force=True".format("/".join(path)))
self._insert_branch(source,dest,overwrite=overwrite,normalize_paths=normalize_paths)
return self
update=merge_branch
def detach_branch(self, branch=""):
"""Remove branch from the current dictionary and return it as a separate :class:`Dictionary`."""
subtree=self[branch]
del self[branch]
return subtree
@staticmethod
def _deep_copy(leaf):
if Dictionary._is_branch(leaf):
res={}
for k,v in viewitems_(leaf):
res[k]=Dictionary._deep_copy(v)
else:
res=leaf
return res
def branch_copy(self, branch=""):
"""Get a copy of the branch as a :class:`Dictionary`."""
source=self._get_branch(self._normalize_path(branch),append=False)
if source is None:
raise KeyError("unaccessible entry with path {0}".format(branch))
return self._make_similar_dict(self._deep_copy(source),copy=False)
def copy(self):
"""Get a full copy the dictionary."""
return self.branch_copy()
def updated(self, source, branch="", overwrite=True, normalize_paths=True):
"""
Get a copy of the dictionary and attach a new branch to it.
Parameters are the same as in the :meth:`Dictionary.merge_branch`.
"""
cpy=self.copy()
cpy.merge_branch(source,branch=branch,overwrite=overwrite,normalize_paths=normalize_paths)
return cpy
def as_dict(self, style="nested", copy=True):
"""
Convert into a :class:`dict` object.
Args:
style (str):
                Determines the style of the returned dictionary:
- ``'nested'`` -- subtrees are turned into nested dictionaries,
- ``'flat'`` -- single dictionary is formed with full paths as keys.
copy (bool): If ``False`` and ``style=='nested'``, return the root dictionary.
"""
if isinstance(self,dict):
return self.copy() if copy else self
funcargparse.check_parameter_range(style,"style",{"nested","flat"})
if style=="nested":
return self.copy()._data if copy else self._data
else:
d={}
for p,v in self.iternodes(to_visit="leafs",include_path=True):
d["/".join(p)]=v
return d
def as_pandas(self, index_key=True, as_series=True):
"""
Convert into a pandas DataFrame or Series object.
Args:
index_key (bool): If ``False``, create a 2-column table with the first column (``"key"``) containing string path
and the second column (``"value"``) containing value; otherwise, move key to the table index.
as_series (bool): If ``index_key==True`` and ``as_series==True``, convert the resulting DataFrame into 1D Series
(the key is the index); otherwise, keep it as a single-column table
"""
data=[("/".join(p), v) for p,v in self.iternodes(to_visit="leafs",include_path=True,ordered=True)]
table= | pd.DataFrame(data,columns=["key","value"]) | pandas.DataFrame |
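# --- Illustrative usage sketch for the Dictionary container defined above ---
# Assumptions: the class offers a no-argument constructor and the 'a/b/c' path
# syntax documented in add_entry/get_entry; the paths and values are made up.
if __name__ == "__main__":
    d=Dictionary()
    d.add_entry("scan/xaxis/start",0.0)
    d.add_entry("scan/xaxis/stop",10.0)
    d["scan/yaxis"]={"start":-1.0,"stop":1.0}  # a plain dict is attached as a branch
    print(d["scan/xaxis/start"])               # leaf access by path -> 0.0
    print(d.paths(path_kind="joined"))         # all leaf paths as 'a/b/c' strings
    print(d.has_entry("scan/zaxis"))           # False: missing paths are reported, not created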
#--------------------------------------------------------
# Import Packages
#--------------------------------------------------------
from neorl.benchmarks import KP
from neorl import PPO2, DQN, ACER, ACKTR, A2C
from neorl import MlpPolicy, DQNPolicy
from neorl import RLLogger
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import sys
#--------------------------------------------------------
# KP Data
#--------------------------------------------------------
def KP_Data(n_objects):
""""
Function provides initial data to construct a Knapsack problem enviroment
:param n_objects: (int) number of objects, choose either 50 or 100
:return: obj_list (list), optimum_knapsack (list), episode_length (int), weight_capacity (int)
"""
if n_objects == 50:
#---50 objects
obj_list = [[3,4],[8,4],[4,2],[9,4],[5,9],[3,6],[3,1],[9,2],[8,3],[6,8],[9,4],[4,2],[4,7],[5,1],[6,4],[5,8],[2,1],[5,7],[2,5],[7,4],\
[6,3],[8,2],[7,7],[4,8],[5,8],[2,1],[3,7],[7,4],[9,1],[1,4],[2,2],[6,4],[7,3],[2,6],[7,3],[9,1],[1,1],[1,9],[2,3],[5,8],[5,1],[3,9],\
[5,6],[5,7],[4,2],[2,3],[1,4],[8,3],[7,5],[1,6]]
#optimal solution for comparison
optimum_knapsack = [1,2,3,4,7,8,9,10,11,12,14,15,16,17,18,20,21,22,23,25,26,28,29,31,32,33,35,36,39,41,43,44,45,48,49]
#episode length
episode_length = 2
weight_capacity = 125
elif n_objects == 100:
#---100 objects
obj_list = [[1,4],[9,5],[9,7],[6,8],[3,7],[8,4],[8,6],[2,1],[2,6],[9,7],[8,2],[6,6],[6,9],[6,7],[4,4],[7,8],[1,9],[1,3],[5,3],[8,1],\
[5,7],[8,6],[2,8],[3,5],[3,8],[4,3],[8,2],[6,7],[4,9],[3,5],[9,1],[9,3],[5,6],[2,2],[2,1],[5,9],[6,2],[1,3],[8,3],[8,8],[3,8],[4,6],\
[4,7],[9,7],[9,4],[8,8],[2,7],[4,4],[1,2],[3,4],[8,8],[6,9],[4,7],[6,8],[8,7],[4,8],[7,9],[5,9],[8,8],[5,4],[2,2],[4,9],[1,4],[1,8],\
[8,6],[4,5],[9,1],[3,1],[6,2],[7,1],[1,6],[1,7],[9,7],[7,5],[7,1],[5,6],[3,5],[8,8],[8,9],[2,9],[3,1],[5,9],[7,8],[4,3],[2,8],[8,4],\
[9,5],[6,7],[8,2],[3,5],[2,6],[3,2],[9,7],[1,1],[6,7],[7,4],[6,4],[7,6],[6,4],[3,2]]
#optimal solution for comparison
optimum_knapsack = [2,3,6,7,8,10,11,12,14,15,16,19,20,22,26,27,28,31,32,34,35,37,39,40,44,45,46,48,51,55,59,60,61,65,66,67,68,69,\
70,73,74,75,78,79,81,83,84,86,87,89,92,93,94,96,97,98,99,100]
#episode length
episode_length = 2
weight_capacity= 250
else:
raise ValueError('--error: n_objects is not defined, either choose 50 or 100')
return obj_list, optimum_knapsack, episode_length, weight_capacity
#--------------------------------------------------------
# User Parameters for RL Optimisation
#--------------------------------------------------------
try:
total_steps=int(sys.argv[1]) #get time steps as external argument (for quick testing)
except:
total_steps=8000 #or use default total time steps to run all optimizers
n_steps=12 #update frequency for A2C, ACKTR, PPO
n_objects=50 #number of objects: choose 50 or 100
n_sum_steps=10 #this is for logging and averaging purposes
#---get some data to initialize the environment---
obj_list, optimum_knapsack, episode_length, weight_capacity=KP_Data(n_objects=n_objects)
#--------------------------------------------------------
# DQN
#--------------------------------------------------------
#create an environment object from the class
env=KP(obj_list=obj_list, optimum_knapsack=optimum_knapsack,
episode_length=episode_length, weight_capacity=weight_capacity, method = 'dqn')
#create a callback function to log data
cb_dqn=RLLogger(check_freq=1)
#To activate logger plotter, add following arguments to cb_dqn:
#plot_freq = 50,n_avg_steps=10,pngname='DQN-reward'
#Also applicable to ACER.
#create a RL object based on the env object
dqn = DQN(DQNPolicy, env=env, seed=1)
#optimise the environment class
dqn.learn(total_timesteps=total_steps*n_sum_steps, callback=cb_dqn)
#--------------------------------------------------------
# ACER
#--------------------------------------------------------
env=KP(obj_list=obj_list, optimum_knapsack=optimum_knapsack,
episode_length=episode_length, weight_capacity=weight_capacity, method = 'acer')
cb_acer=RLLogger(check_freq=1)
acer = ACER(MlpPolicy, env=env, seed=1)
acer.learn(total_timesteps=total_steps*n_sum_steps, callback=cb_acer)
#--------------------------------------------------------
# PPO
#--------------------------------------------------------
env=KP(obj_list=obj_list, optimum_knapsack=optimum_knapsack,
episode_length=episode_length, weight_capacity=weight_capacity, method = 'ppo')
cb_ppo=RLLogger(check_freq=1)
#To activate logger plotter, add following arguments to cb_ppo:
#plot_freq = 1, n_avg_steps=10, pngname='PPO-reward'
#Also applicable to A2C, ACKTR.
ppo = PPO2(MlpPolicy, env=env, n_steps=n_steps, seed = 1)
ppo.learn(total_timesteps=total_steps, callback=cb_ppo)
#--------------------------------------------------------
# ACKTR
#--------------------------------------------------------
env=KP(obj_list=obj_list, optimum_knapsack=optimum_knapsack,
episode_length=episode_length, weight_capacity=weight_capacity, method = 'acktr')
cb_acktr=RLLogger(check_freq=1)
acktr = ACKTR(MlpPolicy, env=env, n_steps=n_steps, seed = 1)
acktr.learn(total_timesteps=total_steps, callback=cb_acktr)
#--------------------------------------------------------
# A2C
#--------------------------------------------------------
env=KP(obj_list=obj_list, optimum_knapsack=optimum_knapsack,
episode_length=episode_length, weight_capacity=weight_capacity, method = 'a2c')
cb_a2c=RLLogger(check_freq=1)
a2c = A2C(MlpPolicy, env=env, n_steps=n_steps, seed = 1)
a2c.learn(total_timesteps=total_steps, callback=cb_a2c)
#--------------------------------
#Summary Results
#--------------------------------
print('--------------- DQN results ---------------')
print('The best value of x found:', cb_dqn.xbest)
print('The best value of y found:', cb_dqn.rbest)
print('--------------- ACER results ---------------')
print('The best value of x found:', cb_acer.xbest)
print('The best value of y found:', cb_acer.rbest)
print('--------------- PPO results ---------------')
print('The best value of x found:', cb_ppo.xbest)
print('The best value of y found:', cb_ppo.rbest)
print('--------------- ACKTR results ---------------')
print('The best value of x found:', cb_acktr.xbest)
print('The best value of y found:', cb_acktr.rbest)
print('--------------- A2C results ---------------')
print('The best value of x found:', cb_a2c.xbest)
print('The best value of y found:', cb_a2c.rbest)
#--------------------------------
#Summary Plots
#--------------------------------
log_dqn = pd.DataFrame(cb_dqn.r_hist).cummax(axis = 0).values
log_acer = | pd.DataFrame(cb_acer.r_hist) | pandas.DataFrame |
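#--------------------------------
# Illustrative sketch only: one way to compare the reward logs gathered above.
# It assumes every RLLogger callback exposes an `r_hist` reward history (as used
# for DQN/ACER); the remaining optimizers can be added to the plot the same way.
#--------------------------------
plt.figure()
plt.plot(log_dqn, label='DQN')
plt.plot(log_acer, label='ACER')
plt.xlabel('Update step')
plt.ylabel('Cumulative max reward')
plt.legend()
plt.savefig('kp_reward_comparison.png', format='png', dpi=300, bbox_inches='tight')  # filename is arbitrary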
import sys
import pytz
import hashlib
import numpy as np
import pandas as pd
from datetime import datetime
def edit_form_link(link_text='Submit edits'):
"""Return HTML for link to form for edits"""
return f'<a href="https://docs.google.com/forms/d/e/1FAIpQLScw8EUGIOtUj994IYEM1W7PfBGV0anXjEmz_YKiKJc4fm-tTg/viewform">{link_text}</a>'
def add_google_analytics(input_html):
"""
Return HTML with Google Analytics block added
"""
ga_block = """
<!-- Global site tag (gtag.js) - Google Analytics -->
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-173043454-1"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-173043454-1');
</script>
"""
output_html = input_html.replace('<!-- replace with google analytics -->', ga_block)
return output_html
def add_geojson(shape_gdf, field_name, field_value, input_html):
"""
Add a GeoJSON feature as a Javascript variable to an HTML string
This variable will be used to calculate the bounds of the map
"""
shape_row = shape_gdf[shape_gdf[field_name] == field_value].copy()
shape_geo = shape_row.geometry.iloc[0]
geo_bounds = shape_geo.boundary[0].xy
output_string = '[['
for idx, value in enumerate(geo_bounds[0]):
if idx > 0:
output_string += ','
output_string += '['
x = geo_bounds[0][idx]
output_string += '{}'.format(x)
y = geo_bounds[1][idx]
output_string += ', {}'.format(y)
output_string += ']\n'
output_string += ']]'
output_html = input_html.replace('REPLACE_WITH_XY', output_string)
return output_html
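def _example_add_geojson_usage():
    """Illustrative sketch only: shows how add_geojson is intended to be called.
    The geopandas import, file name, and field values are placeholders, not real
    project assets."""
    import geopandas as gpd
    anc_gdf = gpd.read_file('maps/anc_boundaries.geojson')  # hypothetical boundary file
    page_html = '<script>var bounds = REPLACE_WITH_XY;</script>'
    # the marker is replaced with a [[x, y], ...] array used to fit the map bounds
    return add_geojson(anc_gdf, 'anc_id', '1A', page_html)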
def dc_coordinates():
"""Return coordinates for a DC-wide map"""
dc_longitude = -77.016243706276569
dc_latitude = 38.894858329321485
dc_zoom_level = 10.3
return dc_longitude, dc_latitude, dc_zoom_level
def anc_names(anc_id):
"""
Return formatted ANC names
"""
ancs = pd.read_csv('data/ancs.csv')
anc_upper = 'ANC' + anc_id
anc_lower = anc_upper.lower()
anc_neighborhoods = ancs[ancs['anc_id'] == anc_id]['neighborhoods'].values[0]
return anc_upper, anc_lower, anc_neighborhoods
def assemble_divo():
"""
Return DataFrame with one row per SMD and various stats about each SMD's ranking
divo = district-votes
"""
results = pd.read_csv('data/results.csv')
districts = | pd.read_csv('data/districts.csv') | pandas.read_csv |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
import re
import urllib.request
import os
import jinja2
import sys
import keras
from keras.models import load_model
def predict_torrmow():
model = load_model('model.h5')
df = pd.read_csv('golds.csv')
df.columns = ['date', 'price']
df['date'] = pd.to_datetime(df['date'])
df = df.set_index('date')
train = df[["price"]]
sc = MinMaxScaler()
train_sc = sc.fit_transform(train)
X_train = train_sc[:-1]
y_train = train_sc[1:]
X_train_t = X_train[:, None]
y_pred = model.predict(X_train_t)
result = sc.inverse_transform(y_pred)[-2:]
today = round(result[0][0])
tomorrow = round(result[1][0])
output = "flat"
if tomorrow > today:
output = "up"
elif tomorrow < today:
output = "down"
return output, tomorrow
def load_gold_price():
url = "https://www.goldtraders.or.th/default.aspx"
html = urllib.request.urlopen(urllib.request.Request(url, headers={'User-Agent': 'Mozilla'})).read().decode("utf-8")
date = re.findall('<span id="DetailPlace_uc_goldprices1_lblAsTime".*?>(\d\d/\d\d/25\d\d).*?</span>', html)
date = date[0] if len(date) else None
day, month, thai_year= date.split("/")
year = int(thai_year) - 543
price = re.findall('<span id="DetailPlace_uc_goldprices1_lblBLSell".*?>(.*?)</span>', html)
price = price[0] if len(price) else None
price = float(price.replace(",", ""))
return "{}-{}-{}".format(year, month, day), price
def render(tpl_path, context):
path, filename = os.path.split(tpl_path)
return jinja2.Environment(
loader=jinja2.FileSystemLoader(path or './')
).get_template(filename).render(context)
def render_predict(status, tomorrow):
df_predict = pd.read_csv("predicted.csv")
predicts = df_predict.sort_index(ascending=False).to_dict('records')
data = {"status": status, "tomorrow": tomorrow, "predicts": predicts}
html = render('./docs/template.html', data)
with open("./docs/index.html", "w") as html_file:
html_file.write(html)
df = pd.read_csv('golds.csv')
df.columns = ['date', 'price']
df['date'] = pd.to_datetime(df['date'])
df = df.set_index('date')
gold_date, gold_price = load_gold_price()
if gold_date is None or gold_price is None:
print("Can not get gold price.")
sys.exit()
print("{}: {}".format(gold_date, gold_price))
if gold_date in df.index:
print("Exist date: {}".format(gold_date))
print("Exit.")
sys.exit()
# Save new data
new_df_gold = pd.DataFrame([[gold_date, gold_price]], columns=["date","price"])
new_df_gold['date'] = pd.to_datetime(new_df_gold['date'])
new_df_gold = new_df_gold.set_index('date')
df = df.append(new_df_gold)
df["price"] = df["price"].astype(float)
df.to_csv("golds.csv")
sign, price = predict_torrmow()
x = | pd.DatetimeIndex(df.iloc[-1:].index.values) | pandas.DatetimeIndex |
import io
import numpy as np
import pytest
from pandas.compat._optional import VERSIONS
from pandas import (
DataFrame,
date_range,
read_csv,
read_excel,
read_feather,
read_json,
read_parquet,
read_pickle,
read_stata,
read_table,
)
import pandas._testing as tm
from pandas.util import _test_decorators as td
df1 = DataFrame(
{
"int": [1, 3],
"float": [2.0, np.nan],
"str": ["t", "s"],
"dt": date_range("2018-06-18", periods=2),
}
)
text = str(df1.to_csv(index=False)).encode()
@pytest.fixture
def cleared_fs():
fsspec = pytest.importorskip("fsspec")
memfs = fsspec.filesystem("memory")
yield memfs
memfs.store.clear()
def test_read_csv(cleared_fs):
with cleared_fs.open("test/test.csv", "wb") as w:
w.write(text)
df2 = read_csv("memory://test/test.csv", parse_dates=["dt"])
tm.assert_frame_equal(df1, df2)
def test_reasonable_error(monkeypatch, cleared_fs):
from fsspec import registry
from fsspec.registry import known_implementations
registry.target.clear()
with pytest.raises(ValueError, match="nosuchprotocol"):
read_csv("nosuchprotocol://test/test.csv")
err_msg = "test error message"
monkeypatch.setitem(
known_implementations,
"couldexist",
{"class": "unimportable.CouldExist", "err": err_msg},
)
with pytest.raises(ImportError, match=err_msg):
read_csv("couldexist://test/test.csv")
def test_to_csv(cleared_fs):
df1.to_csv("memory://test/test.csv", index=True)
df2 = read_csv("memory://test/test.csv", parse_dates=["dt"], index_col=0)
tm.assert_frame_equal(df1, df2)
@pytest.mark.parametrize("ext", ["xls", "xlsx"])
def test_to_excel(cleared_fs, ext):
if ext == "xls":
pytest.importorskip("xlwt")
else:
pytest.importorskip("openpyxl")
path = f"memory://test/test.{ext}"
df1.to_excel(path, index=True)
df2 = read_excel(path, parse_dates=["dt"], index_col=0)
tm.assert_frame_equal(df1, df2)
@pytest.mark.parametrize("binary_mode", [False, True])
def test_to_csv_fsspec_object(cleared_fs, binary_mode):
fsspec = pytest.importorskip("fsspec")
path = "memory://test/test.csv"
mode = "wb" if binary_mode else "w"
fsspec_object = fsspec.open(path, mode=mode).open()
df1.to_csv(fsspec_object, index=True)
assert not fsspec_object.closed
fsspec_object.close()
mode = mode.replace("w", "r")
fsspec_object = fsspec.open(path, mode=mode).open()
df2 = read_csv(
fsspec_object,
parse_dates=["dt"],
index_col=0,
)
assert not fsspec_object.closed
fsspec_object.close()
tm.assert_frame_equal(df1, df2)
def test_csv_options(fsspectest):
df = DataFrame({"a": [0]})
df.to_csv(
"testmem://test/test.csv", storage_options={"test": "csv_write"}, index=False
)
assert fsspectest.test[0] == "csv_write"
read_csv("testmem://test/test.csv", storage_options={"test": "csv_read"})
assert fsspectest.test[0] == "csv_read"
def test_read_table_options(fsspectest):
# GH #39167
df = DataFrame({"a": [0]})
df.to_csv(
"testmem://test/test.csv", storage_options={"test": "csv_write"}, index=False
)
assert fsspectest.test[0] == "csv_write"
read_table("testmem://test/test.csv", storage_options={"test": "csv_read"})
assert fsspectest.test[0] == "csv_read"
@pytest.mark.parametrize("extension", ["xlsx", "xls"])
def test_excel_options(fsspectest, extension):
if extension == "xls":
pytest.importorskip("xlwt")
else:
pytest.importorskip("openpyxl")
df = DataFrame({"a": [0]})
path = f"testmem://test/test.{extension}"
df.to_excel(path, storage_options={"test": "write"}, index=False)
assert fsspectest.test[0] == "write"
read_excel(path, storage_options={"test": "read"})
assert fsspectest.test[0] == "read"
@td.skip_if_no("fastparquet")
def test_to_parquet_new_file(cleared_fs):
"""Regression test for writing to a not-yet-existent GCS Parquet file."""
df1.to_parquet(
"memory://test/test.csv", index=True, engine="fastparquet", compression=None
)
@td.skip_if_no("pyarrow", min_version="2")
def test_arrowparquet_options(fsspectest):
"""Regression test for writing to a not-yet-existent GCS Parquet file."""
df = DataFrame({"a": [0]})
df.to_parquet(
"testmem://test/test.csv",
engine="pyarrow",
compression=None,
storage_options={"test": "parquet_write"},
)
assert fsspectest.test[0] == "parquet_write"
read_parquet(
"testmem://test/test.csv",
engine="pyarrow",
storage_options={"test": "parquet_read"},
)
assert fsspectest.test[0] == "parquet_read"
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) fastparquet
@td.skip_if_no("fastparquet")
def test_fastparquet_options(fsspectest):
"""Regression test for writing to a not-yet-existent GCS Parquet file."""
df = DataFrame({"a": [0]})
df.to_parquet(
"testmem://test/test.csv",
engine="fastparquet",
compression=None,
storage_options={"test": "parquet_write"},
)
assert fsspectest.test[0] == "parquet_write"
read_parquet(
"testmem://test/test.csv",
engine="fastparquet",
storage_options={"test": "parquet_read"},
)
assert fsspectest.test[0] == "parquet_read"
@pytest.mark.single_cpu
@td.skip_if_no("s3fs")
def test_from_s3_csv(s3_resource, tips_file, s3so):
tm.assert_equal(
read_csv("s3://pandas-test/tips.csv", storage_options=s3so), read_csv(tips_file)
)
# the following are decompressed by pandas, not fsspec
tm.assert_equal(
read_csv("s3://pandas-test/tips.csv.gz", storage_options=s3so),
read_csv(tips_file),
)
tm.assert_equal(
read_csv("s3://pandas-test/tips.csv.bz2", storage_options=s3so),
read_csv(tips_file),
)
@pytest.mark.single_cpu
@pytest.mark.parametrize("protocol", ["s3", "s3a", "s3n"])
@td.skip_if_no("s3fs")
def test_s3_protocols(s3_resource, tips_file, protocol, s3so):
tm.assert_equal(
read_csv("%s://pandas-test/tips.csv" % protocol, storage_options=s3so),
read_csv(tips_file),
)
@pytest.mark.single_cpu
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) fastparquet
@td.skip_if_no("s3fs")
@td.skip_if_no("fastparquet")
def test_s3_parquet(s3_resource, s3so):
fn = "s3://pandas-test/test.parquet"
df1.to_parquet(
fn, index=False, engine="fastparquet", compression=None, storage_options=s3so
)
df2 = read_parquet(fn, engine="fastparquet", storage_options=s3so)
tm.assert_equal(df1, df2)
@td.skip_if_installed("fsspec")
def test_not_present_exception():
msg = "Missing optional dependency 'fsspec'|fsspec library is required"
with pytest.raises(ImportError, match=msg):
read_csv("memory://test/test.csv")
@td.skip_if_no("pyarrow")
def test_feather_options(fsspectest):
df = DataFrame({"a": [0]})
df.to_feather("testmem://afile", storage_options={"test": "feather_write"})
assert fsspectest.test[0] == "feather_write"
out = read_feather("testmem://afile", storage_options={"test": "feather_read"})
assert fsspectest.test[0] == "feather_read"
| tm.assert_frame_equal(df, out) | pandas._testing.assert_frame_equal |
import numpy as np
import pandas as pd
import nltk
from nltk.stem.porter import *
import string
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from time import time
from collections import OrderedDict
from operator import itemgetter
from scipy.sparse.linalg import norm
import scipy
def removing_stop_words(df_with_body) :
stopwords = ['me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your', 'yours',
'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', 'her', 'hers',
'herself', 'it', 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves',
'what', 'which', 'who', 'whom', 'this', 'that', 'these', 'those', 'am', 'is', 'are',
'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does',
'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until',
'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into',
'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down',
'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here',
'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more',
'than', 'too', 'very', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'z',
'can', 'will', 'just', 'don', 'should', 'now', ' ']
data = []
i=0
for text in df_with_body['body'].tolist():
#
# Remove punctuation
punctuation = set(string.punctuation)
doc = ''.join(w for w in text.lower() if w not in punctuation)
# Stopword removal
doc = [w for w in doc.split() if w not in stopwords]
doc = [w for w in doc if not (any(c.isdigit() for c in w))]
# Stemming
stemmer=PorterStemmer()
doc2= [stemmer.stem(w) for w in doc]
        # Convert list of words to one string
doc2 = ' '.join(doc2)
i+=1
if i%1000==0:
print(i)
data.append(doc2) # list data contains the preprocessed documents
data_result = pd.DataFrame({'word split': data})
data_result = | pd.concat([df_with_body, data_result], axis=1, join='inner') | pandas.concat |
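def _example_tfidf_similarity(df_with_body):
    """Illustrative sketch only: one way the preprocessed 'word split' column could
    feed the TfidfVectorizer/linear_kernel imports above. Assumes removing_stop_words
    returns the input frame with the added 'word split' column."""
    processed = removing_stop_words(df_with_body)
    vectorizer = TfidfVectorizer()
    tfidf = vectorizer.fit_transform(processed['word split'])
    # cosine similarity of every document against the first one
    return linear_kernel(tfidf[0:1], tfidf).flatten()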
import requests, bs4, re
import pandas as pd
import numpy as np
import yfinance as yf
import xlrd
import os
import time
import lxml
from datetime import datetime
def getPER(TickerList,TickerName,path,dataframe=None):
if dataframe is not None:
df = dataframe
newFile = False
else:
df = pd.DataFrame()
newFile = True
PosCount = 0
for i in range(0,len(TickerList)): #Fills in the EPS column for each company
x = TickerList[i]
y = TickerName[i]
url = 'https://www.macrotrends.net/stocks/charts/{}/{}/pe-ratio'.format(x,y)
res = requests.get(url)
res.raise_for_status()
soup = bs4.BeautifulSoup(res.text,'lxml')
list2 = soup.findAll('tr')
Search1 = re.compile(r'(\d\d\d\d)-(\d\d)-(\d\d)')
Search2 = re.compile(r'\d{1,}\.\d{2}')
if not newFile:
indxfir = df[x].first_valid_index()
for j in range(0,len(list2)):
mo1 = Search1.search(str(list2[j]))
if mo1 != None:
mo2 = Search2.findall(str(list2[j]))
if mo1.group(2) == '12' or mo1.group(2) == '11':
quart = 'Q4 {}'.format(mo1.group(1))
elif mo1.group(2) == '01':
quart = 'Q4 {}'.format(str(int(mo1.group(1))-1))
elif mo1.group(2) == '09' or mo1.group(2) == '10'or mo1.group(2) == '08':
quart = 'Q3 {}'.format(mo1.group(1))
elif mo1.group(2) == '06' or mo1.group(2) == '07'or mo1.group(2) == '05':
quart = 'Q2 {}'.format(mo1.group(1))
elif mo1.group(2) == '03' or mo1.group(2) == '04'or mo1.group(2) == '02':
quart = 'Q1 {}'.format(mo1.group(1))
else:
print(mo1.group(2))
print('Something has gone wrong')
if newFile:
if i == 0:
df.loc[PosCount,'Date'] = quart
df.loc[PosCount,x] = mo2[-1]
PosCount += 1
else:
if (quart in df.loc[:,'Date'].values):
indx = df.index[df['Date']==quart]
df.loc[indx,x] = mo2[-1]
else:
df.loc[PosCount,'Date'] = quart
df.loc[PosCount,x] = mo2[-1]
PosCount += 1
else:
if x in df.columns:
if int(df.loc[indxfir,'Date'][-4:]) <= int(quart[-4:]): #checks the year, if smaller or equal to year update.
if (quart in df.loc[:,'Date'].values):
indx = df.index[df['Date']==quart][0] #added zero to get the integer, otherwise it will make a new column
if df.loc[indx,x] != mo2[-1]:
df.loc[indx,x] = mo2[-1]
else:
lenDF = len(df)
df.loc[lenDF] = 'NaN'
df.loc[lenDF,'Date'] = quart
df.loc[lenDF,x] = mo2[-1]
else:
print('new Ticker')
if (quart in df.loc[:,'Date'].values):
indx = df.index[df['Date']==quart][0] #added zero to get the integer, otherwise it will make a new column
df.loc[indx,x] = mo2[-1]
else:
lenDF = len(df)
df.loc[lenDF] = 'NaN'
df.loc[lenDF,'Date'] = quart
df.loc[lenDF,x] = mo2[-1]
df = df.set_index('Date')
df = df.reindex(sorted(df.index, key=lambda x: x.split(' ')[::-1],reverse=True)).reset_index()
df.to_excel(path)
return df
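def _example_getPER_usage():
    """Illustrative sketch only: macrotrends URLs follow
    /stocks/charts/<TICKER>/<company-slug>/pe-ratio, so tickers and lower-case
    company slugs must be passed in matching order. The symbols below and the
    output file name are placeholders."""
    tickers = ['AAPL', 'MSFT']
    names = ['apple', 'microsoft']
    return getPER(tickers, names, 'PER.xlsx')  # no dataframe given, so a fresh sheet is built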
def getRevenue(TickerList,TickerName,path,dataframe=None):
if dataframe is not None:
df = dataframe
newFile = False
else:
df = pd.DataFrame()
newFile = True
PosCount = 0
for i in range(0,len(TickerList)): #Fills in the EPS column for each company
x = TickerList[i]
y = TickerName[i]
url1 = 'https://www.macrotrends.net/stocks/charts/{}/{}/revenue'.format(x,y)
res = requests.get(url1)
res.raise_for_status()
soup = bs4.BeautifulSoup(res.text,'lxml')
list2 = soup.findAll('tr')
Search = re.compile(r'Q\d \d\d\d\d') #Search regex for Quarters
Search2 = re.compile(r'\$\-?\d{1,},\d{0,}') #Search regex for EPS
if not newFile:
indxfir = df[x].first_valid_index()
for j in range(0,len(list2)):
mo1 = Search.search(str(list2[j])[35:42])
if mo1 != None:
quart = mo1.group()
mo2 = Search2.search(str(list2[j]))
if mo2 != None:
if newFile:
if i == 0:
df.loc[PosCount,'Date'] = quart
df.loc[PosCount,x] = mo2.group()
PosCount += 1
else:
if (quart in df.loc[:,'Date'].values):
indx = df.index[df['Date']==quart][0]
df.loc[indx,x] = mo2.group()
else:
df.loc[PosCount,'Date'] = quart
df.loc[PosCount,x] = mo2.group()
PosCount += 1
else:
if x in df.columns:
if int(df.loc[indxfir,'Date'][-4:]) <= int(quart[-4:]):
if (quart in df.loc[:,'Date'].values):
indx = df.index[df['Date']==quart]
if df.loc[indx,x].values != mo2.group():
df.loc[indx,x] = mo2.group()
else:
lenDF = len(df)
df.loc[lenDF] = 'NaN'
df.loc[lenDF,'Date'] = quart
df.loc[lenDF,x] = mo2.group()
else:
if (quart in df.loc[:,'Date'].values):
indx = df.index[df['Date']==quart]
df.loc[indx,x] = mo2.group()
else:
lenDF = len(df)
df.loc[lenDF] = 'NaN'
df.loc[lenDF,'Date'] = quart
df.loc[lenDF,x] = mo2.group()
df = df.set_index('Date')
df = df.reindex(sorted(df.index, key=lambda x: x.split(' ')[::-1],reverse=True)).reset_index()
df.to_excel(path)
return df
def getEPS(TickerList,TickerName,path,dataframe=None):
if dataframe is not None:
df = dataframe
newFile = False
else:
df = pd.DataFrame()
newFile = True
PosCount = 0
for i in range(0,len(TickerList)): #Fills in the EPS column for each company
x = TickerList[i]
y = TickerName[i]
url1 = 'https://www.macrotrends.net/stocks/charts/{}/{}/eps-earnings-per-share-diluted'.format(x,y)
res = requests.get(url1)
res.raise_for_status()
soup = bs4.BeautifulSoup(res.text,'lxml')
list2 = soup.findAll('tr')
Search = re.compile(r'Q\d \d\d\d\d') #Search regex for Quarters
Search2 = re.compile(r'\$\-?\d{1,}.\d{2}') #Search regex for EPS
if not newFile:
indxfir = df[x].first_valid_index()
for j in range(0,len(list2)):
mo1 = Search.search(str(list2[j])[35:42])
if mo1 != None:
mo2 = Search2.search(str(list2[j]))
quart = mo1.group()
if newFile:
if i == 0:
df.loc[PosCount,'Date'] = quart
df.loc[PosCount,x] = mo2.group()
#print(quart)
#print(mo2.group())
PosCount+=1
else:
if (quart in df.loc[:,'Date'].values):
indx = df.index[df['Date']==quart]
df.loc[indx,x] = mo2.group()
else:
df.loc[PosCount,'Date'] = quart
df.loc[PosCount,x] = mo2.group()
PosCount += 1
else:
if x in df.columns:
if int(df.loc[indxfir,'Date'][-4:]) <= int(quart[-4:]):
if (quart in df.loc[:,'Date'].values):
indx = df.index[df['Date']==quart]
if df.loc[indx,x].values != mo2.group():
df.loc[indx,x] = mo2.group()
else:
lenDF = len(df)
df.loc[lenDF] = 'NaN'
df.loc[lenDF,'Date'] = quart
df.loc[lenDF,x] = mo2.group()
else:
if (quart in df.loc[:,'Date'].values):
indx = df.index[df['Date']==quart]
df.loc[indx,x] = mo2.group()
else:
lenDF = len(df)
df.loc[lenDF] = 'NaN'
df.loc[lenDF,'Date'] = quart
df.loc[lenDF,x] = mo2.group()
df = df.set_index('Date')
df = df.reindex(sorted(df.index, key=lambda x: x.split(' ')[::-1],reverse=True)).reset_index()
df.to_excel(path)
return df
def getDER(TickerList,TickerName,path,dataframe=None):
if dataframe is not None:
df = dataframe
newFile = False
else:
df = pd.DataFrame()
newFile = True
PosCount = 0
for i in range(0,len(TickerList)): #Fills in the EPS column for each company
x = TickerList[i]
y = TickerName[i]
url = 'https://www.macrotrends.net/stocks/charts/{}/{}/debt-equity-ratio'.format(x,y)
res = requests.get(url)
res.raise_for_status()
soup = bs4.BeautifulSoup(res.text,'lxml')
list2 = soup.findAll('tr')
Search1 = re.compile(r'(\d\d\d\d)-(\d\d)-(\d\d)')
Search2 = re.compile(r'-?\d{1,}\.\d{2}')
if not newFile:
indxfir = df[x].first_valid_index()
for j in range(0,len(list2)):
mo1 = Search1.search(str(list2[j]))
if mo1 != None:
mo2 = Search2.findall(str(list2[j]))
if mo1.group(2) == '12' or mo1.group(2) == '11':
quart = 'Q4 {}'.format(mo1.group(1))
elif mo1.group(2) == '01':
quart = 'Q4 {}'.format(str(int(mo1.group(1))-1))
elif mo1.group(2) == '09' or mo1.group(2) == '10'or mo1.group(2) == '08':
quart = 'Q3 {}'.format(mo1.group(1))
elif mo1.group(2) == '06' or mo1.group(2) == '07'or mo1.group(2) == '05':
quart = 'Q2 {}'.format(mo1.group(1))
elif mo1.group(2) == '03' or mo1.group(2) == '04'or mo1.group(2) == '02':
quart = 'Q1 {}'.format(mo1.group(1))
else:
print(mo1.group(2))
print('Something has gone wrong')
if newFile:
if i == 0:
df.loc[PosCount,'Date'] = quart
df.loc[PosCount,x] = mo2[-1]
PosCount += 1
else:
if (quart in df.loc[:,'Date'].values):
indx = df.index[df['Date']==quart][0]
df.loc[indx,x] = mo2[-1]
else:
df.loc[PosCount,'Date'] = quart
df.loc[PosCount,x] = mo2[-1]
PosCount += 1
else:
if x in df.columns:
if int(df.loc[indxfir,'Date'][-4:]) <= int(quart[-4:]):
if (quart in df.loc[:,'Date'].values):
indx = df.index[df['Date']==quart][0]
if df.loc[indx,x] != mo2[-1]:
#print(indx[0])
df.loc[indx,x] = mo2[-1]
else:
lenDF = len(df)
df.loc[lenDF] = 'NaN'
df.loc[lenDF,'Date'] = quart
df.loc[lenDF,x] = mo2[-1]
else:
                            if (quart in df.loc[:,'Date'].values):
indx = df.index[df['Date']==quart][0]
df.loc[indx,x] = mo2[-1]
else:
lenDF = len(df)
df.loc[lenDF] = 'NaN'
df.loc[lenDF,'Date'] = quart
df.loc[lenDF,x] = mo2[-1]
df = df.set_index('Date')
df = df.reindex(sorted(df.index, key=lambda x: x.split(' ')[::-1],reverse=True)).reset_index()
df.to_excel(path)
return df
def getDividend(TickerList,TickerName,path,dataframe=None): #it does not need a ticker name, but otherwise the loop fails
if dataframe is not None:
df = dataframe
newFile = False
else:
df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
from sklearn.metrics import roc_curve, auc, confusion_matrix, precision_score, recall_score, f1_score
from sklearn.metrics import average_precision_score, precision_recall_curve
from ._woe_binning import woe_binning, woe_binning_2, woe_binning_3
class Metrics:
def __init__(self, df, actual, prediction):
self.df = df
self.target = actual
self.actual = df[actual]
self.prediction = df[prediction]
self.gains = self.calculate_gains()
self.ks = self.ks()
self.gini = self.gini()
self.tn, self.fp, self.fn, self.tp, self.precision, self.recall, self.f1_score = self.precision_recall_f1_score()
def calculate_gains(self):
"""Returns a pandas dataframe with gains along with KS and Gini calculated"""
self.df['scaled_score'] = (self.df['positive_probability']*1000000).round(0)
gains = self.df.groupby('scaled_score')[self.target].agg(['count','sum'])
gains.columns = ['total','responders']
gains.reset_index(inplace=True)
        gains = gains.sort_values(by='scaled_score', ascending=False)  # cumulative capture must run from the highest score down
gains['non_responders'] = gains['total'] - gains['responders']
gains['cum_resp'] = gains['responders'].cumsum()
gains['cum_non_resp'] = gains['non_responders'].cumsum()
gains['total_resp'] = gains['responders'].sum()
gains['total_non_resp'] = gains['non_responders'].sum()
gains['perc_resp'] = (gains['responders']/gains['total_resp'])*100
gains['perc_non_resp'] = (gains['non_responders']/gains['total_non_resp'])*100
gains['perc_cum_resp'] = gains['perc_resp'].cumsum()
gains['perc_cum_non_resp'] = gains['perc_non_resp'].cumsum()
gains['k_s'] = gains['perc_cum_resp'] - gains['perc_cum_non_resp']
return gains
def get_threshold(self):
"""Returns a pandas dataframe with y_pred based on threshold from roc_curve."""
fpr, tpr, threshold = roc_curve(self.actual, self.prediction)
threshold_cutoff_df = pd.DataFrame({'fpr': fpr, 'tpr': tpr, 'threshold': threshold})
threshold_cutoff_df['distance'] = ((threshold_cutoff_df['fpr']-0)**2+(threshold_cutoff_df['tpr']-1)**2)**0.5
threshold_cutoff_df['distance_diff'] = abs(threshold_cutoff_df['distance'].diff(periods=1))
for index, rows in threshold_cutoff_df.iterrows():
if index != 0 and index != threshold_cutoff_df.shape[0]-1:
curr_val = threshold_cutoff_df.loc[index, 'distance_diff']
prev_val = threshold_cutoff_df.loc[index-1, 'distance_diff']
next_val = threshold_cutoff_df.loc[index+1, 'distance_diff']
if curr_val>prev_val and curr_val>next_val:
threshold_cutoff = threshold_cutoff_df.loc[index, 'threshold']
break
return threshold_cutoff
def gini(self):
fpr, tpr, threshold = roc_curve(self.actual, self.prediction)
auroc = auc(fpr, tpr)
gini = 2*auroc -1
return gini
def ks(self):
gains = self.gains()
return gains['k_s'].max()
def precision_recall_f1_score(self):
threshold_cutoff = self.get_threshold()
self.y_pred = np.where(self.prediction>=threshold_cutoff,1,0)
self.df['y_pred'] = self.y_pred
tn, fp, fn, tp = confusion_matrix(self.actual, self.y_pred).ravel()
precision = precision_score(self.actual, self.y_pred)
recall = recall_score(self.actual, self.y_pred)
f1 = f1_score(self.actual, self.y_pred)
return tn, fp, fn, tp, precision, recall, f1
def to_dict(self):
return {'ks': self.ks, 'gini': self.gini, 'tn': self.tn, 'tp': self.tp, 'fn': self.fn, 'fp': self.fp, 'precision': self.precision, 'recall': self.recall, 'f1_score': self.f1_score}
def standard_metrics(df, target_col, prediction_col):
"""Returns a dict with all metrics - Gini, KS, Precision, Recall, F1 Score, True Negative, True Positive, False Positive, False Negative."""
metrics = Metrics(df, target_col, prediction_col)
return metrics.to_dict()
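def _example_standard_metrics_usage():
    """Illustrative sketch only: builds a hypothetical scored dataframe and runs it
    through standard_metrics; column names follow the conventions used above."""
    rng = np.random.default_rng(0)
    proba = rng.uniform(0, 1, 500)
    actual = (proba + rng.normal(0, 0.3, 500) > 0.5).astype(int)
    scores = pd.DataFrame({'target': actual, 'positive_probability': proba})
    return standard_metrics(scores, 'target', 'positive_probability')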
def quick_psi(dev, val):
"""Calculate PSI from 2 arrays - dev and val"""
return sum([(a-b)*np.log(a/b) for (a,b) in zip(dev,val)])
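def _example_quick_psi():
    """Worked example with hypothetical decile shares (each list sums to 1):
    PSI = (0.12-0.10)*ln(0.12/0.10) + (0.38-0.40)*ln(0.38/0.40) + (0.50-0.50)*ln(1)
        ~= 0.0047, i.e. the two distributions are effectively stable (PSI < 0.1)."""
    return quick_psi([0.12, 0.38, 0.50], [0.10, 0.40, 0.50])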
def psi(dev, val, target='positive_probability', n_bins=10):
"""
Returns a pandas dataframe with psi column (Population Stability Index) after creating 10 deciles.
Code includes creating score calculation using round(500-30 x log(100 x (p/(1-p))), 0) where p is probability.
We need to pass both dev and val at same time to apply same bins created on dev dataframe.
"""
dev['score'] = dev[target].apply(lambda x: round(500-30*np.log2(100*(x/(1-x))), 0))
val['score'] = val[target].apply(lambda x: round(500-30*np.log2(100*(x/(1-x))), 0))
_, bins = pd.qcut(dev.score, n_bins, retbins=True, precision=0)
bins = [int(i) if abs(i)!=np.inf else i for i in bins]
dev['bins'] = pd.cut(dev.score, bins)
val['bins'] = pd.cut(val.score, bins)
dev_bins = dev.bins.value_counts(sort=False, normalize=True)
val_bins = val.bins.value_counts(sort=False, normalize=True)
psi_ = pd.concat([dev_bins, val_bins], axis=1)
psi_.columns = ['dev', 'val']
psi_['psi'] = (psi_.dev - psi_.val)*np.log(psi_.dev/psi_.val)
return psi_
def gsi(data, col='GENDER', col_val='F', target='positive_probability', n_bins=10):
"""
Returns a pandas dataframe with gsi columns (Group Stability Index) after creating n bins.
Args:
data: pandas dataframe
col: Columns on which GSI has to be calculated (ex: Gender column)
col_val: selected value will be compared with rest of the values (ex: F vs Rest)
target: score column
n_bins: number of bins to be created (Default=10)
"""
df = data.copy()
df['decile'] = pd.qcut(df[target], n_bins, labels=False)
df.loc[df[col]!=col_val, col] = 'Rest'
pivot_ = df.groupby(['decile', col])[target].count().unstack()
pivot = pivot_.div(pivot_.sum(axis=0), axis=1)
pivot['gsi'] = (pivot[col_val]-pivot['Rest'])*np.log(pivot[col_val]/pivot['Rest'])
return pivot
def iv(df, suffix='_dev'):
"""Returns a pandas dataframe with calculated fields - resp_rate, perc_dist, perc_non_resp, perc_resp, raw_odds, ln_odds, iv, exp_resp, exp_non_resp, chi_square."""
df['resp_rate'+suffix] = (df['responders'+suffix]*100)/df['total'+suffix]
df['perc_dist'+suffix] = (df['total'+suffix]*100)/df.groupby('var_name')['total'+suffix].transform('sum')
df['perc_non_resp'+suffix] = (df['non_responders'+suffix]*100)/df.groupby('var_name')['non_responders'+suffix].transform('sum')
df['perc_resp'+suffix] = (df['responders'+suffix]*100)/df.groupby('var_name')['responders'+suffix].transform('sum')
df['raw_odds'+suffix] = df.apply(lambda r: 0 if r['perc_resp'+suffix]==0 else r['perc_non_resp'+suffix]/r['perc_resp'+suffix], axis=1)
df['ln_odds'+suffix] = df['raw_odds'+suffix].apply(lambda x: 0 if abs(np.log(x))==np.inf else np.log(x))
df['iv'+suffix] = (df['perc_non_resp'+suffix]-df['perc_resp'+suffix])*df['ln_odds'+suffix]/100
df['exp_resp'+suffix] = df['total'+suffix]*df.groupby('var_name')['responders'+suffix].transform('sum')/df.groupby('var_name')['total'+suffix].transform('sum')
df['exp_non_resp'+suffix] = df['total'+suffix]*df.groupby('var_name')['non_responders'+suffix].transform('sum')/df.groupby('var_name')['total'+suffix].transform('sum')
df['chi_square'+suffix] = (((df['responders'+suffix]-df['exp_resp'+suffix])**2)/df['exp_resp'+suffix]) + (((df['non_responders'+suffix]-df['exp_non_resp'+suffix])**2)/df['exp_non_resp'+suffix])
return df
def iv_var(df, var_name, resp_name, suffix='_dev', var_cuts=None):
"""Returns IV of a variable"""
summ_df, _ = woe_bins(df, var_name, resp_name, suffix, var_cuts)
iv_ = iv(summ_df, suffix)
return iv_, iv_['iv'+suffix].sum()
def woe_bins(df, var_name, resp_name, suffix='_dev', var_cuts=None):
"""
    Return a summary dataframe and the corresponding var_cuts after binning the variable.
Returns:
df: pandas dataframe has var_cuts_string, total, responders, non_responders, var_name (with _dev or _val suffix)
var_cuts: list of Interval items to be used on val file.
"""
df1 = df[[resp_name, var_name]]
if (np.issubdtype(df1[var_name].dtype, np.number)):
n = df1[var_name].nunique()
if var_cuts is None:
suffix = '_dev'
var_cuts = woe_binning_3(df1, resp_name, var_name, 0.05, 0.00001, 0, 50, 'bad', 'good')
var_cuts = list(set(var_cuts))
var_cuts.sort()
df1.loc[:,'var_binned'] = pd.cut(df[var_name], var_cuts, right=True, labels=None, retbins=False, precision=10, include_lowest=False)
var_min = float(df1[var_name].min())
var_max = float(df1[var_name].max())
summ_df = df1.groupby('var_binned')[resp_name].agg(['count','sum']).reset_index()
summ_df['delta'] = summ_df['count'] - summ_df['sum']
summ_df['var_name'] = var_name
summ_df.columns = ['var_cuts', 'total'+suffix, 'responders'+suffix, 'non_responders'+suffix, 'var_name']
summ_df['var_cuts_string'+suffix] = summ_df.var_cuts.apply(lambda x: str(x.left if x.left!=-np.inf else var_min)+' To '+str(x.right if x.right!=np.inf else var_max))
else:
df1[var_name].fillna('Blank', inplace=True)
summ_df = df1.groupby(var_name)[resp_name].agg(['count','sum']).reset_index()
summ_df['delta'] = summ_df['count'] - summ_df['sum']
summ_df['var_name'] = var_name
summ_df.columns = ['var_cuts_string'+suffix, 'total'+suffix, 'responders'+suffix, 'non_responders'+suffix, 'var_name']
summ_df['var_cuts'] = summ_df['var_cuts_string'+suffix]
return summ_df[summ_df['total'+suffix]!=0], var_cuts
def csi(dev_df, val_df, var_list, resp_name):
"""Returns a pandas dataframe with csi, csi_var, perc_csi columns (Charecteristic Stability Index) calculated based on both dev and val dataframes."""
dev_df.fillna(0, inplace=True)
val_df.fillna(0, inplace=True)
dev_dfs = []
var_cuts = {}
for var_name in var_list:
summ_df, cut = woe_bins(dev_df, var_name, resp_name, '_dev')
dev_dfs.append(summ_df)
var_cuts[var_name] = cut
dev = pd.concat(dev_dfs, axis=0)
dev = iv(dev, '_dev')
val_dfs = []
val_cuts = {}
for var_name in var_list:
val_summ_df, val_cut = woe_bins(val_df, var_name, resp_name, '_val', var_cuts[var_name])
val_dfs.append(val_summ_df)
val_cuts[var_name] = val_cut
val = pd.concat(val_dfs, axis=0)
val = iv(val, '_val')
final = pd.merge(dev, val, how='left', on=['var_name', 'var_cuts'], suffixes=['_dev','_val'])
final['csi'] = ((final['perc_dist_dev']-final['perc_dist_val'])/100)*np.log(final['perc_dist_dev']/final['perc_dist_val'])
final['csi_var'] = final.groupby('var_name')['csi'].transform('sum')
final['perc_csi'] = (100*final.groupby('var_name')['csi'].transform('cumsum'))/final.groupby('var_name')['csi'].transform('sum')
return final
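def _example_csi_usage(dev_df, val_df):
    """Illustrative sketch only: `dev_df`/`val_df` are hypothetical development and
    validation frames that share the listed feature columns and a binary 'target'."""
    feature_cols = ['age', 'income', 'utilization']  # hypothetical feature names
    csi_table = csi(dev_df, val_df, feature_cols, 'target')
    # characteristic stability per variable (csi_var repeats across that variable's bins)
    return csi_table[['var_name', 'csi_var']].drop_duplicates()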
def get_decilewise_counts(df, target, bins=10, cutpoints=None):
"""Returns a summarized pandas dataframe with total and responders for each decile based on positive_probability."""
if cutpoints is None:
cutpoints = df['positive_probability'].quantile(np.arange(0, bins+1)/bins).reset_index(drop=True)
cutpoints = [0] + list(cutpoints) + [1]
df['bins'] = | pd.cut(df['positive_probability'], cutpoints) | pandas.cut |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 26 13:09:15 2018
@author: alber
"""
# Exploratory study
import os
import glob
import pandas as pd
import re
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
import numpy as np
from scipy.stats.mstats import normaltest
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
global stemmer
import pickle
stemmer = SnowballStemmer("english")
words = []
word_list = []
df_pattern = pd.DataFrame()
i = 0
# Tokenize the corpus
for utterance in documento:
    # Tokenize each sentence
    w = re.findall(r'\w+', utterance.lower(),flags = re.UNICODE) # lowercase everything
words = w
    # Remove stop words
words = [word for word in words if word not in stopwords.words('english')]
    # Remove dashes and other stray symbols
    words = [word for word in words if not word.isdigit()] # remove numbers
    # Stemming and duplicate removal
words = [stemmer.stem(w) for w in words]
    # Initialize the bag of words
pattern_words = words
df = pd.DataFrame(pattern_words)
df['ocurrencias'] = 1
df.columns = ['palabras', 'ocurrencias']
    df = df.groupby(['palabras'])['ocurrencias'].sum() # at this point the words move into the index, which sorts them
df = | pd.DataFrame(df) | pandas.DataFrame |
import pandas as pd
import numpy as np
import re
import datetime
import os
pth = os.path.join('~', 'Documents', 'ML_modules', 'Fin_LSTM_data')
c_path = os.path.join(pth, 'CompanyDataPanel.pickle')
t_path = os.path.join(pth, 'GoogleDomesticTrends.xlsx')
company_data = pd.read_pickle(c_path)
company_data = company_data.drop(['USDGBP', 'TETHF'], axis=2)
trends_data = pd.read_excel(t_path, index_col=0)
company_data.minor_axis
sectorkeys = ['transportation', 'health', 'energy', 'nondurables',
'nondurables', 'technology', 'transportation', 'health', 'health',
'technology', 'technology', 'transportation', 'nondurables', 'energy']
sector_dict = {idx : sec for idx, sec in zip(company_data.minor_axis, sectorkeys)}
observations = | pd.DataFrame() | pandas.DataFrame |
import unittest
import os
import shutil
import numpy as np
import pandas as pd
from aistac import ConnectorContract
from ds_discovery import Wrangle, SyntheticBuilder
from ds_discovery.intent.wrangle_intent import WrangleIntentModel
from aistac.properties.property_manager import PropertyManager
class WrangleIntentCorrelateTest(unittest.TestCase):
def setUp(self):
os.environ['HADRON_PM_PATH'] = os.path.join('work', 'config')
os.environ['HADRON_DEFAULT_PATH'] = os.path.join('work', 'data')
try:
os.makedirs(os.environ['HADRON_PM_PATH'])
os.makedirs(os.environ['HADRON_DEFAULT_PATH'])
except:
pass
PropertyManager._remove_all()
def tearDown(self):
try:
shutil.rmtree('work')
except:
pass
@property
def tools(self) -> WrangleIntentModel:
return Wrangle.scratch_pad()
def test_runs(self):
"""Basic smoke test"""
im = Wrangle.from_env('tester', default_save=False, default_save_intent=False,
reset_templates=False, has_contract=False).intent_model
self.assertTrue(WrangleIntentModel, type(im))
def test_correlate_custom(self):
tools = self.tools
df = pd.DataFrame()
df['A'] = [1, 2, 3]
result = tools.correlate_custom(df, code_str="[x + 2 for x in @['A']]")
self.assertEqual([3, 4, 5], result)
result = tools.correlate_custom(df, code_str="[True if x == $v1 else False for x in @['A']]", v1=2)
self.assertEqual([False, True, False], result)
def test_correlate_choice(self):
tools = self.tools
df = pd.DataFrame()
df['A'] = [[1,2,4,6], [1], [2,4,8,1], [2,4]]
result = tools.correlate_choice(df, header='A', list_size=2)
control = [[1, 2], [1], [2, 4], [2, 4]]
self.assertEqual(control, result)
result = tools.correlate_choice(df, header='A', list_size=1)
self.assertEqual([1, 1, 2, 2], result)
def test_correlate_coefficient(self):
tools = self.tools
df = pd.DataFrame()
df['A'] = [1,2,3]
result = tools.correlate_polynomial(df, header='A', coefficient=[2,1])
self.assertEqual([3, 4, 5], result)
result = tools.correlate_polynomial(df, header='A', coefficient=[0, 0, 1])
self.assertEqual([1, 4, 9], result)
def test_correlate_join(self):
tools = self.tools
df = pd.DataFrame()
df['A'] = [1,2,3]
df['B'] = list('XYZ')
df['C'] = [4.2,7.1,4.1]
result = tools.correlate_join(df, header='B', action="values", sep='_')
self.assertEqual(['X_values', 'Y_values', 'Z_values'], result)
result = tools.correlate_join(df, header='A', action=tools.action2dict(method='correlate_numbers', header='C'))
self.assertEqual(['14.2', '27.1', '34.1'], result)
def test_correlate_columns(self):
tools = self.tools
df = pd.DataFrame({'A': [1,1,1,1,None], 'B': [1,None,2,3,None], 'C': [2,2,2,2,None], 'D': [5,5,5,5,None]})
result = tools.correlate_aggregate(df, headers=list('ABC'), agg='sum')
control = [4.0, 3.0, 5.0, 6.0, 0.0]
self.assertEqual(result, control)
for action in ['sum', 'prod', 'count', 'min', 'max', 'mean']:
print(action)
result = tools.correlate_aggregate(df, headers=list('ABC'), agg=action)
self.assertEqual(5, len(result))
def test_correlate_number(self):
tools = self.tools
df = pd.DataFrame(data=[1,2,3,4.0,5,6,7,8,9,0], columns=['numbers'])
result = tools.correlate_numbers(df, 'numbers', precision=0)
self.assertCountEqual([1,2,3,4,5,6,7,8,9,0], result)
# Offset
df = pd.DataFrame(data=[1, 2, 3, 4, 5, 6, 7, 8, 9, 0], columns=['numbers'])
result = tools.correlate_numbers(df, 'numbers', offset=1, precision=0)
self.assertEqual([2,3,4,5,6,7,8,9,10,1], result)
# str offset
df = pd.DataFrame(data=[1, 2, 3, 4], columns=['numbers'])
result = tools.correlate_numbers(df, 'numbers', offset='1-@', precision=0)
self.assertEqual([0,-1,-2,-3], result)
# complex str offset
result = tools.correlate_numbers(df, 'numbers', offset='x + 2 if x <= 2 else x', precision=0)
self.assertEqual([3, 4, 3, 4], result)
# jitter
df = pd.DataFrame(data=[2] * 1000, columns=['numbers'])
result = tools.correlate_numbers(df, 'numbers', jitter=5, precision=0)
self.assertLessEqual(max(result), 4)
self.assertGreaterEqual(min(result), 0)
df = pd.DataFrame(data=tools._get_number(99999, size=5000), columns=['numbers'])
result = tools.correlate_numbers(df, 'numbers', jitter=5, precision=1)
self.assertNotEqual(df['numbers'].to_list(), result)
self.assertEqual(5000, len(result))
for index in range(len(result)):
loss = abs(df['numbers'][index] - result[index])
self.assertLessEqual(loss, 5)
df = pd.DataFrame(data=tools._get_number(99999, size=5000), columns=['numbers'])
result = tools.correlate_numbers(df, 'numbers', jitter=1, precision=1)
self.assertNotEqual(df['numbers'].to_list(), result)
self.assertEqual(5000, len(result))
for index in range(len(result)):
loss = abs(df['numbers'][index] - result[index])
self.assertLessEqual(loss, 1)
def test_correlate_normalize(self):
tools = self.tools
df = | pd.DataFrame(data=[1,2,2,3,3,2,2,1], columns=['numbers']) | pandas.DataFrame |
import warnings
from pathlib import Path
from typing import Union
warnings.simplefilter(action='ignore', category=FutureWarning)
import pandas as pd
import os
import numpy as np
import matplotlib.pyplot as plt
import warnings
from scipy.stats import linregress
from thoipapy.utils import normalise_0_1, make_sure_path_exists
warnings.filterwarnings("ignore")
def save_BO_linegraph_and_barchart(s, bocurve_data_xlsx, BO_linechart_png, BO_barchart_png, namedict, logging, AUC_ser, plot_o_over_r=False):
df_o_minus_r = pd.read_excel(bocurve_data_xlsx, sheet_name="df_o_minus_r", index_col=0)
BO_scatter_png = str(BO_barchart_png)[:-12] + "scatter.png"
#######################################################################################################
# #
# Create a dataframe with AUBOC and AUC for individual protein (df_valid_indiv) #
# #
#######################################################################################################
# load AUBOC values as a series
mean_o_minus_r_by_sample_ser = pd.read_excel(bocurve_data_xlsx, sheet_name="mean_o_minus_r_by_sample", index_col=0)["mean_o_minus_r_by_sample"]
# select sample sizes 5 and 10
df_valid_indiv = df_o_minus_r.loc[[5, 10], :].T.copy()
df_valid_indiv["AUBOC"] = mean_o_minus_r_by_sample_ser
df_valid_indiv["ROC AUC"] = AUC_ser
df_valid_indiv.sort_values("AUBOC", axis=0, ascending=False, inplace=True)
""" df_valid_indiv should now have the results from BO curve and ROC for each protein
AUBOC sample size 5 sample size 10 ROC AUC
3ij4_A-crystal 17.456522 1.913043 1.652174 0.714286
4wit_A-crystal 16.620000 2.000000 2.000000 0.622807
Q08345-ETRA 16.571429 2.809524 2.238095 0.842593
P04626-ETRA 16.456522 1.913043 1.652174 0.916667
P25189-ETRA 14.634615 2.038462 2.153846 0.812500
"""
#######################################################################################################
# #
# plot correlation between AUBOC and ROC #
# #
#######################################################################################################
# BO_barchart_png
plt.close("all")
# plt.rcParams.update({'font.size': 8})
figsize = np.array([3.42, 3.42]) * 2 # DOUBLE the real size, due to problems on Bo computer with fontsizes
fig, ax = plt.subplots(figsize=figsize)
# df_valid_indiv_scatter = df_valid_indiv[["AUBOC", "ROC AUC"]]
df_valid_indiv.plot(kind="scatter", ax=ax, x="AUBOC", y="ROC AUC", alpha=0.7)
# calculate linear regression for fitted line
slope, intercept, r_value, p_value, std_err = linregress(df_valid_indiv["AUBOC"], df_valid_indiv["ROC AUC"])
# fit_fn = np.poly1d(linear_regression)
# slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(x, y)
x_first_last_dp = np.array([df_valid_indiv["AUBOC"].min(), df_valid_indiv["AUBOC"].max()])
y_fitted = x_first_last_dp * slope + intercept
ax.plot(x_first_last_dp, y_fitted, label="$R^2$ : {:.2f}".format(r_value ** 2))
ax.set_xlabel("AUBOC")
ax.set_ylabel("ROC AUC")
ax.legend()
fig.tight_layout()
ax.grid(False)
# BO_barchart_png = os.path.join(BO_curve_folder, "AUBOC_barchart.png")
fig.savefig(BO_scatter_png, dpi=240)
# simply normalise all between 0 and 1
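    # (the +0.01 offset below presumably keeps values that normalise to exactly 0 visible in the barchart)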
for col in df_valid_indiv.columns:
df_valid_indiv[col] = normalise_0_1(df_valid_indiv[col])[0] + 0.01
bocurve_data_xlsx: Union[Path, str] = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/data/{s['setname']}_thoipa_loo_bo_curve_data.xlsx"
BO_data_valid_indiv_csv: Union[Path, str] = Path(s["data_dir"]) / f"results/{s['setname']}/crossvalidation/data/{s['setname']}_BO_curve_data_valid_indiv.csv"
make_sure_path_exists(bocurve_data_xlsx, isfile=True)
df_valid_indiv = df_valid_indiv.reindex(columns=["AUBOC", 5, 10, "ROC AUC"])
df_valid_indiv.columns = ["AUBOC", "sample size 5", "sample size 10", "ROC AUC"]
df_valid_indiv.to_csv(BO_data_valid_indiv_csv)
""" df_valid_indiv is now normalised within each column, and sorted by AUBOC
AUBOC sample size 5 sample size 10 ROC AUC
3ij4_A-crystal 1.010000 0.789166 0.727758 0.724139
4wit_A-crystal 0.980317 0.810587 0.793133 0.594927
DDR1 [Q08345-ETRA] 0.978593 1.010000 0.837883 0.905371
ErbB2 [P04626-ETRA] 0.974516 0.789166 0.727758 1.010000
MPZ [P25189-ETRA] 0.909867 0.820061 0.822048 0.862866
"""
#######################################################################################################
# #
# plot barchart #
# #
#######################################################################################################
# BO_barchart_png
plt.close("all")
# plt.rcParams.update({'font.size': 8})
figsize = np.array([3.42, 3.42]) * 2 # DOUBLE the real size, due to problems on Bo computer with fontsizes
fig, ax = plt.subplots(figsize=figsize)
# replace the protein names
df_valid_indiv.index = pd.Series(df_valid_indiv.index).replace(namedict)
df_valid_indiv.plot(kind="bar", ax=ax, alpha=0.7)
ax.set_ylabel("performance value\n(observed overlap - random overlap)")
ax.legend() # (["sample size = 5", "sample size = 10"])
fig.tight_layout()
ax.grid(False)
fig.savefig(BO_barchart_png, dpi=240)
#######################################################################################################
# #
# plot linechart (combined data all proteins #
# #
#######################################################################################################
if plot_o_over_r:
df_o_over_r = pd.read_excel(bocurve_data_xlsx, sheet_name="df_o_over_r", index_col=0)
df_o_over_r_mean = df_o_over_r.T.mean()
df_o_minus_r.columns = pd.Series(df_o_minus_r.columns).replace(namedict)
df_o_minus_r_mean = df_o_minus_r.T.mean()
# apply cutoff (e.g. 5 residues for AUBOC5)
auboc_ser = df_o_minus_r_mean.iloc[:s["n_residues_AUBOC_validation"]]
# get the area under the curve
AUBOC = np.trapz(y=auboc_ser, x=auboc_ser.index)
# BO_linechart_png
plt.close("all")
figsize = np.array([3.42, 3.42]) * 2 # DOUBLE the real size, due to problems on Bo computer with fontsizes
fig, ax = plt.subplots(figsize=figsize)
    df_o_minus_r_mean.plot(ax=ax, color="#0f7d9b", linestyle="-", label="prediction (AUBOC : {:0.2f})".format(AUBOC))
ax.plot([1, 10], [0, 0], color="#0f7d9b", linestyle="--", label="random", alpha=0.5)
if plot_o_over_r:
ax2 = ax.twinx()
df_o_over_r_mean.plot(ax=ax2, color="#9b2d0f", linestyle="-", label="old method (o/r)")
ax2.plot([1, 10], [1, 1], color="#9b2d0f", linestyle="--", label="old method random", alpha=0.5)
# ax.set_ylim(0)
ax.grid(False)
ax.set_ylabel("fraction of correctly predicted residues\n(observed - random)", color="#0f7d9b")
ax.tick_params('y', colors="#0f7d9b")
ax.spines['left'].set_color("#0f7d9b")
ax.legend()
if plot_o_over_r:
ax2.tick_params('y', colors="#9b2d0f")
ax2.spines['right'].set_color("#9b2d0f")
# ax.set_ylabel("performance value\n (observed / random)", color="#9b2d0f")
ax.set_ylabel("fraction of correctly predicted residues\n(observed / random)", color="#9b2d0f")
ax2.legend()
ax.set_xlabel("number of TMD residues\n(sample size)")
fig.tight_layout()
fig.savefig(BO_linechart_png, dpi=140)
return AUBOC
def save_extra_BO_figs(bocurve_data_xlsx, other_figs_path):
linechart_mean_obs_and_rand = os.path.join(other_figs_path, "1_linechart_mean_obs_and_rand.png")
linechart_obs_indiv = os.path.join(other_figs_path, "2_linechart_obs_indiv.png")
linechart_p_indiv = os.path.join(other_figs_path, "3_linechart_p_indiv.png")
linechart_o_minus_r = os.path.join(other_figs_path, "4_linechart_o_minus_r.png")
linechart_o_over_r = os.path.join(other_figs_path, "5_linechart_o_over_r.png")
    dfrand = pd.read_excel(bocurve_data_xlsx, sheet_name="dfrand", index_col=0)
"""SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
import csv
from datetime import date, datetime, time
from io import StringIO
import sqlite3
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
isna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.io.sql as sql
from pandas.io.sql import read_sql_query, read_sql_table
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
"create_iris": {
"sqlite": """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
"mysql": """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
"postgresql": """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)""",
},
"insert_iris": {
"sqlite": """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
"mysql": """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
"postgresql": """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);""",
},
"create_test_types": {
"sqlite": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
"mysql": """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`IntDateOnlyCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
"postgresql": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)""",
},
"insert_test_types": {
"sqlite": {
"query": """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"mysql": {
"query": """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"postgresql": {
"query": """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"DateColWithTz",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
},
"read_parameters": {
"sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
"mysql": 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
"postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s',
},
"read_named_parameters": {
"sqlite": """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
"mysql": """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
"postgresql": """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
""",
},
"create_view": {
"sqlite": """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
},
}
class MixInBase:
def teardown_method(self, method):
# if setup fails, there may not be a connection to close.
if hasattr(self, "conn"):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute(f"DROP TABLE IF EXISTS {sql._get_valid_mysql_name(table_name)}")
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute("SHOW TABLES")
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute(
f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}"
)
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest:
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, "execute"):
return self.conn
else:
return self.conn.cursor()
@pytest.fixture(params=[("data", "iris.csv")])
def load_iris_data(self, datapath, request):
import io
iris_csv_file = datapath(*request.param)
if not hasattr(self, "conn"):
self.setup_connect()
self.drop_table("iris")
self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor])
with io.open(iris_csv_file, mode="r", newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS["insert_iris"][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table("iris_view")
self._get_exec().execute(SQL_STRINGS["create_view"][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _load_test1_data(self):
columns = ["index", "A", "B", "C", "D"]
data = [
(
"2000-01-03 00:00:00",
0.980268513777,
3.68573087906,
-0.364216805298,
-1.15973806169,
),
(
"2000-01-04 00:00:00",
1.04791624281,
-0.0412318367011,
-0.16181208307,
0.212549316967,
),
(
"2000-01-05 00:00:00",
0.498580885705,
0.731167677815,
-0.537677223318,
1.34627041952,
),
(
"2000-01-06 00:00:00",
1.12020151869,
1.56762092543,
0.00364077397681,
0.67525259227,
),
]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(
dict(
A=[4, 1, 3, 6],
B=["asd", "gsq", "ylt", "jkl"],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=["1990-11-22", "1991-10-26", "1993-11-26", "1995-12-12"],
)
)
df["E"] = to_datetime(df["E"])
self.test_frame2 = df
def _load_test3_data(self):
columns = ["index", "A", "B"]
data = [
("2000-01-03 00:00:00", 2 ** 31 - 1, -1.987670),
("2000-01-04 00:00:00", -29, -0.0412318367011),
("2000-01-05 00:00:00", 20000, 0.731167677815),
("2000-01-06 00:00:00", -290867, 1.56762092543),
]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table("types_test_data")
self._get_exec().execute(SQL_STRINGS["create_test_types"][self.flavor])
ins = SQL_STRINGS["insert_test_types"][self.flavor]
data = [
{
"TextCol": "first",
"DateCol": "2000-01-03 00:00:00",
"DateColWithTz": "2000-01-01 00:00:00-08:00",
"IntDateCol": 535852800,
"IntDateOnlyCol": 20101010,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": 1,
"BoolColWithNull": False,
},
{
"TextCol": "first",
"DateCol": "2000-01-04 00:00:00",
"DateColWithTz": "2000-06-01 00:00:00-07:00",
"IntDateCol": 1356998400,
"IntDateOnlyCol": 20101212,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": None,
"BoolColWithNull": None,
},
]
for d in data:
self._get_exec().execute(
ins["query"], [d[field] for field in ins["fields"]]
)
def _count_rows(self, table_name):
result = (
self._get_exec()
.execute(f"SELECT count(*) AS count_1 FROM {table_name}")
.fetchone()
)
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS["read_parameters"][self.flavor]
params = ["Iris-setosa", 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS["read_named_parameters"][self.flavor]
params = {"name": "Iris-setosa", "length": 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self, method=None):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=method)
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _to_sql_empty(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], "test_frame1")
def _to_sql_fail(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
assert self.pandasSQL.has_table("test_frame1")
msg = "Table 'test_frame1' already exists"
with pytest.raises(ValueError, match=msg):
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
self.drop_table("test_frame1")
def _to_sql_replace(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="replace")
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_append(self):
# Nuke table just in case
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="append")
assert self.pandasSQL.has_table("test_frame1")
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_method_callable(self):
check = [] # used to double check function below is really being used
def sample(pd_table, conn, keys, data_iter):
check.append(1)
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(pd_table.table.insert(), data)
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=sample)
assert self.pandasSQL.has_table("test_frame1")
assert check == [1]
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _roundtrip(self):
self.drop_table("test_frame_roundtrip")
self.pandasSQL.to_sql(self.test_frame1, "test_frame_roundtrip")
result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip")
result.set_index("level_0", inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _to_sql_save_index(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"]
)
self.pandasSQL.to_sql(df, "test_to_sql_saves_index")
ix_cols = self._get_index_columns("test_to_sql_saves_index")
assert ix_cols == [["A"]]
def _transaction_test(self):
with self.pandasSQL.run_transaction() as trans:
trans.execute("CREATE TABLE test_trans (A INT, B TEXT)")
class DummyException(Exception):
pass
# Make sure when transaction is rolled back, no rows get inserted
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise DummyException("error")
except DummyException:
# ignore raised exception
pass
res = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res) == 0
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res2) == 1
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode
(`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
tests for the different sql flavours are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use drop_table because that isn't part of the public api
"""
flavor = "sqlite"
mode: str
def setup_connect(self):
self.conn = self.connect()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def load_test_data_and_sql(self):
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, "test_frame1", self.conn)
assert sql.has_table("test_frame1", self.conn)
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
assert sql.has_table("test_frame2", self.conn)
msg = "Table 'test_frame2' already exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="replace")
assert sql.has_table("test_frame3", self.conn)
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame3")
assert num_rows == num_entries
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="append")
assert sql.has_table("test_frame4", self.conn)
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame4")
assert num_rows == num_entries
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, "test_frame5", self.conn, index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype="int64"), name="series")
sql.to_sql(s, "test_series", self.conn, index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_roundtrip(self):
sql.to_sql(self.test_frame1, "test_frame_roundtrip", con=self.conn)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index("level_0", inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(
self.test_frame1,
"test_frame_roundtrip",
con=self.conn,
index=False,
chunksize=2,
)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def test_date_parsing(self):
# Test date parsing in read_sql
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["DateCol"]
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"IntDateOnlyCol": "%Y%m%d"},
)
assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64)
assert df.IntDateOnlyCol.tolist() == [
pd.Timestamp("2010-10-10"),
pd.Timestamp("2010-12-12"),
]
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
index_col="DateCol",
parse_dates=["DateCol", "IntDateCol"],
)
assert issubclass(df.index.dtype.type, np.datetime64)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_timedelta(self):
# see #6921
df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql("test_timedelta", self.conn)
result = sql.read_sql_query("SELECT * FROM test_timedelta", self.conn)
tm.assert_series_equal(result["foo"], df["foo"].astype("int64"))
def test_complex_raises(self):
df = DataFrame({"a": [1 + 1j, 2j]})
msg = "Complex datatypes not supported"
with pytest.raises(ValueError, match=msg):
df.to_sql("test_complex", self.conn)
@pytest.mark.parametrize(
"index_name,index_label,expected",
[
# no index name, defaults to 'index'
(None, None, "index"),
# specifying index_label
(None, "other_label", "other_label"),
# using the index name
("index_name", None, "index_name"),
# has index name, but specifying index_label
("index_name", "other_label", "other_label"),
# index name is integer
(0, None, "0"),
# index name is None but index label is integer
(None, 0, "0"),
],
)
def test_to_sql_index_label(self, index_name, index_label, expected):
temp_frame = DataFrame({"col1": range(4)})
temp_frame.index.name = index_name
query = "SELECT * FROM test_index_label"
sql.to_sql(temp_frame, "test_index_label", self.conn, index_label=index_label)
frame = sql.read_sql_query(query, self.conn)
assert frame.columns[0] == expected
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame(
{"col1": range(4)},
index=MultiIndex.from_product([("A0", "A1"), ("B0", "B1")]),
)
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, "test_index_label", self.conn)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[0] == "level_0"
assert frame.columns[1] == "level_1"
# specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["A", "B"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# using the index name
temp_frame.index.names = ["A", "B"]
sql.to_sql(temp_frame, "test_index_label", self.conn, if_exists="replace")
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# has index name, but specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["C", "D"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["C", "D"]
msg = "Length of 'index_label' should match number of levels, which is 2"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label="C",
)
def test_multiindex_roundtrip(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")],
columns=["A", "B", "C"],
index=["A", "B"],
)
df.to_sql("test_multiindex_roundtrip", self.conn)
result = sql.read_sql_query(
"SELECT * FROM test_multiindex_roundtrip", self.conn, index_col=["A", "B"]
)
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn, if_exists="replace")
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, "test", con=self.conn)
assert "CREATE" in create_sql
def test_get_schema_dtypes(self):
float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]})
dtype = sqlalchemy.Integer if self.mode == "sqlalchemy" else "INTEGER"
create_sql = sql.get_schema(
float_frame, "test", con=self.conn, dtype={"b": dtype}
)
assert "CREATE" in create_sql
assert "INTEGER" in create_sql
def test_get_schema_keys(self):
frame = DataFrame({"Col1": [1.1, 1.2], "Col2": [2.1, 2.2]})
create_sql = sql.get_schema(frame, "test", con=self.conn, keys="Col1")
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
assert constraint_sentence in create_sql
# multiple columns as key (GH10385)
create_sql = sql.get_schema(
self.test_frame1, "test", con=self.conn, keys=["A", "B"]
)
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
assert constraint_sentence in create_sql
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list("abcde"))
df.to_sql("test_chunksize", self.conn, index=False)
# reading the query in one time
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_query(
"select * from test_chunksize", self.conn, chunksize=5
):
res2 = concat([res2, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res2)
# reading the query in chunks with read_sql_query
if self.mode == "sqlalchemy":
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn, chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res3)
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{
"person_id": [1, 2, 3],
"person_name": ["<NAME>", "<NAME>", "<NAME>"],
}
)
df2 = df.copy()
df2["person_name"] = df2["person_name"].astype("category")
df2.to_sql("test_categorical", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_categorical", self.conn)
tm.assert_frame_equal(res, df)
def test_unicode_column_name(self):
# GH 11431
df = DataFrame([[1, 2], [3, 4]], columns=["\xe9", "b"])
df.to_sql("test_unicode", self.conn, index=False)
def test_escaped_table_name(self):
# GH 13206
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("d1187b08-4943-4c8d-a7f6", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM `d1187b08-4943-4c8d-a7f6`", self.conn)
tm.assert_frame_equal(res, df)
@pytest.mark.single
@pytest.mark.skipif(not SQLALCHEMY_INSTALLED, reason="SQLAlchemy not installed")
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
Test the public API as it would be used directly
Tests for `read_sql_table` are included here, as this is specific for the
sqlalchemy mode.
"""
flavor = "sqlite"
mode = "sqlalchemy"
def connect(self):
return sqlalchemy.create_engine("sqlite:///:memory:")
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
cols = ["A", "B"]
result = sql.read_sql_table("test_frame", self.conn, columns=cols)
assert result.columns.tolist() == cols
def test_read_table_index_col(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
result = sql.read_sql_table("test_frame", self.conn, index_col="index")
assert result.index.names == ["index"]
result = sql.read_sql_table("test_frame", self.conn, index_col=["A", "B"])
assert result.index.names == ["A", "B"]
result = sql.read_sql_table(
"test_frame", self.conn, index_col=["A", "B"], columns=["C", "D"]
)
assert result.index.names == ["A", "B"]
assert result.columns.tolist() == ["C", "D"]
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table("iris", self.conn)
iris_frame2 = sql.read_sql("iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
sql.read_sql_table("other_table", self.conn)
sql.read_sql_query("SELECT * FROM other_table", self.conn)
# Verify some things
assert len(w) == 0
def test_warning_case_insensitive_table_name(self):
# see gh-7815
#
        # We can't test that this warning is triggered, as the database
# configuration would have to be altered. But here we test that
# the warning is certainly NOT triggered in a normal case.
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# This should not trigger a Warning
self.test_frame1.to_sql("CaseSensitive", self.conn)
# Verify some things
assert len(w) == 0
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
        ixs = insp.get_indexes(tbl_name)
ixs = [i["column_names"] for i in ixs]
return ixs
def test_sqlalchemy_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
# GH 9086: TIMESTAMP is the suggested type for datetimes with timezones
assert isinstance(table.table.c["time"].type, sqltypes.TIMESTAMP)
def test_database_uri_string(self):
# Test read_sql and .to_sql method with a database URI (GH10654)
test_frame1 = self.test_frame1
# db_uri = 'sqlite:///:memory:' # raises
# sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near
# "iris": syntax error [SQL: 'iris']
with tm.ensure_clean() as name:
db_uri = "sqlite:///" + name
table = "iris"
test_frame1.to_sql(table, db_uri, if_exists="replace", index=False)
test_frame2 = sql.read_sql(table, db_uri)
test_frame3 = sql.read_sql_table(table, db_uri)
query = "SELECT * FROM iris"
test_frame4 = sql.read_sql_query(query, db_uri)
tm.assert_frame_equal(test_frame1, test_frame2)
tm.assert_frame_equal(test_frame1, test_frame3)
tm.assert_frame_equal(test_frame1, test_frame4)
# using driver that will not be installed on Travis to trigger error
# in sqlalchemy.create_engine -> test passing of this error to user
try:
# the rest of this test depends on pg8000's being absent
import pg8000 # noqa
pytest.skip("pg8000 is installed")
except ImportError:
pass
db_uri = "postgresql+pg8000://user:pass@host/dbname"
with pytest.raises(ImportError, match="pg8000"):
sql.read_sql("select * from table", db_uri)
def _make_iris_table_metadata(self):
sa = sqlalchemy
metadata = sa.MetaData()
iris = sa.Table(
"iris",
metadata,
sa.Column("SepalLength", sa.REAL),
sa.Column("SepalWidth", sa.REAL),
sa.Column("PetalLength", sa.REAL),
sa.Column("PetalWidth", sa.REAL),
sa.Column("Name", sa.TEXT),
)
return iris
def test_query_by_text_obj(self):
# WIP : GH10846
name_text = sqlalchemy.text("select * from iris where name=:name")
iris_df = sql.read_sql(name_text, self.conn, params={"name": "Iris-versicolor"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-versicolor"}
def test_query_by_select_obj(self):
# WIP : GH10846
iris = self._make_iris_table_metadata()
name_select = sqlalchemy.select([iris]).where(
iris.c.Name == sqlalchemy.bindparam("name")
)
iris_df = sql.read_sql(name_select, self.conn, params={"name": "Iris-setosa"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-setosa"}
class _EngineToConnMixin:
"""
A mixin that causes setup_connect to create a conn rather than an engine.
"""
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
super().load_test_data_and_sql()
engine = self.conn
conn = engine.connect()
self.__tx = conn.begin()
self.pandasSQL = sql.SQLDatabase(conn)
self.__engine = engine
self.conn = conn
yield
self.__tx.rollback()
self.conn.close()
self.conn = self.__engine
self.pandasSQL = sql.SQLDatabase(self.__engine)
# XXX:
# super().teardown_method(method)
@pytest.mark.single
class TestSQLApiConn(_EngineToConnMixin, TestSQLApi):
pass
@pytest.mark.single
class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):
"""
Test the public sqlite connection fallback API
"""
flavor = "sqlite"
mode = "fallback"
def connect(self, database=":memory:"):
return sqlite3.connect(database)
def test_sql_open_close(self):
# Test if the IO in the database still work if the connection closed
# between the writing and reading (as in many real situations).
with tm.ensure_clean() as name:
conn = self.connect(name)
sql.to_sql(self.test_frame3, "test_frame3_legacy", conn, index=False)
conn.close()
conn = self.connect(name)
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn)
conn.close()
tm.assert_frame_equal(self.test_frame3, result)
@pytest.mark.skipif(SQLALCHEMY_INSTALLED, reason="SQLAlchemy is installed")
def test_con_string_import_error(self):
conn = "mysql://root@localhost/pandas_nosetest"
msg = "Using URI string without sqlalchemy installed"
with pytest.raises(ImportError, match=msg):
sql.read_sql("SELECT * FROM iris", conn)
def test_read_sql_delegate(self):
        iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
# Copyright 2021 The Funnel Rocket Maintainers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import shutil
from contextlib import contextmanager
from dataclasses import dataclass
from enum import auto
from typing import List
import numpy as np
import pytest
from pandas import RangeIndex, Series, DataFrame
from frocket.common.dataset import DatasetPartsInfo, DatasetId, DatasetPartId, PartNamingMethod, DatasetInfo, \
DatasetColumnType, DatasetShortSchema
from frocket.common.serializable import AutoNamedEnum
from frocket.worker.runners.part_loader import shared_part_loader
from tests.utils.base_test_utils import temp_filename, TEMP_DIR, DisablePyTestCollectionMixin
from tests.utils.mock_s3_utils import SKIP_S3_TESTS, new_mock_s3_bucket
class TestColumn(DisablePyTestCollectionMixin, str, AutoNamedEnum):
int_64_userid = auto()
int_64_ts = auto()
int_u32 = auto()
float_64_ts = auto()
float_all_none = auto()
float_32 = auto()
float_category = auto()
str_userid = auto()
str_and_none = auto()
str_all_none = auto()
str_object_all_none = auto()
str_category_userid = auto()
str_category_few = auto()
str_category_many = auto()
bool = auto()
unsupported_datetimes = auto()
unsupported_lists = auto()
DEFAULT_GROUP_COUNT = 200
DEFAULT_ROW_COUNT = 1000
DEFAULT_GROUP_COLUMN = TestColumn.int_64_userid.value
DEFAULT_TIMESTAMP_COLUMN = TestColumn.int_64_ts.value
BASE_TIME = 1609459200000 # Start of 2021, UTC
BASE_USER_ID = 100000
TIME_SHIFT = 10000
UNSUPPORTED_COLUMN_DTYPES = {TestColumn.unsupported_datetimes: 'datetime64[ns]',
TestColumn.unsupported_lists: 'object'}
STR_AND_NONE_VALUES = ["1", "2", "3"]
STR_CAT_FEW_WEIGHTS = [0.9, 0.07, 0.02, 0.01]
STR_CAT_MANY_WEIGHTS = [0.5, 0.2] + [0.01] * 30
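# Both weight lists sum to 1.0 (0.9 + 0.07 + 0.02 + 0.01, and 0.5 + 0.2 + 30 * 0.01),
# so the per-value chunk sizes produced by weighted_list() below add up to the full size.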
def test_colname_to_coltype(name: str) -> DatasetColumnType:
prefix_to_type = {
'int': DatasetColumnType.INT,
'float': DatasetColumnType.FLOAT,
'str': DatasetColumnType.STRING,
'bool': DatasetColumnType.BOOL,
'unsupported': None
}
coltype = prefix_to_type[name.split('_')[0]]
return coltype
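# e.g. test_colname_to_coltype("float_64_ts") -> DatasetColumnType.FLOAT,
# while "unsupported_*" column names map to None.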
def datafile_schema(part: int = 0) -> DatasetShortSchema:
# noinspection PyUnresolvedReferences
result = DatasetShortSchema(
min_timestamp=float(BASE_TIME),
max_timestamp=float(BASE_TIME + TIME_SHIFT),
source_categoricals=[TestColumn.str_category_userid, TestColumn.str_category_many],
potential_categoricals=[TestColumn.str_and_none, TestColumn.str_category_few],
columns={col.value: test_colname_to_coltype(col)
for col in TestColumn
if test_colname_to_coltype(col)})
# print(f"Test dataset short schema is:\n{result.to_json(indent=2)}")
return result
def weighted_list(size: int, weights: list) -> list:
res = []
for idx, w in enumerate(weights):
v = str(idx)
vlen = size * w
res += [v] * int(vlen)
assert len(res) == size
return res
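# e.g. weighted_list(100, STR_CAT_FEW_WEIGHTS) -> 90 "0"s, 7 "1"s, 2 "2"s and 1 "3"
# (the assert only holds when the integer chunk sizes add up exactly to `size`).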
def str_and_none_column_values(part: int = 0, with_none: bool = True) -> List[str]:
result = [*STR_AND_NONE_VALUES, f"part-{part}"]
if with_none:
result.append(None)
return result
def create_datafile(part: int = 0, size: int = DEFAULT_ROW_COUNT, filename: str = None) -> str:
# First, prepare data for columns
# Each part has a separate set of user (a.k.a. group) IDs
initial_user_id = BASE_USER_ID * part
min_user_id = initial_user_id
max_user_id = initial_user_id + DEFAULT_GROUP_COUNT - 1
    # To ease testing, ensure that each user ID appears in the file at least once by including the whole range,
# then add random IDs in the range
int64_user_ids = \
list(range(min_user_id, max_user_id + 1)) + \
random.choices(range(min_user_id, max_user_id + 1), k=size - DEFAULT_GROUP_COUNT)
# And also represent as strings in another column
str_user_ids = [str(uid) for uid in int64_user_ids]
# Timestamp: each part has a range of values of size TIME_SHIFT
min_ts = BASE_TIME + (TIME_SHIFT * part)
max_ts = BASE_TIME + (TIME_SHIFT * (part + 1))
# Ensure that min & max timestamps appear exactly once, and fill the rest randomly in the range
int_timestamps = \
[min_ts, max_ts] + \
random.choices(range(min_ts + 1, max_ts), k=size-2)
# Now as floats and as (incorrect!) datetimes (datetimes currently unsupported)
float_timestamps = [ts + random.random() for ts in int_timestamps]
# More test columns
int_u32_values = random.choices(range(100), k=size)
float_32_values = [np.nan, *[random.random() for _ in range(size - 2)], np.nan]
str_and_none_values = random.choices(str_and_none_column_values(part), k=size)
bool_values = random.choices([True, False], k=size)
# For yet-unsupported columns below
lists_values = [[1, 2, 3]] * size
datetimes = [ts * 1000000 for ts in float_timestamps]
# Now create all series
idx = RangeIndex(size)
columns = {
TestColumn.int_64_userid: Series(data=int64_user_ids),
TestColumn.int_64_ts: Series(data=int_timestamps),
        TestColumn.int_u32: Series(data=int_u32_values, dtype='uint32'),
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 18 17:54:55 2020
@author: RredRrobin
"""
import os
import tkinter as tk
import tkinter.filedialog as filedialog
import tkinter.ttk as ttk
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
class TextScrollCombo(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
self.grid_propagate(True)
self.text = tk.Text(self, wrap="none")
self.text.grid(row=0, column=0, sticky="nsew", padx=2, pady=2)
scroll_bar_y = ttk.Scrollbar(self, command=self.text.yview)
scroll_bar_y.grid(row=0, column=1, sticky='nsew')
self.text['yscrollcommand'] = scroll_bar_y.set
scroll_bar_x = ttk.Scrollbar(self, command=self.text.xview, orient=tk.HORIZONTAL)
scroll_bar_x.grid(row=1, column=0, sticky='nsew')
self.text['xscrollcommand'] = scroll_bar_x.set
def add(self, row):
self.text.insert("end", row)
def empty(self):
self.text.delete('1.0', "end")
class program(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
self.grid()
self.grid_propagate(False)
self.grid_rowconfigure(0, weight=1)
self.grid_columnconfigure(0, weight=1)
self.menubar = tk.Menu(master)
master.config(menu=self.menubar)
self.menu_create()
self.combo = TextScrollCombo(master)
self.combo.grid(row=0, column=0, sticky='nsew')
style = ttk.Style()
style.theme_use('clam')
self.WD_open ="/" # set root as default directory to open files
self.WD_save ="/" # set root as default directory to save files
def menu_create(self):
self.progs = tk.Menu(self.menubar, tearoff=False)
self.progs.add_command(label="Choose directory to OPEN files", command=self.chooseWD_open)
self.progs.add_command(label="Choose directory to SAVE files", command=self.chooseWD_save)
self.progs.add_command(label="Close", command=self.close)
self.menubar.add_cascade(label="Program", menu=self.progs)
self.menuPs = tk.Menu(self.menubar, tearoff=False)
self.menuPs.add_command(label="Load data", command=self.data_import)
self.menuPs.add_command(label="Select interval", command=self.interval_select)
self.menuPs.add_command(label="Cut & save interval", command=self.interval_cut)
self.menubar.add_cascade(label="HRdata", menu=self.menuPs)
def data_import(self):
self.file_name = filedialog.askopenfilename(initialdir = self.WD_open, title = "Select file",filetypes = (("HRM files","*.hrm"),("Text files","*.txt"),("all files","*.*")))
file = open(self.file_name)
data = file.read()
file.close()
self.combo.add(data) # to display
# load dataframe
self.df = pd.read_csv(self.file_name, sep = ",")
self.df.columns = ["IBI"] # name column "IBI" (Inter-beat interval)
# delete unnessecary information
a = list(self.df.IBI) # convert column from self.df to list
b = list(self.df.IBI).index('[HRData]') # recognize beginning of HR data in the list
del a[0:4] # deletes first four rows
del a[1:b-3] # deletes rows 2 to "[HRData]"
self.df = pd.DataFrame({'IBI':a}) # writes dataframe
# create column with forth-counted time
self.df['IBI'] = self.df['IBI'].str.replace('StartTime=','') # deletes "StartTime=" to obtain just time value
self.df['IBItime'] = pd.to_timedelta(self.df['IBI'])*1000000 # *1.000.000, because IBI-values are interpreted in microseconds (except for StartTime-value)
l = len(self.df) # calculate length of DF / number of IBIs
        liste = list(range(1,l)) # create list from 1 to end (the unused row holding the start time has index '0' and the last row has index length minus 1)
self.df['time'] = pd.to_timedelta(self.df['IBI'][0])
for i in liste:
self.df['time'][i] = self.df['time'][i-1] + self.df['IBItime'][i] # adds continuously the respective time value (previous time value plus length of IBI)
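        # Note: the loop above amounts to a cumulative sum; a vectorized sketch (same column layout assumed):
        #   self.df['time'] = pd.to_timedelta(self.df['IBI'][0]) + self.df['IBItime'].cumsum() - self.df['IBItime'][0]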
self.combo.empty() # empty screen
self.combo.add(self.df)
# save as .csv-file
filename_w_ext = os.path.basename(self.file_name) # change file name to be saved in a way that
        filename, file_extension = os.path.splitext(filename_w_ext) # it has the same name as the .hrm file, but
self.n = filename+'.csv' # with .csv as ending
self.df.to_csv(self.n, sep='\t', index=False) # options: tabulator as seperator; without indexing
return(self.df)
def interval_select(self):
def time_select(num1,num2,num3,num4,num5,num6):
stime = (num1+":"+num2+":"+num3) # select start time
dtime = (num4+":"+num5+":"+num6)
print(stime) # print in console
print(dtime)
st = datetime.strptime(stime,"%H:%M:%S")
dt = datetime.strptime(dtime,"%H:%M:%S")
            self.Stime = timedelta(hours = st.hour, minutes = st.minute, seconds= st.second) # format into something that can be used later
self.Dtime = timedelta(hours = dt.hour, minutes = dt.minute, seconds= dt.second)
            return self.Stime, self.Dtime
# create window
intwin = tk.Tk()
intwin.geometry("200x100+100+100")
intwin.title('Interval')
# label
tk.Label(intwin, text="Start time").grid(row=1, column=0)
tk.Label(intwin, text=":").grid(row=1, column=2)
tk.Label(intwin, text=":").grid(row=1, column=4)
tk.Label(intwin, text="Duration").grid(row=2, column=0)
tk.Label(intwin, text=":").grid(row=2, column=2)
tk.Label(intwin, text=":").grid(row=2, column=4)
# set type of variable
number1 = tk.IntVar()
number2 = tk.IntVar()
number3 = tk.IntVar()
number4 = tk.IntVar()
number5 = tk.IntVar()
number6 = tk.IntVar()
# create entry slot
        sHH = tk.Entry(intwin, textvariable=number1, width=2) # note: it would be nice to limit the entry to two digits by default (not just visually) to avoid nonsense entries / typos
sHH.grid(row=1, column=1)
sMM = tk.Entry(intwin, textvariable=number2, width=2)
sMM.grid(row=1, column=3)
sSS = tk.Entry(intwin, textvariable=number3, width=2)
sSS.grid(row=1, column=5)
dHH = tk.Entry(intwin, textvariable=number4, width=2)
dHH.grid(row=2, column=1)
dMM = tk.Entry(intwin, textvariable=number5, width=2)
dMM.grid(row=2, column=3)
dSS = tk.Entry(intwin, textvariable=number6, width=2)
dSS.grid(row=2, column=5)
tk.Button(intwin, text = "OK", command=lambda: time_select(sHH.get(),sMM.get(),sSS.get(),dHH.get(),dMM.get(),dSS.get()), activebackground = "white", activeforeground = "blue").place(x = 70, y = 50)
intwin.mainloop()
self.combo.add(self.Stime)
self.combo.add(self.Dtime)
def interval_cut(self):
# load dataframe
self.df2 = pd.read_csv(self.n, sep = "\t")
# define times as timedelta data
self.df2['IBI'] = pd.to_numeric(self.df2['IBI'][1:])
        self.df2['IBItime'] = pd.to_timedelta(self.df2['IBItime'])
import pandas as pd
import numpy as np
from functools import reduce
from PortfolioBasic.stockstats import StockDataFrame
from MarketData import QuandlMarketDataSource, MarketData
from PortfolioBasic.Definitions import HeaderFactory
from PortfolioBasic.Technical.Indicators import RsiIndicator, MomentumIndicator, MACDIndicator, \
CombinedIndicator, BollingerIndicator, Williams, CommodityChannelIndex, TripleExponentialMovingAverage, \
AverageDirectionalIndex, AverageTrueRange
class DataLoader(object):
def load_data(self, stock, days, sentiment_location=None, source=QuandlMarketDataSource(), full_articles=True,
from_date='2011-04-01', to_date='2015-04-01'):
articles = None
if sentiment_location is not None:
articles = self.load_sentiment(sentiment_location, full_articles)
price_df = source.get_stock_data(stock)
# noinspection PyTypeChecker
indicators = CombinedIndicator((
MomentumIndicator(1),
MomentumIndicator(5),
BollingerIndicator(),
MACDIndicator(),
CommodityChannelIndex(),
AverageDirectionalIndex(),
TripleExponentialMovingAverage(),
AverageTrueRange(),
RsiIndicator(),
Williams()
))
ma = [
50,
100,
200
]
market = MarketData(stock, price_df, days=days, ma=ma, indicator=indicators, df_additional=articles)
price_df = market.get_stock_data()
return market.get_binary_data(price_df, days=days, from_date=from_date, to_date=to_date)
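    # Example usage (a sketch; the ticker and CSV path are hypothetical, and the
    # return value is whatever MarketData.get_binary_data produces):
    #   loader = DataLoader()
    #   data = loader.load_data("AAPL", days=5, sentiment_location="articles_sentiment.csv")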
def load_sentiment(self, location, full_articles=True):
articles = pd.read_csv(location, na_values=["nan"])
articles.Date = pd.to_datetime(articles.Date, format='%d/%m/%Y %H:%M:%S')
articles.set_index('Date', inplace=True)
articles.reindex()
articles.sort_index(ascending=True, inplace=True)
if full_articles:
articles['Anger'] /= articles['TotalWords']
articles['Anticipation'] /= articles['TotalWords']
articles['Disgust'] /= articles['TotalWords']
articles['Fear'] /= articles['TotalWords']
articles['Joy'] /= articles['TotalWords']
articles['Sadness'] /= articles['TotalWords']
articles['Surprise'] /= articles['TotalWords']
articles['Trust'] /= articles['TotalWords']
articles.drop(columns=['Original', 'Original', 'TotalWords', 'TotalSentimentWords', 'Id'], inplace=True)
else:
articles.drop(columns=['Original', 'Original', 'TotalWords', 'TotalSentimentWords', 'Id', 'Anger',
'Anticipation', 'Disgust', 'Fear', 'Joy', 'Sadness', 'Surprise', 'Trust'], inplace=True)
articles['Calculated'] = ((articles['Calculated'] - 1) / 2) - 1
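        # The transform above presumably rescales a 1-5 sentiment score onto [-1, 1]
        # (1 -> -1, 3 -> 0, 5 -> 1).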
        articles = articles.groupby(pd.TimeGrouper('D'))
# This program predicts whether a list of films, categorized by genre, is more likely to be viewed by users
import sys, os
import numpy as np
import pandas as pd
import pickle, json, random
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from keras.models import load_model
from keras.layers import Input, Embedding, Flatten, Dot, Dense, Concatenate
from keras.models import Model
import matplotlib.pyplot as plt
#np.set_printoptions(threshold=sys.maxsize)
################################################################################
# This recommender is the final one: it is a mix between collaborative and content-based filtering, because it concatenates the opinions and the characteristics of each user.
# It predicts a user's rating as a function of their opinion about the film genres they like (based on previously liked films) and of their characteristics: Age, Gender, Occupation and ZipCode.
# A lot of data is needed to learn correctly, and training must be very accurate.
ratings = pd.read_csv("./ml-100k/u.data", names=["UserId", "MovieId", "Rate", "TimeStamp"], sep="\t")
genres = pd.read_csv("./ml-100k/u.genre", names=["Genre", "GenreId"], sep="|")
items = pd.read_csv("./ml-100k/u.item", names=["MovieId", "MovieTitle", "ReleaseDate","VideoReleaseDate", "IMDbURL", "unknown", "Action", "Adventure", "Animation", "Children's", "Comedy", "Crime", "Documentary", "Drama", "Fantasy", "Film-Noir", "Horror", "Musical", "Mystery", "Romance", "Sci-Fi", "Thriller", "War", "Western"], sep="|")
occupations = pd.read_csv("./ml-100k/u.occupation", names=["Occupation", "OccupationId"], sep="|")
import numpy as np
import pandas as pd
class HousePriceDataset(object):
def __init__(self,train,test):
super(HousePriceDataset,self).__init__()
train.drop(['Id'], axis=1, inplace=True)
test.drop(['Id'], axis=1, inplace=True)
train = train[train.GrLivArea < 4500]
train.reset_index(drop=True, inplace=True)
train["SalePrice"] = np.log1p(train["SalePrice"])
y_train = train['SalePrice'].reset_index(drop=True)
train_features = train.drop(['SalePrice'], axis=1)
test_features = test
        features = pd.concat([train_features, test_features])
from dies.data import (
Dataset,
DatasetCategoricalData,
combine_datasets,
ds_from_df,
ds_from_df_from_dtypes,
scale_datasets,
)
from dies import data
import numpy as np
import pandas as pd
import unittest
from numpy.testing import assert_almost_equal, assert_array_less, assert_array_equal
from sklearn.datasets import load_iris
import torch
from sklearn import datasets
from sklearn.preprocessing import MinMaxScaler, StandardScaler
def get_2_dfs():
X, y, _ = datasets.make_regression(
n_samples=50,
n_features=2,
bias=1000,
n_informative=2,
noise=10,
coef=True,
random_state=42,
)
df1 = pd.DataFrame(
data=np.concatenate([X, y.reshape(-1, 1)], axis=1),
columns=["feat1", "feat2", "target"],
)
cats = np.random.randint(low=0, high=10, size=(df1.shape[0], 2))
df1["cat_1"] = cats[:, 0]
df1["cat_2"] = cats[:, 1]
index1 = pd.date_range("2000-01-01", "2000-06-01", periods=df1.shape[0])
index1 = pd.to_datetime(index1, utc=True)
df1.index = index1
X, y, _ = datasets.make_regression(
n_samples=53,
n_features=2,
bias=800,
n_informative=2,
noise=8,
coef=True,
random_state=42,
)
df2 = pd.DataFrame(
data=np.concatenate([X, y.reshape(-1, 1)], axis=1),
columns=["feat1", "feat2", "target"],
)
cats = np.random.randint(low=0, high=10, size=(df2.shape[0], 2))
df2["cat_1"] = cats[:, 0]
df2["cat_2"] = cats[:, 1]
index2 = pd.date_range("2000-06-01", "2000-12-01", periods=df2.shape[0])
index2 = pd.to_datetime(index2, utc=True)
df2.index = index2
return df1, df2
class TestDataset(unittest.TestCase):
def setUp(self):
data = load_iris()
self.df = pd.DataFrame(
data=np.c_[data["data"], data["target"]],
columns=data["feature_names"] + ["target"],
)
self.x_columns = self.df.drop("target", axis=1).columns
self.y_columns = "target"
X = self.df.drop("target", axis=1).values
y = self.df["target"].values
self.index = pd.date_range("2000-06-01", "2000-12-01", periods=X.shape[0])
        self.index = pd.to_datetime(self.index, utc=True)
import numpy as np
import datajoint as dj
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
import pandas as pd
from pipeline import experiment, ephys, psth
from pipeline.plot.util import (_plot_with_sem, _extract_one_stim_dur, _get_units_hemisphere,
_get_trial_event_times, _get_clustering_method,
_plot_stacked_psth_diff, _plot_avg_psth, jointplot_w_hue)
m_scale = 1200
_plt_xmin = -3
_plt_xmax = 2
def plot_clustering_quality(probe_insertion, clustering_method=None, axs=None):
probe_insertion = probe_insertion.proj()
if clustering_method is None:
try:
clustering_method = _get_clustering_method(probe_insertion)
except ValueError as e:
raise ValueError(str(e) + '\nPlease specify one with the kwarg "clustering_method"')
amp, snr, spk_rate, isi_violation = (ephys.Unit * ephys.UnitStat * ephys.ProbeInsertion.InsertionLocation
& probe_insertion & {'clustering_method': clustering_method}).fetch(
'unit_amp', 'unit_snr', 'avg_firing_rate', 'isi_violation')
metrics = {'amp': amp,
'snr': snr,
'isi': np.array(isi_violation) * 100, # to percentage
'rate': np.array(spk_rate)}
label_mapper = {'amp': 'Amplitude',
'snr': 'Signal to noise ratio (SNR)',
'isi': 'ISI violation (%)',
'rate': 'Firing rate (spike/s)'}
fig = None
if axs is None:
fig, axs = plt.subplots(2, 3, figsize = (12, 8))
fig.subplots_adjust(wspace=0.4)
assert axs.size == 6
for (m1, m2), ax in zip(itertools.combinations(list(metrics.keys()), 2), axs.flatten()):
ax.plot(metrics[m1], metrics[m2], '.k')
ax.set_xlabel(label_mapper[m1])
ax.set_ylabel(label_mapper[m2])
# cosmetic
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
return fig
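# Hedged usage sketch (not part of the pipeline code): restrict to a single probe insertion and
# plot its clustering-quality metrics. The key values below are purely illustrative.
# insertion_key = {'subject_id': 432570, 'session': 1, 'insertion_number': 1}
# fig = plot_clustering_quality(ephys.ProbeInsertion & insertion_key)
# fig.savefig('clustering_quality.png')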
def plot_unit_characteristic(probe_insertion, clustering_method=None, axs=None):
probe_insertion = probe_insertion.proj()
if clustering_method is None:
try:
clustering_method = _get_clustering_method(probe_insertion)
except ValueError as e:
raise ValueError(str(e) + '\nPlease specify one with the kwarg "clustering_method"')
amp, snr, spk_rate, x, y, insertion_depth = (
ephys.Unit * ephys.ProbeInsertion.InsertionLocation * ephys.UnitStat
& probe_insertion & {'clustering_method': clustering_method} & 'unit_quality != "all"').fetch(
'unit_amp', 'unit_snr', 'avg_firing_rate', 'unit_posx', 'unit_posy', 'dv_location')
insertion_depth = np.where(np.isnan(insertion_depth), 0, insertion_depth)
metrics = pd.DataFrame(list(zip(*(amp/amp.max(), snr/snr.max(), spk_rate/spk_rate.max(), x, y - insertion_depth))))
metrics.columns = ['amp', 'snr', 'rate', 'x', 'y']
fig = None
if axs is None:
fig, axs = plt.subplots(1, 3, figsize=(10, 8))
fig.subplots_adjust(wspace=0.6)
assert axs.size == 3
ymin = metrics.y.min() - 100
ymax = metrics.y.max() + 200
cosmetic = {'legend': None,
'linewidth': 1.75,
'alpha': 0.9,
'facecolor': 'none', 'edgecolor': 'k'}
sns.scatterplot(data=metrics, x='x', y='y', s=metrics.amp*m_scale, ax=axs[0], **cosmetic)
sns.scatterplot(data=metrics, x='x', y='y', s=metrics.snr*m_scale, ax=axs[1], **cosmetic)
sns.scatterplot(data=metrics, x='x', y='y', s=metrics.rate*m_scale, ax=axs[2], **cosmetic)
# manually draw the legend
lg_ypos = ymax
data = pd.DataFrame({'x': [3, 20, 40], 'y': [lg_ypos, lg_ypos, lg_ypos], 'size_ratio': np.array([0.2, 0.5, 0.8])})
for ax, ax_maxval in zip(axs.flatten(), (amp.max(), snr.max(), spk_rate.max())):
sns.scatterplot(data=data, x='x', y='y', s=data.size_ratio*m_scale, ax=ax, **dict(cosmetic, facecolor='k'))
for _, r in data.iterrows():
ax.text(r['x']-4, r['y']+70, (r['size_ratio']*ax_maxval).astype(int))
# cosmetic
for title, ax in zip(('Amplitude', 'SNR', 'Firing rate'), axs.flatten()):
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_title(title)
ax.set_xlim((-10, 60))
ax.add_patch(mpl.patches.Rectangle((-7, lg_ypos-80), 62, 210, fill=False))
ax.set_ylim((ymin, ymax + 150))
return fig
def plot_unit_selectivity(probe_insertion, clustering_method=None, axs=None):
probe_insertion = probe_insertion.proj()
if clustering_method is None:
try:
clustering_method = _get_clustering_method(probe_insertion)
except ValueError as e:
raise ValueError(str(e) + '\nPlease specify one with the kwarg "clustering_method"')
attr_names = ['unit', 'period', 'period_selectivity', 'contra_firing_rate',
'ipsi_firing_rate', 'unit_posx', 'unit_posy', 'dv_location']
selective_units = (psth.PeriodSelectivity * ephys.Unit * ephys.ProbeInsertion.InsertionLocation
* experiment.Period & probe_insertion & {'clustering_method': clustering_method}
& 'period_selectivity != "non-selective"').fetch(*attr_names)
selective_units = pd.DataFrame(selective_units).T
selective_units.columns = attr_names
selective_units.period_selectivity.astype('category')
# --- account for insertion depth (manipulator depth)
selective_units.unit_posy = (selective_units.unit_posy
- np.where(np.isnan(selective_units.dv_location.values.astype(float)),
0, selective_units.dv_location.values.astype(float)))
# --- get ipsi vs. contra firing rate difference
f_rate_diff = np.abs(selective_units.ipsi_firing_rate - selective_units.contra_firing_rate)
selective_units['f_rate_diff'] = f_rate_diff / f_rate_diff.max()
# --- prepare for plotting
cosmetic = {'legend': None,
'linewidth': 0.0001}
ymin = selective_units.unit_posy.min() - 100
ymax = selective_units.unit_posy.max() + 100
# a bit of hack to get the 'open circle'
pts = np.linspace(0, np.pi * 2, 24)
circ = np.c_[np.sin(pts) / 2, -np.cos(pts) / 2]
vert = np.r_[circ, circ[::-1] * .7]
open_circle = mpl.path.Path(vert)
# --- plot
fig = None
if axs is None:
fig, axs = plt.subplots(1, 3, figsize=(10, 8))
fig.subplots_adjust(wspace=0.6)
assert axs.size == 3
for (title, df), ax in zip(((p, selective_units[selective_units.period == p])
for p in ('sample', 'delay', 'response')), axs):
sns.scatterplot(data=df, x='unit_posx', y='unit_posy',
s=df.f_rate_diff.values.astype(float)*m_scale,
hue='period_selectivity', marker=open_circle,
palette={'contra-selective': 'b', 'ipsi-selective': 'r'},
ax=ax, **cosmetic)
contra_p = (df.period_selectivity == 'contra-selective').sum() / len(df) * 100
# cosmetic
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_title(f'{title}\n% contra: {contra_p:.2f}\n% ipsi: {100-contra_p:.2f}')
ax.set_xlim((-10, 60))
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_ylim((ymin, ymax))
return fig
def plot_unit_bilateral_photostim_effect(probe_insertion, clustering_method=None, axs=None):
probe_insertion = probe_insertion.proj()
if clustering_method is None:
try:
clustering_method = _get_clustering_method(probe_insertion)
except ValueError as e:
raise ValueError(str(e) + '\nPlease specify one with the kwarg "clustering_method"')
dv_loc = (ephys.ProbeInsertion.InsertionLocation & probe_insertion).fetch1('dv_location')
no_stim_cond = (psth.TrialCondition
& {'trial_condition_name':
'all_noearlylick_both_alm_nostim'}).fetch1('KEY')
bi_stim_cond = (psth.TrialCondition
& {'trial_condition_name':
'all_noearlylick_both_alm_stim'}).fetch1('KEY')
# get photostim duration
stim_durs = np.unique((experiment.Photostim & experiment.PhotostimEvent
* psth.TrialCondition().get_trials('all_noearlylick_both_alm_stim')
& probe_insertion).fetch('duration'))
stim_dur = _extract_one_stim_dur(stim_durs)
units = ephys.Unit & probe_insertion & {'clustering_method': clustering_method} & 'unit_quality != "all"'
metrics = pd.DataFrame(columns=['unit', 'x', 'y', 'frate_change'])
_, cue_onset = _get_trial_event_times(['delay'], units, 'all_noearlylick_both_alm_nostim')
cue_onset = cue_onset[0]
# XXX: could be done with 1x fetch+join
for u_idx, unit in enumerate(units.fetch('KEY', order_by='unit')):
x, y = (ephys.Unit & unit).fetch1('unit_posx', 'unit_posy')
# obtain unit psth per trial, for all nostim and bistim trials
nostim_trials = ephys.Unit.TrialSpikes & unit & psth.TrialCondition.get_trials(no_stim_cond['trial_condition_name'])
bistim_trials = ephys.Unit.TrialSpikes & unit & psth.TrialCondition.get_trials(bi_stim_cond['trial_condition_name'])
nostim_psths, nostim_edge = psth.compute_unit_psth(unit, nostim_trials.fetch('KEY'), per_trial=True)
bistim_psths, bistim_edge = psth.compute_unit_psth(unit, bistim_trials.fetch('KEY'), per_trial=True)
# compute the firing rate difference between contra vs. ipsi within the stimulation duration
ctrl_frate = np.array([nostim_psth[np.logical_and(nostim_edge >= cue_onset,
nostim_edge <= cue_onset + stim_dur)].mean()
for nostim_psth in nostim_psths])
stim_frate = np.array([bistim_psth[np.logical_and(bistim_edge >= cue_onset,
bistim_edge <= cue_onset + stim_dur)].mean()
for bistim_psth in bistim_psths])
frate_change = (stim_frate.mean() - ctrl_frate.mean()) / ctrl_frate.mean()
frate_change = abs(frate_change) if frate_change < 0 else 0.0001
metrics.loc[u_idx] = (int(unit['unit']), x, y - dv_loc, frate_change)
metrics.frate_change = metrics.frate_change / metrics.frate_change.max()
fig = None
if axs is None:
fig, axs = plt.subplots(1, 1, figsize=(4, 8))
cosmetic = {'legend': None,
'linewidth': 1.75,
'alpha': 0.9,
'facecolor': 'none', 'edgecolor': 'k'}
sns.scatterplot(data=metrics, x='x', y='y', s=metrics.frate_change*m_scale,
ax=axs, **cosmetic)
axs.spines['right'].set_visible(False)
axs.spines['top'].set_visible(False)
axs.set_title('% change')
axs.set_xlim((-10, 60))
return fig
def plot_stacked_contra_ipsi_psth(units, axs=None):
units = units.proj()
# get event start times: sample, delay, response
period_names, period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, 'good_noearlylick_hit')
hemi = _get_units_hemisphere(units)
conds_i = (psth.TrialCondition
& {'trial_condition_name':
'good_noearlylick_left_hit' if hemi == 'left' else 'good_noearlylick_right_hit'}).fetch1('KEY')
conds_c = (psth.TrialCondition
& {'trial_condition_name':
'good_noearlylick_right_hit' if hemi == 'left' else 'good_noearlylick_left_hit'}).fetch1('KEY')
sel_i = (ephys.Unit * psth.UnitSelectivity
& 'unit_selectivity = "ipsi-selective"' & units)
sel_c = (ephys.Unit * psth.UnitSelectivity
& 'unit_selectivity = "contra-selective"' & units)
# ipsi selective ipsi trials
psth_is_it = (psth.UnitPsth * sel_i.proj('unit_posy') & conds_i).fetch(order_by='unit_posy desc')
# ipsi selective contra trials
psth_is_ct = (psth.UnitPsth * sel_i.proj('unit_posy') & conds_c).fetch(order_by='unit_posy desc')
# contra selective contra trials
psth_cs_ct = (psth.UnitPsth * sel_c.proj('unit_posy') & conds_c).fetch(order_by='unit_posy desc')
# contra selective ipsi trials
psth_cs_it = (psth.UnitPsth * sel_c.proj('unit_posy') & conds_i).fetch(order_by='unit_posy desc')
fig = None
if axs is None:
fig, axs = plt.subplots(1, 2, figsize=(20, 20))
assert axs.size == 2
_plot_stacked_psth_diff(psth_cs_ct, psth_cs_it, ax=axs[0], vlines=period_starts, flip=True)
axs[0].set_title('Contra-selective Units')
axs[0].set_ylabel('Unit (by depth)')
axs[0].set_xlabel('Time to go (s)')
_plot_stacked_psth_diff(psth_is_it, psth_is_ct, ax=axs[1], vlines=period_starts)
axs[1].set_title('Ipsi-selective Units')
axs[1].set_ylabel('Unit (by depth)')
axs[1].set_xlabel('Time to go (s)')
return fig
def plot_avg_contra_ipsi_psth(units, axs=None):
units = units.proj()
# get event start times: sample, delay, response
period_names, period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, 'good_noearlylick_hit')
hemi = _get_units_hemisphere(units)
good_unit = ephys.Unit & 'unit_quality != "all"'
conds_i = (psth.TrialCondition
& {'trial_condition_name':
'good_noearlylick_left_hit' if hemi == 'left' else 'good_noearlylick_right_hit'}).fetch('KEY')
conds_c = (psth.TrialCondition
& {'trial_condition_name':
'good_noearlylick_right_hit' if hemi == 'left' else 'good_noearlylick_left_hit'}).fetch('KEY')
sel_i = (ephys.Unit * psth.UnitSelectivity
& 'unit_selectivity = "ipsi-selective"' & units)
sel_c = (ephys.Unit * psth.UnitSelectivity
& 'unit_selectivity = "contra-selective"' & units)
psth_is_it = (((psth.UnitPsth & conds_i)
* ephys.Unit.proj('unit_posy'))
& good_unit.proj() & sel_i.proj()).fetch(
'unit_psth', order_by='unit_posy desc')
psth_is_ct = (((psth.UnitPsth & conds_c)
* ephys.Unit.proj('unit_posy'))
& good_unit.proj() & sel_i.proj()).fetch(
'unit_psth', order_by='unit_posy desc')
psth_cs_ct = (((psth.UnitPsth & conds_c)
* ephys.Unit.proj('unit_posy'))
& good_unit.proj() & sel_c.proj()).fetch(
'unit_psth', order_by='unit_posy desc')
psth_cs_it = (((psth.UnitPsth & conds_i)
* ephys.Unit.proj('unit_posy'))
& good_unit.proj() & sel_c.proj()).fetch(
'unit_psth', order_by='unit_posy desc')
fig = None
if axs is None:
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
assert axs.size == 2
_plot_avg_psth(psth_cs_it, psth_cs_ct, period_starts, axs[0],
'Contra-selective')
_plot_avg_psth(psth_is_it, psth_is_ct, period_starts, axs[1],
'Ipsi-selective')
ymax = max([ax.get_ylim()[1] for ax in axs])
for ax in axs:
ax.set_ylim((0, ymax))
return fig
def plot_psth_bilateral_photostim_effect(units, axs=None):
units = units.proj()
hemi = _get_units_hemisphere(units)
psth_s_l = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name':
'all_noearlylick_both_alm_stim_left'}).fetch('unit_psth')
psth_n_l = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name':
'all_noearlylick_both_alm_nostim_left'}).fetch('unit_psth')
psth_s_r = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name':
'all_noearlylick_both_alm_stim_right'}).fetch('unit_psth')
psth_n_r = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name':
'all_noearlylick_both_alm_nostim_right'}).fetch('unit_psth')
# get event start times: sample, delay, response
period_names, period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, 'good_noearlylick_hit')
# get photostim duration
stim_durs = np.unique((experiment.Photostim & experiment.PhotostimEvent
* psth.TrialCondition().get_trials('all_noearlylick_both_alm_stim')
& units).fetch('duration'))
stim_dur = _extract_one_stim_dur(stim_durs)
if hemi == 'left':
psth_s_i = psth_s_l
psth_n_i = psth_n_l
psth_s_c = psth_s_r
psth_n_c = psth_n_r
else:
psth_s_i = psth_s_r
psth_n_i = psth_n_r
psth_s_c = psth_s_l
psth_n_c = psth_n_l
fig = None
if axs is None:
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
assert axs.size == 2
_plot_avg_psth(psth_n_i, psth_n_c, period_starts, axs[0],
'Control')
_plot_avg_psth(psth_s_i, psth_s_c, period_starts, axs[1],
'Bilateral ALM photostim')
# cosmetic
ymax = max([ax.get_ylim()[1] for ax in axs])
for ax in axs:
ax.set_ylim((0, ymax))
# add shaded bar for photostim
stim_time = period_starts[np.where(period_names == 'delay')[0][0]]
axs[1].axvspan(stim_time, stim_time + stim_dur, alpha=0.3, color='royalblue')
return fig
def plot_psth_photostim_effect(units, condition_name_kw=['both_alm'], axs=None):
"""
For the specified `units`, plot PSTH comparison between stim vs. no-stim with left/right trial instruction
The stim location (or other appropriate search keywords) can be specified in `condition_name_kw` (default: bilateral ALM)
"""
units = units.proj()
fig = None
if axs is None:
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
assert axs.size == 2
hemi = _get_units_hemisphere(units)
# no photostim:
psth_n_l = psth.TrialCondition.get_cond_name_from_keywords(['_nostim', '_left'])[0]
psth_n_r = psth.TrialCondition.get_cond_name_from_keywords(['_nostim', '_right'])[0]
psth_n_l = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name': psth_n_l} & 'unit_psth is not NULL').fetch('unit_psth')
psth_n_r = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name': psth_n_r} & 'unit_psth is not NULL').fetch('unit_psth')
psth_s_l = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim_left'])[0]
psth_s_r = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim_right'])[0]
psth_s_l = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name': psth_s_l} & 'unit_psth is not NULL').fetch('unit_psth')
psth_s_r = (psth.UnitPsth * psth.TrialCondition & units
& {'trial_condition_name': psth_s_r} & 'unit_psth is not NULL').fetch('unit_psth')
# get event start times: sample, delay, response
period_names, period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, 'good_noearlylick_hit')
# get photostim duration
stim_trial_cond_name = psth.TrialCondition.get_cond_name_from_keywords(condition_name_kw + ['_stim'])[0]
stim_durs = np.unique((experiment.Photostim & experiment.PhotostimEvent
* psth.TrialCondition().get_trials(stim_trial_cond_name)
& units).fetch('duration'))
stim_dur = _extract_one_stim_dur(stim_durs)
if hemi == 'left':
psth_s_i = psth_s_l
psth_n_i = psth_n_l
psth_s_c = psth_s_r
psth_n_c = psth_n_r
else:
psth_s_i = psth_s_r
psth_n_i = psth_n_r
psth_s_c = psth_s_l
psth_n_c = psth_n_l
_plot_avg_psth(psth_n_i, psth_n_c, period_starts, axs[0],
'Control')
_plot_avg_psth(psth_s_i, psth_s_c, period_starts, axs[1],
'Photostim')
# cosmetic
ymax = max([ax.get_ylim()[1] for ax in axs])
for ax in axs:
ax.set_ylim((0, ymax))
ax.set_xlim([_plt_xmin, _plt_xmax])
# add shaded bar for photostim
stim_time = period_starts[np.where(period_names == 'delay')[0][0]]
axs[1].axvspan(stim_time, stim_time + stim_dur, alpha=0.3, color='royalblue')
return fig
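# Hedged usage sketch (illustrative only): compare control vs. photostim PSTHs for all units
# from one probe insertion, using the default bilateral-ALM stim condition keywords.
# units = ephys.Unit & {'subject_id': 432570, 'session': 1, 'insertion_number': 1}
# fig = plot_psth_photostim_effect(units)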
def plot_coding_direction(units, time_period=None, axs=None):
_, proj_contra_trial, proj_ipsi_trial, time_stamps, _ = psth.compute_CD_projected_psth(
units.fetch('KEY'), time_period=time_period)
# get event start times: sample, delay, response
period_names, period_starts = _get_trial_event_times(['sample', 'delay', 'go'], units, 'good_noearlylick')
fig = None
if axs is None:
fig, axs = plt.subplots(1, 1, figsize=(8, 6))
# plot
_plot_with_sem(proj_contra_trial, time_stamps, ax=axs, c='b')
_plot_with_sem(proj_ipsi_trial, time_stamps, ax=axs, c='r')
for x in period_starts:
axs.axvline(x=x, linestyle = '--', color = 'k')
# cosmetic
axs.spines['right'].set_visible(False)
axs.spines['top'].set_visible(False)
axs.set_ylabel('CD projection (a.u.)')
axs.set_xlabel('Time (s)')
return fig
def plot_paired_coding_direction(unit_g1, unit_g2, labels=None, time_period=None):
"""
Plot trial-to-trial CD-endpoint correlation between CD-projected trial-psth from two unit-groups (e.g. two brain regions)
Note: coding direction is calculated on selective units, contra vs. ipsi, within the specified time_period
"""
_, proj_contra_trial_g1, proj_ipsi_trial_g1, time_stamps, unit_g1_hemi = psth.compute_CD_projected_psth(
unit_g1.fetch('KEY'), time_period=time_period)
_, proj_contra_trial_g2, proj_ipsi_trial_g2, time_stamps, unit_g2_hemi = psth.compute_CD_projected_psth(
unit_g2.fetch('KEY'), time_period=time_period)
# get event start times: sample, delay, response
period_names, period_starts = _get_trial_event_times(['sample', 'delay', 'go'], unit_g1, 'good_noearlylick')
if labels:
assert len(labels) == 2
else:
labels = ('unit group 1', 'unit group 2')
# plot projected trial-psth
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
_plot_with_sem(proj_contra_trial_g1, time_stamps, ax=axs[0], c='b')
_plot_with_sem(proj_ipsi_trial_g1, time_stamps, ax=axs[0], c='r')
_plot_with_sem(proj_contra_trial_g2, time_stamps, ax=axs[1], c='b')
_plot_with_sem(proj_ipsi_trial_g2, time_stamps, ax=axs[1], c='r')
# cosmetic
for ax, label in zip(axs, labels):
for x in period_starts:
ax.axvline(x=x, linestyle = '--', color = 'k')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel('CD projection (a.u.)')
ax.set_xlabel('Time (s)')
ax.set_title(label)
# plot trial CD-endpoint correlation - if 2 unit-groups are from 2 hemispheres,
# then contra-ipsi definition is based on the first group
p_start, p_end = time_period
contra_cdend_1 = proj_contra_trial_g1[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
ipsi_cdend_1 = proj_ipsi_trial_g1[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
    if unit_g1_hemi == unit_g2_hemi:
contra_cdend_2 = proj_contra_trial_g2[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
ipsi_cdend_2 = proj_ipsi_trial_g2[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
else:
contra_cdend_2 = proj_ipsi_trial_g2[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
ipsi_cdend_2 = proj_contra_trial_g2[:, np.logical_and(time_stamps >= p_start, time_stamps < p_end)].mean(axis=1)
    c_df = pd.DataFrame([contra_cdend_1, contra_cdend_2])
from __future__ import division
"""Class definitions."""
import os, warnings
from os.path import join
import numpy as np
import pandas as pd
from pandas import DataFrame, Series, Index
import six
import abc
from copy import deepcopy
from functools import reduce
from nltools.data import Adjacency, design_matrix
from nltools.stats import (downsample,
upsample,
transform_pairwise)
from nltools.utils import (set_decomposition_algorithm)
from sklearn.metrics.pairwise import pairwise_distances, cosine_similarity
from sklearn.utils import check_random_state
from feat.utils import read_feat, read_affectiva, read_facet, read_openface, wavelet, calc_hist_auc, load_h5, get_resource_path
from feat.plotting import plot_face
from nilearn.signal import clean
from scipy.signal import convolve
class FexSeries(Series):
"""
    This is a sub-class of pandas Series. It does not add methods of its own, but
    it is required to retain normal slicing functionality for the Fex class, i.e.
    so that slicing is handled the way it typically is in pandas.
All methods should be called on Fex below.
"""
_metadata = ['au_columns', 'emotion_columns', 'facebox_columns', 'landmark_columns', 'facepose_columns', 'gaze_columns', 'time_columns', 'design_columns', 'fex_columns', 'filename', 'sampling_freq', 'features', 'sessions', 'detector']
def __init__(self, *args, **kwargs):
self.sampling_freq = kwargs.pop('sampling_freq', None)
self.sessions = kwargs.pop('sessions', None)
super().__init__(*args, **kwargs)
@property
def _constructor(self):
return FexSeries
@property
def _constructor_expanddim(self):
return Fex
def __finalize__(self, other, method=None, **kwargs):
""" propagate metadata from other to self """
# NOTE: backported from pandas master (upcoming v0.13)
for name in self._metadata:
object.__setattr__(self, name, getattr(other, name, None))
return self
class Fex(DataFrame):
"""Fex is a class to represent facial expression (Fex) data. It is essentially
an enhanced pandas df, with extra attributes and methods. Methods
    always return a new Fex instance.
Args:
filename: (str, optional) path to file
detector: (str, optional) name of software used to extract Fex. (Feat, FACET, OpenFace, or Affectiva)
sampling_freq (float, optional): sampling rate of each row in Hz;
defaults to None
        features (pd.DataFrame, optional): features that correspond to each
Fex row
sessions: Unique values indicating rows associated with a specific
session (e.g., trial, subject, etc). Must be a 1D array of
n_samples elements; defaults to None
"""
# __metaclass__ = abc.ABCMeta
# Need to specify attributes for pandas.
_metadata = ['au_columns', 'emotion_columns', 'facebox_columns', 'landmark_columns', 'facepose_columns', 'gaze_columns', 'time_columns', 'design_columns', 'fex_columns', 'filename', 'sampling_freq', 'features', 'sessions', 'detector']
def __finalize__(self, other, method=None, **kwargs):
"""propagate metadata from other to self """
self = super().__finalize__(other, method=method, **kwargs)
# merge operation: using metadata of the left object
if method == "merge":
for name in self._metadata:
print("self", name, self.au_columns, other.left.au_columns)
object.__setattr__(self, name, getattr(other.left, name, None))
# concat operation: using metadata of the first object
elif method == "concat":
for name in self._metadata:
object.__setattr__(self, name, getattr(other.objs[0], name, None))
return self
def __init__(self, *args, **kwargs):
### Columns ###
self.au_columns = kwargs.pop('au_columns', None)
self.emotion_columns = kwargs.pop('emotion_columns', None)
self.facebox_columns = kwargs.pop('facebox_columns', None)
self.landmark_columns = kwargs.pop('landmark_columns', None)
self.facepose_columns = kwargs.pop('facepose_columns', None)
self.gaze_columns = kwargs.pop('gaze_columns', None)
self.time_columns = kwargs.pop('time_columns', None)
self.design_columns = kwargs.pop('design_columns', None)
### Meta data ###
self.filename = kwargs.pop('filename', None)
self.sampling_freq = kwargs.pop('sampling_freq', None)
self.detector = kwargs.pop('detector', None)
self.features = kwargs.pop('features', None)
self.sessions = kwargs.pop('sessions', None)
super().__init__(*args, **kwargs)
if self.sessions is not None:
if not len(self.sessions) == len(self):
raise ValueError('Make sure sessions is same length as data.')
self.sessions = np.array(self.sessions)
# if (self.fex_columns is None) and (not self._metadata):
# try:
# self.fex_columns = self._metadata
# except:
# print('Failed to import _metadata to fex_columns')
# Set _metadata attributes on series: Kludgy solution
for k in self:
self[k].sampling_freq = self.sampling_freq
self[k].sessions = self.sessions
@property
def _constructor(self):
return Fex
@property
def _constructor_sliced(self):
return FexSeries
def _ixs(self, i, axis=0):
""" Override indexing to ensure Fex._metadata is propogated correctly
when integer indexing
i : int, slice, or sequence of integers
axis : int
"""
result = super()._ixs(i, axis=axis)
# Override columns
if axis == 1:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
label = self.columns[i]
if isinstance(i, slice):
# need to return view
lab_slice = slice(label[0], label[-1])
return self.loc[:, lab_slice]
else:
if isinstance(label, Index):
return self._take(i, axis=1, convert=True)
index_len = len(self.index)
# if the values returned are not the same length
# as the index (iow a not found value), iget returns
# a 0-len ndarray. This is effectively catching
# a numpy error (as numpy should really raise)
values = self._data.iget(i)
if index_len and not len(values):
values = np.array([np.nan] * index_len, dtype=object)
result = self._constructor_sliced(
values, index=self.index, name=label, fastpath=True,
sampling_freq=self.sampling_freq, sessions=self.sessions)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
def aus(self):
"""Returns the Action Units data
Returns:
DataFrame: Action Units data
"""
return self[self.au_columns]
def emotions(self):
"""Returns the emotion data
Returns:
DataFrame: emotion data
"""
return self[self.emotion_columns]
def landmark(self):
"""Returns the landmark data
Returns:
DataFrame: landmark data
"""
return self[self.landmark_columns]
def landmark_x(self):
"""Returns the x landmarks.
Returns:
DataFrame: x landmarks.
"""
######## TODO: NATSORT columns before returning #######
x_cols = [col for col in self.landmark_columns if 'x' in col]
return self[x_cols]
def landmark_y(self):
"""Returns the y landmarks.
Returns:
DataFrame: y landmarks.
"""
y_cols = [col for col in self.landmark_columns if 'y' in col]
return self[y_cols]
def facebox(self):
"""Returns the facebox data
Returns:
DataFrame: facebox data
"""
return self[self.facebox_columns]
def time(self):
"""Returns the time data
Returns:
DataFrame: time data
"""
return self[self.time_columns]
def design(self):
"""Returns the design data
Returns:
DataFrame: time data
"""
return self[self.design_columns]
def read_file(self, *args, **kwargs):
"""Loads file into FEX class
Returns:
DataFrame: Fex class
"""
if self.detector=='FACET':
return self.read_facet(self.filename)
elif self.detector=='OpenFace':
return self.read_openface(self.filename)
elif self.detector=='Affectiva':
return self.read_affectiva(self.filename)
elif self.detector=='Feat':
return self.read_feat(self.filename)
else:
print("Must specifiy which detector [Feat, FACET, OpenFace, or Affectiva]")
def info(self):
"""Print class meta data.
"""
attr_list = []
for name in self._metadata:
attr_list.append(name +": "+ str(getattr(self, name, None))+'\n')
print(f"{self.__class__}\n" + "".join(attr_list))
### Class Methods ###
def read_feat(self, filename=None, *args, **kwargs):
# Check if filename exists in metadata.
if not filename:
try:
filename = self.filename
except:
print("filename must be specified.")
result = read_feat(filename, *args, **kwargs)
return result
def read_facet(self, filename=None, *args, **kwargs):
# Check if filename exists in metadata.
if not filename:
try:
filename = self.filename
except:
print("filename must be specified.")
result = read_facet(filename, *args, **kwargs)
for name in self._metadata:
attr_value = getattr(self, name, None)
            if attr_value and getattr(result, name, None) is None:
setattr(result, name, attr_value)
return result
def read_openface(self, filename=None, *args, **kwargs):
if not filename:
try:
filename = self.filename
except:
print("filename must be specified.")
result = read_openface(filename, *args, **kwargs)
for name in self._metadata:
attr_value = getattr(self, name, None)
            if attr_value and getattr(result, name, None) is None:
setattr(result, name, attr_value)
return result
def read_affectiva(self, filename=None, *args, **kwargs):
if not filename:
try:
filename = self.filename
except:
print("filename must be specified.")
result = read_affectiva(filename, *args, **kwargs)
for name in self._metadata:
attr_value = getattr(self, name, None)
            if attr_value and getattr(result, name, None) is None:
setattr(result, name, attr_value)
return result
def itersessions(self):
''' Iterate over Fex sessions as (session, series) pairs.
Returns:
it: a generator that iterates over the sessions of the fex instance
'''
for x in np.unique(self.sessions):
yield x, self.loc[self.sessions==x, :]
def append(self, data, session_id=None, axis=0):
''' Append a new Fex object to an existing object
Args:
data: (Fex) Fex instance to append
session_id: session label
axis: ([0,1]) Axis to append. Rows=0, Cols=1
Returns:
Fex instance
'''
if not isinstance(data, self.__class__):
raise ValueError('Make sure data is a Fex instance.')
if self.empty:
out = data.copy()
if session_id is not None:
out.sessions = np.repeat(session_id, len(data))
else:
if self.sampling_freq != data.sampling_freq:
raise ValueError('Make sure Fex objects have the same '
'sampling frequency')
if axis==0:
out = self.__class__(pd.concat([self, data],
axis=axis,
ignore_index=True),
sampling_freq=self.sampling_freq)
if session_id is not None:
out.sessions = np.hstack([self.sessions, np.repeat(session_id, len(data))])
if self.features is not None:
if data.features is not None:
if self.features.shape[1]==data.features.shape[1]:
out.features = self.features.append(data.features, ignore_index=True)
else:
raise ValueError('Different number of features in new dataset.')
else:
out.features = self.features
elif data.features is not None:
out = data.features
elif axis==1:
out = self.__class__(pd.concat([self, data], axis=axis),
sampling_freq=self.sampling_freq)
if self.sessions is not None:
if data.sessions is not None:
if np.array_equal(self.sessions, data.sessions):
out.sessions = self.sessions
else:
raise ValueError('Both sessions must be identical.')
else:
out.sessions = self.sessions
elif data.sessions is not None:
out.sessions = data.sessions
if self.features is not None:
out.features = self.features
if data.features is not None:
                        out.features = pd.concat([out.features, data.features], axis=1)
elif data.features is not None:
out.features = data.features
else:
raise ValueError('Axis must be 1 or 0.')
return out
def regress(self):
NotImplemented
def ttest(self, threshold_dict=None):
NotImplemented
def predict(self, *args, **kwargs):
NotImplemented
def downsample(self, target, **kwargs):
""" Downsample Fex columns. Relies on nltools.stats.downsample,
but ensures that returned object is a Fex object.
Args:
target(float): downsampling target, typically in samples not
seconds
kwargs: additional inputs to nltools.stats.downsample
"""
df_ds = downsample(self, sampling_freq=self.sampling_freq,
target=target, **kwargs)
if self.features is not None:
ds_features = downsample(self.features,
sampling_freq=self.sampling_freq,
target=target, **kwargs)
else:
ds_features = self.features
return self.__class__(df_ds, sampling_freq=target, features=ds_features)
def upsample(self, target, target_type='hz', **kwargs):
""" Upsample Fex columns. Relies on nltools.stats.upsample,
but ensures that returned object is a Fex object.
Args:
target(float): upsampling target, default 'hz' (also 'samples',
'seconds')
kwargs: additional inputs to nltools.stats.upsample
"""
df_us = upsample(self, sampling_freq=self.sampling_freq,
target=target, target_type=target_type, **kwargs)
if self.features is not None:
us_features = upsample(self.features,
sampling_freq=self.sampling_freq,
target=target, target_type=target_type,
**kwargs)
else:
us_features = self.features
return self.__class__(df_us, sampling_freq=target, features=us_features)
def distance(self, method='euclidean', **kwargs):
""" Calculate distance between rows within a Fex() instance.
Args:
method: type of distance metric (can use any scikit learn or
sciypy metric)
Returns:
dist: Outputs a 2D distance matrix.
"""
return Adjacency(pairwise_distances(self, metric=method, **kwargs),
matrix_type='Distance')
def rectification(self, std=3):
""" Removes time points when the face position moved
more than N standard deviations from the mean.
Args:
std (default 3): standard deviation from mean to remove outlier face locations
Returns:
data: cleaned FEX object
"""
#### TODO: CHECK IF FACET OR FIND WAY TO DO WITH OTHER ONES TOO #####
if self.facebox_columns and self.au_columns and self.emotion_columns:
cleaned = deepcopy(self)
face_columns = self.facebox_columns
x_m = self.FaceRectX.mean()
x_std = self.FaceRectX.std()
y_m = self.FaceRectY.mean()
y_std = self.FaceRectY.std()
x_bool = (self.FaceRectX>std*x_std+x_m) | (self.FaceRectX<x_m-std*x_std)
y_bool = (self.FaceRectY>std*y_std+y_m) | (self.FaceRectY<y_m-std*y_std)
xy_bool = x_bool | y_bool
cleaned.loc[xy_bool, face_columns + self.au_columns + self.emotion_columns] = np.nan
return cleaned
else:
raise ValueError("Facebox columns need to be defined.")
def baseline(self, baseline='median', normalize=None,
ignore_sessions=False):
''' Reference a Fex object to a baseline.
Args:
            baseline: {'median', 'mean', 'begin', FexSeries instance}. Will subtract baseline
from Fex object (e.g., mean, median). If passing a Fex
object, it will treat that as the baseline.
normalize: (str). Can normalize results of baseline.
Values can be [None, 'db','pct']; default None.
ignore_sessions: (bool) If True, will ignore Fex.sessions
information. Otherwise, method will be applied
separately to each unique session.
Returns:
Fex object
'''
if self.sessions is None or ignore_sessions:
out = self.copy()
            if baseline == 'median':
                baseline = out.median()
            elif baseline == 'mean':
                baseline = out.mean()
            elif baseline == 'begin':
                baseline = out.iloc[0,:]
            elif isinstance(baseline, (Series, FexSeries)):
                baseline = baseline
            elif isinstance(baseline, (Fex, DataFrame)):
                raise ValueError('Must pass in a FexSeries, not a Fex instance.')
            else:
                raise ValueError('%s is not implemented please use {mean, median, Fex}' % baseline)
            if normalize == 'db':
                out = 10*np.log10(out - baseline)/baseline
            elif normalize == 'pct':
                out = 100*(out - baseline)/baseline
            else:
                out = out - baseline
else:
out = self.__class__(sampling_freq=self.sampling_freq)
for k,v in self.itersessions():
                if baseline == 'median':
                    baseline = v.median()
                elif baseline == 'mean':
                    baseline = v.mean()
                elif baseline == 'begin':
                    baseline = v.iloc[0,:]
                elif isinstance(baseline, (Series, FexSeries)):
                    baseline = baseline
                elif isinstance(baseline, (Fex, DataFrame)):
                    raise ValueError('Must pass in a FexSeries, not a Fex instance.')
                else:
                    raise ValueError('%s is not implemented please use {mean, median, Fex}' % baseline)
                if normalize == 'db':
                    out = out.append(10*np.log10(v-baseline)/baseline, session_id=k)
                elif normalize == 'pct':
                    out = out.append(100*(v-baseline)/baseline, session_id=k)
                else:
                    out = out.append(v-baseline, session_id=k)
return self.__class__(out, sampling_freq=self.sampling_freq,
features=self.features, sessions=self.sessions)
def clean(self, detrend=True, standardize=True, confounds=None,
low_pass=None, high_pass=None, ensure_finite=False,
ignore_sessions=False, *args, **kwargs):
""" Clean Time Series signal
This function wraps nilearn functionality and can filter, denoise,
detrend, etc.
See http://nilearn.github.io/modules/generated/nilearn.signal.clean.html
This function can do several things on the input signals, in
the following order:
- detrend
- standardize
- remove confounds
- low- and high-pass filter
If Fex.sessions is not None, sessions will be cleaned separately.
Args:
confounds: (numpy.ndarray, str or list of Confounds timeseries)
Shape must be (instant number, confound number),
or just (instant number,). The number of time
instants in signals and confounds must be identical
(i.e. signals.shape[0] == confounds.shape[0]). If a
string is provided, it is assumed to be the name of
a csv file containing signals as columns, with an
optional one-line header. If a list is provided,
all confounds are removed from the input signal,
as if all were in the same array.
low_pass: (float) low pass cutoff frequencies in Hz.
high_pass: (float) high pass cutoff frequencies in Hz.
detrend: (bool) If detrending should be applied on timeseries
(before confound removal)
standardize: (bool) If True, returned signals are set to unit
variance.
ensure_finite: (bool) If True, the non-finite values
(NANs and infs) found in the data will be
replaced by zeros.
ignore_sessions: (bool) If True, will ignore Fex.sessions
information. Otherwise, method will be applied
separately to each unique session.
Returns:
cleaned Fex instance
"""
if self.sessions is not None:
if ignore_sessions:
sessions = None
else:
sessions = self.sessions
else:
sessions = None
return self.__class__(pd.DataFrame(clean(self.values, detrend=detrend,
standardize=standardize,
confounds=confounds,
low_pass=low_pass,
high_pass=high_pass,
ensure_finite=ensure_finite,
                                                 t_r=1./float(self.sampling_freq),
sessions=sessions,
*args, **kwargs),
columns=self.columns),
sampling_freq=self.sampling_freq,
features=self.features,
sessions=self.sessions)
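    # Hedged usage sketch (illustrative only): detrend, standardize and band-pass the Fex signals.
    # All keyword arguments below are parameters of clean() itself; the cutoff values are arbitrary.
    # cleaned = fex.clean(detrend=True, standardize=True, low_pass=0.5, high_pass=0.01)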
def decompose(self, algorithm='pca', axis=1, n_components=None,
*args, **kwargs):
''' Decompose Fex instance
Args:
algorithm: (str) Algorithm to perform decomposition
types=['pca','ica','nnmf','fa']
axis: dimension to decompose [0,1]
n_components: (int) number of components. If None then retain
as many as possible.
Returns:
output: a dictionary of decomposition parameters
'''
out = {}
out['decomposition_object'] = set_decomposition_algorithm(
algorithm=algorithm,
n_components=n_components,
*args, **kwargs)
com_names = ['c%s' % str(x+1) for x in range(n_components)]
if axis == 0:
out['decomposition_object'].fit(self.T)
out['components'] = self.__class__(pd.DataFrame(out['decomposition_object'].transform(self.T), index=self.columns, columns=com_names), sampling_freq=None)
out['weights'] = self.__class__(pd.DataFrame(out['decomposition_object'].components_.T,
index=self.index,columns=com_names),
sampling_freq=self.sampling_freq,
features=self.features,
sessions=self.sessions)
if axis == 1:
out['decomposition_object'].fit(self)
out['components'] = self.__class__(pd.DataFrame(out['decomposition_object'].transform(self),
columns=com_names),
sampling_freq=self.sampling_freq,
features=self.features,
sessions=self.sessions)
out['weights'] = self.__class__(pd.DataFrame(out['decomposition_object'].components_, index=com_names, columns=self.columns), sampling_freq=None).T
return out
def extract_mean(self, ignore_sessions=False, *args, **kwargs):
""" Extract mean of each feature
Args:
ignore_sessions: (bool) ignore sessions or extract separately
by sessions if available.
Returns:
Fex: mean values for each feature
"""
if self.sessions is None or ignore_sessions:
feats = pd.DataFrame(self.mean()).T
feats.columns = 'mean_' + feats.columns
return self.__class__(feats, sampling_freq=self.sampling_freq)
else:
feats = pd.DataFrame()
for k,v in self.itersessions():
feats = feats.append(pd.Series(v.mean(), name=k))
feats.columns = 'mean_' + feats.columns
return self.__class__(feats, sampling_freq=self.sampling_freq,
sessions=np.unique(self.sessions))
def extract_min(self, ignore_sessions=False, *args, **kwargs):
""" Extract minimum of each feature
Args:
ignore_sessions: (bool) ignore sessions or extract separately
by sessions if available.
Returns:
Fex: (Fex) minimum values for each feature
"""
if self.sessions is None or ignore_sessions:
feats = pd.DataFrame(self.min()).T
feats.columns = 'min_' + feats.columns
return self.__class__(feats, sampling_freq=self.sampling_freq)
else:
feats = pd.DataFrame()
for k,v in self.itersessions():
feats = feats.append(pd.Series(v.min(), name=k))
feats.columns = 'min_' + feats.columns
return self.__class__(feats, sampling_freq=self.sampling_freq,
sessions=np.unique(self.sessions))
def extract_max(self, ignore_sessions=False, *args, **kwargs):
""" Extract maximum of each feature
Args:
ignore_sessions: (bool) ignore sessions or extract separately
by sessions if available.
Returns:
fex: (Fex) maximum values for each feature
"""
if self.sessions is None or ignore_sessions:
feats = pd.DataFrame(self.max()).T
feats.columns = 'max_' + feats.columns
return self.__class__(feats, sampling_freq=self.sampling_freq)
else:
feats = pd.DataFrame()
for k,v in self.itersessions():
feats = feats.append(pd.Series(v.max(), name=k))
feats.columns = 'max_' + feats.columns
return self.__class__(feats, sampling_freq=self.sampling_freq,
sessions=np.unique(self.sessions))
def extract_summary(self, mean=False, max=False, min=False,
ignore_sessions=False, *args, **kwargs):
""" Extract summary of multiple features
Args:
mean: (bool) extract mean of features
max: (bool) extract max of features
min: (bool) extract min of features
ignore_sessions: (bool) ignore sessions or extract separately
by sessions if available.
Returns:
fex: (Fex)
"""
out = self.__class__(sampling_freq=self.sampling_freq)
if mean:
out = out.append(self.extract_mean(ignore_sessions=ignore_sessions,
*args, **kwargs), axis=1)
if max:
out = out.append(self.extract_max(ignore_sessions=ignore_sessions,
*args, **kwargs), axis=1)
if min:
out = out.append(self.extract_min(ignore_sessions=ignore_sessions,
*args, **kwargs), axis=1)
return out
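    # Hedged usage sketch (illustrative only): load an OpenFace file and summarize each feature.
    # The filename is a placeholder; sampling_freq should match the video frame rate.
    # fex = Fex(filename='openface_output.csv', detector='OpenFace', sampling_freq=30).read_file()
    # summary = fex.extract_summary(mean=True, max=True, min=True)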
def extract_wavelet(self, freq, num_cyc=3, mode='complex',
ignore_sessions=False):
''' Perform feature extraction by convolving with a complex morlet
wavelet
Args:
freq: (float) frequency to extract
num_cyc: (float) number of cycles for wavelet
mode: (str) feature to extract, e.g.,
['complex','filtered','phase','magnitude','power']
ignore_sessions: (bool) ignore sessions or extract separately
by sessions if available.
Returns:
convolved: (Fex instance)
'''
wav = wavelet(freq, sampling_freq=self.sampling_freq, num_cyc=num_cyc)
if self.sessions is None or ignore_sessions:
convolved = self.__class__(pd.DataFrame({x:convolve(y, wav, mode='same') for x,y in self.iteritems()}), sampling_freq=self.sampling_freq)
else:
convolved = self.__class__(sampling_freq=self.sampling_freq)
for k,v in self.itersessions():
session = self.__class__(pd.DataFrame({x:convolve(y, wav, mode='same') for x,y in v.iteritems()}), sampling_freq=self.sampling_freq)
convolved = convolved.append(session, session_id=k)
        if mode == 'complex':
            convolved = convolved
        elif mode == 'filtered':
            convolved = np.real(convolved)
        elif mode == 'phase':
            convolved = np.angle(convolved)
        elif mode == 'magnitude':
            convolved = np.abs(convolved)
        elif mode == 'power':
            convolved = np.abs(convolved)**2
else:
raise ValueError("Mode must be ['complex','filtered','phase',"
"'magnitude','power']")
convolved = self.__class__(convolved, sampling_freq=self.sampling_freq,
features=self.features,
sessions=self.sessions)
convolved.columns = 'f' + '%s' % round(freq, 2) + '_' + mode + '_' + self.columns
return convolved
def extract_multi_wavelet(self, min_freq=.06, max_freq=.66, bank=8, *args, **kwargs):
''' Convolve with a bank of morlet wavelets. Wavelets are equally
spaced from min to max frequency. See extract_wavelet for more
information and options.
Args:
min_freq: (float) minimum frequency to extract
max_freq: (float) maximum frequency to extract
bank: (int) size of wavelet bank
num_cyc: (float) number of cycles for wavelet
mode: (str) feature to extract, e.g.,
['complex','filtered','phase','magnitude','power']
ignore_sessions: (bool) ignore sessions or extract separately
by sessions if available.
Returns:
convolved: (Fex instance)
'''
out = []
for f in np.geomspace(min_freq, max_freq, bank):
out.append(self.extract_wavelet(f, *args, **kwargs))
return self.__class__(pd.concat(out, axis=1),
sampling_freq=self.sampling_freq,
features=self.features,
sessions=self.sessions)
def extract_boft(self, min_freq=.06, max_freq=.66, bank=8, *args, **kwargs):
""" Extract Bag of Temporal features
Args:
min_freq: maximum frequency of temporal filters
max_freq: minimum frequency of temporal filters
bank: number of temporal filter banks, filters are on exponential scale
Returns:
wavs: list of Morlet wavelets with corresponding freq
hzs: list of hzs for each Morlet wavelet
"""
# First generate the wavelets
target_hz = self.sampling_freq
freqs = np.geomspace(min_freq, max_freq,bank)
wavs, hzs = [],[]
for i, f in enumerate(freqs):
wav = np.real(wavelet(f, sampling_freq=target_hz))
wavs.append(wav)
hzs.append(str(np.round(freqs[i],2)))
wavs = np.array(wavs)[::-1]
hzs = np.array(hzs)[::-1]
# # check asymptotes at lowest freq
# asym = wavs[-1,:10].sum()
# if asym > .001:
# print("Lowest frequency asymptotes at %2.8f " %(wavs[-1,:10].sum()))
# Convolve data with wavelets
Feats2Use = self.columns
        feats = pd.DataFrame()
from os import listdir, makedirs
from os.path import isdir, isfile, join
import pandas as pd
import re
PATH_DATASETS = r'/home/guy/Desktop/betting_bot/data/datasets/'
PATH_RATINGS = r'/home/guy/Desktop/betting_bot/data/ratings/'
PATH_TEAMS = r'/home/guy/Desktop/betting_bot/data/teams/'
PATH_STATISTICS = r'/home/guy/Desktop/betting_bot/data/statistics/'
PATH_MERGED = r'/home/guy/Desktop/betting_bot/data/'
PATH_SEASON = r'/home/guy/Desktop/betting_bot/data/seasons/'
COLUMNS_TO_KEEP = ['HomeTeam', 'AwayTeam', 'FTHG', 'FTAG', 'HST', 'AST', 'HC', 'AC',
'B365H', 'B365D', 'B365A', 'B365>2.5', 'B365<2.5', 'year']
def merge_dataset(path, outputPath):
mergedDataset = []
for file in listdir(path):
if isfile(join(path, file)):
dataset = pd.read_csv(path+file, header=0)
year = re.findall('[0-9]+', file)[0]
yearCol = [year for i in range(dataset.shape[0])]
dataset['year'] = yearCol
mergedDataset.append(dataset)
else: print(path+file+' is not a file.')
dataset = pd.concat(mergedDataset, ignore_index=True).sort_values(by=['year'], ascending=True)
dataset.to_csv(outputPath+'dataset.csv', index = False, header=True)
print('Datasets merged!')
def merge_ratings(path, outputPath):
mergedDataset = []
for file in listdir(path):
if isfile(join(path, file)):
teamsRatings = pd.read_csv(path+file, header=0)
year = re.findall('[0-9]+', file)[0]
yearCol = [year for i in range(teamsRatings.shape[0])]
teamsRatings['year'] = yearCol
mergedDataset.append(teamsRatings)
else: print(path+file+' is not a file.')
dataset = pd.concat(mergedDataset, ignore_index=True).sort_values(by=['year'], ascending=True)
dataset.to_csv(outputPath+'rating.csv', index = False, header=True)
print('Ratings merged!')
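# Hedged usage sketch (not in the original script): build the merged files using the
# module-level paths defined above.
# merge_dataset(PATH_DATASETS, PATH_MERGED)
# merge_ratings(PATH_RATINGS, PATH_MERGED)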
def merge_teams(path, outputPath):
mergedDataset = []
for dir in listdir(path):
for file in listdir(path+dir+'/'):
if isfile(join(path+dir+'/', file)):
                teamsGames = pd.read_csv(path+dir+'/'+file, header=0)
#Author: <NAME>
import numpy as np
import os
import h5py
import pandas as pd
from AxonImaging import signal_processing as sp
def get_processed_running_speed (vsig,vref,sample_freq, smooth_filter_sigma = 0.05, wheel_diameter = 16.51, positive_speed_threshold= 70, negative_speed_threshold= -5):
''' Returns the running speed given voltage changes from an encoder wheel. Speeds are smoothed and outlier
    values above or below arbitrarily defined thresholds are set as NaN.
    :param vsig: voltage signal which changes as a function of wheel movement (running)
    :param vref: reference voltage (typically 5V +/- a small encoder-dependent offset)
:param sample_freq: sampling frequency which Vsig and Vref are acquired at
:param smooth_filter_sigma: value used for guassian filtering
:param wheel_diameter: diameter of running wheel
:param positive_speed_threshold: maximum allowed positive speed (sets impossibly high running speeds equal to NaN)
:param negative_speed_threshold: maximum allowed negative speed (sets impossibly high backwards running speeds equal to NaN)
:param units: whether to return in terms of seconds (dependent on the passed-in sample freq) or samples
:return: smooth traced of running speed in cm/s per sample with outliers set to NaN
'''
from scipy.ndimage import gaussian_filter1d
vref_mean = np.median(vref[np.abs(vref)<20])
position_arc = vsig*(2.*np.pi)/vref_mean
position_arc_smooth = gaussian_filter1d(position_arc, int(smooth_filter_sigma*sample_freq))
speed_arc = np.append(np.diff(position_arc_smooth),0) * sample_freq
speed = speed_arc * wheel_diameter
speed_smooth = np.copy(speed)
speed_smooth[np.logical_or(speed>=positive_speed_threshold,speed<=negative_speed_threshold)]=np.nan
mask = np.isnan(speed_smooth)
mask2 = np.zeros(mask.shape, dtype=np.bool)
for n,p in enumerate(mask):
if p:
mask2[(n-(2*int(smooth_filter_sigma*sample_freq))):(n+int((2*smooth_filter_sigma*sample_freq+1)))] = True # extend mask 2 filter widths to extend interpolation
speed_smooth[mask2] = np.interp(np.flatnonzero(mask2), np.flatnonzero(~mask2), speed[~mask2])
return speed_smooth
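# Hedged usage sketch (illustrative values only): compute running speed from a simulated
# encoder trace sampled at 30 Hz; real data would come from the acquisition HDF5 file.
# sample_freq = 30.
# t = np.arange(0, 60, 1. / sample_freq)
# vref = np.full_like(t, 5.0)                      # ~5 V encoder reference
# vsig = 2.5 + 2.5 * np.sin(0.2 * np.pi * t)       # toy wheel-position voltage
# speed_cm_s = get_processed_running_speed(vsig, vref, sample_freq)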
def get_auditory_onset_times(microphone, sample_freq, threshold=1, stdev_samples=10,filter_width=20):
'''
    Finds the onset of an auditory event by first calculating a standard deviation across user-defined sample windows and then thresholding those standard deviations to find the onset times.
:param microphone: an analog microphone signal
:param samplefreq: the sampling frequency at which the auditory signal was acquired at
:param threshold = threshold value in units of standard deviation for finding onset times (values above this are marked as a valid onset)
:param stdev_samples=number of samples to calculate each standard deviation from.
:
:return: the onset sound_times in the units of seconds
'''
from scipy.signal import convolve, boxcar
#get the standard deviation across user-defined number of samples
step=int(stdev_samples)
stdev=[]
for ii in range(0,microphone.shape[0],step):
chunk=microphone[ii:ii+step]
stdev.append(np.std(chunk))
stdev_filtered=convolve(stdev, boxcar(M=filter_width))
#get the up samples #s through thresholding
stamps=sp.threshold_greater(np.array(stdev_filtered),threshold)
#multiply these samples # by user-defined number of stdev_samples to account for the downsampling that occured when the standard deviation was calculated
stamps=np.multiply(stamps,stdev_samples)
sound_times = np.divide(stamps,sample_freq)
print ('total number of sound presentations found = '+ str(len(sound_times)))
return sound_times
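# Hedged usage sketch (illustrative only): find sound onsets in a microphone trace acquired
# at 30 kHz alongside the imaging data.
# onset_times_s = get_auditory_onset_times(mic_trace, sample_freq=30000., threshold=1, stdev_samples=10)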
def microphone_to_dB (signal, sensitivity=250, pre_amp_gain=12):
''' Converts microphone voltage to decibels given the microphone sensitivity and pre amp gain.
:param signal: the analog microphone voltage (in V)
:param sensitivity: the sensitivity of the microphone in mv/Pa
:param pre_amp_gain: gain setting on the microphone pre amp (in dB)
'''
#reference is "threshold for hearing" 20 micropascals at 1 kHz, also called SPL
reference=20E-6
baseline_v=reference*sensitivity
db=np.log10((signal/baseline_v))*20
db_nogain=db-pre_amp_gain
return db_nogain
#convert signal to pascals
#divide by the preamp gain, multiply by 1000 to convert from volts to mV
#divide by the microphone sensitivity in mV/Pa
#dB equation from voltage is 20 * log ()
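# Hedged worked example (values are illustrative): with sensitivity=250 mV/Pa and 12 dB of pre-amp
# gain, baseline_v = 20e-6 * 250 = 5e-3, so a 0.05 V signal gives 20*log10(0.05/5e-3) - 12 = 8 dB.
# level_db = microphone_to_dB(np.array([0.05]), sensitivity=250, pre_amp_gain=12)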
def shift_aud_frames_by_mic_delay(mic_onsets, aud_frames, vsync):
'''
Time aligns auditory stimulation onset times that are given in terms relative to monitor frames (ie auditory stimulation was
presented on frame 50-100) into accurate times for which they are played/heard (detected on a microphone).
Requires that the number of sound times presented is the same quantity as the number detected by the microphone
:param mic_onsets: auditory onsets detected by a microphone (see get_auditory_onset_times function) (seconds)
:param aud_frames: frames when the auditory stimulation was initiated (typically from pikl file) (frame #'s)
:param vsync: times of each monitor frame presentation on the same time base as the microphone (seconds)
:return: array of frame numbers that correspond to onset of the auditory stimulation being played
'''
#compare total number of auditory stims with the expected number of presentations.
if len(mic_onsets)==len(aud_frames):
        #get the auditory stimulation time from the pickle file and convert it to a Vsync time
sound_frames=[]
for ii in range(len(aud_frames)):
#calculate the difference in time between the detection (microphone) and the presentation, in terms of seconds
dif=mic_onsets[ii]-vsync[aud_frames[ii]].astype(np.float32)
presented_time=vsync[aud_frames[ii]]+dif
#find the vysnc time that most closely matches the stimulation
index=np.argmin(np.abs(vsync-presented_time))
sound_frames.append(index)
#print ('time of presentation '+ str(vsync[aud_onsets[ii]]) + ' time of detection ' + str(sound_times[ii]))
sound_frames=np.array(sound_frames)
        print ('mean number of visual frames between presentation and detection is ' + str(np.mean(sound_frames-aud_frames)) + ' frames, or ' + str(np.mean(sound_frames-aud_frames)*np.median(np.diff(vsync))*1000.) + ' milliseconds')
return sound_frames
else:
print ('Number of known auditory presentations '+str(len(aud_frames))+ ' does not equal those detected by microphone '+ str(len(mic_onsets)))
return
def stimulus_thresh_df (paths,data_key, thresh_signal, thresh, min_l, min_t,
before, after, baseline_period,response_period,min_time_between=False,use_dff=True,
other_signals=[],dff_baseline_dur=1., exclusion_sig='null',exclusion_thresh=0.,exclusion_dur=0.,exclusion_logic='exclude',
override_ends=False, use_per_thresh=False, sample_freq=30. ):
"""
:param paths: path to HDF5 files
:param data_key: key for the HDF5 to access the data type of interest
---Thresholding parameters
:param thresh_signal: the signal to threshold on
:param thresh: the threshold
:param min_l: the minimum amount of time the signal must go below the threshold to end a period
:param min_t: minimum time for a threshold period
:param min_time_between: the minimum amount that must be between the start of two epochs. Useful for finding epochs that occur in isolation from nearby other epochs.
---trace extraction parameters
:param before: amount of time before the threshold time to extract
:param after: amount of time after the threshold time to extract
    :param baseline_period: where the "baseline" should be calculated from in the trace (used in DF/F calculations and others). Tuple of start time and end time for the baseline.
    :param response_period: tuple of (start, end) times for sampling the "response" within each extracted trace. Set the end to the string 'half' to sample 50% of the way through the epoch's duration.
"""
import os
import h5py
import pandas as pd
#create dataframe of all ROI responses for every running epoch
total_roi_counter=0
responses=[]
meaned_responses=[]
#check to make sure that the baseline is specified as a tuple and deal with instances where it isn't
if isinstance(baseline_period, (int, float)):
        print ('the baseline period was specified as a single number, not a start and end time. Assuming the start time is time 0 and the end time of the baseline is what was specified.')
baseline_period=(0,baseline_period)
for path in paths:
mouse_id=os.path.basename(path)[0:7]
print ('\n processing ' + str(mouse_id) + '\n')
data_f=h5py.File(path,'r')
data=data_f.get(data_key)
if use_per_thresh==True:
#first lowpass filter and calculate the median of the trace
median=np.nanmedian(sp.butter_lowpass_filter(data[thresh_signal], cutoff=1., analog=True))
threshold_per=median+(thresh*median)
thresh=threshold_per
if exclusion_sig=='null':
runs=sp.threshold_period(signal=data[thresh_signal], threshold=thresh,
min_low=min_l, sample_freq=30., min_time=min_t)
else:
print (exclusion_logic+' epochs where the '+ str(exclusion_sig) + ' is greater than '+ str(exclusion_thresh))
runs=sp.threshold_period(signal=data[thresh_signal], threshold=thresh,min_time_between=min_time_between,
min_low=min_l, sample_freq=30., min_time=min_t,exclusion_signal=data[exclusion_sig],
exclusion_dur=exclusion_dur,exclusion_logic=exclusion_logic,
exclusion_thresh=exclusion_thresh)
#check if no threshold crossing are found. If so, go to next file
if runs.size==0:
print (' No periods found for id '+ str(mouse_id))
continue
        #get the start times from the threshold_period output
starts=runs[:,0]
#take into account times where you want to get traces that start relative to the onset and you don't want to be concerned with their duration
if override_ends==False:
starts=runs[:,0]
ends=runs[:,1]
durs=runs[:,2]
elif isinstance(override_ends, (int, float)):
#if a number is passed to override the ends, determine the end of the periods by adding this number to the beginning
            print ('Overriding detected durations and using USER-DEFINED durations')
starts=runs[:,0]
ends=starts+override_ends
durs=ends-starts
elif override_ends=='starts':
print ('setting the start times equal to the detected END TIMES!')
starts=runs[:,1]
ends=runs[:,1]
durs=(ends-starts)+1.
error_counter=0
#calculate the stimulus evoked dff for each roi
#loop for each ROI
for roi in range(len(data['axon_traces'])):
mean_onset_list=[]
mean_end_list=[]
mean_speed_list=[]
mean_delta_speed_list=[]
            #create a list to store the first portion of each trace, where there is always an epoch period
traces_onset=[]
            #create a more inclusive list to store entire baseline, onset, and after periods for arbitrarily selecting regions for analysis
before_after_traces=[]
#determine unique ids for each roi and calculate area
roi_unique_id=mouse_id[-6::]+'_'+ str(0)+str(roi)
mask=data_f['masks']['axon_masks'][roi]
pixels=np.where(np.logical_and(mask!=0, ~np.isnan(mask)))
            roi_area=len(pixels[0]) #number of non-zero, non-NaN pixels in the mask
#loop for each epoch
for xx in range(len(starts)):
runnings=sp.get_event_trig_avg_samples(data[thresh_signal],event_onset_times=starts[xx],
event_end_times=ends[xx],
sample_freq=sample_freq,
time_before=before,
time_after=after, verbose=False)
if response_period[1]=='half':
if override_ends==False:
response_period_end=response_period[0]+durs[xx]/2.
elif isinstance(override_ends, (int, float)):
# print ('Half duration is passed for end, but overriding durations: calculating duration from half the time of after')
response_period_end=response_period[0]+(after/2.)
else:
print ('major error')
else:
response_period_end=response_period[1]
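                #convert the baseline and response windows from seconds into sample indices (sample_freq samples per second)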
baseline_indices=(int((baseline_period[0]*sample_freq)), int((baseline_period[1]*sample_freq)))
response_indices=(int((response_period[0]*sample_freq)), int((response_period_end*sample_freq)))
#get mean running_speed
baseline_speed=np.nanmean(runnings[baseline_indices[0]:baseline_indices[1]],axis=0)
mean_speed=np.nanmean(runnings[response_indices[0]:response_indices[1]],axis=0)
delta_speed=mean_speed-baseline_speed
                #produce an array that is composed of each ROI's DF/F epoch
axon_responses=sp.get_event_trig_avg_samples(data['axon_traces'][roi],event_onset_times=starts[xx],
event_end_times=ends[xx],
sample_freq=sample_freq,
time_before=before,
time_after=after, dff=use_dff,dff_baseline=(baseline_period[0], baseline_period[1]), verbose=False)
#check to make sure expected durations match returned trace durations
expected_dur=((ends[xx]-starts[xx])+before+after)
trace_dur_run=int(round(len(runnings)/30.))
trace_dur_axon=int(round(len(axon_responses)/30.))
dur_check=int( round( (ends[xx]-starts[xx]+before+after)*30.))
if len(axon_responses)!=dur_check:
if error_counter==0:
print ('Epoch # ' + str(xx) + ' Trace durations do not match expected duration: Likely due to not enough samples to grab. Skipping')
error_counter+=1
continue
if ((trace_dur_run!=int(round(expected_dur))) or (trace_dur_axon!= int(round(expected_dur))) ) :
if error_counter==0:
print ('Epoch # ' + str(xx) +'. Epoch length mismatch warning: Expected duration: ' + str(int(expected_dur)) + ' and trace duration '+ str(int(trace_dur_run)) + ' do not match ')
print ('skipping event/epoch')
error_counter+=1
continue
#get any other signals the user may want
others=[]
others_means=[]
for extras in other_signals:
#get the associated running trace
sig=sp.get_event_trig_avg_samples(data[extras],event_onset_times=starts[xx],
event_end_times=ends[xx],
sample_freq=sample_freq,
time_before=before,
time_after=after, verbose=False)
baseline_sig=np.nanmean(sig[baseline_indices[0]:baseline_indices[1]],axis=0)
mean_sig=np.nanmean(sig[response_indices[0]:response_indices[1]],axis=0)
#calculate in terms of percent change of baseline
delta_sig=(mean_sig-baseline_sig)/baseline_sig*100
onset_sig=sig[int(before*sample_freq)+1]
others.append(sig)
others_means.append([baseline_sig, onset_sig, mean_sig, delta_sig])
                #calculate a trace that MUST include the region between start and end. This is performed to allow for averaging of the epochs that have different durations.
                #it will always produce a trace that contains the MINIMAL length response
end_of_eval_period_for_sig= int(round(((before+min_t)*sample_freq)))
onset_trace=axon_responses[0:end_of_eval_period_for_sig+1]
traces_onset.append(onset_trace)
                #calculate a trace that includes the baseline period, the onset, and the amount of time after. Used in calculation of significance for an ROI
before_after_trace=axon_responses[0:int((before+after)*sample_freq)]
before_after_traces.append(before_after_trace)
#get the DF at the threshold crossing
onset_df=axon_responses[int(before*sample_freq)+1]
#end_index=int(ends[xx]*sample_freq)
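                #index of the last sample of the detected epoch: the 'before' padding offset by the epoch duration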
end_index=int((before*sample_freq)+(durs[xx]*sample_freq)-1)
end_df=axon_responses[end_index]
mean_df=np.nanmean(axon_responses[response_indices[0]:response_indices[1]],axis=0)
#append to list: roi number, mouse_id, epoch number,
#start_time, end_time, duration, axon response array (DF),
#mean df_f responses at user-define time, running array, mean_speed
sublist=[roi_unique_id,mouse_id, xx, starts[xx],ends[xx],durs[xx],
axon_responses, onset_df, mean_df,end_df,
runnings,mean_speed,delta_speed,roi_area,total_roi_counter]
for yy in range(len(others)):
sublist.append(others[yy])
#baseline_sig
sublist.append(others_means[yy][0])
#peak_sig
sublist.append(others_means[yy][1])
#mean_sig
sublist.append(others_means[yy][2])
#delta_sig
                    sublist.append(others_means[yy][3])
responses.append(sublist)
mean_onset_list.append(onset_df)
mean_end_list.append(end_df)
mean_speed_list.append(mean_speed)
mean_delta_speed_list.append(delta_speed)
#get the mean trace from the onset and beginning of thresholded region
mean_onset_trace=np.nanmean(traces_onset,axis=0)
#determine if the average response for the ROI is significant
            #12_6 change: allow significance to be calculated from arbitrary regions across the entire baseline and end period, not just the consistent response period
            #therefore use the full baseline-through-after trace (before_after_traces)
before_after_mean=np.nanmean(before_after_traces,axis=0)
pvalue=sp.significant_response(before_after_mean, base_period=(baseline_period[0],baseline_period[1]), stim_period=(response_period[0],response_period_end), sample_freq=30.)
if pvalue < 0.05:
significant=True
else:
significant=False
mean_onset_df_roi=np.nanmean(np.asarray(mean_onset_list),axis=0)
mean_end_df_roi=np.nanmean(np.asarray(mean_end_list), axis=0)
mean_speed_roi=np.nanmean(np.asarray(mean_speed_list),axis=0)
mean_delta_speed_roi=np.nanmean(np.asarray(mean_delta_speed_list),axis=0)
meaned_responses.append([roi_unique_id, mouse_id,pvalue,significant, mean_onset_df_roi,mean_end_df_roi,
mean_speed_roi,mean_delta_speed_roi,total_roi_counter,before_after_mean,mean_onset_trace])
total_roi_counter+=1
column_names=['roi id','mouse_ID','epoch number', 'start time', 'end time', 'duration',
'axon trace', 'onset df', 'peak df', 'end df',
'threshold signal trace', 'peak thresh value', 'delta of thresh trace', 'ROI area','roi number']
for names in other_signals:
column_names.append(names)
column_names.append(str(names) + ' baseline sig')
column_names.append(str(names) + ' onset sig')
column_names.append(str(names) + ' peak sig')
column_names.append(str(names) + ' delta % sig')
df=pd.DataFrame(responses,columns=column_names)
df_mean=pd.DataFrame(meaned_responses,columns=['roi id','mouse_ID','p value', 'significant mean resp', 'mean onset df', 'mean end df',
'mean thresh signal', 'mean delta thresh signal', 'roi number','mean trace', 'mean baseline and onset trace'])
#add whether the mean response is significant to the df mean
mean_sigs=[]
for index, row in df.iterrows():
roi_num=df['roi number'][index]
#get whether it is significant on average
mean_p=float(df_mean.loc[(df_mean['roi number']==roi_num)]['p value'])
if mean_p < 0.05:
significant=True
else:
significant=False
mean_sigs.append([mean_p, bool(significant)])
df_sig_responses=pd.DataFrame(mean_sigs, columns=['mean p value', 'mean sig'])
df=pd.concat([df,df_sig_responses], axis=1)
#clean up dataframes by re-indexing by the roi_ids
    df=df.sort_values(['roi id', 'epoch number'])
    df=df.reset_index(drop=True)
    df_mean=df_mean.sort_values(['roi id'])
    df_mean=df_mean.reset_index(drop=True)
return df,df_mean
def create_df_from_timestamps(data,mouse_id,masks,starts,ends,baseline_period,response_period, before,after, min_t=0.5,
override_ends=False,thresh_signal=False,other_signals=[],sample_freq=30.,use_dff=True,total_roi_counter=0. ):
'''
    Build epoch-by-epoch and per-ROI mean-response dataframes from user-supplied start and end timestamps
    (rather than by thresholding a signal, as stimulus_thresh_df does above).
'''
import pandas as pd
responses=[]
meaned_responses=[]
error_counter=0
#Get event triggered responses for each ROI
for roi in range(len(data['axon_traces'])):
mean_onset_list=[]
mean_end_list=[]
mean_speed_list=[]
mean_delta_speed_list=[]
        #create a list to store the first portion of each trace, where there is always an epoch period
traces_onset=[]
        #create a more inclusive list to store entire baseline, onset, and after periods for arbitrarily selecting regions for analysis
before_after_traces=[]
#determine unique ids for each roi and calculate area
roi_unique_id=mouse_id[-6::]+'_'+ str(0)+str(roi)
mask=masks[roi]
pixels=np.where(np.logical_and(mask!=0, ~np.isnan(mask)))
        roi_area=len(pixels[0]) #number of non-zero, non-NaN pixels in the mask
#loop for each epoch/stimulus
for xx in range(len(starts)):
if response_period[1]=='half':
#check to see if the ends should be ignored
if override_ends==False:
response_period_end=response_period[0]+(ends[xx]-starts[xx])/2.
elif isinstance(override_ends, (int, float)):
response_period_end=response_period[0]+(after/2.)
else:
print ('major error')
else:
response_period_end=response_period[1]
baseline_indices=(int((baseline_period[0]*sample_freq)), int((baseline_period[1]*sample_freq)))
response_indices=(int((response_period[0]*sample_freq)), int((response_period_end*sample_freq)))
expected_dur=((ends[xx]-starts[xx])+before+after)
dur_check=int( round( (ends[xx]-starts[xx]+before+after)*30.))
#check to see if a thresholded signal is passed in. If so extract its parameters
if thresh_signal:
if override_ends=='starts':
runnings=sp.get_event_trig_avg_samples(data[thresh_signal],event_onset_times=ends[xx], event_end_times=ends[xx]+1,
sample_freq=sample_freq,time_before=before, time_after=after, verbose=False)
else:
#get the signal trace that was thresholded
runnings=sp.get_event_trig_avg_samples(data[thresh_signal],event_onset_times=starts[xx],
event_end_times=ends[xx],
sample_freq=sample_freq,
time_before=before,
time_after=after, verbose=False)
#get mean changes in the thresholded signal
baseline_speed=np.nanmean(runnings[baseline_indices[0]:baseline_indices[1]],axis=0)
mean_speed=np.nanmean(runnings[response_indices[0]:response_indices[1]],axis=0)
delta_speed=mean_speed-baseline_speed
trace_dur_run=int(round(len(runnings)/30.))
if trace_dur_run!=int(round(expected_dur)):
print ('Epoch # ' + str(xx) +'. Epoch length mismatch warning: Expected duration: ' + str(int(expected_dur)) + ' and the THRESHOLDED Signal trace duration '+ str(int(trace_dur_run)) + ' do not match ')
print ('skipping event/epoch')
continue
            #produce an array that is composed of each ROI's DF/F epoch
axon_responses=sp.get_event_trig_avg_samples(data['axon_traces'][roi],event_onset_times=starts[xx],
event_end_times=ends[xx],
sample_freq=sample_freq,
time_before=before,
time_after=after, dff=use_dff,dff_baseline=(baseline_period[0], baseline_period[1]), verbose=False)
#check to make sure expected durations match returned trace durations
trace_dur_axon=int(round(len(axon_responses)/30.))
if len(axon_responses)!=dur_check:
if error_counter==0:
print ('Epoch # ' + str(xx) + ' Trace durations do not match expected duration: Likely due to not enough samples to grab. Skipping')
error_counter+=1
continue
if trace_dur_axon!= int(round(expected_dur)):
if error_counter==0:
                    print ('Epoch # ' + str(xx) +'. Epoch length mismatch warning: Expected duration: ' + str(int(expected_dur)) + ' and trace duration '+ str(int(trace_dur_axon)) + ' do not match ')
print ('skipping event/epoch')
error_counter+=1
continue
#get any other signals the user may want
others=[]
others_means=[]
for extras in other_signals:
#get the associated running trace
sig=sp.get_event_trig_avg_samples(data[extras],event_onset_times=starts[xx],
event_end_times=ends[xx],
sample_freq=sample_freq,
time_before=before,
time_after=after, verbose=False)
baseline_sig=np.nanmean(sig[baseline_indices[0]:baseline_indices[1]],axis=0)
mean_sig=np.nanmean(sig[response_indices[0]:response_indices[1]],axis=0)
#calculate in terms of percent change of baseline
delta_sig=(mean_sig-baseline_sig)/baseline_sig*100
onset_sig=sig[int(before*sample_freq)+1]
others.append(sig)
others_means.append([baseline_sig, onset_sig, mean_sig, delta_sig])
            #calculate a trace that MUST include the region between start and end. This is performed to allow for averaging of the epochs that have different durations.
            #it will always produce a trace that contains the MINIMAL length response
end_of_eval_period_for_sig= int(round(((before+min_t)*sample_freq)))
onset_trace=axon_responses[0:end_of_eval_period_for_sig+1]
traces_onset.append(onset_trace)
            #calculate a trace that includes the baseline period, the onset, and the amount of time after. Used in calculation of significance for an ROI
before_after_trace=axon_responses[0:int((before+after)*sample_freq)]
before_after_traces.append(before_after_trace)
#get the DF at the threshold crossing
onset_df=axon_responses[int(before*sample_freq)+1]
#end_index=int(ends[xx]*sample_freq)
end_index=int((before*sample_freq)+((ends[xx]-starts[xx])*sample_freq)-1)
end_df=axon_responses[end_index]
mean_df=np.nanmean(axon_responses[response_indices[0]:response_indices[1]],axis=0)
#create a list that is the basis of the dataframe
sublist=[roi_unique_id,mouse_id, xx, starts[xx],ends[xx],(ends[xx]-starts[xx]),
axon_responses, onset_df, mean_df,end_df,
roi_area,total_roi_counter]
            #create a list that will be columns corresponding to the variables above
column_names=['roi id','mouse_ID','epoch number', 'start time', 'end time', 'duration',
'axon trace', 'onset df', 'peak df', 'end df',
'ROI area','roi number']
if thresh_signal:
sublist.append(runnings)
sublist.append(mean_speed)
sublist.append(delta_speed)
column_names.append('threshold signal trace')
column_names.append('mean_speed_roi')
column_names.append('mean_delta_speed_roi')
for yy in range(len(others)):
sublist.append(others[yy])
#baseline_sig
sublist.append(others_means[yy][0])
#peak_sig
sublist.append(others_means[yy][1])
#mean_sig
sublist.append(others_means[yy][2])
#delta_sig
                    sublist.append(others_means[yy][3])
responses.append(sublist)
mean_onset_list.append(onset_df)
mean_end_list.append(end_df)
if thresh_signal:
mean_speed_list.append(mean_speed)
mean_delta_speed_list.append(delta_speed)
#get the mean trace from the onset and beginning of thresholded region
mean_onset_trace=np.nanmean(traces_onset,axis=0)
#determine if the average response for the ROI is significant
        #12_6 change: allow significance to be calculated from arbitrary regions across the entire baseline and end period, not just the consistent response period
        #therefore use the full baseline-through-after trace (before_after_traces)
before_after_mean=np.nanmean(before_after_traces,axis=0)
pvalue=sp.significant_response(before_after_mean, base_period=(baseline_period[0],baseline_period[1]), stim_period=(response_period[0],response_period_end), sample_freq=30.)
if pvalue < 0.05:
significant=True
else:
significant=False
mean_onset_df_roi=np.nanmean(np.asarray(mean_onset_list),axis=0)
mean_end_df_roi=np.nanmean(np.asarray(mean_end_list), axis=0)
if thresh_signal:
mean_speed_roi=np.nanmean(np.asarray(mean_speed_list),axis=0)
mean_delta_speed_roi=np.nanmean(np.asarray(mean_delta_speed_list),axis=0)
meaned_responses.append([roi_unique_id, mouse_id,pvalue,significant, mean_onset_df_roi,mean_end_df_roi,
total_roi_counter,before_after_mean,mean_onset_trace,mean_speed_roi,mean_delta_speed_roi])
meaned_columns=['roi id','mouse_ID','p value', 'significant mean resp', 'mean onset df', 'mean end df','roi number',
'mean trace', 'mean baseline and onset trace','mean thresh signal', 'mean delta thresh signal']
else:
meaned_responses.append([roi_unique_id, mouse_id,pvalue,significant, mean_onset_df_roi,mean_end_df_roi,
total_roi_counter,before_after_mean,mean_onset_trace])
meaned_columns=['roi id','mouse_ID','p value', 'significant mean resp', 'mean onset df', 'mean end df','roi number',
'mean trace', 'mean baseline and onset trace']
total_roi_counter+=1
for names in other_signals:
column_names.append(names)
column_names.append(str(names) + ' baseline sig')
column_names.append(str(names) + ' onset sig')
column_names.append(str(names) + ' peak sig')
column_names.append(str(names) + ' delta % sig')
df=pd.DataFrame(responses,columns=column_names)
df_mean=pd.DataFrame(meaned_responses,columns=meaned_columns)
#add whether the mean response is significant to the df mean
mean_sigs=[]
for index, row in df.iterrows():
roi_num=df['roi id'][index]
#get whether it is significant on average
mean_p=float(df_mean.loc[(df_mean['roi id']==roi_num)]['p value'])
if mean_p < 0.05:
significant=True
else:
significant=False
mean_sigs.append([mean_p, bool(significant)])
df_sig_responses=pd.DataFrame(mean_sigs, columns=['mean p value', 'mean sig'])
    df=pd.concat([df,df_sig_responses], axis=1)
    return df,df_mean
import os, sys
import numpy as np
import pandas as pd
import pickle
from tqdm import tqdm
import argparse
from sklearn.utils import shuffle
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
#from nltk.stem import PorterStemmer
from pyspark.sql.types import *
from pyspark import SparkFiles
from pyspark.context import SparkContext
from pyspark.sql.session import SparkSession
##################################################################################################
#home = str(Path.home())
home = str('.')
##################################################################################################
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dataset", help="Name of the dataset.", default='darknet')
parser.add_argument("-v", "--vocab_size", type=int, default=10000, help="The number of vocabs.")
parser.add_argument("--num_train", type=int, default=0, help="The number of training samples.")
parser.add_argument("--num_test", type=int, default=0, help="The number of testing and cv samples.")
parser.add_argument("--max_df", default=0.8, type=float)
parser.add_argument("--min_df", default=3, type=int)
parser.add_argument('--remove_short_docs', dest='remove_short_docs', action='store_true', help='Remove any document that has a length less than 5 words.')
parser.add_argument('--remove_long_docs', dest='remove_long_docs', action='store_true', help='Remove any document that has a length more than 500 words.')
parser.set_defaults(remove_short_docs=True)
parser.set_defaults(remove_long_docs=True)
args = parser.parse_args()
if not args.dataset:
parser.error("Need to provide the dataset.")
##################################################################################################
remove_short_document = args.remove_short_docs
remove_long_document = args.remove_long_docs
if args.dataset == 'ng20':
train = fetch_20newsgroups(subset='train', remove=('headers', 'footers', 'quotes'))
test = fetch_20newsgroups(subset='test', remove=('headers', 'footers', 'quotes'))
train_docs = train.data
train_tags = train.target
test_docs = test.data
test_tags = test.target
elif args.dataset == 'dbpedia':
root_dir = os.path.join(home, 'datasets/dbpedia')
train_fn = os.path.join(root_dir, 'train.csv')
df = pd.read_csv(train_fn, header=None)
df.columns = ['label', 'title', 'body']
train_docs = list(df.body)
train_tags = list(df.label - 1)
del df
test_fn = os.path.join(root_dir, 'test.csv')
df = pd.read_csv(test_fn, header=None)
df.columns = ['label', 'title', 'body']
test_docs = list(df.body)
test_tags = list(df.label - 1)
del df
elif args.dataset == 'agnews':
root_dir = os.path.join(home, 'datasets/agnews')
train_fn = os.path.join(root_dir, 'train.csv')
df = pd.read_csv(train_fn, header=None)
df.columns = ['label', 'title', 'body']
train_docs = list(df.body)
train_tags = list(df.label - 1)
del df
test_fn = os.path.join(root_dir, 'test.csv')
df = pd.read_csv(test_fn, header=None)
df.columns = ['label', 'title', 'body']
test_docs = list(df.body)
test_tags = list(df.label - 1)
del df
elif args.dataset == 'yahooanswer':
root_dir = os.path.join(home, 'datasets/yahooanswer')
train_fn = os.path.join(root_dir, 'train.csv')
df = pd.read_csv(train_fn, header=None)
df.columns = ['label', 'title', 'body', 'answer']
train_docs = list(df.title)
train_tags = list(df.label - 1)
test_fn = os.path.join(root_dir, 'test.csv')
    df = pd.read_csv(test_fn, header=None)
"""
Created: 2020-07-02
Author: <NAME>
Licence: MIT
Tests for the gap_statistics module.
These tests were extracted from code used in the paper
"Non-invasive profiling of advanced prostate cancer via multi-parametric liquid biopsy and radiomic analysis"
Authors: <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>
Molecular Pathology, Diagnostics, and Therapeutics 2022
"""
import numpy as np
import pandas as pd
from sklearn import cluster
import pytest
from typing import List, Tuple, Union
from gap_statistic import gap_statistic
def make_gaussian_test_data(cluster_info: List[Tuple[np.ndarray, np.ndarray, int]]):
data_points = []
for means, stds, num in cluster_info:
        data_points.append(np.random.normal(means, stds, size=(num, np.size(means))))
return np.vstack(data_points)
def test_calculate_cluster_D():
    # D is just the sum of all pairwise distances (in both directions i,j and j,i)
points = np.array([[0, 0, 0], [-2, 0, 4], [0, 0, 5]])
d = gap_statistic.calculate_cluster_D(pd.DataFrame(points))
assert np.isclose(d, 2*(np.sqrt(4+16) + 5 + np.sqrt(4+1)))
points = np.array([[0, 0, 0], [0, 0, 1], [0, 0, 0]])
d = gap_statistic.calculate_cluster_D(pd.DataFrame(points))
assert np.isclose(d, 2*(1 + 0 + 1))
def test_calculate_W():
# W is the sum of all the cluster Ds divided by two * number of points
points = np.array([[0, 0, 0], [-2, 0, 4], [0, 0, 5]])
df = pd.DataFrame(np.vstack((points, points + np.atleast_2d([0, 0, 100]))))
d = gap_statistic.calculate_cluster_D(pd.DataFrame(points))
with pytest.raises(KeyError):
gap_statistic.calculate_W(df)
df['cluster_id'] = [0, 0, 0, 1, 1, 1]
w = gap_statistic.calculate_W(df)
assert np.isclose(w, 2 * (d / (2 * 3)))
def test_random_unif_sampling():
points = np.array([[0, 0, 0], [-2, 0, 4], [0, 0, 5]])
rand_points = gap_statistic.random_unif_sampling(points, np.zeros((3,)))
assert points.shape == rand_points.shape
def make_principal_comp_data(size):
    means = np.arange(size[1])[::-1]
data = np.random.normal(loc=means, scale=0.1, size=size)
data[:, :2] = np.random.normal(loc=means[:2], scale=[10.0, 25.0], size=(size[0], 2))
return data
@pytest.mark.skip(reason='Unsure how to test functionality')
def test_hyperbox_sampling():
pytest.skip
# Trying to think of a good way to test this
points = np.array([[0, 0, 0], [-2, 0, 4], [0, 0, 5]])
@pytest.mark.skip(reason='Unsure how to test functionality')
def test_abc_sampling():
# Trying to think of a good way to test this
points = np.array([[0, 0, 0], [-2, 0, 4], [0, 0, 5]])
def make_clustered_data(means: List[np.ndarray], stdevs: List[np.ndarray], num_points: List[int]):
points = []
labels = []
for i, (ms, stds, num) in enumerate(zip(means, stdevs, num_points)):
points.append(np.random.normal(loc=ms, scale=stds, size=(num, ms.size)))
labels.append(i * np.ones(num))
return np.vstack(points), np.hstack(labels)
def test_calculate_reference_W():
np.random.seed(1) # Set seed to make the test predictable.
points, cluster_ids = make_clustered_data([np.array([0, 0, 0, 0, 0, 0, 0]), np.array([10, 10, 10, 0, 0, 0, 0])],
[np.array([1, 1, 1, 0.1, 0.1, 0.1, 0.1]),
np.array([1, 1, 1, 0.1, 0.1, 0.1, 0.1])],
[50, 80])
df = pd.DataFrame(points)
df_copy = df.copy()
df_copy['cluster_id'] = cluster_ids
w = gap_statistic.calculate_W(df_copy)
clusterer = cluster.KMeans(n_clusters=2)
w_ref = gap_statistic.calculate_reference_W(df, clusterer, cluster_ids)
assert w_ref >= w
def test_gap_n():
np.random.seed(1) # Set seed to make the test predictable.
points, cluster_ids = make_clustered_data([np.array([0, 0, 0, 0, 0, 0, 0]), np.array([10, 10, 10, 0, 0, 0, 0])],
[np.array([1, 1, 1, 0.1, 0.1, 0.1, 0.1]), np.array([1, 1, 1, 0.1, 0.1, 0.1, 0.1])],
[50, 80])
df = pd.DataFrame(points)
clusterer1 = cluster.KMeans(n_clusters=2)
clusterer2 = cluster.KMeans(n_clusters=5)
gap1, std1, labels1 = gap_statistic.gap_n(df, clusterer1, 100)
gap2, std2, labels2 = gap_statistic.gap_n(df, clusterer2, 100)
assert gap1 > gap2
def test_calculate_optimal_clusters_by_gap():
# An addtional/improved test to make would be to reproduce the test simulations for the original paper
np.random.seed(1) # Set seed to make the test predictable.
points, expected_labels = make_clustered_data([np.array([0, 0, 0, 0, 0, 0, 0]), np.array([10, 10, 10, 0, 0, 0, 0]),
np.array([0, 0, 0, 0, 50, 25, 10])],
[np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]),
np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]),
np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])],
[50, 80, 50])
    df = pd.DataFrame(points)
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import warnings
from datetime import datetime
import numpy as np
from scipy.linalg import solve
from scipy import stats
import pandas as pd
from pandas import to_datetime
# ipython autocomplete will pick these up, which are probably what users only need.
__all__ = [
'qth_survival_times',
'qth_survival_time',
'median_survival_times',
'survival_table_from_events',
'datetimes_to_durations',
'concordance_index',
'k_fold_cross_validation',
'to_long_format',
'add_covariate_to_timeline',
'covariates_from_event_matrix'
]
class StatError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
class ConvergenceWarning(RuntimeWarning):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
def qth_survival_times(q, survival_functions, cdf=False):
"""
Parameters:
q: a float between 0 and 1.
survival_functions: a (n,d) dataframe or numpy array.
If dataframe, will return index values (actual times)
If numpy array, will return indices.
Returns:
v: if d==1, returns a float, np.inf if infinity.
      if d > 1, a DataFrame containing the first times the value was crossed.
"""
    q = pd.Series(q)
import re
import numpy as np
import pandas as pd
import read_write as rw
def validate_trace(trace, obs_srs_file_path, active_prd_path, prd_prefix="US", srs_prefix="TC"):
# Load lists of obsolete srs and active prd
obs_srs = pd.read_csv(obs_srs_file_path)
obs_srs_list = obs_srs["Formatted ID"].unique()
    active_prd = pd.read_excel(active_prd_path)
import json
import pandas
import sys
from bmeg.emitter import JSONEmitter
from bmeg import (Aliquot, DrugResponse, Project, Compound,
Compound_Projects_Project,
DrugResponse_Aliquot_Aliquot,
DrugResponse_Compounds_Compound)
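# Emits a DrugResponse vertex plus Aliquot and Compound edges for each (drug, cell line) pair of one PRISM screen:
# ex_df holds per-dose treatment info, fc_df the fold-change matrix (cell lines x treatment replicates),
# and dr_df optionally carries per cell-line/drug dose-response fit parameters.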
def process_screen(screen_name, ex_df, fc_df, dr_df, compound_map, emitter):
drugs = list(set(ex_df.broad_id))
for drug_id in drugs:
ex = ex_df[ex_df.broad_id == drug_id]
doses = ex.dose.tolist()
drug_name = ex.name[0]
compound = compound_map.get(drug_name)
if not compound:
print("WARNING: compound not in lookup: {}".format(drug_name), file=sys.stderr)
continue
for cellline_id, row in fc_df.iterrows():
if "FAILED" in cellline_id:
continue
# TODO: convert nan to None
responses = row[ex.index].tolist()
if isinstance(dr_df, pandas.DataFrame):
try:
dr_vals = dr_df[(dr_df.broad_id == drug_id) & (dr_df.depmap_id == cellline_id)].iloc[0, ].dropna().to_dict()
except Exception:
dr_vals = {}
else:
dr_vals = {}
# create drug response vertex
dr = DrugResponse(
id=DrugResponse.make_gid("PRISM", screen_name, cellline_id, drug_name),
einf=dr_vals.get("upper_limit"),
ec50=dr_vals.get("ec50"),
ic50=dr_vals.get("ic50"),
aac=dr_vals.get("aac"),
hs=dr_vals.get("slope"),
dose_um=doses,
response=responses,
source_cell_name=cellline_id,
source_drug_name=drug_name,
project_id=Project.make_gid("PRISM")
)
emitter.emit_vertex(dr)
emitter.emit_edge(
DrugResponse_Aliquot_Aliquot(
from_gid=dr.gid(),
to_gid=Aliquot.make_gid("PRISM:%s" % (cellline_id))
),
emit_backref=True
)
emitter.emit_edge(
DrugResponse_Compounds_Compound(
from_gid=dr.gid(),
to_gid=compound.gid()
),
emit_backref=True
)
def transform_compounds(
primary_file="source/prism/primary-screen-replicate-collapsed-treatment-info.csv",
secondary_file="source/prism/secondary-screen-replicate-collapsed-treatment-info.csv",
emitter_prefix=None,
emitter_directory='prism'):
emitter = JSONEmitter(prefix=emitter_prefix, directory=emitter_directory)
df1 = pandas.read_csv(primary_file)
df2 = pandas.read_csv(secondary_file)
for drug in set(df1.name.tolist() + df2.name.tolist()):
if not pandas.isna(drug):
compound = Compound(id=Compound.make_gid(drug),
id_source="prism",
submitter_id=drug,
project_id=Project.make_gid('Reference'))
emitter.emit_vertex(compound)
emitter.emit_edge(
Compound_Projects_Project(
from_gid=compound.gid(),
to_gid=Project.make_gid("PRISM")
),
emit_backref=True
)
emitter.close()
def transform_primary(drug_lookup_path='source/prism/compound_lookup.tsv',
primary_foldchange_path='source/prism/primary-screen-replicate-collapsed-logfold-change.csv',
primary_treatment_path='source/prism/primary-screen-replicate-collapsed-treatment-info.csv',
emitter_prefix="primary_screen",
emitter_directory='prism'):
emitter = JSONEmitter(prefix=emitter_prefix, directory=emitter_directory)
compound_map = {}
with open(drug_lookup_path) as handle:
for line in handle:
row = line.split("\t")
if row[1] == "":
compound = Compound(**json.loads(row[1]))
compound['id'] = Compound.make_gid(compound['id'])
else:
compound = Compound(id=Compound.make_gid(row[0]),
id_source="prism",
submitter_id=row[0],
project_id=Project.make_gid('Reference'))
compound_map[row[0]] = compound
    primary_fc = pandas.read_csv(primary_foldchange_path, index_col=0)
# READ/WRITE REPORTS AS JSON
import json
import pandas as pd
from pandas.io.json import json_normalize
from swmmio.utils import spatial
from swmmio.graphics import swmm_graphics as sg
def decode_report(rpt_path):
#read report from json into a dict
with open(rpt_path, 'r') as f:
read_rpt = json.loads(f.read())
#parse the geojson
def df_clean(uncleandf):
cleaned_cols = [x.split('.')[-1] for x in uncleandf.columns]
uncleandf.columns = cleaned_cols
clean_df = uncleandf.rename(columns={'coordinates':'coords'}).drop(['type'], axis=1)
clean_df = clean_df.set_index(['Name'])
return clean_df
#parse conduit data into a dataframe
conds_df = json_normalize(read_rpt['conduits']['features'])
conds_df = df_clean(conds_df)
#parse node data into a dataframe
nodes_df = json_normalize(read_rpt['nodes']['features'])
nodes_df = df_clean(nodes_df)
#parse parcel data into a dataframe
pars_df = json_normalize(read_rpt['parcels']['features'])
pars_df = df_clean(pars_df)
    rpt_dict = {'conduits':conds_df, 'nodes':nodes_df, 'parcels':pars_df}
    return rpt_dict
def encode_report(rpt, rpt_path):
rpt_dict = {}
#write parcel json files
parcels = spatial.read_shapefile(sg.config.parcels_shapefile)
parcels = parcels[['PARCELID', 'coords']] #omit 'ADDRESS', 'OWNER1'
flooded = rpt.alt_report.parcel_flooding #proposed flooding condition
flooded = pd.merge(flooded, parcels, right_on='PARCELID', left_index=True)
rpt_dict['parcels'] = spatial.write_geojson(flooded, geomtype='polygon')
#non null delta category parcels
delta_parcels = rpt.flood_comparison.loc[pd.notnull(rpt.flood_comparison.Category)]
    delta_parcels = pd.merge(delta_parcels, parcels, right_on='PARCELID', left_index=True)
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Module contains class PandasDataframe.
PandasDataframe is a parent abstract class for any dataframe class
for pandas storage format.
"""
from collections import OrderedDict
import numpy as np
import pandas
import datetime
from pandas.core.indexes.api import ensure_index, Index, RangeIndex
from pandas.core.dtypes.common import is_numeric_dtype, is_list_like
from pandas._libs.lib import no_default
from typing import List, Hashable, Optional, Callable, Union, Dict
from modin.core.storage_formats.pandas.query_compiler import PandasQueryCompiler
from modin.error_message import ErrorMessage
from modin.core.storage_formats.pandas.parsers import (
find_common_type_cat as find_common_type,
)
from modin.core.dataframe.base.dataframe.dataframe import ModinDataframe
from modin.core.dataframe.base.dataframe.utils import (
Axis,
JoinType,
)
from modin.pandas.indexing import is_range_like
from modin.pandas.utils import is_full_grab_slice, check_both_not_none
from modin.logging import LoggerMetaClass
def lazy_metadata_decorator(apply_axis=None, axis_arg=-1, transpose=False):
"""
Lazily propagate metadata for the ``PandasDataframe``.
This decorator first adds the minimum required reindexing operations
to each partition's queue of functions to be lazily applied for
each PandasDataframe in the arguments by applying the function
run_f_on_minimally_updated_metadata. The decorator also sets the
flags for deferred metadata synchronization on the function result
if necessary.
Parameters
----------
apply_axis : str, default: None
The axes on which to apply the reindexing operations to the `self._partitions` lazily.
Case None: No lazy metadata propagation.
Case "both": Add reindexing operations on both axes to partition queue.
Case "opposite": Add reindexing operations complementary to given axis.
Case "rows": Add reindexing operations on row axis to partition queue.
axis_arg : int, default: -1
The index or column axis.
transpose : bool, default: False
Boolean for if a transpose operation is being used.
Returns
-------
Wrapped Function.
"""
def decorator(f):
from functools import wraps
@wraps(f)
def run_f_on_minimally_updated_metadata(self, *args, **kwargs):
for obj in (
[self]
+ [o for o in args if isinstance(o, PandasDataframe)]
+ [v for v in kwargs.values() if isinstance(v, PandasDataframe)]
+ [
d
for o in args
if isinstance(o, list)
for d in o
if isinstance(d, PandasDataframe)
]
+ [
d
for _, o in kwargs.items()
if isinstance(o, list)
for d in o
if isinstance(d, PandasDataframe)
]
):
if apply_axis == "both":
if obj._deferred_index and obj._deferred_column:
obj._propagate_index_objs(axis=None)
elif obj._deferred_index:
obj._propagate_index_objs(axis=0)
elif obj._deferred_column:
obj._propagate_index_objs(axis=1)
elif apply_axis == "opposite":
if "axis" not in kwargs:
axis = args[axis_arg]
else:
axis = kwargs["axis"]
if axis == 0 and obj._deferred_column:
obj._propagate_index_objs(axis=1)
elif axis == 1 and obj._deferred_index:
obj._propagate_index_objs(axis=0)
elif apply_axis == "rows":
obj._propagate_index_objs(axis=0)
result = f(self, *args, **kwargs)
if apply_axis is None and not transpose:
result._deferred_index = self._deferred_index
result._deferred_column = self._deferred_column
elif apply_axis is None and transpose:
result._deferred_index = self._deferred_column
result._deferred_column = self._deferred_index
elif apply_axis == "opposite":
if axis == 0:
result._deferred_index = self._deferred_index
else:
result._deferred_column = self._deferred_column
elif apply_axis == "rows":
result._deferred_column = self._deferred_column
return result
return run_f_on_minimally_updated_metadata
return decorator
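# Usage sketch (assumed, not taken from this file): the decorator is applied to PandasDataframe methods, e.g.
#   @lazy_metadata_decorator(apply_axis="both")
#   def some_method(self, ...): ...
# so deferred index/column metadata is propagated to the partitions before the wrapped method runs.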
class PandasDataframe(object, metaclass=LoggerMetaClass):
"""
An abstract class that represents the parent class for any pandas storage format dataframe class.
This class provides interfaces to run operations on dataframe partitions.
Parameters
----------
partitions : np.ndarray
A 2D NumPy array of partitions.
index : sequence
The index for the dataframe. Converted to a ``pandas.Index``.
columns : sequence
The columns object for the dataframe. Converted to a ``pandas.Index``.
row_lengths : list, optional
The length of each partition in the rows. The "height" of
each of the block partitions. Is computed if not provided.
column_widths : list, optional
The width of each partition in the columns. The "width" of
each of the block partitions. Is computed if not provided.
dtypes : pandas.Series, optional
The data types for the dataframe columns.
"""
_partition_mgr_cls = None
_query_compiler_cls = PandasQueryCompiler
# These properties flag whether or not we are deferring the metadata synchronization
_deferred_index = False
_deferred_column = False
@property
def __constructor__(self):
"""
Create a new instance of this object.
Returns
-------
PandasDataframe
"""
return type(self)
def __init__(
self,
partitions,
index,
columns,
row_lengths=None,
column_widths=None,
dtypes=None,
):
self._partitions = partitions
self._index_cache = ensure_index(index)
self._columns_cache = ensure_index(columns)
if row_lengths is not None and len(self.index) > 0:
# An empty frame can have 0 rows but a nonempty index. If the frame
# does have rows, the number of rows must equal the size of the
# index.
num_rows = sum(row_lengths)
if num_rows > 0:
ErrorMessage.catch_bugs_and_request_email(
num_rows != len(self._index_cache),
"Row lengths: {} != {}".format(num_rows, len(self._index_cache)),
)
ErrorMessage.catch_bugs_and_request_email(
any(val < 0 for val in row_lengths),
"Row lengths cannot be negative: {}".format(row_lengths),
)
self._row_lengths_cache = row_lengths
if column_widths is not None and len(self.columns) > 0:
# An empty frame can have 0 column but a nonempty column index. If
# the frame does have columns, the number of columns must equal the
# size of the columns.
num_columns = sum(column_widths)
if num_columns > 0:
ErrorMessage.catch_bugs_and_request_email(
num_columns != len(self._columns_cache),
"Column widths: {} != {}".format(
num_columns, len(self._columns_cache)
),
)
ErrorMessage.catch_bugs_and_request_email(
any(val < 0 for val in column_widths),
"Column widths cannot be negative: {}".format(column_widths),
)
self._column_widths_cache = column_widths
self._dtypes = dtypes
self._filter_empties()
@property
def _row_lengths(self):
"""
Compute the row partitions lengths if they are not cached.
Returns
-------
list
A list of row partitions lengths.
"""
if self._row_lengths_cache is None:
if len(self._partitions.T) > 0:
self._row_lengths_cache = [
obj.length() for obj in self._partitions.T[0]
]
else:
self._row_lengths_cache = []
return self._row_lengths_cache
@property
def _column_widths(self):
"""
Compute the column partitions widths if they are not cached.
Returns
-------
list
A list of column partitions widths.
"""
if self._column_widths_cache is None:
if len(self._partitions) > 0:
self._column_widths_cache = [obj.width() for obj in self._partitions[0]]
else:
self._column_widths_cache = []
return self._column_widths_cache
@property
def _axes_lengths(self):
"""
Get a pair of row partitions lengths and column partitions widths.
Returns
-------
list
The pair of row partitions lengths and column partitions widths.
"""
return [self._row_lengths, self._column_widths]
@property
def dtypes(self):
"""
Compute the data types if they are not cached.
Returns
-------
pandas.Series
A pandas Series containing the data types for this dataframe.
"""
if self._dtypes is None:
self._dtypes = self._compute_dtypes()
return self._dtypes
def _compute_dtypes(self):
"""
Compute the data types via TreeReduce pattern.
Returns
-------
pandas.Series
A pandas Series containing the data types for this dataframe.
"""
def dtype_builder(df):
return df.apply(lambda col: find_common_type(col.values), axis=0)
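        # TreeReduce pattern: the map step collects each partition's dtypes, the reduce step picks a common dtype per column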
map_func = self._build_treereduce_func(0, lambda df: df.dtypes)
reduce_func = self._build_treereduce_func(0, dtype_builder)
# For now we will use a pandas Series for the dtypes.
if len(self.columns) > 0:
dtypes = self.tree_reduce(0, map_func, reduce_func).to_pandas().iloc[0]
else:
            dtypes = pandas.Series([])
import gc
from collections import defaultdict
import numpy as np
import pandas as pd
import statsmodels.api as sm
from .plotting import plot_everything, create_and_plot_agreements, get_classification_df, \
plot_experimental_condition, plot_gp, plot_histogram, \
create_and_plot_FDR, create_and_save_KT, plot_histograms_2c
from .helpers import get_all_experimental_conditions, calculate_AUC, calculate_null_kl, dict_to_string, \
relativize, centre, compute_response_angle
from .io import read_pdx_data
def run_kulgap_pipeline(results_path, data_path, fit_gp=True, draw_plots=True, rerun_kl_null=False):
"""
Run the complete KuLGaP pipeline on a `TreatmentResponseExperiment` object. The experiment data is read into
Python from disk; all results are written in `results_path`.
:param results_path: [string] Path to the directory where results will be written.
:param data_path: [string] Path to the directory where the experiment data will be read from.
    :param fit_gp: [bool] Whether to fit Gaussian processes to each experimental condition.
    :param draw_plots: [bool] Whether to generate and save the figures.
    :param rerun_kl_null: [bool] Whether to recompute the control-vs-control KL null distribution instead of loading it from file.
:return: [None] Writes files to disk
"""
gc.collect()
# =============================================================================
# Definition of file links
# =============================================================================
############WHERE THE INPUT DATA IS SAVED##########################################################
anon_filename = data_path + "alldata_new.csv"
filename_crown = data_path + "20180402_sheng_results.csv"
kl_null_filename = data_path + "kl_control_vs_control.csv"
############WHERE THE REPORT (THINGS THAT FAILED) IS SAVED#########################################
out_report = results_path + 'report_all.txt' # 'report_LPDX.txt'
############WHERE THE OUTPUT STATISTICS ARE SAVED##################################################
stats_outname = results_path + "statistics_all.csv" # "statistics_safeside.csv"
classifiers_outname = results_path + "classifiers.csv"
agreements_outname = results_path + "Fig2_agreements.csv"
agreements_outfigname = results_path + "Fig2_agreements.pdf"
conservative_outname = results_path + "Fig2_conservative.csv"
conservative_outfigname = results_path + "Fig2_conservative.pdf"
scatterplot_outfigname = results_path + "Fig2c"
fig1a_figname = results_path + "fig1a.pdf"
fig1b_figname = results_path + "fig1b.pdf"
fig1c_figname = results_path + "fig1c.pdf"
fig1d_figname = results_path + "fig1d.pdf"
fig3a_figname = results_path + "fig3a.pdf"
fig3b_figname = results_path + "fig3b.pdf"
fig4a_figname = results_path + "fig4a.pdf"
fig4b_figname = results_path + "fig4b.pdf"
fig4c_figname = results_path + "fig4c.pdf"
fig4d_figname = results_path + "fig4d.pdf"
fig5a_figname = results_path + "fig5a.pdf"
fig5b_figname = results_path + "fig5b.pdf"
fig5c_figname = results_path + "fig5c.pdf"
fig5d_figname = results_path + "fig5d.pdf"
supfig1_figname = results_path + "sup-fig1.pdf"
supfig2a_figname = results_path + "sup-fig2a.pdf"
supfig2b_figname = results_path + "sup-fig2b.pdf"
supfig3a_figname = results_path + "sup-fig3a.pdf"
supfig3b_figname = results_path + "sup-fig3b.pdf"
supfig4_figname = results_path + "sup-fig4.pdf"
supfig5a_figname = results_path + "sup-fig5a.pdf"
supfig5b_figname = results_path + "sup-fig5b.pdf"
supfig6a_figname = results_path + "sup-fig6a.pdf"
supfig6b_figname = results_path + "sup-fig6b.pdf"
histograms_out = results_path + "KLDivergenceHistograms/"
histograms_outfile = results_path + "kl_histograms.csv"
KT_outname = results_path + "Kendalls_tau.csv"
allplot_figname = results_path + "allplot.pdf"
###################################################################################################
if fit_gp is False:
rerun_kl_null = False
all_patients = read_pdx_data(anon_filename)
failed_plot = []
failed_p_value = []
failed_mrecist = []
failed_gp = []
failed_response_angle = []
    allowed_list = []
    ignore_list = []  # model names to skip; empty list skips none
P_VAL = 0.05
# =============================================================================
# GP fitting and calculation of other parameters.
# =============================================================================
# TODO: replace by fit_all_gps(treatment_response_expt, ... )
for i in range(0, len(all_patients)):
print("Now dealing with patient %d of %d" % (i + 1, len(all_patients)))
if (allowed_list == []) or (all_patients[i].name in allowed_list):
# if treatment_response_expt[i].name not in ignore_list:
print("Num failed mRECISTS: " + str(len(failed_mrecist)))
print("Num failed plots: " + str(len(failed_plot)))
print("Num failed p values: " + str(len(failed_p_value)))
patient = all_patients[i]
print("CancerModel: " + str(patient.name))
# need to ensure that we've found and processed the control.
control = patient.experimental_condition['Control']
control.normalize_data()
if fit_gp:
control.fit_gaussian_processes()
assert (control.name is not None)
assert (control.x is not None)
assert (control.y is not None)
assert (control.response_norm is not None)
assert (control.drug_start_day is not None)
assert (control.replicates is not None)
assert (control.gp is not None)
assert (control.gp_kernel is not None)
for category in patient.condition_name:
if category != 'Control':
cur_case = patient.experimental_condition[category]
cur_case.normalize_data()
cur_case.start = max(cur_case.find_start_date_index(), control.measurement_start)
cur_case.end = min(control.measurement_end, cur_case.measurement_end)
cur_case.create_full_data(control)
assert (cur_case.full_data != [])
# DELTA LOG LIKELIHOOD
if fit_gp:
try:
cur_case.fit_gaussian_processes(control=control)
assert (cur_case.gp_h0 is not None)
assert (cur_case.gp_h0_kernel is not None)
assert (cur_case.gp_h1 is not None)
assert (cur_case.gp_h1_kernel is not None)
assert (cur_case.delta_log_likelihood_h0_h1 is not None)
# KL DIVERGENCE
cur_case.calculate_kl_divergence(control)
assert (cur_case.kl_divergence is not None)
except Exception as e:
# NEED TO FIGURE OUT HOW TO REFER TO GENERIC ERROR
failed_gp.append((cur_case.source_id, e))
# MRECIST
try:
cur_case.calculate_mrecist()
assert (cur_case.mrecist is not None)
except ValueError as e:
failed_mrecist.append((cur_case.source_id, e))
print(e)
continue
# angle
try:
cur_case.calculate_response_angles(control)
assert (cur_case.response_angle is not None)
cur_case.response_angle_control = {}
for i in range(len(control.replicates)):
# cur_case.response_angle_control[control.replicates[i]] = compute_response_angle(control.variable.ravel(),control.response[i],control.find_start_date_index())
start = control.find_start_date_index() - control.measurement_start
if start is None:
raise TypeError("The 'start' parameter is None")
else:
cur_case.response_angle_control[control.replicates[i]] = compute_response_angle(
control.response[control.treatment_level_start:(control.level_end + 1)].ravel(),
centre(control.y[i, control.measurement_start:control.measurement_end + 1], start),
start)
cur_case.response_angle_rel_control[control.replicates[i]] = compute_response_angle(
control.response[control.treatment_level_start:(control.level_end + 1)].ravel(),
relativize(control.y[i, control.measurement_start:control.measurement_end + 1],
start), start)
except ValueError as e:
failed_response_angle.append((cur_case.source_id, e))
print(e)
continue
# compute AUC
try:
cur_case.calculate_auc(control)
cur_case.calculate_auc_norm(control)
if fit_gp:
cur_case.calculate_gp_auc()
cur_case.auc_gp_control = calculate_AUC(control.x_cut, control.gp.predict(control.x_cut)[0])
cur_case.auc_control = {}
start = max(cur_case.find_start_date_index(), control.measurement_start)
end = min(cur_case.measurement_end, control.measurement_end)
for i in range(len(control.replicates)):
cur_case.auc_control[control.replicates[i]] = calculate_AUC(control.x[start:end],
control.y[i, start:end])
cur_case.auc_control_norm[control.replicates[i]] = calculate_AUC(control.x[start:end],
control.response_norm[i,
start:end])
except ValueError as e:
print(e)
try:
cur_case.calculate_tgi(control)
except ValueError as e:
print(e)
# PERCENT CREDIBLE INTERVALS
if fit_gp:
cur_case.calculate_credible_intervals(control)
assert (cur_case.credible_intervals != [])
cur_case.calculate_credible_intervals_percentage()
assert (cur_case.percent_credible_intervals is not None)
# compute GP derivatives:
cur_case.compute_all_gp_derivatives(control)
# COMPUTATION OF P-VALUES IN SEPARATE ITERATION: WE FIRST NEED TO HAVE FIT ALL THE GPs
# NOW CYCLE AGAIN THROUGH treatment_response_expt TO COMPUTE kl p-values:
categories_by_drug = defaultdict(list)
failed_by_drug = defaultdict(list)
for patient in all_patients:
for key in patient.condition_name:
if patient.experimental_condition[key].gp:
categories_by_drug[key].append(patient.experimental_condition[key])
else:
failed_by_drug[key].append(patient.experimental_condition[key].name)
fig_count = 0
cur_case.kl_p_cvsc = None
print("Now computing KL divergences between controls for kl_control_vs_control - this may take a moment")
controls = [patient.experimental_condition["Control"] for patient in all_patients]
if rerun_kl_null:
kl_control_vs_control = calculate_null_kl(controls, None)
else:
kl_control_vs_control = calculate_null_kl(controls, kl_null_filename)
print("Done computing KL divergences between controls for kl_control_vs_control")
if fit_gp:
# The following plots the KL histgrams
kl_histograms = defaultdict(list)
print("Now computing KL p-values")
for i in range(0, len(all_patients)):
if (allowed_list == []) or (all_patients[i].name in allowed_list):
patient = all_patients[i]
print("CancerModel: ", patient.name, "(", i + 1, "of", len(all_patients), ")")
if patient.name not in ignore_list:
for category in patient.condition_name:
if category != 'Control':
print("ExperimentalCondition: ", category)
cur_case = patient.experimental_condition[category]
# IF FIRST OCCURRENCE OF DRUG: COMPUTE HISTOGRAM OF KL DIVERGENCES
# if cur_case.name not in kl_histograms:
###SOMETHING BAD GOING ON HERE:
# kl_histograms[cur_case.name] = [kl_divergence(variable,response) for variable in categories_by_drug[cur_case.name] for response in categories_by_drug['Control']]
try:
if cur_case.kl_divergence is not None:
####COMPUTE KL DIVERGENCE PVALUES HERE!!
##The old way of computing kl_p_value (by comparing against
## [kl(variable,response) for variable in same_drug for response in controls]) doesn't really
                                    ## make sense in the anonymised setting (the `drug' will be simply C1, C2, etc.)
## therefore replace by same calculation as kl_p_cvsc
## cur_case.kl_p_value= (len([variable for variable in kl_histograms[cur_case.name] if variable >= cur_case.kl_divergence]) + 1) / (len(kl_histograms[cur_case.name]) + 1)
cur_case.kl_p_value = (len([x for x in kl_control_vs_control["list"] if
x >= cur_case.kl_divergence]) + 1) / (
len(kl_control_vs_control["list"]) + 1)
cur_case.kl_p_cvsc = 1 - kl_control_vs_control["smoothed"].cdf(
[cur_case.kl_divergence])
# print(cur_case.kl_p_value,cur_case.kl_p_cvsc, (cur_case.kl_p_cvsc-cur_case.kl_p_value)/cur_case.kl_p_cvsc)
assert (cur_case.kl_p_value is not None)
except Exception as e:
failed_p_value.append((cur_case.source_id, e))
print(e)
raise
if fit_gp:
with open(histograms_outfile, 'w') as outfile:
for key, value in kl_histograms.items():
outfile.write(str(key) + "\n")
outfile.write(",".join(map(str, value)))
outfile.write("\n")
print("Done computing KL p-values, saved to {}".format(histograms_outfile))
# all_kl = [variable["case"].kl_divergence for variable in get_all_cats(treatment_response_expt).values() if
# str(variable["case"].kl_divergence) != "nan"]
with open(out_report, 'w') as f:
print("Errors during plotting:", file=f)
print(failed_plot, file=f)
print("\n\n\n", file=f)
print("failed p-values:", file=f)
print(failed_p_value, file=f)
print("\n\n\n", file=f)
print(failed_mrecist, file=f)
print("\n\n\n", file=f)
print("Errors during GP fitting:", file=f)
print(failed_gp, file=f)
# =============================================================================
# COMPILATION OF STATISTICS
# =============================================================================
stats_dict = {}
for i in range(0, len(all_patients)):
if (allowed_list == []) or (all_patients[i].name in allowed_list):
patient = all_patients[i]
control = patient.experimental_condition['Control']
# control.normalize_data()
# control.fit_gaussian_processes()
            for category in patient.condition_name:
                if 'Control' not in category:
                    cur_case = patient.experimental_condition[category]
key = str(cur_case.source_id) + "*" + str(category)
stats_dict[key] = {'tumour_type': patient.tumour_type, 'mRECIST': None, 'num_mCR': None,
'num_mPR': None,
'num_mSD': None, 'num_mPD': None,
'perc_mCR': None, 'perc_mPR': None,
'perc_mSD': None, 'perc_mPD': None,
'drug': None,
'response_angle': None, 'response_angle_control': None,
'perc_true_credible_intervals': None,
'delta_log_likelihood': None,
'kl': None, 'kl_p_value': None, 'kl_p_cvsc': None, 'gp_deriv': None,
'gp_deriv_control': None, 'auc': None,
'auc_control_norm': None, 'auc_norm': None, 'auc_control': None, 'auc_gp': None,
'auc_gp_control': None,
'number_replicates': len(cur_case.replicates),
'number_replicates_control': len(control.replicates),
"tgi": cur_case.tgi}
stats_dict[key]['drug'] = category
try:
cur_case.calculate_mrecist()
cur_case.enumerate_mrecist()
except Exception as e:
print(e)
continue
num_replicates = len(cur_case.replicates)
stats_dict[key]['mRECIST'] = dict_to_string(cur_case.mrecist)
stats_dict[key]['num_mCR'] = cur_case.mrecist_counts['mCR']
stats_dict[key]['num_mPR'] = cur_case.mrecist_counts['mPR']
stats_dict[key]['num_mSD'] = cur_case.mrecist_counts['mSD']
stats_dict[key]['num_mPD'] = cur_case.mrecist_counts['mPD']
stats_dict[key]['perc_mCR'] = cur_case.mrecist_counts['mCR'] / num_replicates
stats_dict[key]['perc_mPR'] = cur_case.mrecist_counts['mPR'] / num_replicates
stats_dict[key]['perc_mSD'] = cur_case.mrecist_counts['mSD'] / num_replicates
stats_dict[key]['perc_mPD'] = cur_case.mrecist_counts['mPD'] / num_replicates
stats_dict[key]['perc_true_credible_intervals'] = cur_case.percent_credible_intervals
stats_dict[key]['delta_log_likelihood'] = cur_case.delta_log_likelihood_h0_h1
stats_dict[key]['kl'] = cur_case.kl_divergence
stats_dict[key]['kl_p_value'] = cur_case.kl_p_value
stats_dict[key]['kl_p_cvsc'] = cur_case.kl_p_cvsc
stats_dict[key]['gp_deriv'] = np.nanmean(cur_case.rates_list)
stats_dict[key]['gp_deriv_control'] = np.nanmean(cur_case.rates_list_control)
stats_dict[key]['auc'] = dict_to_string(cur_case.auc)
stats_dict[key]['auc_norm'] = dict_to_string(cur_case.auc_norm)
stats_dict[key]['auc_control'] = dict_to_string(cur_case.auc_control)
stats_dict[key]['auc_control_norm'] = dict_to_string(cur_case.auc_control_norm)
try:
stats_dict[key]['auc_gp'] = cur_case.auc_gp[0]
stats_dict[key]['auc_gp_control'] = cur_case.auc_gp_control[0]
except TypeError:
stats_dict[key]['auc_gp'] = ""
stats_dict[key]['auc_gp_control'] = ""
stats_dict[key]['response_angle'] = dict_to_string(cur_case.response_angle)
stats_dict[key]['response_angle_rel'] = dict_to_string(cur_case.response_angle_rel)
stats_dict[key]['response_angle_control'] = dict_to_string(cur_case.response_angle_control)
stats_dict[key]['response_angle_rel_control'] = dict_to_string(cur_case.response_angle_rel_control)
stats_dict[key]['average_angle'] = cur_case.average_angle
stats_dict[key]['average_angle_rel'] = cur_case.average_angle_rel
stats_dict[key]['average_angle_control'] = cur_case.average_angle_control
stats_dict[key]['average_angle_rel_control'] = cur_case.average_angle_rel_control
stats_df = pd.DataFrame.from_dict(stats_dict).transpose()
    crown_df = pd.read_csv(filename_crown, index_col="Seq.")
import os
import glob
import numpy as np
import pandas as pd
from utils.plots_matplotlib import bar_plot
def load_dataset_file(dataset_file_path):
"""
This method loads dataset file.
Currently supported formats are .csv and .json.
:param dataset_file_path: path of the dataset file
:return: pandas dataframe that contains dataset information
"""
_, file_extension = os.path.splitext(dataset_file_path)
if file_extension == ".csv":
data_info = pd.read_csv(dataset_file_path)
elif file_extension == ".json":
        data_info = pd.read_json(dataset_file_path)
    else:
        raise ValueError("Unsupported dataset file format: {}".format(file_extension))
    return data_info
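# --- Usage sketch (added; not part of the original utils) ---------------------
# Exercises load_dataset_file on a throwaway CSV; the file name
# "example_dataset.csv" and its columns are assumptions made purely for
# illustration.
if __name__ == "__main__":
    pd.DataFrame({"image": ["a.png", "b.png"], "label": [0, 1]}).to_csv(
        "example_dataset.csv", index=False)
    print(load_dataset_file("example_dataset.csv").head())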
import os
import gzip
import warnings
import pandas as pd
warnings.simplefilter("ignore")
import pickle
def outlier_analysis(df, model_dir):
_df = df[df["is_rescurable_homopolymer"]].reset_index(drop=True)
if not len(_df):
return df
__df = df[~df["is_rescurable_homopolymer"]].reset_index(drop=True)
at_ins_df = _df[_df["is_at_ins"] == 1].reset_index(drop=True)
at_ins_df = find_outliers(at_ins_df, "at_ins", model_dir)
at_del_df = _df[_df["is_at_del"] == 1].reset_index(drop=True)
at_del_df = find_outliers(at_del_df, "at_del", model_dir)
gc_ins_df = _df[_df["is_gc_ins"] == 1].reset_index(drop=True)
gc_ins_df = find_outliers(gc_ins_df, "gc_ins", model_dir)
gc_del_df = _df[_df["is_gc_del"] == 1].reset_index(drop=True)
gc_del_df = find_outliers(gc_del_df, "gc_del", model_dir)
return pd.concat([__df, at_ins_df, at_del_df, gc_ins_df, gc_del_df], axis=0)
def cov_vaf(row):
cov = row["ref_count"] + row["alt_count"]
vaf = row["alt_count"] / cov
return cov, vaf
def reclassify_by_outlier_status(row):
if row["outlying"] == -1:
return "reclassifed_by_outlier_analysis", "somatic"
else:
return row["reclassified"], row["predicted_class"]
def find_outliers(df, homopolymer_type, model_dir):
if not len(df):
return df
df["cov"], df["vaf"] = zip(*df.apply(cov_vaf, axis=1))
saved_model = os.path.join(model_dir, "{}.pkl.gz".format(homopolymer_type))
iso = pickle.load(gzip.open(saved_model, "rb"))
_test = df[["alt_count", "vaf", "cov"]]
pred = pd.DataFrame(data=iso.predict(_test), columns=["outlying"])
    _df = pd.concat([df, pred], axis=1)
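# --- Usage sketch (added; not from the original module) -----------------------
# Shows the small helper functions above on a toy record; the column names
# follow the ones used in this module, the values are made up.
if __name__ == "__main__":
    toy = pd.Series({"ref_count": 90, "alt_count": 10,
                     "outlying": -1, "reclassified": "none",
                     "predicted_class": "artifact"})
    print(cov_vaf(toy))                       # -> (100, 0.1)
    print(reclassify_by_outlier_status(toy))  # labelled somatic, since outlying == -1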
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from collections import namedtuple
import pickle
from sklearn.preprocessing import LabelBinarizer
# Relevant layers in keras
from keras.models import Sequential
from keras.layers import Conv2D, Dense, Activation, AveragePooling2D, Flatten
from keras.callbacks import LearningRateScheduler
# Utility functions for the MNIST notebook
class MnistDataSet(object):
"""
Wrapper class to load and process the MNIST data as needed
"""
def __init__(self, directory, X_train_file='train-images-idx3-ubyte.pkl',
y_train_file='train-labels-idx1-ubyte.pkl',
X_test_file='t10k-images-idx3-ubyte.pkl',
y_test_file='t10k-labels-idx1-ubyte.pkl',
random=1234,
verbose=False):
"""
Creates a new instance of the class, with the directories of
the processed pickle files
"""
self.directory = directory
self.X_train_file = X_train_file
self.y_train_file = y_train_file
self.X_test_file = X_test_file
self.y_test_file = y_test_file
self.random = random # Use for reproducible results
self.verbose = verbose
def data(self):
"""
Loads data and performs all processing before returning it
"""
        X_train, y_train, X_test, y_test = self.load_data()
        y_train, y_test = self.onehot_encode_labels(y_train, y_test)
        X_train, X_test = self.add_image_borders(X_train, 2, 0), self.add_image_borders(X_test, 2, 0)
        X_train, X_test = self.add_image_dim(X_train), self.add_image_dim(X_test)
        return X_train, y_train, X_test, y_test
return X_train, y_train, X_test, y_test
def load_data(self):
"""
Loads data from the given pickle files stored in local variables
"""
if self.verbose:
print('Loading pickle files')
try:
X_train = pickle.load(open(self.directory + self.X_train_file, "rb"))
y_train = pickle.load(open(self.directory + self.y_train_file, "rb"))
X_test = pickle.load(open(self.directory + self.X_test_file, "rb"))
y_test = pickle.load(open(self.directory + self.y_test_file, "rb"))
except Exception as e:
print('Error loading pickle file: {}'.format(e))
return None
return X_train, y_train, X_test, y_test
def onehot_encode_labels(self, y_train, y_test):
"""
Converts a 1-d array of values into a 2-d onehot array
"""
lbe = LabelBinarizer()
lbe.fit(y_train)
y_train_ohe = lbe.transform(y_train)
y_test_ohe = lbe.transform(y_test)
return y_train_ohe, y_test_ohe
def add_image_dim(self, images, after=True):
"""
Adds an extra dimension to monochrome image files, optionally before
the X and Y dimensions
"""
if after:
new_images = images[:,:,:, np.newaxis]
else:
            new_images = images[:, np.newaxis, :, :]
return new_images
def image_border(self, image, size, fill):
"""
        Adds a border around the numpy array of the given size and fill value
"""
im_w, im_h = image.shape
im_dtype = image.dtype
new_image = np.full((im_w + (2 * size), im_h + (2 * size)),
fill_value=fill, dtype=im_dtype)
new_image[size:im_h + size, size:im_w + size] = image
assert new_image.dtype == image.dtype
assert new_image.shape[0] == image.shape[0] + (2 * size)
assert new_image.shape[1] == image.shape[1] + (2 * size)
assert np.array_equal(image, new_image[size:size+im_h, size:size+im_w])
return new_image
def add_image_borders(self, images, size, fill):
"""
Adds image borders to an array of images
"""
new_images = np.zeros((images.shape[0],
images.shape[1] + (2 * size),
images.shape[2] + (2 * size)),
dtype = images.dtype)
for idx in range(images.shape[0]):
            new_images[idx] = self.image_border(images[idx], size, fill)
return new_images
def lenet5_model(verbose=False):
"""
Creates and returns a lenet5 model
"""
# Create the model
model = Sequential()
model.add(Conv2D(filters=6, kernel_size=(5, 5), strides=(1, 1), input_shape=(32, 32, 1))) # C1
model.add(AveragePooling2D(pool_size=(2, 2))) # S2
model.add(Activation('tanh'))
model.add(Conv2D(filters=16, kernel_size=(5, 5), strides=(1, 1))) # C3
model.add(AveragePooling2D(pool_size=(2, 2))) # S4
model.add(Activation('tanh'))
model.add(Conv2D(filters=120, kernel_size=(5, 5), strides=(1, 1))) # C5
model.add(Activation('tanh'))
model.add(Flatten())
model.add(Dense(120)) # F6
model.add(Activation('tanh'))
model.add(Dense(10))
model.add(Activation('softmax'))
if verbose:
print(model.summary())
return model
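# --- Usage sketch (added) ------------------------------------------------------
# Builds the LeNet-5 network defined above and compiles it; the optimizer and
# loss below are illustrative choices, not prescribed by the original notebook.
if __name__ == "__main__":
    demo_model = lenet5_model(verbose=True)
    demo_model.compile(optimizer='sgd', loss='categorical_crossentropy',
                       metrics=['accuracy'])
    # The network expects 32x32x1 inputs, i.e. 28x28 MNIST digits padded by 2
    # pixels on each side as done in MnistDataSet.add_image_borders above.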
# Net training methods
class ModelEvaluator(object):
"""
Singleton class used to train and evaluate models. All the results are
stored for later comparison
"""
def __init__(self):
self.models = dict()
self.results = dict()
self.history = dict()
def evaluate_model(self, tag,
model, optimizer,
X_train, y_train,
X_test, y_test,
batch_size, epochs,
verbose=False):
"""
Wrapper method to create, train and optionally CV, and check performance on test set
"""
def lr_value(epoch):
"""
Returns the learning rate based on the epoch
"""
if epoch <= 2:
return 0.01
elif epoch <= 5:
return 0.005
elif epoch <= 8:
return 0.001
elif epoch <= 12:
return 0.0005
else:
return 0.00001
print('Compiling model')
if verbose:
model.summary()
model.compile(optimizer=optimizer,
loss='categorical_crossentropy',
metrics=['accuracy'])
np.random.seed(1234)
print('Training model')
history = model.fit(X_train, y_train,
validation_data=(X_test, y_test),
batch_size=batch_size,
epochs=epochs,
# callbacks=[LearningRateScheduler(lr_value)],
verbose=1 if verbose else 0)
print('Evaluating model')
score = model.evaluate(X_test, y_test, batch_size=batch_size)
train_error = 1.0 - history.history['acc'][-1]
test_error = 1.0 - score[1]
train_error_pct = train_error * 100.0
test_error_pct = test_error * 100.0
        print('\nTest error %age: {:.4f}. Train error %age: {:.4f}'.format(test_error_pct, train_error_pct))
if verbose:
print('\nTest results: Loss = {:.4f}, Error = {:.4f}'.format(score[0], test_error))
self.models[tag] = model
self.results[tag] = {'train_error_pct': train_error_pct,
'test_error_pct': test_error_pct}
self.history[tag] = history.history
def plot_history(self, hist):
"""
Plots the history object returned by the .fit() call
"""
for metric in ('acc', 'loss', 'val_acc', 'val_loss'):
assert metric in hist.keys()
        hist_df = pd.DataFrame(hist)
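# --- Usage sketch (added) ------------------------------------------------------
# Ties MnistDataSet, lenet5_model and ModelEvaluator together. The directory
# '../input/' (holding the pickled MNIST files), the optimizer and the
# hyperparameters are assumptions for illustration; evaluate_model() also relies
# on the notebook's Keras version, which reports accuracy under the 'acc' key.
if __name__ == "__main__":
    X_train, y_train, X_test, y_test = MnistDataSet(directory='../input/').data()
    evaluator = ModelEvaluator()
    evaluator.evaluate_model('lenet5_baseline', lenet5_model(), 'sgd',
                             X_train, y_train, X_test, y_test,
                             batch_size=128, epochs=2, verbose=True)
    print(evaluator.results)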
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import json
from matplotlib import pyplot as plt
import numpy as np
from numpy.fft import fft, fftfreq
# Configuration
anomaly_color = 'sandybrown'
prediction_color = 'yellowgreen'
training_color = 'yellowgreen'
validation_color = 'gold'
test_color = 'coral'
figsize=(9, 3)
autoclose = True
def load_series(file_name, data_folder):
# Load the input data
data_path = f'{data_folder}/data/{file_name}'
data = pd.read_csv(data_path)
data['timestamp'] = pd.to_datetime(data['timestamp'])
data.set_index('timestamp', inplace=True)
# Load the labels
label_path = f'{data_folder}/labels/combined_labels.json'
with open(label_path) as fp:
labels = pd.Series(json.load(fp)[file_name])
labels = pd.to_datetime(labels)
# Load the windows
window_path = f'{data_folder}/labels/combined_windows.json'
window_cols = ['begin', 'end']
with open(window_path) as fp:
windows = pd.DataFrame(columns=window_cols,
data=json.load(fp)[file_name])
windows['begin'] = pd.to_datetime(windows['begin'])
windows['end'] = pd.to_datetime(windows['end'])
# Return data
return data, labels, windows
def plot_series(data, labels=None,
windows=None,
predictions=None,
highlights=None,
val_start=None,
test_start=None,
figsize=figsize,
show_sampling_points=False,
show_markers=False,
filled_version=None):
# Open a new figure
if autoclose: plt.close('all')
plt.figure(figsize=figsize)
# Plot data
if not show_markers:
plt.plot(data.index, data.values, zorder=0)
else:
plt.plot(data.index, data.values, zorder=0,
marker='.', markersize=3)
if filled_version is not None:
filled = filled_version.copy()
filled[~data['value'].isnull()] = np.nan
plt.scatter(filled.index, filled,
marker='.', c='tab:orange', s=5);
if show_sampling_points:
vmin = data.min()
lvl = np.full(len(data.index), vmin)
plt.scatter(data.index, lvl, marker='.',
c='tab:red', s=5)
# Rotated x ticks
plt.xticks(rotation=45)
# Plot labels
if labels is not None:
plt.scatter(labels.values, data.loc[labels],
color=anomaly_color, zorder=2, s=5)
# Plot windows
if windows is not None:
for _, wdw in windows.iterrows():
plt.axvspan(wdw['begin'], wdw['end'],
color=anomaly_color, alpha=0.3, zorder=1)
# Plot training data
if val_start is not None:
plt.axvspan(data.index[0], val_start,
color=training_color, alpha=0.1, zorder=-1)
if val_start is None and test_start is not None:
plt.axvspan(data.index[0], test_start,
color=training_color, alpha=0.1, zorder=-1)
if val_start is not None:
plt.axvspan(val_start, test_start,
color=validation_color, alpha=0.1, zorder=-1)
if test_start is not None:
plt.axvspan(test_start, data.index[-1],
color=test_color, alpha=0.3, zorder=0)
# Predictions
if predictions is not None:
plt.scatter(predictions.values, data.loc[predictions],
color=prediction_color, alpha=.4, zorder=3,
s=5)
plt.tight_layout()
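# --- Usage sketch (added) ------------------------------------------------------
# Plots a small synthetic series with one labelled anomaly window; all values
# below are made up and only demonstrate the plot_series() interface.
if __name__ == "__main__":
    idx = pd.date_range('2021-01-01', periods=200, freq='H')
    demo = pd.DataFrame({'value': np.sin(np.arange(200) / 10.0)}, index=idx)
    demo_labels = pd.Series([idx[120]])
    demo_windows = pd.DataFrame({'begin': [idx[110]], 'end': [idx[130]]})
    plot_series(demo, labels=demo_labels, windows=demo_windows)
    plt.show()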
def plot_autocorrelation(data, max_lag=100, figsize=figsize):
# Open a new figure
if autoclose: plt.close('all')
plt.figure(figsize=figsize)
# Autocorrelation plot
pd.plotting.autocorrelation_plot(data['value'])
# Customized x limits
plt.xlim(0, max_lag)
# Rotated x ticks
plt.xticks(rotation=45)
plt.tight_layout()
def plot_histogram(data, bins=10, vmin=None, vmax=None, figsize=figsize):
# Build a new figure
if autoclose: plt.close('all')
plt.figure(figsize=figsize)
# Plot a histogram
plt.hist(data, density=True, bins=bins)
# Update limits
lims = plt.xlim()
if vmin is not None:
lims = (vmin, lims[1])
if vmax is not None:
lims = (lims[0], vmax)
plt.xlim(lims)
plt.tight_layout()
def plot_histogram2d(xdata, ydata, bins=10, figsize=figsize):
# Build a new figure
if autoclose: plt.close('all')
plt.figure(figsize=figsize)
# Plot a histogram
plt.hist2d(xdata, ydata, density=True, bins=bins)
plt.tight_layout()
def plot_density_estimator_1D(estimator, xr, figsize=figsize):
# Build a new figure
if autoclose: plt.close('all')
plt.figure(figsize=figsize)
# Plot the estimated density
xvals = xr.reshape((-1, 1))
dvals = np.exp(estimator.score_samples(xvals))
plt.plot(xvals, dvals)
plt.tight_layout()
def plot_density_estimator_2D(estimator, xr, yr, figsize=figsize):
# Plot the estimated density
nx = len(xr)
ny = len(yr)
xc = np.repeat(xr, ny)
yc = np.tile(yr, nx)
data = np.vstack((xc, yc)).T
dvals = np.exp(estimator.score_samples(data))
dvals = dvals.reshape((nx, ny))
# Build a new figure
if autoclose: plt.close('all')
plt.figure(figsize=figsize)
plt.pcolor(dvals)
plt.tight_layout()
# plt.xticks(np.arange(0, len(xr)), xr)
# plt.yticks(np.arange(0, len(xr)), yr)
def plot_distribution_2D(f, xr, yr, figsize=figsize):
# Build the input
nx = len(xr)
ny = len(yr)
xc = np.repeat(xr, ny)
yc = np.tile(yr, nx)
data = np.vstack((xc, yc)).T
dvals = np.exp(f.pdf(data))
dvals = dvals.reshape((nx, ny))
# Build a new figure
if autoclose: plt.close('all')
plt.figure(figsize=figsize)
plt.pcolor(dvals)
plt.tight_layout()
xticks = np.linspace(0, len(xr), 6)
xlabels = np.linspace(xr[0], xr[-1], 6)
plt.xticks(xticks, xlabels)
yticks = np.linspace(0, len(yr), 6)
ylabels = np.linspace(yr[0], yr[-1], 6)
plt.yticks(yticks, ylabels)
def get_pred(signal, thr):
return pd.Series(signal.index[signal >= thr])
def get_metrics(pred, labels, windows):
tp = [] # True positives
fp = [] # False positives
fn = [] # False negatives
advance = [] # Time advance, for true positives
# Loop over all windows
used_pred = set()
for idx, w in windows.iterrows():
# Search for the earliest prediction
pmin = None
for p in pred:
if p >= w['begin'] and p < w['end']:
used_pred.add(p)
if pmin is None or p < pmin:
pmin = p
# Compute true pos. (incl. advance) and false neg.
l = labels[idx]
if pmin is None:
fn.append(l)
else:
tp.append(l)
advance.append(l-pmin)
# Compute false positives
for p in pred:
if p not in used_pred:
fp.append(p)
# Return all metrics as pandas series
return pd.Series(tp), \
pd.Series(fp), \
pd.Series(fn), \
           pd.Series(advance)
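# --- Usage sketch (added) ------------------------------------------------------
# Thresholds a toy anomaly signal with get_pred() and scores it against a single
# labelled window via get_metrics(); all values are illustrative.
if __name__ == "__main__":
    t_idx = pd.date_range('2021-01-01', periods=10, freq='D')
    signal = pd.Series([0.1, 0.2, 0.1, 0.9, 0.2, 0.1, 0.8, 0.1, 0.1, 0.1],
                       index=t_idx)
    demo_labels = pd.Series([t_idx[4]])
    demo_windows = pd.DataFrame({'begin': [t_idx[3]], 'end': [t_idx[5]]})
    preds = get_pred(signal, thr=0.5)
    tp, fp, fn, advance = get_metrics(preds, demo_labels, demo_windows)
    print(len(tp), len(fp), len(fn), advance.tolist())  # 1 true pos, 1 false pos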
import numpy as np
import pandas as pd
from fklearn.causal.effects import linear_effect
from fklearn.causal.validation.curves import (effect_by_segment, cumulative_effect_curve, cumulative_gain_curve,
relative_cumulative_gain_curve, effect_curves)
def test_effect_by_segment():
df = pd.DataFrame(dict(
t=[1, 1, 1, 2, 2, 2, 3, 3, 3],
x=[1, 2, 3, 1, 2, 3, 1, 2, 3],
y=[1, 1, 1, 2, 3, 4, 3, 5, 7],
))
result = effect_by_segment(df, prediction="x", outcome="y", treatment="t", segments=3, effect_fn=linear_effect)
expected = pd.Series([1., 2., 3.], index=result.index)
pd.testing.assert_series_equal(result, expected)
def test_cumulative_effect_curve():
df = pd.DataFrame(dict(
t=[1, 1, 1, 2, 2, 2, 3, 3, 3],
x=[1, 2, 3, 1, 2, 3, 1, 2, 3],
y=[1, 1, 1, 2, 3, 4, 3, 5, 7],
))
expected = np.array([3., 3., 2.92857143, 2.5, 2.5, 2.46153846, 2.])
result = cumulative_effect_curve(df, prediction="x", outcome="y", treatment="t", min_rows=3, steps=df.shape[0],
effect_fn=linear_effect)
np.testing.assert_allclose(expected, result, rtol=1e-07)
def test_cumulative_gain_curve():
df = pd.DataFrame(dict(
t=[1, 1, 1, 2, 2, 2, 3, 3, 3],
x=[1, 2, 3, 1, 2, 3, 1, 2, 3],
y=[1, 1, 1, 2, 3, 4, 3, 5, 7],
))
expected = np.array([1., 1.33333333, 1.62698413, 1.66666667, 1.94444444, 2.18803419, 2.])
result = cumulative_gain_curve(df, prediction="x", outcome="y", treatment="t", min_rows=3, steps=df.shape[0],
effect_fn=linear_effect)
np.testing.assert_allclose(expected, result, rtol=1e-07)
def test_relative_cumulative_gain_curve():
df = pd.DataFrame(dict(
t=[1, 1, 1, 2, 2, 2, 3, 3, 3],
x=[1, 2, 3, 1, 2, 3, 1, 2, 3],
y=[1, 1, 1, 2, 3, 4, 3, 5, 7],
))
expected = np.array([0.33333333, 0.44444444, 0.51587302, 0.33333333, 0.38888889, 0.41025641, 0.])
result = relative_cumulative_gain_curve(df, prediction="x", outcome="y", treatment="t", min_rows=3,
steps=df.shape[0], effect_fn=linear_effect)
np.testing.assert_allclose(expected, result, rtol=1e-07)
def test_effect_curves():
df = pd.DataFrame(dict(
t=[1, 1, 1, 2, 2, 2, 3, 3, 3],
x=[1, 2, 3, 1, 2, 3, 1, 2, 3],
y=[1, 1, 1, 2, 3, 4, 3, 5, 7],
))
expected = pd.DataFrame({
"samples_count": [3, 4, 5, 6, 7, 8, 9],
"cumulative_effect_curve": [3., 3., 2.92857143, 2.5, 2.5, 2.46153846, 2.],
"samples_fraction": [0.3333333, 0.4444444, 0.5555555, 0.6666666, 0.7777777, 0.8888888, 1.],
"cumulative_gain_curve": [1., 1.33333333, 1.62698413, 1.66666667, 1.94444444, 2.18803419, 2.],
"random_model_cumulative_gain_curve": [0.6666666, 0.8888888, 1.1111111, 1.3333333, 1.5555555, 1.7777777, 2.],
"relative_cumulative_gain_curve": [0.33333333, 0.44444444, 0.51587302, 0.33333333, 0.38888889, 0.41025641, 0.],
})
result = effect_curves(df, prediction="x", outcome="y", treatment="t", min_rows=3, steps=df.shape[0],
effect_fn=linear_effect)
    pd.testing.assert_frame_equal(result, expected, atol=1e-07)
import requests as requests
import pandas as pd
import re
from yfinance_ez.constants import USER_AGENT_HEADERS
try:
import ujson as _json
except ImportError:
import json as _json
QUARTER = 0 # index of quarter in quarter string
def get_json(url, proxy=None):
html = requests.get(url=url, proxies=proxy, headers=USER_AGENT_HEADERS).text
if "QuoteSummaryStore" not in html:
html = requests.get(url=url, proxies=proxy, headers=USER_AGENT_HEADERS).text
if "QuoteSummaryStore" not in html:
return {}
json_str = html.split('root.App.main =')[1].split(
'(this)')[0].split(';\n}')[0].strip()
data = _json.loads(json_str)[
'context']['dispatcher']['stores']['QuoteSummaryStore']
# return data
new_data = _json.dumps(data).replace('{}', 'null')
new_data = re.sub(
r'\{[\'|\"]raw[\'|\"]:(.*?),(.*?)\}', r'\1', new_data)
return _json.loads(new_data)
def camel2title(o):
    return [re.sub("([a-z])([A-Z])", r"\g<1> \g<2>", i).title() for i in o]
def auto_adjust(data):
df = data.copy()
ratio = df["Close"] / df["Adj Close"]
df["Adj Open"] = df["Open"] / ratio
df["Adj High"] = df["High"] / ratio
df["Adj Low"] = df["Low"] / ratio
df.drop(
["Open", "High", "Low", "Close"],
axis=1, inplace=True)
df.rename(columns={
"Adj Open": "Open", "Adj High": "High",
"Adj Low": "Low", "Adj Close": "Close"
}, inplace=True)
df = df[["Open", "High", "Low", "Close", "Volume"]]
return df[["Open", "High", "Low", "Close", "Volume"]]
def back_adjust(data):
""" back-adjusted data to mimic true historical prices """
df = data.copy()
ratio = df["Adj Close"] / df["Close"]
df["Adj Open"] = df["Open"] * ratio
df["Adj High"] = df["High"] * ratio
df["Adj Low"] = df["Low"] * ratio
df.drop(
["Open", "High", "Low", "Adj Close"],
axis=1, inplace=True)
df.rename(columns={
"Adj Open": "Open", "Adj High": "High",
"Adj Low": "Low"
}, inplace=True)
return df[["Open", "High", "Low", "Close", "Volume"]]
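# --- Usage sketch (added) ------------------------------------------------------
# Illustrates auto_adjust()/back_adjust() on a two-row toy OHLC frame; the
# numbers are made up and only show how the Close / Adj Close ratio is applied.
if __name__ == "__main__":
    toy = pd.DataFrame({
        "Open": [10.0, 11.0], "High": [12.0, 12.5], "Low": [9.5, 10.5],
        "Close": [11.0, 12.0], "Adj Close": [5.5, 6.0], "Volume": [100, 120],
    })
    print(auto_adjust(toy))  # OHLC rescaled by Adj Close / Close (here x0.5)
    print(back_adjust(toy))  # Close kept as-is, Open/High/Low rescaled instead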
def parse_quotes(data, tz=None):
timestamps = data["timestamp"]
ohlc = data["indicators"]["quote"][0]
volumes = ohlc["volume"]
opens = ohlc["open"]
closes = ohlc["close"]
lows = ohlc["low"]
highs = ohlc["high"]
adjclose = closes
if "adjclose" in data["indicators"]:
adjclose = data["indicators"]["adjclose"][0]["adjclose"]
quotes = pd.DataFrame({"Open": opens,
"High": highs,
"Low": lows,
"Close": closes,
"Adj Close": adjclose,
"Volume": volumes})
quotes.index = pd.to_datetime(timestamps, unit="s")
quotes.sort_index(inplace=True)
if tz is not None:
quotes.index = quotes.index.tz_localize(tz)
return quotes
def parse_actions(data, tz=None):
dividends = pd.DataFrame(columns=["Dividends"])
splits = pd.DataFrame(columns=["Stock Splits"])
if "events" in data:
if "dividends" in data["events"]:
dividends = pd.DataFrame(
data=list(data["events"]["dividends"].values()))
dividends.set_index("date", inplace=True)
            dividends.index = pd.to_datetime(dividends.index, unit="s")
from unittest import TestCase
import numpy as np
import pandas as pd
from copulas.univariate.gaussian import GaussianUnivariate
class TestGaussianUnivariate(TestCase):
def test___init__(self):
"""On init, default values are set on instance."""
# Setup / Run
copula = GaussianUnivariate()
# Check
assert not copula.name
assert copula.mean == 0
assert copula.std == 1
def test___str__(self):
"""str returns details about the model."""
# Setup
copula = GaussianUnivariate()
expected_result = '\n'.join([
'Distribution Type: Gaussian',
'Variable name: None',
'Mean: 0',
'Standard deviation: 1'
])
# Run
result = copula.__str__()
# Check
assert result == expected_result
def test_fit(self):
"""On fit, stats from fit data are set in the model."""
# Setup
copula = GaussianUnivariate()
column = pd.Series([0, 1, 2, 3, 4, 5], name='column')
mean = 2.5
std = 1.707825127659933
name = 'column'
# Run
copula.fit(column)
# Check
assert copula.mean == mean
assert copula.std == std
assert copula.name == name
def test_fit_empty_data(self):
"""On fit, if column is empty an error is raised."""
# Setup
copula = GaussianUnivariate()
        column = pd.Series([])
import seaborn as sns
import pandas as pd
import geopandas as gpd
import numpy as np
import matplotlib.pyplot as plt
from pandas.io.json import json_normalize
from pysal.lib import weights
from sklearn import cluster
from shapely.geometry import Point
# # # # # PET DATA # # # # #
# filename = "pets.json"
# with open(filename, 'r') as f:
# objects = ijson.items
# austin dangerous dog api
urlD = 'https://data.austintexas.gov/resource/ykw4-j3aj.json'
# austin stray dog data
urlS = 'https://data.austintexas.gov/resource/hye6-gvq2.json'
# found_df / austin found pets pandas data frame constructor
pets_df = pd.read_json(urlS, orient='records')
location_df = json_normalize(pets_df['location'])
concat_df = pd.concat([pets_df, location_df], axis=1)
found_df = concat_df.drop(concat_df.columns[0:7], axis=1)
found_df = found_df.drop(found_df.columns[[2, 4, 6, 10]], axis=1)
address_df = pd.DataFrame(columns=['address', 'city', 'zip_code'])
for i, row in location_df.iterrows():
rowStr = row['human_address']
splitRow = rowStr.split('\"')
address = splitRow[3]
city = splitRow[7]
zipCode = splitRow[15]
address_df = address_df.append({'address': address, 'city': city, 'zip_code': zipCode}, ignore_index=True)
found_df = pd.concat([found_df, address_df], axis=1)
# formatting address correctly
for i, row in found_df.iterrows():
rowStr = row['city']
splitRow = rowStr.split(' ')
# ADD MORE LOCALITIES HERE IF NEEDED IN DATASET
if splitRow[0] not in ('AUSTIN', 'PFLUGERVILLE', 'LAKEWAY', ''):
for j in splitRow:
if j in ('AUSTIN', 'PFLUGERVILLE', 'LAKEWAY'):
found_df.at[i, 'city'] = j
else:
found_df.at[i, 'city'] = ''
found_df.at[i, 'address'] = ''
# danger_df austin dangerous dogs pandas data frame constructor
danger_df = pd.read_json(urlD)
danger_df = danger_df.drop(danger_df.columns[[0, 1, 4, 5]], axis=1)
location_df = json_normalize(danger_df['location'])
address_df = pd.DataFrame(columns=['address'])
for i, row in location_df.iterrows():
rowStr = row['human_address']
splitRow = rowStr.split('\"')
address = splitRow[3]
address_df = address_df.append({'address': address}, ignore_index=True)
danger_df = danger_df.drop(danger_df.columns[[2]], axis=1)
location_df = location_df.drop(location_df.columns[[0]], axis=1)
danger_df = pd.concat([danger_df, address_df, location_df], axis=1)
# converting data types
found_df["latitude"] = pd.to_numeric(found_df["latitude"])
found_df["longitude"] = pd.to_numeric(found_df["longitude"])
found_df["zip_code"] = pd.to_numeric(found_df["zip_code"])
danger_df["latitude"] = pd.to_numeric(danger_df["latitude"])
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
import pandas.compat as compat
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
CategoricalIndex, DatetimeIndex, Float64Index, Index, Int64Index,
IntervalIndex, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex,
UInt64Index, isna)
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'nbytes']
def setup_indices(self):
for name, idx in self.indices.items():
setattr(self, name, idx)
def test_pickle_compat_construction(self):
# need an object to create with
msg = (r"Index\(\.\.\.\) must be called with a collection of some"
r" kind, None was passed|"
r"__new__\(\) missing 1 required positional argument: 'data'|"
r"__new__\(\) takes at least 2 arguments \(1 given\)")
with pytest.raises(TypeError, match=msg):
self._holder()
def test_to_series(self):
# assert that we are creating a copy of the index
idx = self.create_index()
s = idx.to_series()
assert s.values is not idx.values
assert s.index is not idx
assert s.name == idx.name
def test_to_series_with_arguments(self):
# GH18699
# index kwarg
idx = self.create_index()
s = idx.to_series(index=idx)
assert s.values is not idx.values
assert s.index is idx
assert s.name == idx.name
# name kwarg
idx = self.create_index()
s = idx.to_series(name='__test')
assert s.values is not idx.values
assert s.index is not idx
assert s.name != idx.name
@pytest.mark.parametrize("name", [None, "new_name"])
def test_to_frame(self, name):
# see GH-15230, GH-22580
idx = self.create_index()
if name:
idx_name = name
else:
idx_name = idx.name or 0
df = idx.to_frame(name=idx_name)
assert df.index is idx
assert len(df.columns) == 1
assert df.columns[0] == idx_name
assert df[idx_name].values is not idx.values
df = idx.to_frame(index=False, name=idx_name)
assert df.index is not idx
def test_to_frame_datetime_tz(self):
# GH 25809
idx = pd.date_range(start='2019-01-01', end='2019-01-30', freq='D')
idx = idx.tz_localize('UTC')
result = idx.to_frame()
expected = pd.DataFrame(idx, index=idx)
tm.assert_frame_equal(result, expected)
def test_shift(self):
# GH8083 test the base class for shift
idx = self.create_index()
msg = "Not supported for type {}".format(type(idx).__name__)
with pytest.raises(NotImplementedError, match=msg):
idx.shift(1)
with pytest.raises(NotImplementedError, match=msg):
idx.shift(1, 2)
def test_create_index_existing_name(self):
# GH11193, when an existing index is passed, and a new name is not
# specified, the new index should inherit the previous object name
expected = self.create_index()
if not isinstance(expected, MultiIndex):
expected.name = 'foo'
result = pd.Index(expected)
tm.assert_index_equal(result, expected)
result = pd.Index(expected, name='bar')
expected.name = 'bar'
tm.assert_index_equal(result, expected)
else:
expected.names = ['foo', 'bar']
result = pd.Index(expected)
tm.assert_index_equal(
result, Index(Index([('foo', 'one'), ('foo', 'two'),
('bar', 'one'), ('baz', 'two'),
('qux', 'one'), ('qux', 'two')],
dtype='object'),
names=['foo', 'bar']))
result = pd.Index(expected, names=['A', 'B'])
tm.assert_index_equal(
result,
Index(Index([('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')],
dtype='object'), names=['A', 'B']))
def test_numeric_compat(self):
idx = self.create_index()
with pytest.raises(TypeError, match="cannot perform __mul__"):
idx * 1
with pytest.raises(TypeError, match="cannot perform __rmul__"):
1 * idx
div_err = "cannot perform __truediv__"
with pytest.raises(TypeError, match=div_err):
idx / 1
div_err = div_err.replace(' __', ' __r')
with pytest.raises(TypeError, match=div_err):
1 / idx
with pytest.raises(TypeError, match="cannot perform __floordiv__"):
idx // 1
with pytest.raises(TypeError, match="cannot perform __rfloordiv__"):
1 // idx
def test_logical_compat(self):
idx = self.create_index()
with pytest.raises(TypeError, match='cannot perform all'):
idx.all()
with pytest.raises(TypeError, match='cannot perform any'):
idx.any()
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
with pytest.raises(ValueError, match='The truth value of a'):
if idx:
pass
def test_reindex_base(self):
idx = self.create_index()
expected = np.arange(idx.size, dtype=np.intp)
actual = idx.get_indexer(idx)
tm.assert_numpy_array_equal(expected, actual)
with pytest.raises(ValueError, match='Invalid fill method'):
idx.get_indexer(idx, method='invalid')
def test_get_indexer_consistency(self):
# See GH 16819
for name, index in self.indices.items():
if isinstance(index, IntervalIndex):
continue
if index.is_unique or isinstance(index, CategoricalIndex):
indexer = index.get_indexer(index[0:2])
assert isinstance(indexer, np.ndarray)
assert indexer.dtype == np.intp
else:
e = "Reindexing only valid with uniquely valued Index objects"
with pytest.raises(InvalidIndexError, match=e):
index.get_indexer(index[0:2])
indexer, _ = index.get_indexer_non_unique(index[0:2])
assert isinstance(indexer, np.ndarray)
assert indexer.dtype == np.intp
def test_ndarray_compat_properties(self):
idx = self.create_index()
assert idx.T.equals(idx)
assert idx.transpose().equals(idx)
values = idx.values
for prop in self._compat_props:
assert getattr(idx, prop) == getattr(values, prop)
# test for validity
idx.nbytes
idx.values.nbytes
def test_repr_roundtrip(self):
idx = self.create_index()
tm.assert_index_equal(eval(repr(idx)), idx)
def test_str(self):
# test the string repr
idx = self.create_index()
idx.name = 'foo'
assert "'foo'" in str(idx)
assert idx.__class__.__name__ in str(idx)
def test_repr_max_seq_item_setting(self):
# GH10182
idx = self.create_index()
idx = idx.repeat(50)
with pd.option_context("display.max_seq_items", None):
repr(idx)
assert '...' not in str(idx)
def test_copy_name(self):
# gh-12309: Check that the "name" argument
# passed at initialization is honored.
for name, index in compat.iteritems(self.indices):
if isinstance(index, MultiIndex):
continue
first = index.__class__(index, copy=True, name='mario')
second = first.__class__(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
# Not using tm.assert_index_equal() since names differ.
assert index.equals(first)
assert first.name == 'mario'
assert second.name == 'mario'
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
if not isinstance(index, CategoricalIndex):
# See gh-13365
s3 = s1 * s2
assert s3.index.name == 'mario'
def test_ensure_copied_data(self):
# Check the "copy" argument of each Index.__new__ is honoured
# GH12309
for name, index in compat.iteritems(self.indices):
init_kwargs = {}
if isinstance(index, PeriodIndex):
# Needs "freq" specification:
init_kwargs['freq'] = index.freq
elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)):
# RangeIndex cannot be initialized from data
# MultiIndex and CategoricalIndex are tested separately
continue
index_type = index.__class__
result = index_type(index.values, copy=True, **init_kwargs)
tm.assert_index_equal(index, result)
tm.assert_numpy_array_equal(index._ndarray_values,
result._ndarray_values,
check_same='copy')
if isinstance(index, PeriodIndex):
# .values an object array of Period, thus copied
result = index_type(ordinal=index.asi8, copy=False,
**init_kwargs)
tm.assert_numpy_array_equal(index._ndarray_values,
result._ndarray_values,
check_same='same')
elif isinstance(index, IntervalIndex):
# checked in test_interval.py
pass
else:
result = index_type(index.values, copy=False, **init_kwargs)
tm.assert_numpy_array_equal(index.values, result.values,
check_same='same')
tm.assert_numpy_array_equal(index._ndarray_values,
result._ndarray_values,
check_same='same')
def test_memory_usage(self):
for name, index in compat.iteritems(self.indices):
result = index.memory_usage()
if len(index):
index.get_loc(index[0])
result2 = index.memory_usage()
result3 = index.memory_usage(deep=True)
# RangeIndex, IntervalIndex
# don't have engines
if not isinstance(index, (RangeIndex, IntervalIndex)):
assert result2 > result
if index.inferred_type == 'object':
assert result3 > result2
else:
# we report 0 for no-length
assert result == 0
def test_argsort(self):
for k, ind in self.indices.items():
# separately tested
if k in ['catIndex']:
continue
result = ind.argsort()
expected = np.array(ind).argsort()
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_numpy_argsort(self):
for k, ind in self.indices.items():
result = np.argsort(ind)
expected = ind.argsort()
tm.assert_numpy_array_equal(result, expected)
# these are the only two types that perform
# pandas compatibility input validation - the
# rest already perform separate (or no) such
# validation via their 'values' attribute as
# defined in pandas.core.indexes/base.py - they
# cannot be changed at the moment due to
# backwards compatibility concerns
if isinstance(type(ind), (CategoricalIndex, RangeIndex)):
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(ind, axis=1)
msg = "the 'kind' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(ind, kind='mergesort')
msg = "the 'order' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.argsort(ind, order=('a', 'b'))
def test_take(self):
indexer = [4, 3, 0, 2]
for k, ind in self.indices.items():
# separate
if k in ['boolIndex', 'tuples', 'empty']:
continue
result = ind.take(indexer)
expected = ind[indexer]
assert result.equals(expected)
if not isinstance(ind,
(DatetimeIndex, PeriodIndex, TimedeltaIndex)):
# GH 10791
with pytest.raises(AttributeError):
ind.freq
def test_take_invalid_kwargs(self):
idx = self.create_index()
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
idx.take(indices, foo=2)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, out=indices)
msg = "the 'mode' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, mode='clip')
def test_repeat(self):
rep = 2
i = self.create_index()
expected = pd.Index(i.values.repeat(rep), name=i.name)
tm.assert_index_equal(i.repeat(rep), expected)
i = self.create_index()
rep = np.arange(len(i))
expected = pd.Index(i.values.repeat(rep), name=i.name)
tm.assert_index_equal(i.repeat(rep), expected)
def test_numpy_repeat(self):
rep = 2
i = self.create_index()
expected = i.repeat(rep)
tm.assert_index_equal(np.repeat(i, rep), expected)
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.repeat(i, rep, axis=0)
@pytest.mark.parametrize('klass', [list, tuple, np.array, Series])
def test_where(self, klass):
i = self.create_index()
cond = [True] * len(i)
result = i.where(klass(cond))
expected = i
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(i[1:])
expected = pd.Index([i._na_value] + i[1:].tolist(), dtype=i.dtype)
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("case", [0.5, "xxx"])
@pytest.mark.parametrize("method", ["intersection", "union",
"difference", "symmetric_difference"])
def test_set_ops_error_cases(self, case, method):
for name, idx in compat.iteritems(self.indices):
# non-iterable input
msg = "Input must be Index or array-like"
with pytest.raises(TypeError, match=msg):
getattr(idx, method)(case)
def test_intersection_base(self):
for name, idx in compat.iteritems(self.indices):
first = idx[:5]
second = idx[:3]
intersect = first.intersection(second)
if isinstance(idx, CategoricalIndex):
pass
else:
assert tm.equalContents(intersect, second)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if isinstance(idx, PeriodIndex):
msg = "can only call with other PeriodIndex-ed objects"
with pytest.raises(ValueError, match=msg):
first.intersection(case)
elif isinstance(idx, CategoricalIndex):
pass
else:
result = first.intersection(case)
assert tm.equalContents(result, second)
if isinstance(idx, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.intersection([1, 2, 3])
def test_union_base(self):
for name, idx in compat.iteritems(self.indices):
first = idx[3:]
second = idx[:5]
everything = idx
union = first.union(second)
            assert tm.equalContents(union, everything)
import pandas as pd
import numpy as np
import copy
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV
from sklearn.feature_selection import mutual_info_classif, SelectKBest
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from datetime import datetime
from os import listdir
from os.path import isfile, join
import sys
import math
from sklearn.metrics import accuracy_score, f1_score
import re
from Extractor import get_word_length_matrix, get_word_length_matrix_with_interval, get_average_word_length, \
get_word_length_matrix_with_margin, get_char_count, get_digits, get_sum_digits, get_word_n_grams, \
get_char_affix_n_grams, get_char_word_n_grams, get_char_punct_n_grams, get_pos_tags_n_grams, get_bow_matrix, \
get_yules_k, get_special_char_matrix, get_function_words, get_pos_tags, get_sentence_end_start, \
get_flesch_reading_ease_vector, get_sentence_count, get_word_count
from sklearn.preprocessing import StandardScaler, Normalizer
# Chapter 7.1.1. method to trim features with a low total sum, e.g. n-grams with a sum lower than 5
def trim_df_sum_feature(par_df, par_n):
par_df = par_df.fillna(value=0)
columns = par_df.columns.to_numpy()
data_array = par_df.to_numpy(dtype=float)
sum_arr = data_array.sum(axis=0)
# reduce n if 0 features would be returned
while len(par_df.columns) - len(np.where(sum_arr < par_n)[0]) == 0:
par_n -= 1
positions = list(np.where(sum_arr < par_n))
columns = np.delete(columns, positions)
data_array = np.delete(data_array, positions, axis=1)
return pd.DataFrame(data=data_array, columns=columns)
# Chapter 7.1.1. method to trim features with low occurrence over all articles
def trim_df_by_occurrence(par_df, n):
df_masked = par_df.notnull().astype('int')
word_rate = df_masked.sum()
columns = []
filtered_bow = pd.DataFrame()
for i in range(0, len(word_rate)):
if word_rate[i] > n:
columns.append(word_rate.index[i])
for c in columns:
filtered_bow[c] = par_df[c]
return filtered_bow
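# --- Usage sketch (added; illustrative only) -----------------------------------
# Trims a toy n-gram count frame with the two helpers above; NaN marks
# "term absent in this article".
if __name__ == "__main__":
    toy_counts = pd.DataFrame({"der": [3, 2, 1],
                               "und": [1, np.nan, np.nan],
                               "xyz": [np.nan, np.nan, 1]})
    print(trim_df_by_occurrence(toy_counts, 1))  # keeps terms present in more than 1 article
    print(trim_df_sum_feature(toy_counts, 5))    # keeps terms whose total count is at least 5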
# Chapter 7.1.1. Process of filtering out features with low occurrence and saving the filtered features in a new file
def filter_low_occurrence():
df_bow = pd.read_csv("daten/raw/bow.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"BOW before: {len(df_bow.columns)}")
df_bow = trim_df_by_occurrence(df_bow, 1)
print(f"BOW after: {len(df_bow.columns)}")
df_bow.to_csv(f"daten/2_filter_low_occurrence/bow.csv", index=False)
for n in range(2, 7):
word_n_gram = pd.read_csv(f"daten/raw/word_{n}_gram.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"Word_{n}_gram before: {len(word_n_gram.columns)}")
word_n_gram = trim_df_by_occurrence(word_n_gram, 1)
print(f"Word_{n}_gram after: {len(word_n_gram.columns)}")
word_n_gram.to_csv(f"daten/2_filter_low_occurrence/word_{n}_gram.csv", index=False)
for n in range(2, 6):
char_affix_n_gram = pd.read_csv(f"daten/trimmed_occ_greater_one/char_affix_{n}_gram_1.csv", sep=',',
encoding="utf-8", nrows=2500)
print(f"char_affix_{n}_gram before: {len(char_affix_n_gram.columns)}")
char_affix_n_gram = trim_df_sum_feature(char_affix_n_gram, 5)
print(f"char_affix_{n}_gram after: {len(char_affix_n_gram.columns)}")
char_affix_n_gram.to_csv(f"daten/2_filter_low_occurrence/char_affix_{n}_gram.csv", index=False)
char_word_n_gram = pd.read_csv(f"daten/trimmed_occ_greater_one/char_word_{n}_gram_1.csv", sep=',',
encoding="utf-8", nrows=2500)
print(f"char_word_{n}_gram before: {len(char_word_n_gram.columns)}")
char_word_n_gram = trim_df_sum_feature(char_word_n_gram, 5)
print(f"char_word_{n}_gram after: {len(char_word_n_gram.columns)}")
char_word_n_gram.to_csv(f"daten/2_filter_low_occurrence/char_word_{n}_gram.csv", index=False)
char_punct_n_gram = pd.read_csv(f"daten/trimmed_occ_greater_one/char_punct_{n}_gram_1.csv", sep=',',
encoding="utf-8", nrows=2500)
print(f"char_punct_{n}_gram before: {len(char_punct_n_gram.columns)}")
char_punct_n_gram = trim_df_sum_feature(char_punct_n_gram, 5)
print(f"char_punct_{n}_gram after: {len(char_punct_n_gram.columns)}")
char_punct_n_gram.to_csv(f"daten/2_filter_low_occurrence/char_punct_{n}_gram.csv", index=False)
df_f_word = pd.read_csv("daten/raw/function_words.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"Function Words before: {len(df_f_word.columns)}")
df_f_word = trim_df_by_occurrence(df_f_word, 1)
print(f"Function Words after: {len(df_f_word.columns)}")
df_f_word.to_csv(f"daten/2_filter_low_occurrence/function_words.csv", index=False)
for n in range(2, 6):
pos_tags_n_gram = pd.read_csv(f"daten/raw/pos_tag_{n}_gram.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"pos_tag_{n}_gram before: {len(pos_tags_n_gram.columns)}")
pos_tags_n_gram = trim_df_by_occurrence(pos_tags_n_gram, 1)
print(f"pos_tag_{n}_gram after: {len(pos_tags_n_gram.columns)}")
pos_tags_n_gram.to_csv(f"daten/2_filter_low_occurrence/pos_tag_{n}_gram.csv", index=False)
# Chapter 7.1.2. method to filter words based on document frequency
def trim_df_by_doc_freq(par_df, par_doc_freq):
df_masked = par_df.notnull().astype('int')
word_rate = df_masked.sum() / len(par_df)
columns = []
filtered_bow = pd.DataFrame()
for i in range(0, len(word_rate)):
if word_rate[i] < par_doc_freq:
columns.append(word_rate.index[i])
for c in columns:
filtered_bow[c] = par_df[c]
return filtered_bow
# Chapter 7.1.2 Process of filtering out features with high document frequency and saving the filtered features in a new file
def filter_high_document_frequency():
# Filter words with high document frequency
df_bow = pd.read_csv("daten/2_filter_low_occurrence/bow.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"BOW before: {len(df_bow.columns)}")
df_bow = trim_df_by_doc_freq(df_bow, 0.5)
print(f"BOW after: {len(df_bow.columns)}")
df_bow.to_csv(f"daten/3_fiter_high_frequency/bow.csv", index=False)
df_f_word = pd.read_csv("daten/2_filter_low_occurrence/function_words.csv", sep=',', encoding="utf-8", nrows=2500)
print(f"Function Word before: {len(df_f_word.columns)}")
df_f_word = trim_df_by_doc_freq(df_f_word, 0.5)
print(f"Function Word after: {len(df_f_word.columns)}")
df_f_word.to_csv(f"daten/3_fiter_high_frequency/function_words.csv", index=False)
for n in range(2, 7):
word_n_gram = pd.read_csv(f"daten/2_filter_low_occurrence/word_{n}_gram.csv", sep=',', encoding="utf-8",
nrows=2500)
print(f"Word_{n}_gram before: {len(word_n_gram.columns)}")
word_n_gram = trim_df_by_doc_freq(word_n_gram, 0.5)
print(f"Word_{n}_gram after: {len(word_n_gram.columns)}")
word_n_gram.to_csv(f"daten/3_fiter_high_frequency/word_{n}_gram.csv", index=False)
# Chapter 7.1.4. get the relative frequency based on a length metric (char, word, sentence)
def get_rel_frequency(par_df_count, par_df_len_metric_vector):
df_rel_freq = pd.DataFrame(columns=par_df_count.columns)
for index, row in par_df_count.iterrows():
df_rel_freq = df_rel_freq.append(row.div(par_df_len_metric_vector[index]))
return df_rel_freq
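# --- Usage sketch (added; illustrative only) -----------------------------------
# Divides toy per-article feature counts by a per-article length metric, as done
# in individual_relative_frequency() below; note that get_rel_frequency relies on
# DataFrame.append and therefore on the pandas version used by the original code.
if __name__ == "__main__":
    toy_counts = pd.DataFrame({"der": [4, 2], "und": [2, 1]})
    toy_word_counts = pd.Series([200, 100])
    print(get_rel_frequency(toy_counts, toy_word_counts))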
# Chapter 7.1.4. Whole process of the chapter: compute the individual relative frequency of each feature, compare
# the correlation with the article length for the absolute and the relative variant, and save the features with the
# estimated relative frequency in a new file
def individual_relative_frequency():
df_len_metrics = pd.read_csv(f"daten/1_raw/length_metrics.csv", sep=',', encoding="utf-8", nrows=2500)
# different metrics for individual relative frequencies
metrics = ['word_count', 'char_count', 'sentence_count']
for m in metrics:
# The csv is placed in a folder based on the metric for the individual relative frequency
path = f'daten/4_relative_frequency/{m}'
files = [f for f in listdir(path) if isfile(join(path, f))]
for f in files:
x = pd.read_csv(f"daten/4_relative_frequency/{m}/{f}",
sep=',', encoding="utf-8", nrows=2500).fillna(value=0)
x_rel = get_rel_frequency(x, df_len_metrics[m])
# Save the CSV with relative frequency
x_rel.to_csv(
f"daten/4_relative_frequency/{f.split('.')[0]}"
f"_rel.csv", index=False)
# Correlation is always between the metrics and the word_count
x['word_count'] = df_len_metrics['word_count']
x_rel['word_count'] = df_len_metrics['word_count']
# only on the test data 60/40 split
x_train, x_test = train_test_split(x, test_size=0.4, random_state=42)
x_train_rel, x_test_rel = train_test_split(x_rel, test_size=0.4, random_state=42)
# Calculate the median correlation
print(f"{f}_abs: {x_train.corr(method='pearson', min_periods=1)['word_count'].iloc[:-1].mean()}")
print(f"{f}_rel: {x_train_rel.corr(method='pearson', min_periods=1)['word_count'].iloc[:-1].mean()}")
# Chapter 7.2.1 First step of the iterative filter: Rank the features
def sort_features_by_score(par_x, par_y, par_select_metric):
# Get a sorted ranking of all features by the selected metric
selector = SelectKBest(par_select_metric, k='all')
selector.fit(par_x, par_y)
# Sort the features by their score
return pd.DataFrame(dict(feature_names=par_x.columns, scores=selector.scores_)).sort_values('scores',
ascending=False)
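# --- Usage sketch (added; illustrative only) -----------------------------------
# Ranks two toy features by mutual information with the label; 'f_good' tracks
# the label perfectly and should end up at the top of the ranking.
if __name__ == "__main__":
    toy_x = pd.DataFrame({"f_good": [0, 0, 0, 1, 1, 1, 0, 1, 0, 1],
                          "f_noise": [5, 3, 4, 5, 3, 4, 5, 3, 4, 5]})
    toy_y = pd.Series([0, 0, 0, 1, 1, 1, 0, 1, 0, 1])
    print(sort_features_by_score(toy_x, toy_y, mutual_info_classif))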
# Chapter 7.2.1 method to get the best percentile for GNB
def get_best_percentile_gnb(par_x_train, par_y_train, par_iter, par_df_sorted_features, step):
result_list = []
gnb = GaussianNB()
best_perc_round = par_iter - 1 # If no other point is found, highest amount of features (-1 starts to count from 0)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if len(par_y_train.index) / len(np.unique(par_y_train.values).tolist()) < 10:
cv = int(len(par_y_train.index) / len(np.unique(par_y_train.values).tolist())) - 1
else:
cv = 10
for perc_features in np.arange(step, par_iter + 1, step):
start_time = datetime.now()
# 1%*i best features to keep and create new dataframe with those only
number_of_features = int(perc_features * (len(par_x_train.columns) / 100))
# minimum one feature
number_of_features = 1 if number_of_features < 1 else number_of_features
feature_list = par_df_sorted_features['feature_names'][: number_of_features].tolist()
x_new_training = copy.deepcopy(par_x_train[feature_list])
# GNB Training
result_list.append(
cross_val_score(gnb, x_new_training, par_y_train, cv=cv, n_jobs=-1, scoring='accuracy').mean())
# Compares the accuracy with the 5 following points => needs 6 points minimum
if len(result_list) > 5:
# list starts to count at 0, subtract one more from len
difference_list_p2p = [result_list[p + 1] - result_list[p] for p in
range(len(result_list) - 6, len(result_list) - 1)]
difference_list_1p = [result_list[p + 1] - result_list[len(result_list) - 6] for p in
range(len(result_list) - 6, len(result_list) - 1)]
            # Find the best percent if the 5 following points were lower than the point before or had a deviation <= 0.5%
            # or all points are more than 2% lower than the first point
if all(point_y <= 0 for point_y in difference_list_p2p) or \
all(-0.005 <= point_y <= 0.005 for point_y in difference_list_1p) or \
all(point_y < -0.02 for point_y in difference_list_1p):
                # the best perc corresponds to the point 6 positions from the end of the result list
best_perc_round = len(result_list) - 6
break
# Console Output
print(f"GNB Round {perc_features / step}: {datetime.now() - start_time}")
# Optimization of the best percent
# If any point with a lower percent is higher, it is the new optimum
if any(point_y > result_list[best_perc_round] for point_y in result_list[:len(result_list) - 5]):
best_perc_round = result_list.index(max(result_list[:len(result_list) - 5]))
    # Tradeoff of 1% accuracy for a smaller percentage of features
    # As long as there is a lower maximum within 1% accuracy that uses at least 2% fewer features, prefer it
better_perc_exists = True
best_accuracy_tradeoff = result_list[best_perc_round] - 0.01
    # If there are not even 2% of rounds left for the tradeoff, there is no better perc
if best_perc_round - int(2 / step) < 0:
better_perc_exists = False
while better_perc_exists:
earliest_pos = best_perc_round - int(2 / step)
        # if it is less than 0, clamp it to the start of the list
earliest_pos = 0 if earliest_pos < 0 else earliest_pos
if any(point_y > best_accuracy_tradeoff for point_y in result_list[:earliest_pos]):
best_perc_round = result_list.index(max(result_list[:earliest_pos]))
else:
better_perc_exists = False
# the best percent of the features is calculated by the percent start plus the rounds * step
best_perc = step + step * best_perc_round
print(best_perc)
return best_perc, best_perc_round, result_list
# Chapter 7.2.1 method to get the best percentile for SVC
def get_best_percentile_svc(par_x_train, par_y_train, par_iter, par_df_sorted_features, step):
result_list = []
# Parameter for SVC
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if len(par_y_train.index) / len(np.unique(par_y_train.values).tolist()) < 10:
cv = int(len(par_y_train.index) / len(np.unique(par_y_train.values).tolist())) - 1
else:
cv = 10
best_perc_round = par_iter - 1 # If no other point is found, highest amount of features (-1 starts to count from 0)
for perc_features in np.arange(step, par_iter + 1, step):
start_time = datetime.now()
# 1%*i best features to keep and create new dataframe with those only
number_of_features = int(perc_features * (len(par_x_train.columns) / 100))
# minimum one feature
number_of_features = 1 if number_of_features < 1 else number_of_features
feature_list = par_df_sorted_features['feature_names'][: number_of_features].tolist()
x_new_training = copy.deepcopy(par_x_train[feature_list])
# SVC Test
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=cv, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_new_training, par_y_train)
result_list.append(grid_results.best_score_)
# Compares the accuracy with the 5 following points => needs 6 points minimum
if len(result_list) > 5:
# list starts to count at 0, subtract one more from len
difference_list_p2p = [result_list[p + 1] - result_list[p] for p in
range(len(result_list) - 6, len(result_list) - 1)]
difference_list_1p = [result_list[p + 1] - result_list[len(result_list) - 6] for p in
range(len(result_list) - 6, len(result_list) - 1)]
            # Find the best percent if the 5 following points were lower than the point before or had a deviation <= 0.5%
            # or all points are more than 2% lower than the first point
if all(point_y <= 0 for point_y in difference_list_p2p) or \
all(-0.005 <= point_y <= 0.005 for point_y in difference_list_1p) or \
all(point_y < -0.02 for point_y in difference_list_1p):
                # the best perc corresponds to the point 6 positions from the end of the result list
best_perc_round = len(result_list) - 6
break
# Console Output
print(f"SVC Round {perc_features / step}: {datetime.now() - start_time}")
# Optimization of the best percent
# If any point with a lower percent is higher, it is the new optimum
if any(point_y > result_list[best_perc_round] for point_y in result_list[:len(result_list) - 5]):
best_perc_round = result_list.index(max(result_list[:len(result_list) - 5]))
    # Tradeoff of 1% accuracy for a smaller percentage of features
    # As long as there is a lower maximum within 1% accuracy that uses at least 2% fewer features, prefer it
better_perc_exists = True
best_accuracy_tradeoff = result_list[best_perc_round] - 0.01
    # If there are not even 2% of rounds left for the tradeoff, there is no better perc
if best_perc_round - int(2 / step) < 0:
better_perc_exists = False
while better_perc_exists:
earliest_pos = best_perc_round - int(2 / step)
        # if it is less than 0, clamp it to the start of the list
earliest_pos = 0 if earliest_pos < 0 else earliest_pos
if any(point_y > best_accuracy_tradeoff for point_y in result_list[:earliest_pos]):
best_perc_round = result_list.index(max(result_list[:earliest_pos]))
else:
better_perc_exists = False
# the best percent of the features is calculated by the percent start plus the rounds * step
best_perc = step + step * best_perc_round
print(best_perc)
return best_perc, best_perc_round, result_list
# Chapter 7.2.1 method to get the best percentile for KNN
def get_best_percentile_knn(par_x_train, par_y_train, par_iter, par_df_sorted_features, step):
result_list = []
best_perc_round = par_iter - 1 # If no other point is found, highest amount of features (-1 starts to count from 0)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if len(par_y_train.index) / len(np.unique(par_y_train.values).tolist()) < 10:
cv = int(len(par_y_train.index) / len(np.unique(par_y_train.values).tolist())) - 1
else:
cv = 10
for perc_features in np.arange(step, par_iter + 1, step):
start_time = datetime.now()
# 1%*i best features to keep and create new dataframe with those only
number_of_features = int(perc_features * (len(par_x_train.columns) / 100))
# minimum one feature
number_of_features = 1 if number_of_features < 1 else number_of_features
feature_list = par_df_sorted_features['feature_names'][: number_of_features].tolist()
x_new_training = copy.deepcopy(par_x_train[feature_list])
# Parameter for KNN
        # Some values from 3 up to the square root of the number of samples
neighbors = [i for i in range(3, int(math.sqrt(len(x_new_training.index))), 13)]
neighbors += [1, 3, 5, 11, 19, 36]
        if int(math.sqrt(len(x_new_training.index))) not in neighbors:
neighbors.append(int(math.sqrt(len(x_new_training.index))))
        # No more neighbors than samples-2
neighbors = [x for x in neighbors if x < len(x_new_training.index) - 2]
# remove duplicates
neighbors = list(set(neighbors))
param_grid_knn = {'n_neighbors': neighbors,
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'manhattan']}
# KNN Training
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid_knn, cv=cv, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_new_training, par_y_train)
result_list.append(grid_results.best_score_)
# Compares the accuracy with the 5 following points => needs 6 points minimum
if len(result_list) > 5:
# list starts to count at 0, subtract one more from len
difference_list_p2p = [result_list[p + 1] - result_list[p] for p in
range(len(result_list) - 6, len(result_list) - 1)]
difference_list_1p = [result_list[p + 1] - result_list[len(result_list) - 6] for p in
range(len(result_list) - 6, len(result_list) - 1)]
            # Find the best percent if the 5 following points were lower than the point before or had a deviation <= 0.5%
            # or all points are more than 2% lower than the first point
if all(point_y <= 0 for point_y in difference_list_p2p) or \
all(-0.005 <= point_y <= 0.005 for point_y in difference_list_1p) or \
all(point_y < -0.02 for point_y in difference_list_1p):
                # the best perc corresponds to the point 6 positions from the end of the result list
best_perc_round = len(result_list) - 6
break
# Console Output
print(f"KNN Round {perc_features / step}: {datetime.now() - start_time}")
# Optimization of the best percent
# If any point with a lower percent is higher, it is the new optimum
if any(point_y > result_list[best_perc_round] for point_y in result_list[:len(result_list) - 5]):
best_perc_round = result_list.index(max(result_list[:len(result_list) - 5]))
    # Tradeoff of 1% accuracy for a smaller percentage of features
    # As long as there is a lower maximum within 1% accuracy that uses at least 2% fewer features, prefer it
better_perc_exists = True
best_accuracy_tradeoff = result_list[best_perc_round] - 0.01
    # If there are not even 2% of rounds left for the tradeoff, there is no better perc
if best_perc_round - int(2 / step) < 0:
better_perc_exists = False
while better_perc_exists:
earliest_pos = best_perc_round - int(2 / step)
        # if it is less than 0, clamp it to the start of the list
earliest_pos = 0 if earliest_pos < 0 else earliest_pos
if any(point_y >= best_accuracy_tradeoff for point_y in result_list[:earliest_pos]):
best_perc_round = result_list.index(max(result_list[:earliest_pos]))
else:
better_perc_exists = False
# the best percent of the features is calculated by the percent start plus the rounds * step
best_perc = step + step * best_perc_round
print(best_perc)
return best_perc, best_perc_round, result_list
# Chapter 7.2.1 Filter the feature based on the estimated best percentile and save it into a new file
def print_filter_feature_percentile(par_path, par_df_sorted_features, par_percent, par_x, par_file_name):
    # select one percent of the features (len/100) multiplied by par_percent
    number_features = round(par_percent * (len(par_x.columns) / 100))
    # keep at least one feature if the computed number is less than 1
    number_features = 1 if number_features < 1 else number_features
feature_list = par_df_sorted_features['feature_names'][:number_features].tolist()
# print the name of the features in a file
original_stdout = sys.stdout
with open(f'{par_path}selected_features/{par_file_name}_filtered.txt', 'w', encoding="utf-8") as f:
sys.stdout = f
print(f"Features: {len(feature_list)}")
print(f"{feature_list}")
sys.stdout = original_stdout
# select the best features from the original dataset
par_x[feature_list].to_csv(f"{par_path}csv_after_filter/{par_file_name}_filtered.csv", index=False)
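# Worked example of the percentile-to-feature-count arithmetic above (numbers are made up):
# with 500 columns and par_percent = 12, round(12 * (500 / 100)) = 60 features are written out;
# with 40 columns and par_percent = 1, round(1 * 0.4) = 0, so the guard keeps a single feature.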
# Chapter 7.2.1 Complete process of the iterative Filter
def iterative_filter_process(par_path, par_df, par_num_texts, par_num_authors):
y = par_df['label_encoded']
path = f'{par_path}csv_before_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
# Filter the files for author and text numbers if 'all' is not set.
if par_num_authors != "all":
r = re.compile(f"a{par_num_authors}_")
files = list(filter(r.match, files))
if par_num_authors != "all":
r = re.compile(f".*t{par_num_texts}_")
files = list(filter(r.match, files))
step_perc = 1.0
for f in files:
filename = f.split(".")[0]
print(f)
x = pd.read_csv(f"{par_path}csv_before_filter/{f}", sep=',', encoding="utf-8", nrows=2500)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# Get sorted features
df_sorted_features = sort_features_by_score(x_train, y_train, mutual_info_classif)
# Calculate the best percentiles of the data for the different classifier
best_perc_gnb, best_round_gnb, result_list_gnb = get_best_percentile_gnb(x_train, y_train, 50,
df_sorted_features, step_perc)
best_perc_svc, best_round_svc, result_list_svc = get_best_percentile_svc(x_train, y_train, 50,
df_sorted_features, step_perc)
best_perc_knn, best_round_knn, result_list_knn = get_best_percentile_knn(x_train, y_train, 50,
df_sorted_features, step_perc)
        # select the best features from the original dataset
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_gnb, x, "gnb_" + filename)
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_svc, x, "svc_" + filename)
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_knn, x, "knn_" + filename)
# print best perc to a file
original_stdout = sys.stdout
        with open(f'{par_path}best_perc/{filename}.txt', 'w') as out_f:
            sys.stdout = out_f
print(f"best_perc_gnb: ({best_perc_gnb}|{result_list_gnb[best_round_gnb]})\n"
f"best_perc_svc: ({best_perc_svc}|{result_list_svc[best_round_svc]})\n"
f"best_perc_knn: ({best_perc_knn}|{result_list_knn[best_round_knn]})")
sys.stdout = original_stdout
# draw diagram
len_list = [len(result_list_gnb), len(result_list_svc), len(result_list_knn)]
plt.plot([i * step_perc for i in range(1, len(result_list_gnb) + 1)], result_list_gnb, 'r-', label="gnb")
plt.plot(best_perc_gnb, result_list_gnb[best_round_gnb], 'rx')
plt.plot([i * step_perc for i in range(1, len(result_list_svc) + 1)], result_list_svc, 'g-', label="svc")
plt.plot(best_perc_svc, result_list_svc[best_round_svc], 'gx')
plt.plot([i * step_perc for i in range(1, len(result_list_knn) + 1)], result_list_knn, 'b-', label="knn")
plt.plot(best_perc_knn, result_list_knn[best_round_knn], 'bx')
plt.axis([step_perc, (max(len_list) + 1) * step_perc, 0, 1])
plt.xlabel('Daten in %')
plt.ylabel('Genauigkeit')
plt.legend()
plt.savefig(f"{par_path}/diagrams/{filename}")
plt.cla()
# print accuracy to file
df_percent = pd.DataFrame(data=[i * step_perc for i in range(1, max(len_list) + 1)], columns=['percent'])
df_gnb = pd.DataFrame(data=result_list_gnb, columns=['gnb'])
df_svc = pd.DataFrame(data=result_list_svc, columns=['svc'])
df_knn = pd.DataFrame(data=result_list_knn, columns=['knn'])
df_accuracy = pd.concat([df_percent, df_gnb, df_svc, df_knn], axis=1)
df_accuracy = df_accuracy.fillna(value="")
df_accuracy.to_csv(f'{par_path}accuracy/{filename}_filtered.csv', index=False)
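# Hedged usage sketch: how the iterative filter process above might be invoked. The base path and
# CSV name are assumptions for illustration; the path must already contain the csv_before_filter/,
# csv_after_filter/, selected_features/, best_perc/, diagrams/ and accuracy/ folders used above.
def _example_run_iterative_filter():
    df = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
    iterative_filter_process("daten/5_iterative_filter/", df, par_num_texts=100, par_num_authors=25)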
# Chapter 8.1. and later, basically the process of the iterative filter only with the svc classifier
def iterative_filter_process_svm(par_path, par_df, par_num_texts, par_num_authors):
y = par_df['label_encoded']
path = f'{par_path}csv_before_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
# Filter the files for author and text numbers if 'all' is not set.
if par_num_authors != "all":
r = re.compile(f"a{par_num_authors}_")
files = list(filter(r.match, files))
if par_num_authors != "all":
r = re.compile(f".*t{par_num_texts}_")
files = list(filter(r.match, files))
step_perc = 1.0
for f in files:
filename = f.split(".")[0]
print(f)
x = pd.read_csv(f"{par_path}csv_before_filter/{f}", sep=',', encoding="utf-8", nrows=2500)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# Get sorted features
df_sorted_features = sort_features_by_score(x_train, y_train, mutual_info_classif)
# Calculate the best percentiles of the data for svc
best_perc_svc, best_round_svc, result_list_svc = get_best_percentile_svc(x_train, y_train, 50,
df_sorted_features, step_perc)
        # select the best features from the original dataset
print_filter_feature_percentile(par_path, df_sorted_features, best_perc_svc, x, filename)
# print best perc to a file
original_stdout = sys.stdout
with open(f'{par_path}best_perc/{filename}.txt', 'w') as out_f:
sys.stdout = out_f
print(f"best_perc_svc: ({best_perc_svc}|{result_list_svc[best_round_svc]})\n")
sys.stdout = original_stdout
# draw diagram
plt.plot([i * step_perc for i in range(1, len(result_list_svc) + 1)], result_list_svc, 'g-', label="svc")
plt.plot(best_perc_svc, result_list_svc[best_round_svc], 'gx')
plt.axis([step_perc, (len(result_list_svc) + 1) * step_perc, 0, 1])
plt.xlabel('Daten in %')
plt.ylabel('Genauigkeit')
plt.legend()
plt.savefig(f"{par_path}/diagrams/{filename}")
plt.cla()
# print accuracy to file
df_percent = pd.DataFrame(data=[i * step_perc for i in range(1, len(result_list_svc) + 1)], columns=['percent'])
df_svc = pd.DataFrame(data=result_list_svc, columns=['svc'])
df_accuracy = pd.concat([df_percent, df_svc], axis=1)
df_accuracy = df_accuracy.fillna(value="")
df_accuracy.to_csv(f'{par_path}accuracy/{filename}_filtered.csv', index=False)
# Chapter 7.2.1. Get the accuracy of the features before the iterative filter, results in table 18
def get_accuracy_before_iterative_filter():
gnb_result_list, svc_result_list, knn_result_list, gnb_time_list, svc_time_list, knn_time_list \
= [], [], [], [], [], []
y = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8", nrows=2500)['label_encoded']
path = f'daten/5_iterative_filter/csv_before_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
gnb = GaussianNB()
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
# Get the feature names for the table
feature_list = [re.search("(.+?(?=_rel))", f).group(1) for f in files]
for f in files:
print(f)
x = pd.read_csv(f"daten/5_iterative_filter/csv_before_filter/{f}", sep=',', encoding="utf-8", nrows=2500)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42)
# GNB fit
start_time = datetime.now()
gnb.fit(x_train, y_train)
# score on test data
score = accuracy_score(gnb.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"GNB test score for {f}: {score}")
print(f"GNB time for {f}: {time_taken}")
gnb_result_list.append(score)
gnb_time_list.append(time_taken)
# SVC parameter optimization
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=10, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_train, y_train)
svc = svm.SVC(C=grid_results.best_params_['C'], gamma=grid_results.best_params_['gamma'],
kernel=grid_results.best_params_['kernel'])
start_time = datetime.now()
# fit on train data
svc.fit(x_train, y_train)
# predict test data
score = accuracy_score(svc.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"SVC test score for {f}: {score}")
print(f"SVC time for {f}: {time_taken}")
svc_result_list.append(score)
svc_time_list.append(time_taken)
# Parameter for KNN
        # Some values from 3 up to the square root of the number of features
neighbors = [i for i in range(3, int(math.sqrt(len(x.columns))), 13)]
neighbors += [5, 11, 19, 36]
if int(math.sqrt(len(x.columns))) not in neighbors:
neighbors.append(int(math.sqrt(len(x.columns))))
param_grid_knn = {'n_neighbors': neighbors,
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'manhattan']}
# KNN parameter optimization
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid_knn, cv=10, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_train, y_train)
knn = KNeighborsClassifier(n_neighbors=grid_results.best_params_['n_neighbors'],
metric=grid_results.best_params_['metric'],
weights=grid_results.best_params_['weights'])
# fit on train data
knn.fit(x_train, y_train)
# KNN predict test data
start_time = datetime.now()
# predict test data
score = accuracy_score(knn.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"KNN test score for {f}: {score}")
print(f"KNN time for {f}: {time_taken}")
knn_result_list.append(score)
knn_time_list.append(time_taken)
# create dataframe with the scores and times
df_results = pd.DataFrame()
df_results['feature'] = feature_list
df_results['score_gnb'] = gnb_result_list
df_results['time_gnb'] = gnb_time_list
df_results['score_svc'] = svc_result_list
df_results['time_svc'] = svc_time_list
df_results['score_knn'] = knn_result_list
df_results['time_knn'] = knn_time_list
return df_results
# Chapter 7.2.1. Get the accuracy of the features after the iterative filter, results in table 18
def get_accuracy_after_iterative_filter():
df_gnb_result = pd.DataFrame(columns=['feature', 'score_gnb', 'time_gnb'])
df_svc_result = pd.DataFrame(columns=['feature', 'score_svc', 'time_svc'])
df_knn_result = pd.DataFrame(columns=['feature', 'score_knn', 'time_knn'])
y = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8", nrows=2500)['label_encoded']
# path = f'daten/5_iterative_filter/csv_after_filter'
path = f'daten/5_iterative_filter/5_iterative_filter/csv_after_filter'
files = [f for f in listdir(path) if isfile(join(path, f))]
gnb = GaussianNB()
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
for f in files:
print(f)
# Get the feature name for the table
feature = re.search(".{4}(.+?(?=_rel))", f).group(1)
# x = pd.read_csv(f"daten/5_iterative_filter/csv_after_filter/{f}", sep=',', encoding="utf-8", nrows=2500)
x = pd.read_csv(f"daten/5_iterative_filter/5_iterative_filter/csv_after_filter/{f}", sep=',', encoding="utf-8",
nrows=2500)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42)
# Select the classifier by the start of the filename
if f.split("_")[0] == "gnb":
# GNB fit
start_time = datetime.now()
gnb.fit(x_train, y_train)
# score on test data
score = accuracy_score(gnb.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"GNB test score for {f}: {score}")
print(f"GNB time for {f}: {time_taken}")
df_gnb_result = df_gnb_result.append(pd.DataFrame(data={'feature': [feature], 'score_gnb': [score],
'time_gnb': [time_taken]}), ignore_index=True)
elif f.split("_")[0] == "svc":
# SVC parameter optimization
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=10, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_train, y_train)
svc = svm.SVC(C=grid_results.best_params_['C'], gamma=grid_results.best_params_['gamma'],
kernel=grid_results.best_params_['kernel'])
start_time = datetime.now()
# fit on train data
svc.fit(x_train, y_train)
# predict test data
score = accuracy_score(svc.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"SVC test score for {f}: {score}")
print(f"SVC training time for {f}: {time_taken}")
df_svc_result = df_svc_result.append(pd.DataFrame(data={'feature': [feature], 'score_svc': [score],
'time_svc': [time_taken]}), ignore_index=True)
elif f.split("_")[0] == "knn":
# Parameter for KNN
            # Some values from 3 up to the square root of the number of features
neighbors = [i for i in range(3, int(math.sqrt(len(x.columns))), 13)]
neighbors += [5, 11, 19, 36]
if int(math.sqrt(len(x.columns))) not in neighbors:
neighbors.append(int(math.sqrt(len(x.columns))))
param_grid_knn = {'n_neighbors': neighbors,
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'manhattan']}
# KNN parameter optimization
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid_knn, cv=10, n_jobs=-1, scoring='accuracy')
grid_results = grid_search.fit(x_train, y_train)
knn = KNeighborsClassifier(n_neighbors=grid_results.best_params_['n_neighbors'],
metric=grid_results.best_params_['metric'],
weights=grid_results.best_params_['weights'])
            # fit on train data
            knn.fit(x_train, y_train)
            # KNN predict test data
            start_time = datetime.now()
# predict test data
score = accuracy_score(knn.predict(x_test), y_test)
time_taken = datetime.now() - start_time
print(f"KNN test score for {f}: {score}")
print(f"KNN test time for {f}: {time_taken}")
df_knn_result = df_knn_result.append(pd.DataFrame(data={'feature': [feature], 'score_knn': [score],
'time_knn': [time_taken]}), ignore_index=True)
df_merge = pd.merge(df_gnb_result, df_knn_result, on="feature", how='outer')
df_merge = pd.merge(df_merge, df_svc_result, on="feature", how='outer')
return df_merge
# Get n articles for a given number of authors. Required for setups with different numbers of authors and articles
def get_n_article_by_author(par_df, par_label_count, par_article_count):
df_articles = pd.DataFrame(columns=['label_encoded', 'text'])
# only keep entries of the "par_label_count" first labels
par_df = par_df.where(par_df['label_encoded'] <= par_label_count).dropna()
labels = np.unique(par_df['label_encoded'].values).tolist()
list_article_count = [par_article_count for i in labels]
for index, row in par_df.iterrows():
if list_article_count[labels.index(row['label_encoded'])] != 0:
d = {'label_encoded': [row['label_encoded']], 'text': [row['text']]}
df_articles = df_articles.append(pd.DataFrame.from_dict(d), ignore_index=True)
list_article_count[labels.index(row['label_encoded'])] -= 1
if sum(list_article_count) == 0:
break
return df_articles
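# Hedged sketch: a tiny synthetic check of the balancing helper above. The labels and texts are
# made up; it only illustrates that each of the first par_label_count labels keeps at most
# par_article_count rows and that higher labels are dropped.
def _example_balanced_selection():
    df = pd.DataFrame({'label_encoded': [1, 1, 1, 2, 2, 3], 'text': ['a', 'b', 'c', 'd', 'e', 'f']})
    df_balanced = get_n_article_by_author(df, par_label_count=2, par_article_count=2)
    # expected: two rows for label 1 and two rows for label 2, label 3 is filtered out
    return df_balanced['label_encoded'].value_counts()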
# Return indices for n articles for a given number of authors. Required for setups with different
# numbers of authors and articles
def get_n_article_index_by_author(par_df, par_label_count, par_article_count):
index_list = []
# only keep entries of the "par_label_count" first labels
par_df = par_df.where(par_df['label_encoded'] <= par_label_count).dropna()
labels = np.unique(par_df['label_encoded'].values).tolist()
list_article_count = [par_article_count for i in labels]
for index, row in par_df.iterrows():
if row['label_encoded'] in labels:
if list_article_count[labels.index(row['label_encoded'])] != 0:
index_list.append(index)
list_article_count[labels.index(row['label_encoded'])] -= 1
if sum(list_article_count) == 0:
break
return index_list
# Method to estimate the f1 score of the test data for GNB
def get_f1_for_gnb(par_x_train, par_x_test, par_y_train, par_y_test):
gnb = GaussianNB()
# GNB fit
gnb.fit(par_x_train, par_y_train)
# score on test data
gnb_score = f1_score(gnb.predict(par_x_test), par_y_test, average='micro')
return gnb_score
# Method to estimate the f1 score of the test data for SVC
def get_f1_for_svc(par_x_train, par_x_test, par_y_train, par_y_test, par_cv):
# Param Grid SVC
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
# SVC parameter optimization
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=par_cv, n_jobs=-1, scoring='f1_micro')
grid_results = grid_search.fit(par_x_train, par_y_train)
svc = svm.SVC(C=grid_results.best_params_['C'], gamma=grid_results.best_params_['gamma'],
kernel=grid_results.best_params_['kernel'])
# fit on train data
svc.fit(par_x_train, par_y_train)
# predict test data
svc_score = f1_score(svc.predict(par_x_test), par_y_test, average='micro')
return svc_score
# Method to estimate the f1 score of the test data for KNN
def get_f1_for_knn(par_x_train, par_x_test, par_y_train, par_y_test, par_cv):
    # define the param grid for knn; the number of neighbors has to be lower than the number of samples
neighbors = [1, 3, 5, 11, 19, 36, 50]
# number of neighbors must be less than number of samples
neighbors = [x for x in neighbors if x < len(par_x_test)]
param_grid_knn = {'n_neighbors': neighbors,
'weights': ['uniform', 'distance'],
'metric': ['euclidean', 'manhattan']}
# KNN parameter optimization
grid_search = GridSearchCV(KNeighborsClassifier(), param_grid_knn, cv=par_cv, n_jobs=-1, scoring='f1_micro')
grid_results = grid_search.fit(par_x_train, par_y_train)
knn = KNeighborsClassifier(n_neighbors=grid_results.best_params_['n_neighbors'],
metric=grid_results.best_params_['metric'],
weights=grid_results.best_params_['weights'])
# fit on train data
knn.fit(par_x_train, par_y_train)
# predict test data
knn_score = f1_score(knn.predict(par_x_test), par_y_test, average='micro')
return knn_score
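# Note (general scikit-learn fact, not specific to this project): for single-label multiclass data,
# micro-averaged F1 equals plain accuracy, so the f1_micro scores above are directly comparable to
# the accuracy_score values used elsewhere in this file. A minimal check:
def _example_micro_f1_equals_accuracy():
    y_true = [0, 1, 2, 2, 1]
    y_pred = [0, 2, 2, 2, 1]
    return abs(f1_score(y_pred, y_true, average='micro') - accuracy_score(y_pred, y_true)) < 1e-9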
# Method to estimate the accuracy of the test data for SVC
def get_accuracy_for_svc(par_x_train, par_x_test, par_y_train, par_y_test, par_cv):
# Param Grid SVC
param_grid_svc = {'C': (0.001, 0.01, 0.1, 1, 10),
'kernel': ('linear', 'poly', 'rbf'),
'gamma': ('scale', 'auto')}
# SVC parameter optimization
grid_search = GridSearchCV(svm.SVC(), param_grid_svc, cv=par_cv, n_jobs=-1, scoring='f1_micro')
grid_results = grid_search.fit(par_x_train, par_y_train)
svc = svm.SVC(C=grid_results.best_params_['C'], gamma=grid_results.best_params_['gamma'],
kernel=grid_results.best_params_['kernel'])
# fit on train data
svc.fit(par_x_train, par_y_train)
# predict test data
svc_score = accuracy_score(svc.predict(par_x_test), par_y_test)
return svc_score
# Chapter 7.3.1. comparison of the word length feature alternatives
def compare_word_length_features():
df_all_texts = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
# Different values for the texts by authors
list_author_texts = [10, 15, 25, 50, 75, 100]
# save the results in a dictionary
dic_f1_results = {'wl_matrix_gnb': [], 'wl_matrix_svc': [], 'wl_matrix_knn': [],
'wl_matrix_bins_20_30_gnb': [], 'wl_matrix_bins_20_30_svc': [], 'wl_matrix_bins_20_30_knn': [],
'wl_matrix_bins_10_20_gnb': [], 'wl_matrix_bins_10_20_svc': [], 'wl_matrix_bins_10_20_knn': [],
'wl_matrix_20_gnb': [], 'wl_matrix_20_svc': [], 'wl_matrix_20_knn': [],
'wl_avg_gnb': [], 'wl_avg_svc': [], 'wl_avg_knn': []}
for author_texts in list_author_texts:
# get article for n authors with number of author texts
df_article = get_n_article_by_author(df_all_texts, 25, author_texts)
# Get the word count for the individual relative frequency
word_count = get_word_count(df_article)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if author_texts * 0.4 < 10:
cv = int(author_texts * 0.4)
else:
cv = 10
# Get the scores for every feature
for feature in ["wl_matrix", "wl_matrix_bins_20_30", "wl_matrix_bins_10_20", "wl_avg", "wl_matrix_20"]:
# select the test/train data by the feature name and calculate the individual relative frequency
if feature == "wl_matrix":
x = get_rel_frequency(get_word_length_matrix(df_article).fillna(value=0), word_count['word_count'])
elif feature == "wl_matrix_bins_20_30":
x = get_rel_frequency(get_word_length_matrix_with_interval(df_article, 20, 30).fillna(value=0),
word_count['word_count'])
elif feature == "wl_matrix_bins_10_20":
x = get_rel_frequency(get_word_length_matrix_with_interval(df_article, 10, 20).fillna(value=0),
word_count['word_count'])
elif feature == "wl_avg":
x = get_average_word_length(df_article)
elif feature == "wl_matrix_20":
x = get_word_length_matrix_with_margin(df_article, 20)
            # Scale the data, otherwise high counts in wl_matrix can dominate and the hyperparameter
            # optimization for svc takes a long time because of small differences from the average
scaler = StandardScaler()
scaler.fit(x)
x = scaler.transform(x)
            y = df_article['label_encoded']
            x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# calculate scores
gnb_score = get_f1_for_gnb(x_train, x_test, y_train, y_test)
svc_score = get_f1_for_svc(x_train, x_test, y_train, y_test, cv)
knn_score = get_f1_for_knn(x_train, x_test, y_train, y_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {author_texts}: {gnb_score}")
print(f"SVC-Score for {feature} with {author_texts}: {svc_score}")
print(f"KNN-Score for {feature} with {author_texts}: {knn_score}")
df_results = pd.DataFrame(dic_f1_results)
df_results['number_article'] = list_author_texts
return df_results
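# Hedged usage sketch: each compare_* function in this file returns a result DataFrame with one row
# per text count; persisting it is left to the caller. The target path here is an assumption.
def _example_save_word_length_comparison():
    df_res = compare_word_length_features()
    df_res.to_csv("daten/6_feature_analysis/results/word_length_features.csv", index=False)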
# Chapter 7.3.2. comparison of the digit feature alternatives
def compare_digit_features():
df_all_texts = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
# Different values for the texts by authors
list_author_texts = [10, 15, 25, 50, 75, 100]
# save the results in a dictionary
dic_f1_results = {'digit_sum_gnb': [], 'digit_sum_svc': [], 'digit_sum_knn': [],
'digits_gnb': [], 'digits_svc': [], 'digits_knn': []}
for author_texts in list_author_texts:
# get article for n authors with number of author texts
df_article = get_n_article_by_author(df_all_texts, 25, author_texts)
        # Get the char count for the individual relative frequency
char_count = get_char_count(df_article)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if author_texts * 0.4 < 10:
cv = int(author_texts * 0.4)
else:
cv = 10
# Get the scores for every feature
for feature in ["digit_sum", "digits"]:
# select the test/train data by the feature name and calculate the individual relative frequency
if feature == "digit_sum":
x = get_rel_frequency(get_sum_digits(df_article).fillna(value=0), char_count['char_count'])
elif feature == "digits":
x = get_rel_frequency(get_digits(df_article).fillna(value=0), char_count['char_count'])
y = df_article['label_encoded']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# calculate scores
gnb_score = get_f1_for_gnb(x_train, x_test, y_train, y_test)
svc_score = get_f1_for_svc(x_train, x_test, y_train, y_test, cv)
knn_score = get_f1_for_knn(x_train, x_test, y_train, y_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {author_texts}: {gnb_score}")
print(f"SVC-Score for {feature} with {author_texts}: {svc_score}")
print(f"KNN-Score for {feature} with {author_texts}: {knn_score}")
df_results = pd.DataFrame(dic_f1_results)
df_results['number_article'] = list_author_texts
return df_results
# Chapter 7.3.3. comparison of the word ngrams with n 4-6
def compare_word_4_6_grams():
df_all_texts = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
# Different values for the texts by authors
list_author_texts = [10, 15, 25, 50, 75, 100]
# save the results in a dictionary
dic_f1_results = {'w4g_gnb': [], 'w4g_svc': [], 'w4g_knn': [],
'w5g_gnb': [], 'w5g_svc': [], 'w5g_knn': [],
'w6g_gnb': [], 'w6g_svc': [], 'w6g_knn': []}
# load the data
df_w4g = pd.read_csv("daten/6_feature_analysis/input_data/word_4_gram_rel.csv", sep=',', encoding="utf-8")
df_w5g = pd.read_csv("daten/6_feature_analysis/input_data/word_5_gram_rel.csv", sep=',', encoding="utf-8")
df_w6g = pd.read_csv("daten/6_feature_analysis/input_data/word_6_gram_rel.csv", sep=',', encoding="utf-8")
for author_texts in list_author_texts:
# indices for article for n authors with m texts
index_list = get_n_article_index_by_author(df_all_texts, 25, author_texts)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if author_texts * 0.4 < 10:
cv = int(author_texts * 0.4)
            # cap cv at 5, since cv values between 5 and 10 are unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
# Get the scores for every feature
for feature in ["w4g", "w5g", "w6g"]:
# select the indices from the article rows by the given indices
if feature == "w4g":
x = df_w4g.iloc[index_list]
elif feature == "w5g":
x = df_w5g.iloc[index_list]
elif feature == "w6g":
x = df_w6g.iloc[index_list]
# Delete features which only occur once
x = trim_df_by_occurrence(x, 1)
            # reset the indices to have an order from 0 to authors * texts per author - 1
x = x.reset_index(drop=True)
y = df_all_texts.iloc[index_list]['label_encoded']
y = y.reset_index(drop=True)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42, stratify=y)
# calculate scores
gnb_score = get_f1_for_gnb(x_train, x_test, y_train, y_test)
svc_score = get_f1_for_svc(x_train, x_test, y_train, y_test, cv)
knn_score = get_f1_for_knn(x_train, x_test, y_train, y_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {author_texts}: {gnb_score}")
print(f"SVC-Score for {feature} with {author_texts}: {svc_score}")
print(f"KNN-Score for {feature} with {author_texts}: {knn_score}")
df_results = pd.DataFrame(dic_f1_results)
df_results['number_article'] = list_author_texts
return df_results
# Chapter 7.3.3. comparison of the word ngrams with n 2-3
def compare_word_2_3_grams():
df_all_texts = pd.read_csv("musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
# Different values for the texts by authors
list_author_texts = [10, 15, 25, 50, 75, 100]
# save the results in a dictionary
dic_f1_results = {'w2g_gnb': [], 'w2g_svc': [], 'w2g_knn': [],
'w3g_gnb': [], 'w3g_svc': [], 'w3g_knn': []}
for author_texts in list_author_texts:
print(f"Texte pro Autor: {author_texts}")
# indices for article for n authors with m texts
        index_list = get_n_article_index_by_author(df_all_texts, 25, author_texts)
# define the splits for the hyperparameter tuning, cannot be greater than the number of members in each class
if author_texts * 0.4 < 10:
cv = int(author_texts * 0.4)
            # cap cv at 5, since cv values between 5 and 10 are unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
# select the indices from the article rows by the given indices
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
print(f"Artikel: {len(df_balanced.index)}")
# extract the features
df_w2g = get_word_n_grams(df_balanced, 2)
df_w3g = get_word_n_grams(df_balanced, 3)
# Preprocessing steps
word_count = get_word_count(df_balanced)
df_w2g = preprocessing_steps_pos_tag_n_grams(df_w2g, word_count['word_count'])
df_w3g = preprocessing_steps_pos_tag_n_grams(df_w3g, word_count['word_count'])
        # Scale the data, otherwise SVM needs a lot of time with very small values.
scaler = StandardScaler()
df_w2g[df_w2g.columns] = scaler.fit_transform(df_w2g[df_w2g.columns])
df_w3g[df_w3g.columns] = scaler.fit_transform(df_w3g[df_w3g.columns])
label = df_balanced['label_encoded']
# Train/Test 60/40 split
df_w2g_train, df_w2g_test, df_w3g_train, df_w3g_test, label_train, label_test = \
train_test_split(df_w2g, df_w3g, label, test_size=0.4, random_state=42, stratify=label)
# Get the scores for every feature
for feature in ["w2g", "w3g"]:
            # iterative filter
            # returns df_x_train_gnb, df_x_test_gnb, df_x_train_svc, df_x_test_svc, df_x_train_knn, df_x_test_knn
if feature == "w2g":
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test = \
feature_selection_iterative_filter(df_w2g_train, df_w2g_test, label_train, 1.0, mutual_info_classif)
elif feature == "w3g":
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test = \
feature_selection_iterative_filter(df_w3g_train, df_w3g_test, label_train, 1.0, mutual_info_classif)
            # Do not use the iterative filter for gnb because it gave bad results; use the unfiltered
            # feature matching the current loop feature instead
            x_gnb_train, x_gnb_test, label_train, label_test = \
                train_test_split(df_w2g if feature == "w2g" else df_w3g, label, test_size=0.4, random_state=42,
                                 stratify=label)
print(f"cv: {cv}")
print(f"Train Labels: {label_train.value_counts()}")
print(f"Test Labels: {label_test.value_counts()}")
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {author_texts}: {gnb_score}")
print(f"SVC-Score for {feature} with {author_texts}: {svc_score}")
print(f"KNN-Score for {feature} with {author_texts}: {knn_score}")
df_results = pd.DataFrame(dic_f1_results)
df_results['number_article'] = list_author_texts
return df_results
# Chapter 7.3.4. comparison of the different lengths of char ngrams
# Chapter 7.3.4. whole process of the comparison of the char-n-gram features
def compare_char_n_grams_process(par_base_path):
df_all_texts = pd.read_csv(f"musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
author_counts = [25]
text_counts = [10, 15, 25, 50, 75, 100]
for number_authors in author_counts:
for number_texts in text_counts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
extract_n_gram_features_to_csv(df_balanced, par_base_path, number_authors, number_texts)
iterative_filter_process(par_base_path, df_balanced, number_texts, number_authors)
compare_char_affix_ngrams(text_counts, author_counts, par_base_path, df_all_texts) \
.to_csv(f"{par_base_path}results/char_affix_n_grams.csv", index=False)
compare_char_word_ngrams(text_counts, author_counts, par_base_path, df_all_texts) \
.to_csv(f"{par_base_path}results/char_word_n_grams.csv", index=False)
compare_char_punct_ngrams(text_counts, author_counts, par_base_path, df_all_texts) \
.to_csv(f"{par_base_path}results/char_punct_n_grams.csv", index=False)
# Chapter 7.3.4. char-affix-ngrams
def compare_char_affix_ngrams(par_author_texts, par_authors, par_base_path, par_df):
# save the results in a dictionary
dic_f1_results = {'c_affix_2_gnb': [], 'c_affix_2_svc': [], 'c_affix_2_knn': [],
'c_affix_3_gnb': [], 'c_affix_3_svc': [], 'c_affix_3_knn': [],
'c_affix_4_gnb': [], 'c_affix_4_svc': [], 'c_affix_4_knn': [],
'c_affix_5_gnb': [], 'c_affix_5_svc': [], 'c_affix_5_knn': [],
'number_authors': [], 'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(par_df, number_authors, number_texts)
df_balanced = par_df.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
                # cap cv at 5, since cv values between 5 and 10 are unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Get the scores for every feature
# Append authors and texts
dic_f1_results['number_authors'].append(number_authors)
dic_f1_results['number_texts'].append(number_texts)
for feature in ["c_affix_2", "c_affix_3", "c_affix_4", "c_affix_5"]:
# read the data based on n, texts and authors
if feature == "c_affix_2":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_affix_2_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_affix_2_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_affix_2_gram_filtered.csv")
elif feature == "c_affix_3":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_affix_3_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_affix_3_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_affix_3_gram_filtered.csv")
elif feature == "c_affix_4":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_affix_4_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_affix_4_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_affix_4_gram_filtered.csv")
elif feature == "c_affix_5":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_affix_5_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_affix_5_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_affix_5_gram_filtered.csv")
                # Scale the data, otherwise SVM needs a lot of time with very small values.
scaler = StandardScaler()
df_gnb[df_gnb.columns] = scaler.fit_transform(df_gnb[df_gnb.columns])
df_svc[df_svc.columns] = scaler.fit_transform(df_svc[df_svc.columns])
df_knn[df_knn.columns] = scaler.fit_transform(df_knn[df_knn.columns])
# Train/Test 60/40 split
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test, label_train, label_test = \
train_test_split(df_gnb, df_svc, df_knn, label, test_size=0.4, random_state=42, stratify=label)
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {number_authors} authors and {number_texts} texts: {gnb_score}")
print(f"SVC-Score for {feature} with {number_authors} authors and {number_texts} texts: {svc_score}")
print(f"KNN-Score for {feature} with {number_authors} authors and {number_texts} texts: {knn_score}")
return pd.DataFrame(dic_f1_results)
# Chapter 7.3.4. char-word-ngrams
def compare_char_word_ngrams(par_author_texts, par_authors, par_base_path, par_df):
# save the results in a dictionary
dic_f1_results = {'c_word_2_gnb': [], 'c_word_2_svc': [], 'c_word_2_knn': [],
'c_word_3_gnb': [], 'c_word_3_svc': [], 'c_word_3_knn': [],
'c_word_4_gnb': [], 'c_word_4_svc': [], 'c_word_4_knn': [],
'c_word_5_gnb': [], 'c_word_5_svc': [], 'c_word_5_knn': [],
'number_authors': [], 'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(par_df, number_authors, number_texts)
df_balanced = par_df.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
                # cap cv at 5, since cv values between 5 and 10 are unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Get the scores for every feature
# Append authors and texts
dic_f1_results['number_authors'].append(number_authors)
dic_f1_results['number_texts'].append(number_texts)
for feature in ["c_word_2", "c_word_3", "c_word_4", "c_word_5"]:
# read the data based on n, texts and authors
if feature == "c_word_2":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_word_2_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_word_2_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_word_2_gram_filtered.csv")
elif feature == "c_word_3":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_word_3_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_word_3_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_word_3_gram_filtered.csv")
elif feature == "c_word_4":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_word_4_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_word_4_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_word_4_gram_filtered.csv")
elif feature == "c_word_5":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_word_5_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_word_5_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_word_5_gram_filtered.csv")
                # Scale the data, otherwise SVM needs a lot of time with very small values.
scaler = StandardScaler()
df_gnb[df_gnb.columns] = scaler.fit_transform(df_gnb[df_gnb.columns])
df_svc[df_svc.columns] = scaler.fit_transform(df_svc[df_svc.columns])
df_knn[df_knn.columns] = scaler.fit_transform(df_knn[df_knn.columns])
# Train/Test 60/40 split
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test, label_train, label_test = \
train_test_split(df_gnb, df_svc, df_knn, label, test_size=0.4, random_state=42, stratify=label)
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {number_authors} authors and {number_texts} texts: {gnb_score}")
print(f"SVC-Score for {feature} with {number_authors} authors and {number_texts} texts: {svc_score}")
print(f"KNN-Score for {feature} with {number_authors} authors and {number_texts} texts: {knn_score}")
return pd.DataFrame(dic_f1_results)
# Chapter 7.3.4. char-punct-ngrams
def compare_char_punct_ngrams(par_author_texts, par_authors, par_base_path, par_df):
# save the results in a dictionary
dic_f1_results = {'c_punct_2_gnb': [], 'c_punct_2_svc': [], 'c_punct_2_knn': [],
'c_punct_3_gnb': [], 'c_punct_3_svc': [], 'c_punct_3_knn': [],
'c_punct_4_gnb': [], 'c_punct_4_svc': [], 'c_punct_4_knn': [],
'c_punct_5_gnb': [], 'c_punct_5_svc': [], 'c_punct_5_knn': [],
'number_authors': [], 'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(par_df, number_authors, number_texts)
df_balanced = par_df.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
                # cap cv at 5, since cv values between 5 and 10 are unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Get the scores for every feature
# Append authors and texts
dic_f1_results['number_authors'].append(number_authors)
dic_f1_results['number_texts'].append(number_texts)
for feature in ["c_punct_2", "c_punct_3", "c_punct_4", "c_punct_5"]:
# read the data based on n, texts and authors
if feature == "c_punct_2":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_punct_2_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_punct_2_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_punct_2_gram_filtered.csv")
elif feature == "c_punct_3":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_punct_3_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_punct_3_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_punct_3_gram_filtered.csv")
elif feature == "c_punct_4":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_punct_4_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_punct_4_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_punct_4_gram_filtered.csv")
elif feature == "c_punct_5":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_char_punct_5_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_char_punct_5_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_char_punct_5_gram_filtered.csv")
                # Scale the data, otherwise SVM needs a lot of time with very small values.
scaler = StandardScaler()
df_gnb[df_gnb.columns] = scaler.fit_transform(df_gnb[df_gnb.columns])
df_svc[df_svc.columns] = scaler.fit_transform(df_svc[df_svc.columns])
df_knn[df_knn.columns] = scaler.fit_transform(df_knn[df_knn.columns])
# Train/Test 60/40 split
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test, label_train, label_test = \
train_test_split(df_gnb, df_svc, df_knn, label, test_size=0.4, random_state=42, stratify=label)
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {number_authors} authors and {number_texts} texts: {gnb_score}")
print(f"SVC-Score for {feature} with {number_authors} authors and {number_texts} texts: {svc_score}")
print(f"KNN-Score for {feature} with {number_authors} authors and {number_texts} texts: {knn_score}")
return pd.DataFrame(dic_f1_results)
# Chapter 7.3.4. Print the char-n-gram features in different files
def extract_n_gram_features_to_csv(par_df, par_base_path, par_number_authors, par_number_texts):
char_count = get_char_count(par_df)
# n from 2-5
for n in range(2, 6):
ca_ng = get_char_affix_n_grams(par_df, n)
preprocessing_steps_char_n_grams(ca_ng, char_count['char_count'])\
.to_csv(f"{par_base_path}csv_before_filter/a{par_number_authors}_t{par_number_texts}"
f"_char_affix_{n}_gram.csv", index=False)
cw_ng = get_char_word_n_grams(par_df, n)
preprocessing_steps_char_n_grams(cw_ng, char_count['char_count'])\
.to_csv(f"{par_base_path}csv_before_filter/a{par_number_authors}_t{par_number_texts}"
f"_char_word_{n}_gram.csv", index=False)
cp_ng = get_char_punct_n_grams(par_df, n)
preprocessing_steps_char_n_grams(cp_ng, char_count['char_count'])\
.to_csv(f"{par_base_path}csv_before_filter/a{par_number_authors}_t{par_number_texts}"
f"_char_punct_{n}_gram.csv", index=False)
print(f"Extraction Round {n - 1} done")
return True
# combined preprocessing steps of the pos-tag-n-grams
def preprocessing_steps_pos_tag_n_grams(par_feature, length_metric):
# Filter features which only occur once
par_feature = trim_df_by_occurrence(par_feature, 1)
# Individual relative frequency
par_feature = get_rel_frequency(par_feature.fillna(value=0), length_metric)
return par_feature
# combined preprocessing steps of the char-n-grams
def preprocessing_steps_char_n_grams(par_feature, length_metric):
# Filter features which only occur once
par_feature = trim_df_sum_feature(par_feature, 5)
# Individual relative frequency
par_feature = get_rel_frequency(par_feature.fillna(value=0), length_metric)
return par_feature
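# Hedged toy example for the char-n-gram preprocessing above: columns whose total count falls below the
# trim threshold (5 in trim_df_sum_feature above) are dropped (exact boundary handling follows
# trim_df_sum_feature, defined earlier in this file), and the remaining counts are turned into relative
# frequencies per text via get_rel_frequency. The toy counts and char counts are made up.
def _example_char_n_gram_preprocessing():
    counts = pd.DataFrame({'ab': [3, 4], 'bc': [0, 1], 'cd': [2, 2]})
    char_count = pd.Series([50, 60])
    return preprocessing_steps_char_n_grams(counts, char_count)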
# Feature selection with the iterative filter without printing the results in a file
def feature_selection_iterative_filter(par_x_train, par_x_test, par_y_train, par_step, par_classif):
df_sorted_features = sort_features_by_score(par_x_train, par_y_train, par_classif)
# Calculate the best percentiles of the data for the different classifier
best_perc_gnb = get_best_percentile_gnb(par_x_train, par_y_train, 50, df_sorted_features, par_step)[0]
best_perc_svc = get_best_percentile_svc(par_x_train, par_y_train, 50, df_sorted_features, par_step)[0]
best_perc_knn = get_best_percentile_knn(par_x_train, par_y_train, 50, df_sorted_features, par_step)[0]
# select the 1 percent of the features (len/100) multiplied by par_best_percent
# select the best features from the original dataset
df_x_train_gnb = par_x_train[
df_sorted_features['feature_names'][: round(best_perc_gnb * (len(par_x_train.columns) / 100))].tolist()]
df_x_test_gnb = par_x_test[
df_sorted_features['feature_names'][: round(best_perc_gnb * (len(par_x_train.columns) / 100))].tolist()]
df_x_train_svc = par_x_train[
df_sorted_features['feature_names'][: round(best_perc_svc * (len(par_x_train.columns) / 100))].tolist()]
df_x_test_svc = par_x_test[
df_sorted_features['feature_names'][: round(best_perc_svc * (len(par_x_train.columns) / 100))].tolist()]
df_x_train_knn = par_x_train[
df_sorted_features['feature_names'][: round(best_perc_knn * (len(par_x_train.columns) / 100))].tolist()]
df_x_test_knn = par_x_test[
df_sorted_features['feature_names'][: round(best_perc_knn * (len(par_x_train.columns) / 100))].tolist()]
return df_x_train_gnb, df_x_test_gnb, df_x_train_svc, df_x_test_svc, df_x_train_knn, df_x_test_knn
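# Hedged usage sketch for the in-memory iterative filter above: it returns one train/test pair per
# classifier. The arguments are placeholders for a 60/40 split prepared as in the compare_* functions.
def _example_in_memory_filter(x_train, x_test, y_train):
    gnb_tr, gnb_te, svc_tr, svc_te, knn_tr, knn_te = \
        feature_selection_iterative_filter(x_train, x_test, y_train, 1.0, mutual_info_classif)
    return gnb_tr.shape[1], svc_tr.shape[1], knn_tr.shape[1]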
# Chapter 7.3.5. function to compare the pos-tag-n-grams
def compare_pos_tag_ngrams(par_author_texts, par_authors, par_base_path, par_df):
# save the results in a dictionary
dic_f1_results = {'pos_2_gnb': [], 'pos_2_svc': [], 'pos_2_knn': [],
'pos_3_gnb': [], 'pos_3_svc': [], 'pos_3_knn': [],
'pos_4_gnb': [], 'pos_4_svc': [], 'pos_4_knn': [],
'pos_5_gnb': [], 'pos_5_svc': [], 'pos_5_knn': [],
'number_authors': [], 'number_texts': []}
for number_authors in par_authors:
for number_texts in par_author_texts:
index_list = get_n_article_index_by_author(par_df, number_authors, number_texts)
df_balanced = par_df.iloc[index_list].reset_index(drop=True)
# define the splits for the hyperparameter tuning, cannot be greater than number of members in each class
if number_texts * 0.4 < 10:
cv = int(number_texts * 0.4)
                # cap cv at 5, since cv values between 5 and 10 are unusual
cv = 5 if cv > 5 else cv
else:
cv = 10
label = df_balanced['label_encoded']
# Get the scores for every feature
# Append authors and texts
dic_f1_results['number_authors'].append(number_authors)
dic_f1_results['number_texts'].append(number_texts)
for feature in ["pos_2", "pos_3", "pos_4", "pos_5"]:
# read the data based on n, texts and authors
if feature == "pos_2":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_pos_tag_2_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_pos_tag_2_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_pos_tag_2_gram_filtered.csv")
elif feature == "pos_3":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_pos_tag_3_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_pos_tag_3_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_pos_tag_3_gram_filtered.csv")
elif feature == "pos_4":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_pos_tag_4_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_pos_tag_4_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_pos_tag_4_gram_filtered.csv")
elif feature == "pos_5":
df_gnb = pd.read_csv(
f"{par_base_path}csv_after_filter/gnb_a{number_authors}_t{number_texts}"
f"_pos_tag_5_gram_filtered.csv")
df_svc = pd.read_csv(
f"{par_base_path}csv_after_filter/svc_a{number_authors}_t{number_texts}"
f"_pos_tag_5_gram_filtered.csv")
df_knn = pd.read_csv(
f"{par_base_path}csv_after_filter/knn_a{number_authors}_t{number_texts}"
f"_pos_tag_5_gram_filtered.csv")
                # Scale the data, otherwise SVM needs a lot of time with very small values.
scaler = StandardScaler()
df_gnb[df_gnb.columns] = scaler.fit_transform(df_gnb[df_gnb.columns])
df_svc[df_svc.columns] = scaler.fit_transform(df_svc[df_svc.columns])
df_knn[df_knn.columns] = scaler.fit_transform(df_knn[df_knn.columns])
# Train/Test 60/40 split
x_gnb_train, x_gnb_test, x_svc_train, x_svc_test, x_knn_train, x_knn_test, label_train, label_test = \
train_test_split(df_gnb, df_svc, df_knn, label, test_size=0.4, random_state=42, stratify=label)
# calculate scores
gnb_score = get_f1_for_gnb(x_gnb_train, x_gnb_test, label_train, label_test)
svc_score = get_f1_for_svc(x_svc_train, x_svc_test, label_train, label_test, cv)
knn_score = get_f1_for_knn(x_knn_train, x_knn_test, label_train, label_test, cv)
# Append scores to dictionary
dic_f1_results[f'{feature}_gnb'].append(gnb_score)
dic_f1_results[f'{feature}_svc'].append(svc_score)
dic_f1_results[f'{feature}_knn'].append(knn_score)
# Console output
print(f"GNB-Score for {feature} with {number_authors} authors and {number_texts} texts: {gnb_score}")
print(f"SVC-Score for {feature} with {number_authors} authors and {number_texts} texts: {svc_score}")
print(f"KNN-Score for {feature} with {number_authors} authors and {number_texts} texts: {knn_score}")
return pd.DataFrame(dic_f1_results)
# Chapter 7.3.5. complete process of the pos-tag-n-grams comparison
def compare_pos_n_grams_process(par_base_path):
df_all_texts = pd.read_csv(f"musikreviews_balanced_authors.csv", sep=',', encoding="utf-8")
author_counts = [25]
text_counts = [10, 15, 25, 50, 75, 100]
for number_authors in author_counts:
for number_texts in text_counts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
word_count = get_word_count(df_balanced)
# extract features and preprocessing
for n in range(2, 6):
pt_ng = get_pos_tags_n_grams(df_balanced, n)
preprocessing_steps_pos_tag_n_grams(pt_ng, word_count['word_count']) \
.to_csv(f"{par_base_path}csv_before_filter/"
f"a{number_authors}_t{number_texts}_pos_tag_{n}_gram.csv", index=False)
iterative_filter_process(par_base_path, df_balanced, number_texts, number_authors)
            # 2-grams for svc are not filtered; overwrite the filtered file with the unfiltered version for svc
pt_ng = get_pos_tags_n_grams(df_balanced, 2)
preprocessing_steps_pos_tag_n_grams(pt_ng, word_count['word_count']) \
.to_csv(f"{par_base_path}csv_after_filter/"
f"svc_a{number_authors}_t{number_texts}_pos_tag_2_gram_filtered.csv", index=False)
compare_pos_tag_ngrams(text_counts, author_counts, par_base_path, df_all_texts) \
.to_csv(f"{par_base_path}results/pos_tag_n_grams.csv", index=False)
# Method to print all features for different counts of authors and texts
# Including all Preprocessing steps and filtering
def print_all_features_svc(par_base_path, par_article_path):
df_all_texts = pd.read_csv(f"{par_article_path}", sep=',', encoding="utf-8")
author_counts = [2, 3, 4, 5, 10, 15, 25]
text_counts = [5, 10, 15, 25, 50, 75, 100]
for number_authors in author_counts:
for number_texts in text_counts:
index_list = get_n_article_index_by_author(df_all_texts, number_authors, number_texts)
df_balanced = df_all_texts.iloc[index_list].reset_index(drop=True)
# get all the features
df_bow = get_bow_matrix(df_balanced)
df_word_2g = get_word_n_grams(df_balanced, 2)
df_word_count = get_word_count(df_balanced)
df_word_length = get_word_length_matrix_with_margin(df_balanced, 20)
df_yules_k = get_yules_k(df_balanced)
sc_label_vector = ["!", "„", "“", "§", "$", "%", "&", "/", "(", ")", "=", "?", "{", "}", "[", "]", "\\",
"@", "#",
"‚", "‘", "-", "_", "+", "*", ".", ",", ";"]
special_char_matrix = get_special_char_matrix(df_balanced, sc_label_vector)
sc_label_vector = ["s_char:" + sc for sc in sc_label_vector]
df_special_char = pd.DataFrame(data=special_char_matrix, columns=sc_label_vector)
df_char_affix_4g = get_char_affix_n_grams(df_balanced, 4)
df_char_word_3g = get_char_word_n_grams(df_balanced, 3)
df_char_punct_3g = get_char_punct_n_grams(df_balanced, 3)
df_digits = get_sum_digits(df_balanced)
df_fwords = get_function_words(df_balanced)
df_pos_tags = get_pos_tags(df_balanced)
df_pos_tag_2g = get_pos_tags_n_grams(df_balanced, 2)
df_start_pos, df_end_pos = get_sentence_end_start(df_balanced)
            df_start_end_pos = pd.concat([df_start_pos, df_end_pos], axis=1)
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
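# Illustrative note (added, hedged): as the assertions above encode, min()/max()
# on a DatetimeIndex skip NaT when real timestamps are present, and an empty or
# all-NaT index returns NaT. A minimal sketch:
#
# >>> pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03']).min()
# Timestamp('2011-01-01 00:00:00')
# >>> pd.DatetimeIndex([pd.NaT]).max()
# NaT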
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
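# Illustrative note (added, hedged): round(freq='H') snaps each timestamp to the
# nearest hour (per the expected values above, exact half-hours resolve to the
# even hour), while rounding to a non-fixed frequency such as 'M' raises
# ValueError. A minimal sketch without tz or freq attached:
#
# >>> pd.Timestamp('2016-01-01 00:30:00').round(freq='H')
# Timestamp('2016-01-01 00:00:00')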
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
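# Illustrative note (added, hedged): per the test above, adding two
# DatetimeIndexes is no longer treated as a union-like set operation; any
# combination of naive/aware operands simply raises:
#
# >>> dti = date_range('20130101', periods=3)
# >>> dti + dti          # raises TypeError (GH14164)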
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimedeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
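# Illustrative note (added, hedged): subtracting two aligned DatetimeIndexes
# yields a TimedeltaIndex with NaT propagating element-wise, while mixing
# tz-naive and tz-aware (or two differently localized) indexes raises TypeError,
# as asserted above. A minimal sketch:
#
# >>> dti = date_range('20130101', periods=3)
# >>> dti - dti
# TimedeltaIndex(['0 days', '0 days', '0 days'], dtype='timedelta64[ns]', freq=None)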
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
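# Illustrative note (added, hedged): as asserted above, value_counts() on a
# datetime index/Series drops NaT by default and only counts it with
# dropna=False, while unique() keeps NaT. Roughly:
#
# >>> idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00', pd.NaT])
# >>> Series(idx).value_counts()              # NaT dropped -> {09:00: 2}
# >>> Series(idx).value_counts(dropna=False)  # NaT counted -> {09:00: 2, NaT: 1}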
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                        '2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                        '2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                        '2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days'))
self.assertEqual(idx.max(), Timedelta('3 days'))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
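# Illustrative note (added, hedged): per the final assertions above, subtracting
# a Timestamp from a TimedeltaIndex raises TypeError, but addition anchors the
# deltas to the timestamp and returns a DatetimeIndex. A minimal sketch:
#
# >>> Timestamp('2011-01-01') + TimedeltaIndex(['1 day', '2 day'])
# DatetimeIndex(['2011-01-02', '2011-01-03'], dtype='datetime64[ns]', freq=None)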
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (maybe could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
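# Illustrative note (added, hedged): dividing a TimedeltaIndex by a
# timedelta-like offset returns the numeric ratio per element (NaT becomes NaN),
# whereas multiplying by an offset, or dividing by NaT, raises TypeError, as
# asserted above. Roughly (ignoring the 'foo' name used in the test):
#
# >>> TimedeltaIndex(['1 days', pd.NaT, '2 days']) / pd.offsets.Hour(2)
# Float64Index([12.0, nan, 24.0], dtype='float64')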
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
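# Illustrative note (added, hedged): the additions above show NaT propagating
# element-wise when a TimedeltaIndex is anchored to a Timestamp, while adding
# indexes of unequal length raises ValueError. A minimal sketch (names dropped):
#
# >>> TimedeltaIndex(['1 days', pd.NaT, '2 days']) + Timestamp('20130101')
# DatetimeIndex(['2013-01-02', 'NaT', '2013-01-03'], dtype='datetime64[ns]', freq=None)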
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
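# Illustrative note (added, hedged): as with the datetime and timedelta cases,
# min()/max() on a PeriodIndex ignore NaT when real periods exist and return
# NaT for empty or all-NaT input. A minimal sketch:
#
# >>> PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02'], freq='D').min()
# Period('2011-01-01', 'D')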
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_union(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05',
'2000-01-01 09:08'],
freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4,
expected4),
(rng5, other5, expected5), (rng6, other6,
expected6),
(rng7, other7, expected7)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
#%%
import os
import sys
try:
os.chdir('/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/')
print(os.getcwd())
except:
pass
# %%
import sys
sys.path.append('/Volumes/GoogleDrive/My Drive/python_code/maggot_models/')
sys.path.append('/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/')
from pymaid_creds import url, name, password, token
import pymaid
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from graspy.plot import gridplot, heatmap
from graspy.utils import binarize, pass_to_ranks
from src.data import load_metagraph
from src.visualization import CLASS_COLOR_DICT, adjplot
rm = pymaid.CatmaidInstance(url, token, name, password)
mg = load_metagraph("Gad", version="2020-06-10", path = '/Volumes/GoogleDrive/My Drive/python_code/maggot_models/data/processed/')
#mg = load_metagraph("G", version="2020-06-10", path = '/Volumes/GoogleDrive/My Drive/python_code/maggot_models/data/processed/')
mg.calculate_degrees(inplace=True)
adj = mg.adj # adjacency matrix from the "mg" object
#%%
# identify left and right side for each skid category
def split_hemilateral_to_indices(skids, left, right, mg):
intersect_left = np.intersect1d(skids, left)
indices_left = np.where([x in intersect_left for x in mg.meta.index])[0]
intersect_right = np.intersect1d(skids, right)
indices_right = np.where([x in intersect_right for x in mg.meta.index])[0]
return(indices_left, indices_right, intersect_left, intersect_right)
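# Return-shape note (illustrative values only): for skids = [10, 11, 12], left = [10],
# right = [11], the function returns the positional rows of 10 and 11 in mg.meta.index
# (indices_left, indices_right) together with the matching skid arrays
# (intersect_left = [10], intersect_right = [11]); skid 12 is dropped because it is
# annotated on neither side.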
ORN_skids = pymaid.get_skids_by_annotation('mw ORN')
dVNC_skids = pymaid.get_skids_by_annotation('mw dVNC')
left = pymaid.get_skids_by_annotation('mw left')
right = pymaid.get_skids_by_annotation('mw right')
output_skids = list(map(pymaid.get_skids_by_annotation, pymaid.get_annotated('mw brain outputs').name))
output_skids = [val for sublist in output_skids for val in sublist]
output_indices = np.where([x in output_skids for x in mg.meta.index])[0]
ORN_indices_left, ORN_indices_right, ORN_left, ORN_right = split_hemilateral_to_indices(ORN_skids, left, right, mg)
dVNC_indices_left, dVNC_indices_right, dVNC_left, dVNC_right = split_hemilateral_to_indices(dVNC_skids, left, right, mg)
#%%
from src.traverse import Cascade, to_transmission_matrix
from src.traverse import TraverseDispatcher
from src.visualization import matrixplot
import numpy.random as random
def static_random_subset_cascade(s_indices, r_indices, subset_number, n_init, cdispatch):
# combination of a randomized subset of indices and a static subset will be used for cascade start_nodes
# make sure that the input cdispatch is set with n_init = 1
hit_hist_list = []
random_indices_list = []
for i in range(0, n_init):
random.seed(i)
random_nums = random.choice(len(r_indices), subset_number, replace = False)
random_indices = r_indices[random_nums]
all_indices = np.concatenate([random_indices, s_indices])
subset_hit_hist = cdispatch.multistart(start_nodes = all_indices)
hit_hist_list.append(subset_hit_hist)
random_indices_list.append(all_indices)
return(sum(hit_hist_list), random_indices_list)
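# Note on the return value: the first element is the element-wise sum of the n_init
# hit histories, so downstream heatmaps show aggregate visit counts across the random
# draws rather than per-run probabilities.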
p = 0.05
max_hops = 10
n_init = 1
simultaneous = True
transition_probs = to_transmission_matrix(adj, p)
cdispatch = TraverseDispatcher(
Cascade,
transition_probs,
stop_nodes = output_indices,
max_hops=max_hops,
allow_loops = False,
n_init=n_init,
simultaneous=simultaneous,
)
input_indices_left = ORN_indices_left
its = 1000
num_full = int(np.round(len(ORN_indices_left)))
num_95L = int(np.round(len(ORN_indices_left)*9.5/10))
num_75L = int(np.round(len(ORN_indices_left)*3/4))
num_50L = int(np.round(len(ORN_indices_left)/2))
num_25L = int(np.round(len(ORN_indices_left)/4))
num_10L = int(np.round(len(ORN_indices_left)/10))
num_5L = int(np.round(len(ORN_indices_left)/20))
fullR_fullL_hist, fullR_fullL_indices = static_random_subset_cascade(ORN_indices_right, ORN_indices_left, num_full, its, cdispatch)
fullR_95L_hist, fullR_95L_indices = static_random_subset_cascade(ORN_indices_right, ORN_indices_left, num_95L, its, cdispatch)
fullR_75L_hist, fullR_75L_indices = static_random_subset_cascade(ORN_indices_right, ORN_indices_left, num_75L, its, cdispatch)
fullR_50L_hist, fullR_50L_indices = static_random_subset_cascade(ORN_indices_right, ORN_indices_left, num_50L, its, cdispatch)
fullR_25L_hist, fullR_25L_indices = static_random_subset_cascade(ORN_indices_right, ORN_indices_left, num_25L, its, cdispatch)
fullR_10L_hist, fullR_10L_indices = static_random_subset_cascade(ORN_indices_right, ORN_indices_left, num_10L, its, cdispatch)
fullR_5L_hist, fullR_5L_indices = static_random_subset_cascade(ORN_indices_right, ORN_indices_left, num_5L, its, cdispatch)
fullR_fullL_hist = pd.DataFrame(fullR_fullL_hist)
fullR_95L_hist = pd.DataFrame(fullR_95L_hist)
fullR_75L_hist = pd.DataFrame(fullR_75L_hist)
fullR_50L_hist = pd.DataFrame(fullR_50L_hist)
fullR_25L_hist = pd.DataFrame(fullR_25L_hist)
fullR_10L_hist = pd.DataFrame(fullR_10L_hist)
# opposite direction, stronger on left than right
fullL_50R_hist, fullL_50R_indices = static_random_subset_cascade(ORN_indices_left, ORN_indices_right, num_50L, its, cdispatch)
fullL_25R_hist, fullL_25R_indices = static_random_subset_cascade(ORN_indices_left, ORN_indices_right, num_25L, its, cdispatch)
fullL_10R_hist, fullL_10R_indices = static_random_subset_cascade(ORN_indices_left, ORN_indices_right, num_10L, its, cdispatch)
fullL_50R_hist = pd.DataFrame(fullL_50R_hist)
fullL_25R_hist = pd.DataFrame(fullL_25R_hist)
fullL_10R_hist = pd.DataFrame(fullL_10R_hist)
import os
os.system('say "code executed"')
# %%
# initial plots
fig, axs = plt.subplots(
3, 1, figsize=(6, 20)
)
fig.tight_layout(pad=2.5)
ax = axs[0]
sns.heatmap(fullR_fullL_hist[dVNC_indices_left], ax = ax)
ax = axs[1]
sns.heatmap(fullR_25L_hist[dVNC_indices_left], ax = ax)
ax = axs[2]
sns.heatmap((fullR_fullL_hist.iloc[dVNC_indices_left, :] - fullR_25L_hist.iloc[dVNC_indices_left, :]), ax = ax)
#fig.savefig('cascades/interhemisphere_plots/assymetric_input_test.pdf', format='pdf', bbox_inches='tight')
sns.clustermap((fullR_fullL_hist[dVNC_indices_left] - fullR_25L_hist[dVNC_indices_left]), col_cluster = False)
sns.clustermap((fullR_fullL_hist[dVNC_indices_right] - fullR_25L_hist[dVNC_indices_right]), col_cluster = False)
# seems that descending neurons are hit earlier in strong signal than in weak signal
# %%
import connectome_tools.process_matrix as promat
# comparing signal between descending partners; any asymmetry?
pairs = pd.read_csv('data/pairs-2020-05-08.csv', header = 0)
def compare_pairs(skids_left, skids_right, indices_left, indices_right, pairList, hit_hist_list, right = True):
skids = np.concatenate([skids_left, skids_right])
pairs = []
for i in skids:
if(int(i) in pairList["leftid"].values):
pair = promat.get_paired_skids(int(i), pairList)
left_index = indices_left[skids_left==pair[0]][0]
right_index = indices_right[skids_right==pair[1]][0]
left_hits = hit_hist_list.iloc[left_index, :].sum(axis=0)
right_hits = hit_hist_list.iloc[right_index, :].sum(axis=0)
if(right == True): # depending on one's perspective; makes right bias positive
diff = right_hits - left_hits
percent_diff = ((right_hits - left_hits)/((right_hits + left_hits)/2))*100
if(right == False): # depending on one's perspective; makes left bias positive
diff = left_hits - right_hits
percent_diff = ((left_hits - right_hits)/((right_hits + left_hits)/2))*100
pairs.append({'leftid': pair[0], 'rightid': pair[1],
'left_index': left_index, 'right_index': right_index,
'left_hits': left_hits, 'right_hits': right_hits,
'diff': diff, 'percent_diff': percent_diff})
return(pd.DataFrame(pairs))
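# Worked example of the percent_diff formula above (numbers are illustrative only):
# with right_hits = 150, left_hits = 100 and right = True,
# diff = 150 - 100 = 50 and
# percent_diff = ((150 - 100) / ((150 + 100) / 2)) * 100 = 40,
# i.e. a 40% right bias expressed relative to the pair's mean hit count.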
import os
import argparse
import sys
sys.path.append('../')
from load_paths import load_box_paths
from processing_helpers import *
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
mpl.rcParams['pdf.fonttype'] = 42
sns.set_style('whitegrid', {'axes.linewidth': 0.5})
def parse_args():
description = "Simulation run for modeling Covid-19"
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"-s",
"--stem",
type=str,
help="Name of simulation experiment"
)
parser.add_argument(
"-loc",
"--Location",
type=str,
help="Local or NUCLUSTER",
default="Local"
)
parser.add_argument(
"-perc",
"--overflow_threshold_percents",
type=float,
nargs='+',
help="Calculate probability for specified percent of capacity limit",
default=99
)
return parser.parse_args()
def get_latest_filedate(file_path=os.path.join(datapath, 'covid_IDPH', 'Corona virus reports',
'hospital_capacity_thresholds'), extraThresholds=False):
files = os.listdir(file_path)
files = sorted(files, key=len)
if extraThresholds == False:
files = [name for name in files if not 'extra_thresholds' in name]
if extraThresholds == True:
files = [name for name in files if 'extra_thresholds' in name]
filedates = [item.replace('capacity_weekday_average_', '') for item in files]
filedates = [item.replace('.csv', '') for item in filedates]
latest_filedate = max([int(x) for x in filedates])
fname = f'capacity_weekday_average_{latest_filedate}.csv'
if extraThresholds == True:
fname = f'capacity_weekday_average_{latest_filedate}__extra_thresholds.csv'
return fname
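# Illustrative behaviour of get_latest_filedate() (file names below are hypothetical):
# given ['capacity_weekday_average_20200915.csv', 'capacity_weekday_average_20201005.csv']
# in the thresholds folder, the date suffixes 20200915 and 20201005 are compared as
# integers and 'capacity_weekday_average_20201005.csv' is returned.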
def get_probs(ems_nr, channels=['total_hosp_census', 'crit_det', 'ventilators'], overflow_threshold_percents=[1, 0.8],
param=None, save_csv=False, plot=True):
"""Define columns and labels"""
if ems_nr == 0:
region_suffix = "_All"
region_label = 'Illinois'
else:
region_suffix = "_EMS-" + str(ems_nr)
region_label = region_suffix.replace('_EMS-', 'COVID-19 Region ')
column_list = ['scen_num', 'sample_num', 'time', 'startdate']
grp_channels = ['date']
if param is not None:
column_list = column_list + param
grp_channels = ['date'] + param
column_list_t = column_list
for channel in ['hosp_det', 'crit_det']:
column_list_t.append(channel + region_suffix)
""" Load dataframes"""
df = load_sim_data(exp_name, region_suffix=region_suffix, column_list=column_list, add_incidence=False)
df['total_hosp_census'] = df['hosp_det'] + df['crit_det']
df['ventilators'] = get_vents(df['crit_det'])
capacity_df = load_capacity(ems_nr)
len(df['scen_num'].unique())
df['N_scen_num'] = df.groupby(grp_channels)['scen_num'].transform('count')
df_all = pd.DataFrame()
for channel in channels:
if channel == "crit_det": channel_label = 'icu_availforcovid'
if channel == "hosp_det": channel_label = 'hb_availforcovid'
if channel == "total_hosp_census": channel_label = 'hb_availforcovid'
if channel == "ventilators": channel_label = 'vent_availforcovid'
for overflow_threshold_percent in overflow_threshold_percents:
thresh = capacity_df[f'{channel}'] * overflow_threshold_percent
mdf = df.copy()
mdf.loc[df[f'{channel}'] < thresh, 'above_yn'] = 0
mdf.loc[df[f'{channel}'] >= thresh, 'above_yn'] = 1
mdf = mdf.groupby(grp_channels)['above_yn'].agg(['sum', 'count', 'nunique']).rename_axis(None, axis=0)
mdf = mdf.reset_index()
mdf['prob'] = mdf['sum'] / mdf['count']
mdf = mdf.rename(columns={'sum': 'n_above', 'count': 'N_scen_num', 'index': 'date'})
mdf['overflow_threshold_percent'] = overflow_threshold_percent
mdf['capacity_channel'] = channel_label
mdf['availforcovid'] = capacity_df[f'{channel}']
mdf['region'] = ems_nr
del thresh
if df_all.empty:
df_all = mdf
else:
df_all = pd.concat([df_all, mdf])
del mdf
if plot:
plot_probs(df=df_all, region_label=region_label)
if save_csv:
filename = f'overflow_probabilities_over_time_region_{ems_nr}.csv'
df_all.to_csv(os.path.join(sim_output_path, filename), index=False, date_format='%Y-%m-%d')
return df_all
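# Minimal sketch (defined for illustration only, not called anywhere in this script)
# of the probability logic inside get_probs(): per date, the overflow probability is
# the share of scenarios whose census meets or exceeds the capacity threshold.
# The toy column names and numbers below are assumptions, not project data.
def _overflow_probability_sketch():
    toy = pd.DataFrame({
        'date': ['2020-11-01'] * 4 + ['2020-11-02'] * 4,
        'scen_num': [1, 2, 3, 4, 1, 2, 3, 4],
        'crit_det': [90, 110, 120, 80, 130, 140, 95, 150],
    })
    threshold = 100  # hypothetical capacity limit
    toy['above_yn'] = (toy['crit_det'] >= threshold).astype(int)
    out = toy.groupby('date')['above_yn'].agg(['sum', 'count'])
    out['prob'] = out['sum'] / out['count']  # 0.5 on day 1, 0.75 on day 2
    return out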
def plot_probs(df, region_label):
fig = plt.figure(figsize=(12, 4))
fig.suptitle(region_label, y=0.97, fontsize=14)
fig.subplots_adjust(right=0.98, wspace=0.2, left=0.05, hspace=0.4, top=0.84, bottom=0.13)
palette = sns.color_palette('Set1', 12)
axes = [fig.add_subplot(1, 3, x + 1) for x in range(3)]
linestyles = ['solid', 'dashed']
for c, channel in enumerate(df.capacity_channel.unique()):
mdf = df[df['capacity_channel'] == channel]
ax = axes[c]
ax.set_ylim(0, 1)
ax.set_title(channel)
ax.set_ylabel(f'Probability of overflow')
for e, t in enumerate(list(df.overflow_threshold_percent.unique())):
line_label = f'{channel} ({t * 100})%'
adf = mdf[mdf['overflow_threshold_percent'] == t]
ax.plot(adf['date'], adf['prob'], linestyle=linestyles[e], color=palette[c], label=line_label)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d\n%b\n%Y'))
axes[-1].legend()
plotname = f'overflow_probabilities_{region_label}'
plt.savefig(os.path.join(plot_path, f'{plotname}.png'))
plt.savefig(os.path.join(plot_path, 'pdf', f'{plotname}.pdf'))
def write_probs_to_template(df, plot=True):
fname_capacity = get_latest_filedate()
civis_template = pd.read_csv(
os.path.join(datapath, 'covid_IDPH', 'Corona virus reports', 'hospital_capacity_thresholds', fname_capacity))
civis_template = civis_template.drop_duplicates()
civis_template['date_window_upper_bound'] = pd.to_datetime(civis_template['date_window_upper_bound'])
civis_template_all = pd.DataFrame()
for index, row in civis_template.iterrows():
upper_limit = row['date_window_upper_bound']
lower_limit = upper_limit - pd.Timedelta(7, 'days')
df_sub = df[df['date'].between(lower_limit, upper_limit)]
df_sub = df_sub[df_sub['region'] == int(row['geography_modeled'].replace("covidregion_", ""))]
df_sub = df_sub[df_sub['capacity_channel'] == row['resource_type']]
df_sub = df_sub[df_sub['overflow_threshold_percent'] == row['overflow_threshold_percent']]
"""Take maximum of previous 7 days"""
civis_template.loc[index, 'percent_of_simulations_that_exceed'] = df_sub['prob'].max()
if civis_template_all.empty:
civis_template_all = civis_template
else:
civis_template_all = pd.concat([civis_template_all, civis_template])
"""Replace NAs with zero """
civis_template_all['percent_of_simulations_that_exceed'] = civis_template_all[
'percent_of_simulations_that_exceed'].fillna(0)
"""Scenario name of simulation - here hardcoded to baseline!!"""
civis_template_all['scenario_name'] = 'baseline'
file_str = 'nu_hospitaloverflow_' + str(exp_name[:8]) + '.csv'
civis_template_all.to_csv(os.path.join(sim_output_path, file_str), index=False)
if plot:
plot_probs_from_template(df=civis_template_all)
def plot_probs_from_template(df=None, show_75=True):
if df is None:
file_str = 'nu_hospitaloverflow_' + str(exp_name[:8]) + '.csv'
df = pd.read_csv(os.path.join(sim_output_path, file_str))
regionlist = df['geography_modeled'].unique()
# ensure datetime dtype in case the template was re-read from CSV above
df['date_window_upper_bound'] = pd.to_datetime(df['date_window_upper_bound'])
df['date_md'] = df['date_window_upper_bound'].dt.strftime('%m-%d\n%Y')
df['region'] = df['geography_modeled'].str.replace('covidregion_', '')
fig = plt.figure(figsize=(14, 12))
fig.suptitle('Overflow probability per week dates by COVID-19 Region', y=0.97, fontsize=14)
fig.subplots_adjust(right=0.98, wspace=0.4, left=0.05, hspace=0.4, top=0.90, bottom=0.07)
palette = sns.color_palette('Set1', len(df.resource_type.unique()))
axes = [fig.add_subplot(4, 3, x + 1) for x in range(len(regionlist))]
for c, reg_nr in enumerate(regionlist):
reg_label = reg_nr.replace('covidregion_', 'COVID-19 Region ')
mdf = df[df['geography_modeled'] == reg_nr]
ax = axes[c]
ax.set_ylim(0, 1)
ax.set_title(reg_label)
ax.set_ylabel(f'Probability of overflow')
for e, t in enumerate(list(df.resource_type.unique())):
adf = mdf[mdf['resource_type'] == t]
adf1 = adf[adf['overflow_threshold_percent'] == 1]
adf2 = adf[adf['overflow_threshold_percent'] != 1]
ax.plot(adf1['date_md'], adf1['percent_of_simulations_that_exceed'], color=palette[e], label=t)
if show_75:
ax.plot(adf2['date_md'], adf2['percent_of_simulations_that_exceed'], color=palette[e], label='',
alpha=0.5)
axes[-1].legend()
plt.savefig(os.path.join(plot_path, 'overflow_probabilities.png'))
plt.savefig(os.path.join(plot_path, 'pdf', 'overflow_probabilities.pdf'))
if __name__ == '__main__':
args = parse_args()
stem = args.stem
Location = args.Location
overflow_threshold_percents = args.overflow_threshold_percents
datapath, projectpath, wdir, exe_dir, git_dir = load_box_paths(Location=Location)
first_plot_day = pd.Timestamp.today() - pd.Timedelta(14, 'days')
last_plot_day = pd.Timestamp.today() + pd.Timedelta(90, 'days')
import pandas as pd
import numpy as np
import ast
import random
"""
script for preprocessing crawled news.
"""
def label_names(
original_csv_path='../data/tbrain_train_final_0610.csv',
crawled_csv_path='../bert/other_news.csv',
save_csv_path='./other_news-v2.csv'):
"""
Label names in crawled news based on names in tbrain data.
"""
tbrain_news_df = pd.read_csv(original_csv_path)
crawled_news_df = pd.read_csv(crawled_csv_path)
# get names in tbrain news
name_list = []
for index, row in tbrain_news_df.iterrows():
cur_name_list = ast.literal_eval(row["name"])
name_list.extend(cur_name_list)
name_list = list(set(name_list))
print(len(name_list))
# identify names in crawled news
labels = []
for index, row in crawled_news_df.iterrows():
cur_labels = []
cur_content = row['content']
if not isinstance(cur_content, float):
for name in name_list:
if name in cur_content:
cur_labels.append(name)
labels.append(cur_labels)
assert len(labels) == crawled_news_df.shape[0]
# write crawled news to file with new labels
crawled_news_df['name'] = labels
crawled_news_df.to_csv(save_csv_path, index=False)
return
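# Illustrative sketch of the labelling step above (names and content are made up):
# with name_list = ['王小明', '李大華'] and a crawled article whose content mentions
# only 王小明, that row's new 'name' column becomes ['王小明']; rows whose content is
# NaN (a float) are skipped and keep an empty label list.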
def filter_same_news(
crawled_csv_path='./other_news-v2.csv',
save_csv_path='./other_news-v3.csv'):
"""
filter duplicate news by urls
"""
crawled_news_df = pd.read_csv(crawled_csv_path)
crawled_news_df = crawled_news_df.reset_index()
print(crawled_news_df.shape)
ids_to_remove = []
for name in crawled_news_df.person.unique():
cur_urls = []
person_df = crawled_news_df.loc[crawled_news_df['person'] == name]
for index, row in person_df.iterrows():
cur_url = row['hyperlink']
if cur_url not in cur_urls:
cur_urls.append(cur_url)
else:
ids_to_remove.append(row['other_news_ID'])
# ids_to_remove.append(row.index)
crawled_news_df.drop(crawled_news_df[crawled_news_df['other_news_ID'].isin(ids_to_remove)].index, inplace=True)
crawled_news_df.drop('index', axis='columns', inplace=True)
crawled_news_df['other_news_ID'] = range(1, crawled_news_df.shape[0]+1)
print(crawled_news_df.shape)
crawled_news_df.to_csv(save_csv_path, index=False)
return
def replace_names_from_csv(
names_list_path='./fake_names_1000.txt',
news_csv_path='./other_news-v3.csv',
save_csv_path='./other_news-v4.csv',
name_format='comma-only',
num_len_2_limit=35,
generate_n_data_from_content=5):
"""
Replace names in contents with fake names for data augmentation.
"""
with open(names_list_path, 'r') as f:
fake_names = f.readline().strip('\n').split(', ')
# make up names with length 2
num_len_2_count = 0
for index, name in enumerate(fake_names):
if len(name) == 2:
num_len_2_count += 1
elif len(name) == 3:
fake_names[index] = name[:2]
num_len_2_count += 1
if num_len_2_count > num_len_2_limit:
break
random.shuffle(fake_names)
# data augmentation
if name_format == 'comma-only':
news_df = pd.read_csv(news_csv_path, keep_default_na=False)
else:
news_df = pd.read_csv(news_csv_path)
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
import tensorflow.keras.preprocessing.text as text
import tensorflow as tf
import tensorflow.keras.preprocessing.sequence as sequence
import pandas as pd
import numpy as np
from os.path import join
import json
TRAIN_SPLIT = 0.8
TOKENIZER_CONFIG_FILE = 'datasets/amazon/tokenizer_config.json'
def load_books(num_words, sequence_length, data_dir='datasets/amazon', seed=1337, verbose=0):
print('Loading books')
data = pd.read_pickle(join(data_dir, 'books.pkl'))
train_data = pd.DataFrame(columns=data.columns)
test_data = pd.DataFrame(columns=data.columns)
#############################################################################
# Copyright (C) 2020-2021 German Aerospace Center (DLR-SC)
#
# Authors: <NAME>, <NAME>
#
# Contact: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################################################################
import unittest
import pandas as pd
from pyfakefs import fake_filesystem_unittest
from memilio.epidata import modifyDataframeSeries as mDfS
class Test_modifyDataframeSeries(fake_filesystem_unittest.TestCase):
test_df1 = pd.DataFrame(
{
'Date':
['2021-01-06', '2021-01-06', '2021-01-06', '2021-01-07', '2021-01-07',
'2021-01-07', '2021-01-08', '2021-01-08', '2021-01-08', '2021-01-09',
'2021-01-09', '2021-01-09', '2021-01-10', '2021-01-10',
'2021-01-10'],
'test_col1': [12, 3, 6, 0, 3, 1, 4, 7, 11, 15, 19, 19, 27, 13, 5],
'test_col2': ['a', 'x', 't', 'a', 'b', 'a', 'x', 't', 'a', 'b', 'a', 'x', 't', 'a', 'b'],
'test_col3': [1, 0, 1, 9, 4, 3, 2, 1, 1, 1, 0, 6, 5, 3, 1],
'ID': [1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3]})
test_df2 = pd.DataFrame(
{
'Date':
['2021-01-06', '2021-01-06', '2021-01-06', '2021-01-07', '2021-01-07',
'2021-01-07', '2021-01-08', '2021-01-08', '2021-01-08', '2021-01-09',
'2021-01-09', '2021-01-09', '2021-01-13', '2021-01-13',
'2021-01-13'],
'test_col1': [12, 3, 6, 0, 3, 1, 4, 7, 11, 15, 19, 19, 27, 13, 5],
'test_col2': ['a', 'x', 't', 'a', 'b', 'a', 'x', 't', 'a', 'b', 'a', 'x', 't', 'a', 'b'],
'test_col3': [1, 0, 1, 9, 4, 3, 2, 1, 1, 1, 0, 6, 5, 3, 1],
'ID': [1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3]})
def setUp(self):
self.setUpPyfakefs()
def test_impute_and_reduce_df(self):
group_by_cols = {'ID': sorted(set(self.test_df1['ID'].unique()))}
mod_cols = ['test_col1', 'test_col3']
# test impute forward and fill dates with moving average = 3
df = mDfS.impute_and_reduce_df(
self.test_df1, group_by_cols, mod_cols, impute='forward',
moving_average=3, min_date='2021-01-05', max_date='2021-01-11',
start_w_firstval=False)
# test that values at new start days are zero since start_w_firstval=False
self.assertAlmostEqual(df[(df['Date'] == "2021-01-05") & (df['ID'] == 2.0)]['test_col1'].item(), 0)
self.assertAlmostEqual(df[(df['Date'] == "2021-01-05") & (df['ID'] == 2.0)]['test_col3'].item(), 0)
# test that the values at first original date are obtained by the value itself plus the value right of it divided by 3
# (6 + 1) / 3 = 2 + 1 / 3
self.assertAlmostEqual(df[(df['Date'] == "2021-01-06") & (df['ID'] == 3.0)]['test_col1'].item(), 2 + 1 / 3)
# (3 + 1) / 3 = 1 + 1 / 3
self.assertAlmostEqual(df[(df['Date'] == "2021-01-06") & (df['ID'] == 3.0)]['test_col3'].item(), 1 + 1 / 3)
# check that last entry of original frame is computed by the value left of it and twice its value since impute='forward'
# (15 + 27 + 27) / 3 = 23
self.assertAlmostEqual(df[(df['Date'] == "2021-01-10") & (df['ID'] == 1.0)]['test_col1'].item(), 23)
# (1 + 5 + 5) / 3 = 3 + 2 / 3
self.assertAlmostEqual(df[(df['Date'] == "2021-01-10") & (df['ID'] == 1.0)]['test_col3'].item(), 3 + 2/ 3)
# check that new date gets imputed the value the column had the day before because impute='forward'
self.assertAlmostEqual(df[(df['Date'] == "2021-01-11") & (df['ID'] == 3.0)]['test_col3'].item(), 1)
# test impute zeros with moving average = 3
df = mDfS.impute_and_reduce_df(
self.test_df1, group_by_cols, mod_cols, impute='zeros',
moving_average=3, min_date='2021-01-05', max_date='2021-01-11',
start_w_firstval=False)
# test that values at new start days are zero since start_w_firstval=False
self.assertAlmostEqual(df[(df['Date'] == "2021-01-05") & (df['ID'] == 2.0)]['test_col3'].item(), 0)
self.assertAlmostEqual(df[(df['Date'] == "2021-01-05") & (df['ID'] == 2.0)]['test_col1'].item(), 0)
# test that the values at first original date are obtained by the value itself plus the value right of it divided by 3
# (6 + 1) / 3 = 2 + 1 / 3
self.assertAlmostEqual(df[(df['Date'] == "2021-01-06") & (df['ID'] == 3.0)]['test_col1'].item(), 2 + 1 / 3)
# (3 + 1) / 3 = 1 + 1 / 3
self.assertAlmostEqual(df[(df['Date'] == "2021-01-06") & (df['ID'] == 3.0)]['test_col3'].item(), 1 + 1 / 3)
# check that last entry of original frame is computed by the value left of it and the value itself because impute = "zeros"
# (15 + 27) / 3 = 14
self.assertAlmostEqual(df[(df['Date'] == "2021-01-10") & (df['ID'] == 1.0)]['test_col1'].item(), 14)
# (1 + 5) / 3 = 2
self.assertAlmostEqual(df[(df['Date'] == "2021-01-10") & (df['ID'] == 1.0)]['test_col3'].item(), 2)
# check that new date gets imputed 0 because impute = "zeros"
self.assertAlmostEqual(df[(df['Date'] == "2021-01-11") & (df['ID'] == 3.0)]['test_col3'].item(), 0)
self.assertAlmostEqual(df[(df['Date'] == "2021-01-11") & (df['ID'] == 3.0)]['test_col1'].item(), 0)
# test fill missing dates moving average = 4
# if the moving average window is an even number, it should always be computed with one additional earlier date
df = mDfS.impute_and_reduce_df(
self.test_df2, group_by_cols, mod_cols, impute='forward',
moving_average=4, min_date='2021-01-06', max_date='2021-01-13',
start_w_firstval=False)
# test that the values at the first original date aren't changed since there is no value left of them
self.assertAlmostEqual(df[(df['Date'] == "2021-01-06") & (df['ID'] == 2.0)]['test_col3'].item(), 0)
self.assertAlmostEqual(df[(df['Date'] == "2021-01-06") & (df['ID'] == 2.0)]['test_col1'].item(), 3)
# test that a value is computed by two values left of it, the value itself and the value right of it divided by 4
# (6 + 1 + 11 + 19) / 4 = 9 + 1 / 4
self.assertAlmostEqual(df[(df['Date'] == "2021-01-08") & (df['ID'] == 3.0)]['test_col1'].item(), 9 + 1 / 4)
# (1 + 3 + 1 + 6) / 4 = 2 + 3 / 4
self.assertAlmostEqual(df[(df['Date'] == "2021-01-08") & (df['ID'] == 3.0)]['test_col3'].item(), 2 + 3 / 4)
self.assertAlmostEqual(df[(df['Date'] == "2021-01-13") & (df['ID'] == 3.0)]['test_col3'].item(), 1)
self.assertAlmostEqual(df[(df['Date'] == "2021-01-13") & (df['ID'] == 3.0)]['test_col1'].item(), 5)
# test that the first of three missing dates is computed by three times the value left of it plus the penultimate value divided by 4
# (19 + 19 + 19 + 11) / 4 = 17
self.assertAlmostEqual(df[(df['Date'] == "2021-01-10") & (df['ID'] == 3.0)]['test_col1'].item(), 17)
# (6 + 6 + 6 + 1) / 4 = 4 + 3 / 4
self.assertAlmostEqual(df[(df['Date'] == "2021-01-10") & (df['ID'] == 3.0)]['test_col3'].item(), 4 + 3/4)
# test mod_cols = ['test_col1']. test_col3 should not be modified
mod_cols = ['test_col1']
df = mDfS.impute_and_reduce_df(
self.test_df2, group_by_cols, mod_cols, impute='forward',
moving_average=4, min_date='2021-01-06', max_date='2021-01-13',
start_w_firstval=False)
# run the same checks as in the previous test with moving average = 4
# 'test_col1' should be the same as in the previous test
self.assertAlmostEqual(df[(df['Date'] == "2021-01-06") & (df['ID'] == 2.0)]['test_col1'].item(), 3)
self.assertAlmostEqual(df[(df['Date'] == "2021-01-08") & (df['ID'] == 3.0)]['test_col1'].item(), 9 + 1 / 4)
self.assertAlmostEqual(df[(df['Date'] == "2021-01-13") & (df['ID'] == 3.0)]['test_col1'].item(), 5)
self.assertAlmostEqual(df[(df['Date'] == "2021-01-10") & (df['ID'] == 3.0)]['test_col1'].item(), 17)
# 'test_col3' should not be changed
self.assertAlmostEqual(df[(df['Date'] == "2021-01-06") & (df['ID'] == 2.0)]['test_col3'].item(), 0)
self.assertAlmostEqual(df[(df['Date'] == "2021-01-08") & (df['ID'] == 3.0)]['test_col3'].item(), 1)
self.assertAlmostEqual(df[(df['Date'] == "2021-01-13") & (df['ID'] == 3.0)]['test_col3'].item(), 1)
self.assertAlmostEqual(df[(df['Date'] == "2021-01-10") & (df['ID'] == 3.0)]['test_col3'].item(), 6)
mod_cols = ['test_col1', 'test_col3']
# test start date higher than end date
# empty dataframe should be returned
df = mDfS.impute_and_reduce_df(
self.test_df1, group_by_cols, mod_cols, impute='forward',
moving_average=4, min_date='2021-01-13', max_date='2021-01-06',
start_w_firstval=False)
edf = pd.DataFrame()
import pandas as pd
import os
import logging
import yaml
import datetime
import json
import time
import sys
import holoviews as hv
from holoviews import opts
from holoviews.element import Div
from bokeh.models import HoverTool
hv.extension('bokeh')
allowed_ontologies = ["KO", "EC", "SSO", "RO", "META", "MSRXN",
"MSCPD", "MSCPX", "BIGG", "BIGGCPD", "GO", "TC", "RHEA"]
def df_to_ontology(params, pass_df=None, method="Import Annotations"):
'''
Takes the text file from staging, or the pandas df passed from the merge
app, and converts to an ontology dictionary suitable from the annotation
ontology API add_annotation_ontology_events() method
The merge app also calls this, and it can use the same params that the
import gives... all shared features are in both (except for the
annotations_file which the html_add_ontology_summary needs to fix)
The new bulk app also calls this, using pass_df and a "fake" params
'''
if isinstance(pass_df, pd.DataFrame):
annotations = pass_df
else:
if 'debug' in params and params['debug'] is True:
annotations_file_path = os.path.join(
'/kb/module/test/test_data', params['annotation_file'])
else:
annotations_file_path = os.path.join("/staging/", params['annotation_file'])
annotations = pd.read_csv(annotations_file_path,
sep='\t',
header=None,
names=['gene', 'term']
)
# remove duplicate rows, if any
annotations = annotations.drop_duplicates()
ontology = {
'event_id': params['description'],
'description': params['description'],
'ontology_id': params['ontology'],
'method': method, # from above
'method_version': get_app_version(),
"timestamp": datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S"),
'ontology_terms': {},
'gene_count': int(annotations['gene'].nunique()), # not used in the api
'term_count': int(annotations['term'].nunique()) # not used in the api
}
# add imported terms
for index, row in annotations.iterrows():
if pd.notnull(row['term']):
if row['gene'] in ontology['ontology_terms']:
ontology['ontology_terms'][row['gene']].append(
{'term': row['term']}
)
else:
ontology['ontology_terms'][row['gene']] = [
{'term': row['term']}
]
return [ontology]
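# Shape sketch for the mapping built above (gene and term IDs are made up):
# two input rows
#     geneA    K00001
#     geneA    K00002
# end up in the event as
#     ontology['ontology_terms'] == {'geneA': [{'term': 'K00001'}, {'term': 'K00002'}]}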
def bulk_df_to_ontology(params):
ontologies = []
if 'debug' in params and params['debug'] is True:
annotations_file_path = os.path.join(
'/kb/module/test/test_data', params['annotation_file'])
else:
annotations_file_path = os.path.join("/staging/", params['annotation_file'])
annotations = pd.read_csv(annotations_file_path,
sep='\t',
header=None,
names=['gene', 'term', 'ontology', 'description']
)
for description, description_df in annotations.groupby(annotations['description']):
for ontology, ontology_df in description_df.groupby(description_df['ontology']):
if ontology.upper() not in allowed_ontologies:
sys.exit(f"ERROR: {ontology} is not a valid Ontology string")
time.sleep(2) # This just "guarantees" the timestamps will all be different
ontology_df = ontology_df[ontology_df['term'].notna()]
ontology_df = ontology_df.drop_duplicates()
ontology = {
'event_id': description,
'description': description,
'ontology_id': ontology.upper(),
'method': "Import Bulk Annotations",
'method_version': get_app_version(),
"timestamp": datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S"),
'ontology_terms': {},
'gene_count': int(ontology_df['gene'].nunique()), # not used in the api
'term_count': int(ontology_df['term'].nunique()) # not used in the api
}
# add imported terms
for index, row in ontology_df.iterrows():
if pd.notnull(row['term']):
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime, timedelta
from binance.client import Client
from analytic_function import get_stock
import seaborn as sns
import analytic_function
import warnings
import os
import config
import glob
from strategy import profit_cumulative_calculation, advanced_analytics, strategy_v1_sl_tp_cap_cumul
warnings.filterwarnings('ignore')
# For a given strategy with SL, TP and EMA parameters, run a back-test for all pairs
# and save the annual performance of every pair in a heatmap
# ------ Functions ------ #
def listdir_nohidden(path):
return glob.glob(os.path.join(path, '*'))
if __name__ == "__main__":
# ------ Parameters ------ #
binance_api_source_key = config.BINANCE_API_SOURCE_KEY
binance_api_sources_secret = config.BINANCE_API_SOURCE_SECRET
client = Client(binance_api_source_key, binance_api_sources_secret)
list_KLINE = config.KLINE
path_data = config.PATH
path_result = config.PATH_RESULT
backtest_period = pd.Timedelta(days=config.BACKTEST_PERIOD_IN_DAYS)
amount = config.AMOUNT
fees = config.FEES
stopLoss = config.STOPLOSS
takeProfit = config.TAKEPROFIT
grid_temp = pd.DataFrame(columns=list_KLINE)
# loop to iterate the backtest over all candlesticks
for kline in list_KLINE:
path_foldername = os.path.join(path_data,kline)
print(path_foldername)
# path of the folder which contains the data for this kline
# e.g: /Users/sebastientetaud/Documents/Python/trading/data/15m/
files = listdir_nohidden(path_foldername)
files = sorted(files)
# loop to iterate the backtest over all coins
for file in files:
# path of a file
# e.g: /Users/sebastientetaud/Documents/Python/trading/data/15m/LINKUPUSDT.xlsx
filename = os.path.join(path_foldername,file)
print(filename)
# Extract symbol from the file
symbol = os.path.basename(file)
symbol = symbol.replace(".xlsx","")
print(symbol)
# Open files
df = pd.read_csv(filename)
df.index = df['Close_time']
data = pd.DataFrame(df["Close"],index=pd.DatetimeIndex(df.index))
if len(data):
# Period check
data_period = data.index[-1] - data.index[0]
if data_period == backtest_period:
# Backtest for a given strategy
buy_data, sell_data = strategy_v1_sl_tp_cap_cumul(data,fastperiod=12, slowperiod=26, stopLoss=stopLoss, takeProfit=takeProfit)
# Extract profit dataframe
profit = profit_cumulative_calculation(amount=amount, fees=fees , buy_price=buy_data, sell_price=sell_data, verbose = False)
# Perform advanced analytics
# Number of Trade, Ratio Win/Loss
# Save or Show the result
analytical_df = advanced_analytics(data= data,buy_data=buy_data,sell_data=sell_data,symbol=symbol, kline=kline, profit= profit, show_plot=False, save_plot=True, path = path_result)
win_rate = float(analytical_df.loc['Ratio Win/Loss [%]'].values)
performance = float(analytical_df.loc['Performance [%]'].values)
profit_factor = float(analytical_df.loc['Profit factor'].values)
# append annual performance in % into a dataframe
grid_temp.loc[symbol, kline] = performance
# Generation of a performance dataFrame: coins vs candlesticks
grid_performance = pd.DataFrame()
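# Illustrative continuation only; the original script is truncated at this point.
# One plausible way to build and save the coins-vs-candlesticks heatmap described in
# the header comment (the output filename below is an assumption, not original code):
# grid_performance = grid_temp.astype(float)
# plt.figure(figsize=(12, 8))
# sns.heatmap(grid_performance, annot=True, fmt=".1f", cmap="RdYlGn", center=0)
# plt.savefig(os.path.join(path_result, "performance_heatmap.png"))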
from datetime import (
datetime,
timedelta,
)
import re
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.errors import InvalidIndexError
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
isna,
notna,
)
import pandas._testing as tm
import pandas.core.common as com
# We pass through a TypeError raised by numpy
_slice_msg = "slice indices must be integers or None or have an __index__ method"
class TestDataFrameIndexing:
def test_getitem(self, float_frame):
# Slicing
sl = float_frame[:20]
assert len(sl.index) == 20
# Column access
for _, series in sl.items():
assert len(series.index) == 20
assert tm.equalContents(series.index, sl.index)
for key, _ in float_frame._series.items():
assert float_frame[key] is not None
assert "random" not in float_frame
with pytest.raises(KeyError, match="random"):
float_frame["random"]
def test_getitem2(self, float_frame):
df = float_frame.copy()
df["$10"] = np.random.randn(len(df))
ad = np.random.randn(len(df))
df["@awesome_domain"] = ad
with pytest.raises(KeyError, match=re.escape("'df[\"$10\"]'")):
df.__getitem__('df["$10"]')
res = df["@awesome_domain"]
tm.assert_numpy_array_equal(ad, res.values)
def test_setitem_list(self, float_frame):
float_frame["E"] = "foo"
data = float_frame[["A", "B"]]
float_frame[["B", "A"]] = data
tm.assert_series_equal(float_frame["B"], data["A"], check_names=False)
tm.assert_series_equal(float_frame["A"], data["B"], check_names=False)
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
data[["A"]] = float_frame[["A", "B"]]
newcolumndata = range(len(data.index) - 1)
msg = (
rf"Length of values \({len(newcolumndata)}\) "
rf"does not match length of index \({len(data)}\)"
)
with pytest.raises(ValueError, match=msg):
data["A"] = newcolumndata
def test_setitem_list2(self):
df = DataFrame(0, index=range(3), columns=["tt1", "tt2"], dtype=np.int_)
df.loc[1, ["tt1", "tt2"]] = [1, 2]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series([1, 2], df.columns, dtype=np.int_, name=1)
tm.assert_series_equal(result, expected)
df["tt1"] = df["tt2"] = "0"
df.loc[df.index[1], ["tt1", "tt2"]] = ["1", "2"]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series(["1", "2"], df.columns, name=1)
tm.assert_series_equal(result, expected)
def test_getitem_boolean(self, mixed_float_frame, mixed_int_frame, datetime_frame):
# boolean indexing
d = datetime_frame.index[10]
indexer = datetime_frame.index > d
indexer_obj = indexer.astype(object)
subindex = datetime_frame.index[indexer]
subframe = datetime_frame[indexer]
tm.assert_index_equal(subindex, subframe.index)
with pytest.raises(ValueError, match="Item wrong length"):
datetime_frame[indexer[:-1]]
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
with pytest.raises(ValueError, match="Boolean array expected"):
datetime_frame[datetime_frame]
# test that Series work
indexer_obj = Series(indexer_obj, datetime_frame.index)
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
# we are producing a warning that since the passed boolean
# key is not the same as the given index, we will reindex
# not sure this is really necessary
with tm.assert_produces_warning(UserWarning):
indexer_obj = indexer_obj.reindex(datetime_frame.index[::-1])
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test df[df > 0]
for df in [
datetime_frame,
mixed_float_frame,
mixed_int_frame,
]:
data = df._get_numeric_data()
bif = df[df > 0]
bifw = DataFrame(
{c: np.where(data[c] > 0, data[c], np.nan) for c in data.columns},
index=data.index,
columns=data.columns,
)
# add back other columns to compare
for c in df.columns:
if c not in bifw:
bifw[c] = df[c]
bifw = bifw.reindex(columns=df.columns)
tm.assert_frame_equal(bif, bifw, check_dtype=False)
for c in df.columns:
if bif[c].dtype != bifw[c].dtype:
assert bif[c].dtype == df[c].dtype
def test_getitem_boolean_casting(self, datetime_frame):
# don't upcast if we don't need to
df = datetime_frame.copy()
df["E"] = 1
df["E"] = df["E"].astype("int32")
df["E1"] = df["E"].copy()
df["F"] = 1
df["F"] = df["F"].astype("int64")
df["F1"] = df["F"].copy()
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")] * 2
+ [np.dtype("int64")] * 2,
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
# int block splitting
df.loc[df.index[1:3], ["E1", "F1"]] = 0
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")]
+ [np.dtype("float64")]
+ [np.dtype("int64")]
+ [np.dtype("float64")],
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
def _checkit(lst):
result = df[lst]
expected = df.loc[df.index[lst]]
tm.assert_frame_equal(result, expected)
_checkit([True, False, True])
_checkit([True, True, True])
_checkit([False, False, False])
def test_getitem_boolean_iadd(self):
arr = np.random.randn(5, 5)
df = DataFrame(arr.copy(), columns=["A", "B", "C", "D", "E"])
df[df < 0] += 1
arr[arr < 0] += 1
tm.assert_almost_equal(df.values, arr)
def test_boolean_index_empty_corner(self):
# #2096
blah = DataFrame(np.empty([0, 1]), columns=["A"], index=DatetimeIndex([]))
# both of these should succeed trivially
k = np.array([], bool)
blah[k]
blah[k] = 0
def test_getitem_ix_mixed_integer(self):
df = DataFrame(
np.random.randn(4, 3), index=[1, 10, "C", "E"], columns=[1, 2, 3]
)
result = df.iloc[:-1]
expected = df.loc[df.index[:-1]]
tm.assert_frame_equal(result, expected)
result = df.loc[[1, 10]]
expected = df.loc[Index([1, 10])]
tm.assert_frame_equal(result, expected)
def test_getitem_ix_mixed_integer2(self):
# 11320
df = DataFrame(
{
"rna": (1.5, 2.2, 3.2, 4.5),
-1000: [11, 21, 36, 40],
0: [10, 22, 43, 34],
1000: [0, 10, 20, 30],
},
columns=["rna", -1000, 0, 1000],
)
result = df[[1000]]
expected = df.iloc[:, [3]]
tm.assert_frame_equal(result, expected)
result = df[[-1000]]
expected = df.iloc[:, [1]]
tm.assert_frame_equal(result, expected)
def test_getattr(self, float_frame):
tm.assert_series_equal(float_frame.A, float_frame["A"])
msg = "'DataFrame' object has no attribute 'NONEXISTENT_NAME'"
with pytest.raises(AttributeError, match=msg):
float_frame.NONEXISTENT_NAME
def test_setattr_column(self):
df = DataFrame({"foobar": 1}, index=range(10))
df.foobar = 5
assert (df.foobar == 5).all()
def test_setitem(self, float_frame):
# not sure what else to do here
series = float_frame["A"][::2]
float_frame["col5"] = series
assert "col5" in float_frame
assert len(series) == 15
assert len(float_frame) == 30
exp = np.ravel(np.column_stack((series.values, [np.nan] * 15)))
exp = Series(exp, index=float_frame.index, name="col5")
tm.assert_series_equal(float_frame["col5"], exp)
series = float_frame["A"]
float_frame["col6"] = series
tm.assert_series_equal(series, float_frame["col6"], check_names=False)
# set ndarray
arr = np.random.randn(len(float_frame))
float_frame["col9"] = arr
assert (float_frame["col9"] == arr).all()
float_frame["col7"] = 5
assert (float_frame["col7"] == 5).all()
float_frame["col0"] = 3.14
assert (float_frame["col0"] == 3.14).all()
float_frame["col8"] = "foo"
assert (float_frame["col8"] == "foo").all()
# this is partially a view (e.g. some blocks are view)
# so raise/warn
smaller = float_frame[:2]
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
smaller["col10"] = ["1", "2"]
assert smaller["col10"].dtype == np.object_
assert (smaller["col10"] == ["1", "2"]).all()
def test_setitem2(self):
# dtype changing GH4204
df = DataFrame([[0, 0]])
df.iloc[0] = np.nan
expected = DataFrame([[np.nan, np.nan]])
tm.assert_frame_equal(df, expected)
df = DataFrame([[0, 0]])
df.loc[0] = np.nan
tm.assert_frame_equal(df, expected)
def test_setitem_boolean(self, float_frame):
df = float_frame.copy()
values = float_frame.values
df[df["A"] > 0] = 4
values[values[:, 0] > 0] = 4
tm.assert_almost_equal(df.values, values)
# test that column reindexing works
series = df["A"] == 4
series = series.reindex(df.index[::-1])
df[series] = 1
values[values[:, 0] == 4] = 1
tm.assert_almost_equal(df.values, values)
df[df > 0] = 5
values[values > 0] = 5
tm.assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
tm.assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
tm.assert_almost_equal(df.values, values)
# indexed with same shape but rows-reversed df
df[df[::-1] == 2] = 3
values[values == 2] = 3
tm.assert_almost_equal(df.values, values)
msg = "Must pass DataFrame or 2-d ndarray with boolean values only"
with pytest.raises(TypeError, match=msg):
df[df * 0] = 2
# index with DataFrame
mask = df > np.abs(df)
expected = df.copy()
df[df > np.abs(df)] = np.nan
expected.values[mask.values] = np.nan
tm.assert_frame_equal(df, expected)
# set from DataFrame
expected = df.copy()
df[df > np.abs(df)] = df * 2
np.putmask(expected.values, mask.values, df.values * 2)
tm.assert_frame_equal(df, expected)
def test_setitem_cast(self, float_frame):
float_frame["D"] = float_frame["D"].astype("i8")
assert float_frame["D"].dtype == np.int64
# #669, should not cast?
# this is now set to int64, which means a replacement of the column to
# the value dtype (and nothing to do with the existing dtype)
float_frame["B"] = 0
assert float_frame["B"].dtype == np.int64
# cast if pass array of course
float_frame["B"] = np.arange(len(float_frame))
assert issubclass(float_frame["B"].dtype.type, np.integer)
float_frame["foo"] = "bar"
float_frame["foo"] = 0
assert float_frame["foo"].dtype == np.int64
float_frame["foo"] = "bar"
float_frame["foo"] = 2.5
assert float_frame["foo"].dtype == np.float64
float_frame["something"] = 0
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2.5
assert float_frame["something"].dtype == np.float64
def test_setitem_corner(self, float_frame):
# corner case
df = DataFrame({"B": [1.0, 2.0, 3.0], "C": ["a", "b", "c"]}, index=np.arange(3))
del df["B"]
df["B"] = [1.0, 2.0, 3.0]
assert "B" in df
assert len(df.columns) == 2
df["A"] = "beginning"
df["E"] = "foo"
df["D"] = "bar"
df[datetime.now()] = "date"
df[datetime.now()] = 5.0
# what to do when empty frame with index
dm = DataFrame(index=float_frame.index)
dm["A"] = "foo"
dm["B"] = "bar"
assert len(dm.columns) == 2
assert dm.values.dtype == np.object_
# upcast
dm["C"] = 1
assert dm["C"].dtype == np.int64
dm["E"] = 1.0
assert dm["E"].dtype == np.float64
# set existing column
dm["A"] = "bar"
assert "bar" == dm["A"][0]
dm = DataFrame(index=np.arange(3))
dm["A"] = 1
dm["foo"] = "bar"
del dm["foo"]
dm["foo"] = "bar"
assert dm["foo"].dtype == np.object_
dm["coercible"] = ["1", "2", "3"]
assert dm["coercible"].dtype == np.object_
def test_setitem_corner2(self):
data = {
"title": ["foobar", "bar", "foobar"] + ["foobar"] * 17,
"cruft": np.random.random(20),
}
df = DataFrame(data)
ix = df[df["title"] == "bar"].index
df.loc[ix, ["title"]] = "foobar"
df.loc[ix, ["cruft"]] = 0
assert df.loc[1, "title"] == "foobar"
assert df.loc[1, "cruft"] == 0
def test_setitem_ambig(self):
# Difficulties with mixed-type data
from decimal import Decimal
# Created as float type
dm = DataFrame(index=range(3), columns=range(3))
coercable_series = Series([Decimal(1) for _ in range(3)], index=range(3))
uncoercable_series = Series(["foo", "bzr", "baz"], index=range(3))
dm[0] = np.ones(3)
assert len(dm.columns) == 3
dm[1] = coercable_series
assert len(dm.columns) == 3
dm[2] = uncoercable_series
assert len(dm.columns) == 3
assert dm[2].dtype == np.object_
def test_setitem_None(self, float_frame):
# GH #766
float_frame[None] = float_frame["A"]
tm.assert_series_equal(
float_frame.iloc[:, -1], float_frame["A"], check_names=False
)
tm.assert_series_equal(
float_frame.loc[:, None], float_frame["A"], check_names=False
)
| tm.assert_series_equal(float_frame[None], float_frame["A"], check_names=False) | pandas._testing.assert_series_equal |
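# A small, hedged illustration (toy data, standard pandas semantics) of the boolean-masking
# behaviour exercised by the tests above: df[df > 0] keeps entries where the mask is True and
# fills NaN elsewhere, so integer columns are upcast to float when a NaN is introduced.
import pandas as pd

demo = pd.DataFrame({"a": [1, -2, 3], "b": [-1.5, 2.5, -3.5]})
masked = demo[demo > 0]
print(masked)         # NaN where the original value was <= 0
print(masked.dtypes)  # "a" becomes float64 because a NaN was introduced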
# code that computes values such as emyield, marketcap, cacl, etc., and supplies a label and value to put back into the dataframe.
import pandas as pd
import numpy as np
import logging
import inspect
from scipy import stats
from dateutil.relativedelta import relativedelta
from datetime import datetime
from scipy import stats
import math
class quantvaluedata: #just contains functions, will NEVER actually get the data
def __init__(self,allitems=None):
if allitems is None:
self.allitems=[]
else:
self.allitems=allitems
return
def get_value(self,origdf,key,i=-1):
if key not in origdf.columns and key not in self.allitems and key not in ['timedepositsplaced','fedfundssold','interestbearingdepositsatotherbanks']:
logging.error(key+' not found in allitems')
#logging.error(self.allitems)
return None
df=origdf.copy()
df=df.sort_values('yearquarter')
if len(df)==0:
##logging.error("empty dataframe")
return None
if key not in df.columns:
#logging.error("column not found:"+key)
return None
        interested_quarter=df['yearquarter'].iloc[-1]+i+1  # because if we want the last quarter we need them equal
if not df['yearquarter'].isin([interested_quarter]).any(): #if the quarter we are interested in is not there
return None
s=df['yearquarter']==interested_quarter
df=df[s]
if len(df)>1:
logging.error(df)
            logging.error("too many rows in df")
exit()
pass
value=df[key].iloc[0]
if pd.isnull(value):
return None
return float(value)
def get_sum_quarters(self,df,key,seed,length):
values=[]
        # BIG BUG: this was originally -length-1, which was always truncating the array and producing nans.
periods=range(seed,seed-length,-1)
for p in periods:
values.append(self.get_value(df,key,p))
#logging.info('values:'+str(values))
if pd.isnull(values).any(): #return None if any of the values are None
return None
else:
return float(np.sum(values))
def get_market_cap(self,statements_df,prices_df,seed=-1):
total_shares=self.get_value(statements_df,'weightedavedilutedsharesos',seed)
if pd.isnull(total_shares):
return None
end_date=statements_df['end_date'].iloc[seed]
if seed==-1: #get the latest price but see if there was a split between the end date and now
s=pd.to_datetime(prices_df['date'])>pd.to_datetime(end_date)
tempfd=prices_df[s]
splits=tempfd['split_ratio'].unique()
adj=pd.Series(splits).product() #multiply all the splits together to get the total adjustment factor from the last total_shares
total_shares=total_shares*adj
last_price=prices_df.sort_values('date').iloc[-1]['close']
price=float(last_price)
market_cap=price*float(total_shares)
return market_cap
else:
marketcap=self.get_value(statements_df,'marketcap',seed)
if pd.isnull(marketcap):
return None
else:
return marketcap
def get_netdebt(self,statements_df,seed=-1):
shorttermdebt=self.get_value(statements_df,'shorttermdebt',seed)
longtermdebt=self.get_value(statements_df,'longtermdebt',seed)
capitalleaseobligations=self.get_value(statements_df,'capitalleaseobligations',seed)
cashandequivalents=self.get_value(statements_df,'cashandequivalents',seed)
restrictedcash=self.get_value(statements_df,'restrictedcash',seed)
fedfundssold=self.get_value(statements_df,'fedfundssold',seed)
interestbearingdepositsatotherbanks=self.get_value(statements_df,'interestbearingdepositsatotherbanks',seed)
timedepositsplaced=self.get_value(statements_df,'timedepositsplaced',seed)
s=pd.Series([shorttermdebt,longtermdebt,capitalleaseobligations,cashandequivalents,restrictedcash,fedfundssold,interestbearingdepositsatotherbanks,timedepositsplaced]).astype('float')
if pd.isnull(s).all(): #return None if everything is null
return None
m=pd.Series([1,1,1,-1,-1,-1,-1])
netdebt=s.multiply(m).sum()
return float(netdebt)
def get_enterprise_value(self,statements_df,prices_df,seed=-1):
#calculation taken from https://intrinio.com/data-tag/enterprisevalue
marketcap=self.get_market_cap(statements_df,prices_df,seed)
netdebt=self.get_netdebt(statements_df,seed)
totalpreferredequity=self.get_value(statements_df,'totalpreferredequity',seed)
noncontrollinginterests=self.get_value(statements_df,'noncontrollinginterests',seed)
redeemablenoncontrollinginterest=self.get_value(statements_df,'redeemablenoncontrollinginterest',seed)
s=pd.Series([marketcap,netdebt,totalpreferredequity,noncontrollinginterests,redeemablenoncontrollinginterest])
if pd.isnull(s).all() or pd.isnull(marketcap):
return None
return float(s.sum())
def get_ebit(self,df,seed=-1,length=4):
ebit=self.get_sum_quarters(df,'totaloperatingincome',seed,length)
if pd.notnull(ebit):
return float(ebit)
totalrevenue=self.get_sum_quarters(df,'totalrevenue',seed,length)
provisionforcreditlosses=self.get_sum_quarters(df,'provisionforcreditlosses',seed,length)
totaloperatingexpenses=self.get_sum_quarters(df,'totaloperatingexpenses',seed,length)
s=pd.Series([totalrevenue,provisionforcreditlosses,totaloperatingexpenses])
if pd.isnull(s).all():
return None
ebit=(s.multiply(pd.Series([1,-1,-1]))).sum()
if pd.notnull(ebit):
return float(ebit)
return None
def get_emyield(self,statements_df,prices_df,seed=-1,length=4):
ebit=self.get_ebit(statements_df,seed,length)
enterprisevalue=self.get_enterprise_value(statements_df,prices_df,seed)
if pd.isnull([ebit,enterprisevalue]).any() or enterprisevalue==0:
return None
return float(ebit/enterprisevalue)
def get_scalednetoperatingassets(self,statements_df,seed=-1):
"""
        SNOA = (Operating Assets - Operating Liabilities) / Total Assets
        where
        OA = total assets - cash and equivalents
        OL = total assets - ST debt - LT debt - minority interest - preferred stock - book common
        (a toy worked example with made-up numbers appears after this class excerpt)
oa=ttmsdfcompany.iloc[-1]['totalassets']-ttmsdfcompany.iloc[-1]['cashandequivalents']
ol=ttmsdfcompany.iloc[-1]['totalassets']-ttmsdfcompany.iloc[-1]['netdebt']-ttmsdfcompany.iloc[-1]['totalequityandnoncontrollinginterests']
snoa=(oa-ol)/ttmsdfcompany.iloc[-1]['totalassets']
"""
totalassets=self.get_value(statements_df,'totalassets',seed)
cashandequivalents=self.get_value(statements_df,'cashandequivalents',seed)
netdebt=self.get_netdebt(statements_df,seed)
totalequityandnoncontrollinginterests=self.get_value(statements_df,'totalequityandnoncontrollinginterests',seed)
if pd.isnull(totalassets) or totalassets==0:
return None
s=pd.Series([totalassets,cashandequivalents])
m=pd.Series([1,-1])
oa=s.multiply(m).sum()
s=pd.Series([totalassets,netdebt,totalequityandnoncontrollinginterests])
m=pd.Series([1,-1,-1])
ol=s.multiply(m).sum()
scalednetoperatingassets=(oa-ol)/totalassets
return float(scalednetoperatingassets)
def get_scaledtotalaccruals(self,statements_df,seed=-1,length=4):
netincome=self.get_sum_quarters(statements_df,'netincome',seed,length)
netcashfromoperatingactivities=self.get_sum_quarters(statements_df,'netcashfromoperatingactivities',seed,length)
start_assets=self.get_value(statements_df,'cashandequivalents',seed-length)
end_assets=self.get_value(statements_df,'cashandequivalents',seed)
if pd.isnull([start_assets,end_assets]).any():
return None
totalassets=np.mean([start_assets,end_assets])
if pd.isnull(totalassets):
return None
num=pd.Series([netincome,netcashfromoperatingactivities])
if pd.isnull(num).all():
return None
m=pd.Series([1,-1])
num=num.multiply(m).sum()
den=totalassets
if den==0:
return None
scaledtotalaccruals=num/den
return float(scaledtotalaccruals)
def get_grossmargin(self,statements_df,seed=-1,length=4):
totalrevenue=self.get_sum_quarters(statements_df, 'totalrevenue', seed, length)
totalcostofrevenue=self.get_sum_quarters(statements_df, 'totalcostofrevenue', seed, length)
if pd.isnull([totalrevenue,totalcostofrevenue]).any() or totalcostofrevenue==0:
return None
grossmargin=(totalrevenue-totalcostofrevenue)/totalcostofrevenue
return float(grossmargin)
def get_margingrowth(self,statements_df,seed=-1,length1=20,length2=4):
grossmargins=[]
for i in range(seed,seed-length1,-1):
grossmargins.append(self.get_grossmargin(statements_df, i, length2))
grossmargins=pd.Series(grossmargins)
if pd.isnull(grossmargins).any():
return None
growth=grossmargins.pct_change(periods=1)
growth=growth[pd.notnull(growth)]
if len(growth)==0:
return None
grossmargingrowth=stats.gmean(1+growth)-1
if pd.isnull(grossmargingrowth):
return None
return float(grossmargingrowth)
def get_marginstability(self,statements_df,seed=-1,length1=20,length2=4):
#length1=how far back to go, how many quarters to get 20 quarters
#length2=for each quarter, how far back to go 4 quarters
grossmargins=[]
for i in range(seed,seed-length1,-1):
grossmargins.append(self.get_grossmargin(statements_df, i, length2))
grossmargins=pd.Series(grossmargins)
if pd.isnull(grossmargins).any() or grossmargins.std()==0:
return None
marginstability=grossmargins.mean()/grossmargins.std()
if pd.isnull(marginstability):
return None
return float(marginstability)
def get_cacl(self,df,seed=-1):
a=self.get_value(df,'totalcurrentassets',seed)
l=self.get_value(df,'totalcurrentliabilities',seed)
if pd.isnull([a,l]).any() or l==0:
return None
else:
return a/l
def get_tatl(self,df,seed=-1):
a=self.get_value(df,'totalassets',seed)
l=self.get_value(df,'totalliabilities',seed)
if pd.isnull([a,l]).any() or l==0:
return None
else:
return a/l
def get_longterm_cacl(self,df,seed=-1,length=20):
ltcacls=[]
for i in range(seed,seed-length,-1):
ltcacls.append(self.get_cacl(df,i))
ltcacls=pd.Series(ltcacls)
if pd.isnull(ltcacls).any():
return None
return stats.gmean(1+ltcacls)-1 #not totally sure we need the 1+, and the -1 11/9/17
def get_longterm_tatl(self,df,seed=-1,length=20):
lttatls=[]
for i in range(seed,seed-length,-1):
lttatls.append(self.get_tatl(df,i))
lttatls=pd.Series(lttatls)
if pd.isnull(lttatls).any():
return None
return stats.gmean(1+lttatls)-1 #not totally sure we need the 1+, and the -1 11/9/17
def get_capex(self,df,seed=-1,length=4):
purchaseofplantpropertyandequipment=self.get_sum_quarters(df,'purchaseofplantpropertyandequipment',seed,length)
saleofplantpropertyandequipment=self.get_sum_quarters(df,'saleofplantpropertyandequipment',seed,length)
s=pd.Series([purchaseofplantpropertyandequipment,saleofplantpropertyandequipment])
if pd.isnull(s).all():
return None
m=pd.Series([-1,-1])
capex=(s*m).sum()
if capex is None:
return None
return float(capex)
def get_freecashflow(self,df,seed=-1):
netcashfromoperatingactivities=self.get_value(df,'netcashfromoperatingactivities',seed)
capex=self.get_capex(df,seed,length=1)
s=pd.Series([netcashfromoperatingactivities,capex])
if pd.isnull(s).all():
return None
m=pd.Series([1,-1])
fcf=(s*m).sum()
return float(fcf)
#add a length2 paramater so we take the sums of cash flows
def get_cashflowonassets(self,df,seed=-1,length1=20,length2=4):
cfoas=[]
for i in range(seed,seed-length1,-1):
start_assets=self.get_value(df,'totalassets',i-length2)
end_assets=self.get_value(df,'totalassets',i)
fcfs=[]
for k in range(i,i-length2,-1):
fcf=self.get_freecashflow(df,k)
fcfs.append(fcf)
if pd.isnull(fcfs).any():
return None
total_fcf=pd.Series(fcfs).sum()
avg_assets=pd.Series([start_assets,end_assets]).mean()
if pd.isnull([total_fcf,avg_assets]).any() or avg_assets==0:
return None
else:
cfoas.append(total_fcf/avg_assets)
if | pd.isnull(cfoas) | pandas.isnull |
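# Toy worked example of the SNOA formula documented in
# quantvaluedata.get_scalednetoperatingassets above. The numbers are invented; the helper
# simply mirrors OA = total assets - cash, OL = total assets - net debt - total equity.
def snoa_example(totalassets=1000.0, cash=100.0, netdebt=250.0, totalequity=400.0):
    oa = totalassets - cash                   # operating assets -> 900
    ol = totalassets - netdebt - totalequity  # operating liabilities -> 350
    return (oa - ol) / totalassets            # -> 0.55 for these toy inputs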
"""
Module to generate counterfactual explanations from a KD-Tree
This code is similar to 'Interpretable Counterfactual Explanations Guided by Prototypes': https://arxiv.org/pdf/1907.02584.pdf
"""
import copy
import timeit
import numpy as np
import pandas as pd
from dice_ml import diverse_counterfactuals as exp
from dice_ml.constants import ModelTypes
from dice_ml.explainer_interfaces.explainer_base import ExplainerBase
class DiceKD(ExplainerBase):
def __init__(self, data_interface, model_interface):
"""Init method
:param data_interface: an interface class to access data related params.
:param model_interface: an interface class to access trained ML model.
"""
self.total_random_inits = 0
super().__init__(data_interface) # initiating data related parameters
# As DiCE KD uses one-hot-encoding
self.data_interface.create_ohe_params()
# initializing model variables
self.model = model_interface
self.model.load_model() # loading pickled trained model if applicable
self.model.transformer.feed_data_params(data_interface)
self.model.transformer.initialize_transform_func()
# loading trained model
self.model.load_model()
# number of output nodes of ML model
if self.model.model_type == ModelTypes.Classifier:
self.num_output_nodes = self.model.get_num_output_nodes2(
self.data_interface.data_df[0:1][self.data_interface.feature_names])
self.predicted_outcome_name = self.data_interface.outcome_name + '_pred'
def _generate_counterfactuals(self, query_instance, total_CFs, desired_range=None, desired_class="opposite",
features_to_vary="all",
permitted_range=None, sparsity_weight=1,
feature_weights="inverse_mad", stopping_threshold=0.5, posthoc_sparsity_param=0.1,
posthoc_sparsity_algorithm="linear", verbose=False):
"""Generates diverse counterfactual explanations
:param query_instance: A dictionary of feature names and values. Test point of interest.
:param total_CFs: Total number of counterfactuals required.
:param desired_range: For regression problems. Contains the outcome range to generate counterfactuals in.
:param desired_class: Desired counterfactual class - can take 0 or 1. Default value is "opposite" to the
outcome class of query_instance for binary classification.
:param features_to_vary: Either a string "all" or a list of feature names to vary.
:param permitted_range: Dictionary with continuous feature names as keys and permitted min-max range in
list as values. Defaults to the range inferred from training data.
If None, uses the parameters initialized in data_interface.
:param sparsity_weight: Parameter to determine how much importance to give to sparsity
:param feature_weights: Either "inverse_mad" or a dictionary with feature names as keys and corresponding
weights as values. Default option is "inverse_mad" where the weight for a continuous
feature is the inverse of the Median Absolute Devidation (MAD) of the feature's
values in the training set; the weight for a categorical feature is equal to 1 by default.
        :param stopping_threshold: Minimum threshold for the counterfactual's target class probability.
:param posthoc_sparsity_param: Parameter for the post-hoc operation on continuous features to enhance sparsity.
:param posthoc_sparsity_algorithm: Perform either linear or binary search. Takes "linear" or "binary".
Prefer binary search when a feature range is large (for instance, income
varying from 10k to 1000k) and only if the features share a monotonic
relationship with predicted outcome in the model.
:param verbose: Parameter to determine whether to print 'Diverse Counterfactuals found!'
:return: A CounterfactualExamples object to store and visualize the resulting counterfactual explanations
(see diverse_counterfactuals.py).
"""
data_df_copy = self.data_interface.data_df.copy()
features_to_vary = self.setup(features_to_vary, permitted_range, query_instance, feature_weights)
# Prepares user defined query_instance for DiCE.
query_instance_orig = query_instance.copy()
query_instance = self.data_interface.prepare_query_instance(query_instance=query_instance)
# find the predicted value of query_instance
test_pred = self.predict_fn(query_instance)[0]
query_instance[self.data_interface.outcome_name] = test_pred
desired_class = self.misc_init(stopping_threshold, desired_class, desired_range, test_pred)
if desired_range is not None:
if desired_range[0] > desired_range[1]:
raise ValueError("Invalid Range!")
if desired_class == "opposite" and self.model.model_type == ModelTypes.Classifier:
if self.num_output_nodes == 2:
desired_class = 1.0 - test_pred
elif self.num_output_nodes > 2:
raise ValueError("Desired class can't be opposite if the number of classes is more than 2.")
if isinstance(desired_class, int) and desired_class > self.num_output_nodes - 1:
raise ValueError("Desired class should be within 0 and num_classes-1.")
# Partitioned dataset and KD Tree for each class (binary) of the dataset
self.dataset_with_predictions, self.KD_tree, self.predictions = \
self.build_KD_tree(data_df_copy, desired_range, desired_class, self.predicted_outcome_name)
query_instance, cfs_preds = self.find_counterfactuals(data_df_copy,
query_instance, query_instance_orig,
desired_range,
desired_class,
total_CFs, features_to_vary,
permitted_range,
sparsity_weight,
stopping_threshold,
posthoc_sparsity_param,
posthoc_sparsity_algorithm, verbose)
self.cfs_preds = cfs_preds
return exp.CounterfactualExamples(data_interface=self.data_interface,
final_cfs_df=self.final_cfs_df,
test_instance_df=query_instance,
final_cfs_df_sparse=self.final_cfs_df_sparse,
posthoc_sparsity_param=posthoc_sparsity_param,
desired_range=desired_range,
desired_class=desired_class,
model_type=self.model.model_type)
def predict_fn(self, input_instance):
"""returns predictions"""
return self.model.get_output(input_instance, model_score=False)
def do_sparsity_check(self, cfs, query_instance, sparsity_weight):
cfs = cfs.assign(sparsity=np.nan, distancesparsity=np.nan)
for index, row in cfs.iterrows():
cnt = 0
for column in self.data_interface.continuous_feature_names:
if not np.isclose(row[column], query_instance[column].values[0]):
cnt += 1
for column in self.data_interface.categorical_feature_names:
if row[column] != query_instance[column].values[0]:
cnt += 1
cfs.at[index, "sparsity"] = cnt
cfs["distance"] = (cfs["distance"] - cfs["distance"].min()) / (cfs["distance"].max() - cfs["distance"].min())
cfs["sparsity"] = (cfs["sparsity"] - cfs["sparsity"].min()) / (cfs["sparsity"].max() - cfs["sparsity"].min())
cfs["distancesparsity"] = cfs["distance"] + sparsity_weight * cfs["sparsity"]
cfs = cfs.sort_values(by="distancesparsity")
cfs = cfs.drop(["distance", "sparsity", "distancesparsity"], axis=1)
return cfs
def vary_valid(self, KD_query_instance, total_CFs, features_to_vary, permitted_range, query_instance,
sparsity_weight):
"""This function ensures that we only vary features_to_vary when generating counterfactuals"""
# TODO: this should be a user-specified parameter
num_queries = min(len(self.dataset_with_predictions), total_CFs * 10)
cfs = []
if self.KD_tree is not None and num_queries > 0:
KD_tree_output = self.KD_tree.query(KD_query_instance, num_queries)
distances = KD_tree_output[0][0]
indices = KD_tree_output[1][0]
cfs = self.dataset_with_predictions.iloc[indices].copy()
cfs['distance'] = distances
cfs = self.do_sparsity_check(cfs, query_instance, sparsity_weight)
cfs = cfs.drop(self.data_interface.outcome_name, axis=1)
self.final_cfs = | pd.DataFrame() | pandas.DataFrame |
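# A hedged usage sketch showing how the KD-tree explainer above is normally reached through
# dice_ml's public interface. Exact signatures can differ across dice_ml versions, and the
# feature/outcome names and the sklearn model below are placeholders, not part of this module.
import dice_ml

def kd_counterfactuals_example(training_df, sklearn_model, query_df):
    d = dice_ml.Data(dataframe=training_df,
                     continuous_features=["age", "hours_per_week"],  # placeholder names
                     outcome_name="income")                          # placeholder outcome
    m = dice_ml.Model(model=sklearn_model, backend="sklearn")
    explainer = dice_ml.Dice(d, m, method="kdtree")  # dispatches to the KD-tree explainer
    return explainer.generate_counterfactuals(query_df, total_CFs=4, desired_class="opposite")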
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
import numpy as np
import warnings
from pandas.core import common as com
from pandas.types.common import (is_integer,
is_float,
is_object_dtype,
is_integer_dtype,
is_float_dtype,
is_scalar,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_period_dtype,
is_bool_dtype,
pandas_dtype,
_ensure_int64,
_ensure_object)
from pandas.types.dtypes import PeriodDtype
from pandas.types.generic import ABCSeries
import pandas.tseries.frequencies as frequencies
from pandas.tseries.frequencies import get_freq_code as _gfc
from pandas.tseries.index import DatetimeIndex, Int64Index, Index
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.base import DatelikeOps, DatetimeIndexOpsMixin
from pandas.tseries.tools import parse_time_string
import pandas.tseries.offsets as offsets
import pandas._period as period
from pandas._period import (Period, IncompatibleFrequency,
get_period_field_arr, _validate_end_alias,
_quarter_to_myear)
from pandas.core.base import _shared_docs
from pandas.indexes.base import _index_shared_docs, _ensure_index
from pandas import compat
from pandas.util.decorators import (Appender, Substitution, cache_readonly,
deprecate_kwarg)
from pandas.lib import infer_dtype
import pandas.tslib as tslib
from pandas.compat import zip, u
import pandas.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(target_klass='PeriodIndex or list of Periods'))
def _field_accessor(name, alias, docstring=None):
def f(self):
base, mult = _gfc(self.freq)
return get_period_field_arr(alias, self._values, base)
f.__name__ = name
f.__doc__ = docstring
return property(f)
def dt64arr_to_periodarr(data, freq, tz):
if data.dtype != np.dtype('M8[ns]'):
raise ValueError('Wrong dtype: %s' % data.dtype)
freq = Period._maybe_convert_freq(freq)
base, mult = _gfc(freq)
return period.dt64arr_to_periodarr(data.view('i8'), base, tz)
# --- Period index sketch
_DIFFERENT_FREQ_INDEX = period._DIFFERENT_FREQ_INDEX
def _period_index_cmp(opname, nat_result=False):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
if isinstance(other, Period):
func = getattr(self._values, opname)
other_base, _ = _gfc(other.freq)
if other.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
result = func(other.ordinal)
elif isinstance(other, PeriodIndex):
if other.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise IncompatibleFrequency(msg)
result = getattr(self._values, opname)(other._values)
mask = self._isnan | other._isnan
if mask.any():
result[mask] = nat_result
return result
elif other is tslib.NaT:
result = np.empty(len(self._values), dtype=bool)
result.fill(nat_result)
else:
other = Period(other, freq=self.freq)
func = getattr(self._values, opname)
result = func(other.ordinal)
if self.hasnans:
result[self._isnan] = nat_result
return result
return wrapper
class PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index):
"""
Immutable ndarray holding ordinal values indicating regular periods in
time such as particular years, quarters, months, etc. A value of 1 is the
period containing the Gregorian proleptic datetime Jan 1, 0001 00:00:00.
This ordinal representation is from the scikits.timeseries project.
For instance,
# construct period for day 1/1/1 and get the first second
i = Period(year=1,month=1,day=1,freq='D').asfreq('S', 'S')
i.ordinal
===> 1
Index keys are boxed to Period objects which carries the metadata (eg,
frequency information).
Parameters
----------
data : array-like (1-dimensional), optional
Optional period-like data to construct index with
copy : bool
Make a copy of input ndarray
freq : string or period object, optional
One of pandas period strings or corresponding objects
start : starting value, period-like, optional
If data is None, used as the start point in generating regular
period data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end value, period-like, optional
If periods is none, generated index will extend to first conforming
period on or just past end argument
year : int, array, or Series, default None
month : int, array, or Series, default None
quarter : int, array, or Series, default None
day : int, array, or Series, default None
hour : int, array, or Series, default None
minute : int, array, or Series, default None
second : int, array, or Series, default None
tz : object, default None
Timezone for converting datetime64 data to Periods
dtype : str or PeriodDtype, default None
Examples
--------
>>> idx = PeriodIndex(year=year_arr, quarter=q_arr)
>>> idx2 = PeriodIndex(start='2000', end='2010', freq='A')
"""
_box_scalars = True
_typ = 'periodindex'
_attributes = ['name', 'freq']
_datetimelike_ops = ['year', 'month', 'day', 'hour', 'minute', 'second',
'weekofyear', 'week', 'dayofweek', 'weekday',
'dayofyear', 'quarter', 'qyear', 'freq',
'days_in_month', 'daysinmonth',
'to_timestamp', 'asfreq', 'start_time', 'end_time',
'is_leap_year']
_is_numeric_dtype = False
_infer_as_myclass = True
freq = None
__eq__ = _period_index_cmp('__eq__')
__ne__ = _period_index_cmp('__ne__', nat_result=True)
__lt__ = _period_index_cmp('__lt__')
__gt__ = _period_index_cmp('__gt__')
__le__ = _period_index_cmp('__le__')
__ge__ = _period_index_cmp('__ge__')
def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None,
periods=None, copy=False, name=None, tz=None, dtype=None,
**kwargs):
if periods is not None:
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
raise ValueError('Periods must be a number, got %s' %
str(periods))
if name is None and hasattr(data, 'name'):
name = data.name
if dtype is not None:
dtype = pandas_dtype(dtype)
if not is_period_dtype(dtype):
raise ValueError('dtype must be PeriodDtype')
if freq is None:
freq = dtype.freq
elif freq != dtype.freq:
msg = 'specified freq and dtype are different'
raise IncompatibleFrequency(msg)
if data is None:
if ordinal is not None:
data = np.asarray(ordinal, dtype=np.int64)
else:
data, freq = cls._generate_range(start, end, periods,
freq, kwargs)
else:
ordinal, freq = cls._from_arraylike(data, freq, tz)
data = np.array(ordinal, dtype=np.int64, copy=copy)
return cls._simple_new(data, name=name, freq=freq)
@classmethod
def _generate_range(cls, start, end, periods, freq, fields):
if freq is not None:
freq = Period._maybe_convert_freq(freq)
field_count = len(fields)
if com._count_not_none(start, end) > 0:
if field_count > 0:
raise ValueError('Can either instantiate from fields '
'or endpoints, but not both')
subarr, freq = _get_ordinal_range(start, end, periods, freq)
elif field_count > 0:
subarr, freq = _range_from_fields(freq=freq, **fields)
else:
raise ValueError('Not enough parameters to construct '
'Period range')
return subarr, freq
@classmethod
def _from_arraylike(cls, data, freq, tz):
if freq is not None:
freq = Period._maybe_convert_freq(freq)
if not isinstance(data, (np.ndarray, PeriodIndex,
DatetimeIndex, Int64Index)):
if is_scalar(data) or isinstance(data, Period):
raise ValueError('PeriodIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
try:
data = _ensure_int64(data)
if freq is None:
raise ValueError('freq not specified')
data = np.array([Period(x, freq=freq) for x in data],
dtype=np.int64)
except (TypeError, ValueError):
data = _ensure_object(data)
if freq is None:
freq = period.extract_freq(data)
data = period.extract_ordinals(data, freq)
else:
if isinstance(data, PeriodIndex):
if freq is None or freq == data.freq:
freq = data.freq
data = data._values
else:
base1, _ = _gfc(data.freq)
base2, _ = _gfc(freq)
data = period.period_asfreq_arr(data._values,
base1, base2, 1)
else:
if is_object_dtype(data):
inferred = infer_dtype(data)
if inferred == 'integer':
data = data.astype(np.int64)
if freq is None and is_object_dtype(data):
# must contain Period instance and thus extract ordinals
freq = period.extract_freq(data)
data = period.extract_ordinals(data, freq)
if freq is None:
msg = 'freq not specified and cannot be inferred'
raise ValueError(msg)
if data.dtype != np.int64:
if np.issubdtype(data.dtype, np.datetime64):
data = dt64arr_to_periodarr(data, freq, tz)
else:
data = _ensure_object(data)
data = period.extract_ordinals(data, freq)
return data, freq
@classmethod
def _simple_new(cls, values, name=None, freq=None, **kwargs):
if not is_integer_dtype(values):
values = np.array(values, copy=False)
if (len(values) > 0 and is_float_dtype(values)):
raise TypeError("PeriodIndex can't take floats")
else:
return cls(values, name=name, freq=freq, **kwargs)
values = np.array(values, dtype='int64', copy=False)
result = object.__new__(cls)
result._data = values
result.name = name
if freq is None:
raise ValueError('freq is not specified')
result.freq = Period._maybe_convert_freq(freq)
result._reset_identity()
return result
def _shallow_copy_with_infer(self, values=None, **kwargs):
""" we always want to return a PeriodIndex """
return self._shallow_copy(values=values, **kwargs)
def _shallow_copy(self, values=None, **kwargs):
if kwargs.get('freq') is None:
# freq must be provided
kwargs['freq'] = self.freq
if values is None:
values = self._values
return super(PeriodIndex, self)._shallow_copy(values=values, **kwargs)
def _coerce_scalar_to_index(self, item):
"""
we need to coerce a scalar to a compat for our index type
Parameters
----------
item : scalar item to coerce
"""
return PeriodIndex([item], **self._get_attributes_dict())
def __contains__(self, key):
if isinstance(key, Period):
if key.freq != self.freq:
return False
else:
return key.ordinal in self._engine
else:
try:
self.get_loc(key)
return True
except Exception:
return False
return False
@property
def asi8(self):
return self._values.view('i8')
@cache_readonly
def _int64index(self):
return Int64Index(self.asi8, name=self.name, fastpath=True)
@property
def values(self):
return self.asobject.values
@property
def _values(self):
return self._data
def __array__(self, dtype=None):
if is_integer_dtype(dtype):
return self.asi8
else:
return self.asobject.values
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc. Needs additional handling as
PeriodIndex stores internal data as int dtype
Replace this to __numpy_ufunc__ in future version
"""
if isinstance(context, tuple) and len(context) > 0:
func = context[0]
if (func is np.add):
pass
elif (func is np.subtract):
name = self.name
left = context[1][0]
right = context[1][1]
if (isinstance(left, PeriodIndex) and
isinstance(right, PeriodIndex)):
name = left.name if left.name == right.name else None
return Index(result, name=name)
elif isinstance(left, Period) or isinstance(right, Period):
return Index(result, name=name)
elif isinstance(func, np.ufunc):
if 'M->M' not in func.types:
msg = "ufunc '{0}' not supported for the PeriodIndex"
# This should be TypeError, but TypeError cannot be raised
# from here because numpy catches.
raise ValueError(msg.format(func.__name__))
if is_bool_dtype(result):
return result
# the result is object dtype array of Period
# cannot pass _simple_new as it is
return PeriodIndex(result, freq=self.freq, name=self.name)
@property
def _box_func(self):
return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq)
def _to_embed(self, keep_tz=False):
"""
return an array repr of this object, potentially casting to object
"""
return self.asobject.values
@property
def _formatter_func(self):
return lambda x: "'%s'" % x
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
where_idx = where
if isinstance(where_idx, DatetimeIndex):
where_idx = PeriodIndex(where_idx.values, freq=self.freq)
locs = self._values[mask].searchsorted(where_idx._values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where_idx._values < self._values[first])] = -1
return result
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True, how='start'):
dtype = pandas_dtype(dtype)
if is_object_dtype(dtype):
return self.asobject
elif is_integer_dtype(dtype):
if copy:
return self._int64index.copy()
else:
return self._int64index
elif is_datetime64_dtype(dtype):
return self.to_timestamp(how=how)
elif is_datetime64tz_dtype(dtype):
return self.to_timestamp(how=how).tz_localize(dtype.tz)
elif is_period_dtype(dtype):
return self.asfreq(freq=dtype.freq)
raise ValueError('Cannot cast PeriodIndex to dtype %s' % dtype)
@Substitution(klass='PeriodIndex')
@Appender(_shared_docs['searchsorted'])
@deprecate_kwarg(old_arg_name='key', new_arg_name='value')
def searchsorted(self, value, side='left', sorter=None):
if isinstance(value, Period):
if value.freq != self.freq:
msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, value.freqstr)
raise | IncompatibleFrequency(msg) | pandas._period.IncompatibleFrequency |
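# A short, hedged illustration of the PeriodIndex behaviour documented above, using the public
# pandas API rather than the internal constructors; exact reprs depend on the pandas version,
# but the calls are standard.
import pandas as pd

pidx = pd.period_range(start="2000Q1", end="2001Q4", freq="Q")
print(pidx)                            # 8 quarterly periods
print(pidx.asfreq("M", how="end"))     # each quarter converted to its last month
print(pidx.to_timestamp(how="start"))  # DatetimeIndex of quarter-start timestamps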
import io
import random
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import date
from flask import Response
from flask_wtf import Form
from datetime import timedelta
from matplotlib.figure import Figure
from pandas_datareader import data as pdr
from wtforms.fields.html5 import DateField
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from wtforms import Form, StringField, TextAreaField, PasswordField, validators
from flask import Flask, render_template, flash, redirect, url_for, session, request, logging
app = Flask(__name__)
# Index
@app.route('/')
def index():
stocks = get_top_stocks()
return render_template('dashboard.html', stocks=stocks)
# Index
@app.route('/search', methods=['GET', 'POST'])
def search_stocks():
if request.method == 'POST':
ticker_list = []
# Get Data from Form
ticker = request.form.get('query')
ticker_list.append(ticker)
all_data = fetch_tickers_data(ticker_list)
# Prepare data for output
stocks = all_data.loc[(ticker)].reset_index().T.to_dict().values()
return render_template('search.html', ticker=ticker, stocks=stocks)
else:
return render_template('search.html', ticker="")
@app.route('/plot.png')
def plot_png():
ticker_list = []
ticker = request.args.get('my_var', None)
ticker_list.append(ticker)
fig = create_figure(ticker_list)
output = io.BytesIO()
FigureCanvas(fig).print_png(output)
return Response(output.getvalue(), mimetype='image/png')
def create_figure(ticker_list):
all_data = fetch_tickers_data(ticker_list)
# reset the index to make everything columns
just_closing_prices = all_data[['Adj Close']].reset_index()
daily_close_px = just_closing_prices.pivot('Date', 'Ticker','Adj Close')
fig = plt.figure();
_ = daily_close_px[ticker_list[0]].plot(figsize=(12,8));
return fig
def fetch_tickers_data(ticker_list):
# Generate Dates to fetch stock data
start_date= date.today() - timedelta(days = 5)
end_date=date.today()
    # Fetch data from the Yahoo Finance API (via pandas-datareader)
return get(ticker_list, start_date, end_date)
def get_top_stocks():
# Tickers list
    # Add or remove tickers from this list to fetch data for the desired symbols
ticker_list=['DJIA', 'DOW', 'LB', 'EXPE', 'PXD', 'MCHP', 'CRM', 'NRG', 'HFC', 'NOW']
today = date.today()
    # Choose the date range over which to fetch historical data
start_date= "2021-04-25"
end_date="2021-05-01"
    # Fetch data from the Yahoo Finance API (via pandas-datareader)
all_data = get(ticker_list, start_date, today)
# reset the index to make everything columns
just_closing_prices = all_data[['Adj Close']].reset_index()
daily_close_px = just_closing_prices.pivot('Date', 'Ticker','Adj Close')
# Pick the first and last row to calculate price difference
res = pd.concat([daily_close_px.head(1), daily_close_px.tail(1)])
res = res.diff().T
rslt = pd.DataFrame(np.zeros((0,3)), columns=['top1','top2','top3'])
for i in res.columns:
df1row = pd.DataFrame(res.nlargest(3, i).index.tolist(), index=['top1','top2','top3']).T
rslt = pd.concat([rslt, df1row], axis=0)
list_of_stocks = prepare_output_data(rslt, all_data, today)
return list_of_stocks
def prepare_output_data(rslt, all_data, today):
list_of_stocks = []
for row in rslt.itertuples():
stock_data = {}
stock_data['StockName'] = row.top1
stock_data.update(all_data.loc[(row.top1, (today-timedelta(days = 3)).strftime('%Y-%m-%d'))].to_dict())
list_of_stocks.append(stock_data)
stock_data = {}
stock_data['StockName'] = row.top2
stock_data.update(all_data.loc[(row.top2, (today-timedelta(days = 3)).strftime('%Y-%m-%d'))].to_dict())
list_of_stocks.append(stock_data)
stock_data = {}
stock_data['StockName'] = row.top3
stock_data.update(all_data.loc[(row.top3, (today-timedelta(days = 3)).strftime('%Y-%m-%d'))].to_dict())
list_of_stocks.append(stock_data)
return list_of_stocks
def get(tickers, start, end):
def data(ticker):
return pdr.get_data_yahoo(ticker, start=start, end=end)
datas = map(data, tickers)
return | pd.concat(datas, keys=tickers, names=['Ticker','Date']) | pandas.concat |
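# A minimal sketch (assumed helper, not part of the app above) of the "top movers" selection
# used in get_top_stocks: compare the first and last adjusted close per ticker and keep the
# n largest gains.
import pandas as pd

def top_movers(daily_close_px, n=3):
    # daily_close_px: rows indexed by date, one column per ticker, values = adjusted close
    change = daily_close_px.iloc[-1] - daily_close_px.iloc[0]
    return change.nlargest(n).index.tolist()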
# import Ipynb_importer
import pandas as pd
from .public_fun import *
# Global variables
class glv:
def _init():
global _global_dict
_global_dict = {}
def set_value(key,value):
_global_dict[key] = value
def get_value(key,defValue=None):
try:
return _global_dict[key]
except KeyError:
return defValue
## fun_01to06
class fun_01to06(object):
def __init__(self, data):
self.cf = [2, 1, 1, 17, 1, 2]
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"起始符",
"命令标识",
"应答标志",
"唯一识别码",
"数据单元加密方式",
"数据单元长度"
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"起始符":hex2str(self.oj["起始符"]),
"命令标识":dict_list_replace('02', self.oj['命令标识']),
"应答标志":dict_list_replace('03', self.oj['应答标志']),
"唯一识别码":hex2str(self.oj["唯一识别码"]),
"数据单元加密方式":dict_list_replace('05', self.oj['数据单元加密方式']),
"数据单元长度":hex2dec(self.oj["数据单元长度"]),
}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
self.mo = self.oj["命令标识"]
glv.set_value('data_f', self.next)
glv.set_value('data_mo', self.mo)
glv.set_value('data_01to07', self.o)
print('fun_01to06 done!')
## fun_07
class fun_07:
def __init__(self, data):
self.mo = glv.get_value("data_mo")
if self.mo == '01':
self.o = fun_07_01(glv.get_value('data_f'))
elif self.mo == '02' or self.mo == '03':
self.o = fun_07_02(glv.get_value('data_f'))
elif self.mo == '04':
self.o = fun_07_04(glv.get_value('data_f'))
elif self.mo == '05':
self.o = fun_07_05(glv.get_value('data_f'))
elif self.mo == '06':
self.o = fun_07_06(glv.get_value('data_f'))
else :
            print('Invalid command identifier:', self.mo)
self.c = fun_07_cursor(glv.get_value('data_f'))
self.oj = dict(self.o.oj, **self.c.oj)
self.oj2 = {'数据单元':self.oj}
self.ol = pd.merge(self.o.ol, self.c.ol, left_index=True, right_index=True)
self.pj = dict(self.o.pj, **self.c.pj)
self.pj2 = {'数据单元':self.pj}
self.pl = pd.merge(self.o.pl, self.c.pl, left_index=True, right_index=True)
print('fun_07 done!')
## fun_07_01
class fun_07_01(object):
def __init__(self, data):
self.cf = [6, 2, 20, 1, 1]
self.cf_a = hexlist2(self.cf)
self.n = hex2dec(data[self.cf_a[3]:self.cf_a[4]])
self.m = hex2dec(data[self.cf_a[4]:self.cf_a[5]])
self.cf.append(self.n*self.m)
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"数据采集时间",
"登入流水号",
"ICCID",
"可充电储能子系统数",
"可充电储能系统编码长度",
"可充电储能系统编码",
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.oj2 = {'车辆登入': self.oj}
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"数据采集时间":get_datetime(self.oj['数据采集时间']),
"登入流水号":hex2dec(self.oj['登入流水号']),
"ICCID":hex2str(self.oj['ICCID']),
"可充电储能子系统数":hex2dec(self.oj['可充电储能子系统数']),
"可充电储能系统编码长度":hex2dec(self.oj['可充电储能系统编码长度']),
"可充电储能系统编码":fun_07_01.fun_07_01_06(self.oj['可充电储能系统编码'], self.oj['可充电储能子系统数'], self.oj['可充电储能系统编码长度']),
}
self.pj2 = {'车辆登入': self.pj}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('data_07_01', self.o)
print('fun_07_01 done!')
def fun_07_01_06(data, n, m):
if m=='00':
return "NA"
else :
n = hex2dec(n)
m = hex2dec(m) * 2
output = []
for i in range(n):
output_unit = hex2str(data[i * m: i* m +m])
output.append(output_unit)
return output
## fun_07_04
class fun_07_04(object):
def __init__(self, data):
self.cf = [6, 2]
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"登出时间",
"登出流水号",
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"登出时间":get_datetime(self.oj['登出时间']),
"登出流水号":hex2dec(self.oj['登出流水号']),
}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('data_07_04', self.o)
print('fun_07_04 done!')
## fun_07_05
class fun_07_05(object):
def __init__(self, data):
self.cf = [6, 2, 12, 20, 1]
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"平台登入时间",
"登入流水号",
"平台用户名",
"平台密码",
"加密规则",
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"平台登入时间":get_datetime(self.oj['平台登入时间']),
"登入流水号":hex2dec(self.oj['登入流水号']),
"平台用户名":hex2str(self.oj['平台用户名']),
"平台密码":hex2str(self.oj['平台密码']),
"加密规则":dict_list_replace('07_05_05',self.oj['加密规则']),
}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('data_07_05', self.o)
print('fun_07_05 done!')
## fun_07_06
class fun_07_06(object):
def __init__(self, data):
self.cf = [6, 2]
self.cf_a = hexlist2(self.cf)
self.o = data[0:self.cf_a[-1]]
self.list_o = [
"登出时间",
"登出流水号",
]
self.oj = list2dict(self.o, self.list_o, self.cf_a)
print(self.oj)
self.ol = pd.DataFrame([self.oj]).reindex(columns=self.list_o)
self.pj = {
"登出时间":get_datetime(self.oj['登出时间']),
"登出流水号":hex2dec(self.oj['登出流水号']),
}
self.pl = pd.DataFrame([self.pj]).reindex(columns=self.list_o)
self.next = data[len(self.o):]
self.nextMark = data[len(self.o):len(self.o)+2]
glv.set_value('data_f', self.next)
glv.set_value('data_07_06', self.o)
print('fun_07_06 done!')
## fun_07_02
class fun_07_02:
def __init__(self, data):
self.o = data
self.oj = {'数据采集时间': self.o[:12]}
self.ol = pd.DataFrame({'01':['01']})
self.pj = {'数据采集时间': get_datetime(self.oj['数据采集时间'])}
self.pl = pd.DataFrame({'01':['01']})
glv.set_value('data_f', data[12:])
glv.set_value('m_07_02', data[12:14])
self.mo_list = glv.get_value('model')
self.do_list = []
while(glv.get_value('m_07_02') in self.mo_list):
            # Record the identifier that has been processed
self.do_list.append(glv.get_value('m_07_02'))
            # Remove the processed identifier from the pending list
self.mo_list.remove(glv.get_value('m_07_02'))
if glv.get_value('m_07_02') == '01':
self.f_01 = fun_07_02_01(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '02':
self.f_02 = fun_07_02_02(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '03':
self.f_03 = fun_07_02_03(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '04':
self.f_04 = fun_07_02_04(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '05':
self.f_05 = fun_07_02_05(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '06':
self.f_06 = fun_07_02_06(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '07':
self.f_07 = fun_07_02_07(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '08':
self.f_08 = fun_07_02_08(glv.get_value('data_f'))
elif glv.get_value('m_07_02') == '09':
self.f_09 = fun_07_02_09(glv.get_value('data_f'))
else:
print("fun_07_02 done")
print(glv.get_value('data_f'))
print(glv.get_value('m_07_02'))
self.do_list.sort()
for i in self.do_list:
if i == '01':
self.oj = dict(self.oj,**self.f_01.oj2)
self.ol = pd.merge(self.ol, self.f_01.ol, left_index=True, right_index=True)
self.pj = dict(self.pj,**self.f_01.pj2)
self.pl = pd.merge(self.pl, self.f_01.pl, left_index=True, right_index=True)
elif i == '02':
self.oj = dict(self.oj,**self.f_02.oj2)
self.ol = pd.merge(self.ol, self.f_02.ol, left_index=True, right_index=True)
self.pj = dict(self.pj,**self.f_02.pj2)
self.pl = pd.merge(self.pl, self.f_02.pl, left_index=True, right_index=True)
elif i == '03':
self.oj = dict(self.oj,**self.f_03.oj2)
self.ol = pd.merge(self.ol, self.f_03.ol, left_index=True, right_index=True)
self.pj = dict(self.pj,**self.f_03.pj2)
self.pl = pd.merge(self.pl, self.f_03.pl, left_index=True, right_index=True)
elif i == '04':
self.oj = dict(self.oj,**self.f_04.oj2)
self.ol = pd.merge(self.ol, self.f_04.ol, left_index=True, right_index=True)
self.pj = dict(self.pj,**self.f_04.pj2)
self.pl = | pd.merge(self.pl, self.f_04.pl, left_index=True, right_index=True) | pandas.merge |
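# A hedged sketch of the field-slicing idea used throughout the parser above: the cf lists hold
# field widths in bytes, hexlist2-style offsets are cumulative character positions (2 hex chars
# per byte), and list2dict-style slicing maps field names to hex substrings. These helpers are
# re-implemented here for illustration only; the real public_fun versions may differ.
def offsets_from_widths(widths_in_bytes):
    offsets = [0]
    for w in widths_in_bytes:
        offsets.append(offsets[-1] + 2 * w)  # 2 hex characters per byte
    return offsets

def slice_fields(hex_payload, field_names, offsets):
    return {name: hex_payload[offsets[i]:offsets[i + 1]] for i, name in enumerate(field_names)}

# Example: a 2-byte field followed by a 1-byte field
# slice_fields("23230101", ["start", "command"], offsets_from_widths([2, 1]))
# -> {"start": "2323", "command": "01"}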
import os
import pandas as pd
import numpy as np
def read_data():
# Define raw data path
raw_data_path = os.path.join('data', 'raw')
train_file_path = os.path.join(raw_data_path, 'train.csv')
test_file_path = os.path.join(raw_data_path, 'test.csv')
    # read data from csv files
train_df = pd.read_csv(train_file_path, index_col='PassengerId')
test_df = pd.read_csv(test_file_path, index_col='PassengerId')
test_df['Survived'] = -1000 # set a default value for survived column on test data
combined_df = pd.concat((train_df, test_df), axis=0, sort=True)
return combined_df
def processed_data(df):
# use method chaining concept
return (df
# Create title attribute
.assign(Title = lambda x : x.Name.map(get_clean_title))
# working missing values
.pipe(fill_missing_values)
# Create Fare bin feature
.assign(Fare_Bin = lambda x: | pd.qcut(x.Fare, 4, labels=['very_low', 'low', 'high', 'very_high']) | pandas.qcut |
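# A hedged sketch of how the two helpers above are typically chained; the output location and
# file names are assumptions, not part of the original module.
def build_processed_dataset():
    df = processed_data(read_data())
    processed_path = os.path.join('data', 'processed')  # assumed output directory
    df.loc[df.Survived != -1000].to_csv(os.path.join(processed_path, 'train_processed.csv'))
    df.loc[df.Survived == -1000].to_csv(os.path.join(processed_path, 'test_processed.csv'))
    return df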
import numpy as np
import pandas as pd
from tqdm import tqdm
import numpy.ma as ma
from scipy.special import gammaln
from pykalman import KalmanFilter
from pynowcasting.pycsminwel import csminwel
class BVARGLP(object):
def __init__(self, data, lags, hz=8, vc=10e6, stationary_prior=None, crit=1e-16,
hyperpriors=True, mnpsi=True, mnalpha=False, sur=True, noc=True,
fcast=False, mcmc=False, ndraws=20000, ndrawsdiscard=None, mcmcconst=1,
mcmcfcast=True, mcmcstorecoef=True, verbose=False):
"""
This class implements the Bayesian VAR from Giannone, Lenza and Primiceri (2012), hence the name GLP. The main
        idea of the model is to use multiple priors, each with its own hyperprior, in order to generate shrinkage
        behaviour.
        This class only accepts data with a quarterly frequency and no missing data.
@param hyperpriors: False = no priors on hyperparameters
True = reference priors on hyperparameters (default)
[NOTE: hyperpriors on psi calibrated for data expressed in
4 x logs, such as 4 x log(GDP). Thus if interest rate is in
percentage, divide by 100]
@param vc: prior variance in the MN prior for the coefficients multiplying
the contant term (Default: vc=10e6)
@param stationary_prior: names of the variables that enter the VAR in first
differences and for which one might want to set the prior mean
on the coefficient on the first own lag in the MN prior and the
prior mean of the sum-of-coefficients prior to 0 (instead of
the typical 1)
@param mnpsi: False = diagonal elements of the scale matrix of the IW prior on
the covariance of the residuals NOT treated as
hyperparameters (set to the residual variance of an AR(1))
True = diagonal elements of the scale matrix of the IW prior on
the covariance of the residuals treated as hyperparameters (default)
@param mnalpha: False = Lag-decaying parameter of the MN prior set to 2 and
NOT treated as hyperparameter (default)
True = Lag-decaying parameter of the MN prior treated as
hyperparameter
@param sur: False = single-unit-root prior is OFF
True = single-unit-root prior is ON and its std is treated as an
hyperparameter (default)
@param noc: False = no-cointegration (sum-of coefficients) prior is OFF
True = no-cointegration (sum-of coefficients) is ON and its std is
treated as an hyperparameter (default)
@param fcast: False = does not generate forecasts at the posterior mode
True = generates forecasts at the posterior mode (default)
@param hz: number of quarters for which it generates forecasts (default: hz=8)
@param mcmc: False = does not run the MCMC (default)
True = runs the MCMC after the maximization
@param ndraws: number of draws in the MCMC (default: Ndraws=20000)
@param ndrawsdiscard: number of draws initially discarded to allow convergence
in the MCMC (default=Ndraws/2)
@param mcmcconst: scaling constant for the MCMC (should be calibrated to achieve
an acceptance rate of approx 25%) (default: MCMCconst=1)
@param mcmcfcast: False = does not generate forecasts when running the MCMC
True = generates forecasts while running the MCMC
(for each draw of the hyperparameters the code takes a
draw of the VAR coefficients and shocks, and generates
forecasts at horizons hz) (default).
@param mcmcstorecoef: False = does not store the MCMC draws of the VAR
coefficients and residual covariance matrix
True = stores the MCMC draws of the VAR coefficients and
residual covariance matrix (default)
@param verbose: Prints relevant information during the estimation.
@param crit: value for convergence criteria
"""
assert data.index.inferred_freq == 'Q', "input 'data' must be quarterly and recognized by pandas."
self.data = data
self.lags = lags
self.hyperpriors = hyperpriors
self.vc = vc
self.stationary_prior = stationary_prior
if stationary_prior is None:
self.pos = None
else:
self.pos = [self.data.columns.get_loc(var) for var in stationary_prior]
self.mnalpha = mnalpha
self.mnpsi = mnpsi
self.sur = sur
self.noc = noc
self.fcast = fcast
self.hz = hz
self.mcmc = mcmc
self.ndraws = ndraws
self.ndrwasdiscard = int(ndraws/2) if ndrawsdiscard is None else ndrawsdiscard
self.mcmccosnt = mcmcconst
self.mcmcfcast = mcmcfcast
self.mcmcstorecoef = mcmcstorecoef
self.verbose = verbose
self.crit = crit
self.TT = data.shape[0] # Time-series sample size without lags
self.n = data.shape[1] # Number of variables in the VAR
self.k = self.n * self.lags + 1 # Number of coefficients on each equation
self._set_priors()
self._regressor_matrix_ols()
self._minimization()
if self.fcast:
self._forecasts()
if self.mcmc:
self._mcmc()
def _set_priors(self):
# Sets up the default choices for the priors of the BVAR of Giannone, Lenza and Primiceri (2012)
if self.hyperpriors:
# hyperprior mode
mode_lambda = 0.2
mode_miu = 1
mode_theta = 1
# hyperprior sds
sd_lambda = 0.4
sd_miu = 1
sd_theta = 1
# scale and shape of the IG on psi/(d-n-1)
scalePSI = 0.02 ** 2
priorcoef = pd.DataFrame(index=['lambda', 'miu', 'theta', 'alpha', 'beta'],
columns=['r_k', 'r_theta', 'PSI'])
priorcoef.loc['lambda', 'r_k'], priorcoef.loc['lambda', 'r_theta'] = \
self._gamma_coef(mode_lambda, sd_lambda)
priorcoef.loc['miu', 'r_k'], priorcoef.loc['miu', 'r_theta'] = self._gamma_coef(mode_miu, sd_miu)
priorcoef.loc['theta', 'r_k'], priorcoef.loc['theta', 'r_theta'] = self._gamma_coef(mode_theta, sd_theta)
priorcoef.loc['alpha', 'PSI'] = scalePSI
priorcoef.loc['beta', 'PSI'] = scalePSI
self.priorcoef = priorcoef
else:
self.priorcoef = None
def _regressor_matrix_ols(self):
# purpose is to construct the SS matrix
# Constructs the matrix of regressors
n = self.n
lags = self.lags
data = self.data
x = np.zeros((self.TT, self.k))
x[:, 0] = 1
for i in range(1, self.lags + 1):
x[:, 1 + (i - 1) * n: i * n + 1] = data.shift(i).values
self.y0 = data.iloc[:lags, :].mean().values
self.x = x[lags:, :]
self.y = data.values[lags:, :]
self.T = self.y.shape[0] # Sample size after lags
# OLS for AR(1) residual variance of each equation
SS = np.zeros(self.n)
for i in range(self.n):
y_reg = self.y[1:, i]
x_reg = np.hstack((np.ones((self.T - 1, 1)), self.y[:-1, i].reshape((-1, 1))))
ar1 = OLS1(y_reg, x_reg)
SS[i] = ar1.sig2hatols
self.SS = SS
def _minimization(self):
# Starting values for the minimization
self.lambda0 = 0.2 # std of MN prior
self.theta0 = 1 # std of SUR prior
self.miu0 = 1 # std NOC prior
self.alpha0 = 2 # lag-decaying parameter of the MN prior
self.psi0 = self.SS
# Bounds for the minimization step
self.lambda_min = 0.0001
self.lambda_max = 5
self.alpha_min = 0.1
self.alpha_max = 5
self.theta_min = 0.0001
self.theta_max = 50
self.miu_min = 0.0001
self.miu_max = 50
self.psi_min = self.SS / 100
self.psi_max = self.SS * 100
# Transforming inputs to unbounded and builds the initial guess
x0 = np.array([-np.log((self.lambda_max - self.lambda0) / (self.lambda0 - self.lambda_min))])
if self.mnpsi:
inpsi = -np.log((self.psi_max - self.psi0) / (self.psi0 - self.psi_min))
x0 = np.concatenate((x0, inpsi))
if self.sur:
intheta = np.array([-np.log((self.theta_max - self.theta0) / (self.theta0 - self.theta_min))])
x0 = np.concatenate((x0, intheta))
if self.noc:
inmiu = np.array([-np.log((self.miu_max - self.miu0) / (self.miu0 - self.miu_min))])
x0 = np.concatenate((x0, inmiu))
if self.mnalpha:
inalpha = np.array([-np.log((self.alpha_max - self.alpha0) / (self.alpha0 - self.alpha_min))])
x0 = np.concatenate((x0, inalpha))
# initial guess for the inverse Hessian
H0 = 10 * np.eye(len(x0))
# Minimization of the negative of the posterior of the hyperparameters
def myfun(xxx):
logML, _, _ = self._logmlvar_formin(xxx)
return -logML
# Optimization
fh, xh, gh, h, itct, fcount, retcodeh = csminwel(fcn=myfun,
x0=x0,
h0=H0,
grad=None,
crit=self.crit,
nit=1000,
verbose=self.verbose)
self.itct = itct
self.xh = xh
self.h = h
self.log_post, self.betahat, self.sigmahat = self._logmlvar_formin(xh)
self.lamb = self.lambda_min + (self.lambda_max - self.lambda_min) / (1 + np.exp(-xh[0]))
self.theta = self.theta_max
self.miu = self.miu_max
if self.mnpsi:
# diagonal elements of the scale matrix of the IW prior on the residual variance
self.psi = self.psi_min + (self.psi_max - self.psi_min) / (1 + np.exp(-xh[1:self.n + 1]))
if self.sur:
# std of sur prior at the peak
self.theta = self.theta_min + (self.theta_max - self.theta_min) / (1 + np.exp(-xh[self.n + 1]))
if self.noc:
# std of noc prior at the peak
self.miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-xh[self.n + 2]))
else: # self.sur == 0
if self.noc:
# std of noc prior at the peak
self.miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-xh[self.n + 1]))
else: # self.mnpsi == 0
self.psi = self.SS
if self.sur:
# std of sur prior at the peak
self.theta = self.theta_min + (self.theta_max - self.theta_min) / (1 + np.exp(-xh[1]))
if self.noc:
# std of noc prior at the peak
self.miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-xh[2]))
else:
if self.noc:
# std of noc prior at the peak
self.miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-xh[1]))
if not self.mnalpha:
self.alpha = 2
else:
# Lag-decaying parameter of the MN prior
self.alpha = self.alpha_min + (self.alpha_max - self.alpha_min) / (1 + np.exp(-xh[-1]))
def _forecasts(self):
# Forecasts at the posterior mode
Y = np.vstack([self.y, np.zeros((self.hz, self.n))])
for tau in range(self.hz):
indexes = list(range(self.T + tau - 1, self.T + tau - self.lags - 1, -1))
xT = np.vstack([1, Y[indexes].T.reshape((self.k - 1, 1), order="F")]).T
Y[self.T + tau, :] = xT @ self.betahat
self.forecast = Y[-self.hz:, :]
def _mcmc(self):
# Jacobian of the transformation of the hyperparameters that has been
# used for the constrained maximization
JJ = np.exp(self.xh) / ((1 + np.exp(self.xh)) ** 2)
JJ[0] = (self.lambda_max - self.lambda_min) * JJ[0]
if self.mnpsi:
JJ[1: self.n + 1] = (self.psi_max - self.psi_min) * JJ[1: self.n + 1]
if self.sur:
JJ[self.n + 1] = (self.theta_max - self.theta_min) * JJ[self.n + 1]
if self.noc:
JJ[self.n + 2] = (self.miu_max - self.miu_min) * JJ[self.n + 2]
else:
if self.noc:
JJ[self.n + 1] = (self.miu_max - self.miu_min) * JJ[self.n + 1]
else:
if self.sur:
JJ[1] = (self.theta_max - self.theta_min) * JJ[1]
if self.noc:
JJ[2] = (self.miu_max - self.miu_min) * JJ[2]
else:
if self.noc:
JJ[1] = (self.miu_max - self.miu_min) * JJ[1]
if self.mnalpha:
JJ[-1] = (self.alpha_max - self.alpha_min) * JJ[-1]
JJ = np.diag(JJ)
HH = JJ @ self.h @ JJ
# Regularization to assure that HH is positive-definite
eigval, eigvec = np.linalg.eig(HH)
HH = eigvec @ np.diag(np.abs(eigval)) @ eigvec.T
# recovering the posterior mode
postmode = np.array([self.lamb])
if self.mnpsi:
modepsi = np.array(self.psi)
postmode = np.concatenate((postmode, modepsi))
if self.sur:
modetheta = np.array([self.theta])
postmode = np.concatenate((postmode, modetheta))
if self.noc:
modemiu = np.array([self.miu])
postmode = np.concatenate((postmode, modemiu))
if self.mnalpha:
modealpha = np.array([self.alpha])
postmode = np.concatenate((postmode, modealpha))
# starting value of the Metropolis algorithm
P = np.zeros((self.ndraws, self.xh.shape[0]))
logMLold = -10e15
while logMLold == -10e15:
P[0, :] = np.random.multivariate_normal(mean=postmode,
cov=(self.mcmccosnt ** 2) * HH)
logMLold, betadrawold, sigmadrawold = self._logmlvar_formcmc(P[0])
# matrix to store the draws of the VAR coefficients if MCMCstorecoeff is on
if self.mcmcstorecoef:
mcmc_beta = np.zeros((self.k, self.n, self.ndraws - self.ndrwasdiscard))
mcmc_sigma = np.zeros((self.n, self.n, self.ndraws - self.ndrwasdiscard))
else:
mcmc_beta = None
mcmc_sigma = None
# matrix to store the forecasts if MCMCfcast is on
if self.mcmcfcast:
mcmc_Dforecast = np.zeros((self.hz, self.n, self.ndraws - self.ndrwasdiscard))
else:
mcmc_Dforecast = None
# Metropolis iterations
count = 0
for i in tqdm(range(1, self.ndraws), 'MCMC Iterations', disable=not self.verbose):
# draw candidate value
P[i, :] = np.random.multivariate_normal(mean=P[i - 1, :],
cov=(self.mcmccosnt ** 2) * HH)
logMLnew, betadrawnew, sigmadrawnew = self._logmlvar_formcmc(P[i, :])
if logMLnew > logMLold: # if there is an improvement, accept it
logMLold = logMLnew
count = count + 1
else: # If there is no improvement, there is a chance to accept the draw
if np.random.rand() < np.exp(logMLnew - logMLold): # If accepted
logMLold = logMLnew
count = count + 1
else: # If not accepted, overwrite the draw with the last value
P[i, :] = P[i - 1, :]
# if MCMCfcast is on, take a new draw of the VAR coefficients with
# the old hyperparameters if the new ones have been rejected
if self.mcmcfcast or self.mcmcstorecoef:
_, betadrawnew, sigmadrawnew = self._logmlvar_formcmc(P[i, :])
# stores draws of VAR coefficients if MCMCstorecoeff is on
if (i >= self.ndrwasdiscard) and self.mcmcstorecoef:
mcmc_beta[:, :, i - self.ndrwasdiscard] = betadrawnew
mcmc_sigma[:, :, i - self.ndrwasdiscard] = sigmadrawnew
# produce and store the forecasts if MCMCfcast is on
if (i >= self.ndrwasdiscard) and self.mcmcfcast:
Y = np.vstack([self.y, np.zeros((self.hz, self.n))])
for tau in range(self.hz):
indexes = list(range(self.T + tau - 1, self.T + tau - self.lags - 1, -1))
xT = np.vstack([1, Y[indexes].T.reshape((self.k - 1, 1), order="F")]).T
Y[self.T + tau, :] = xT @ betadrawnew + np.random.multivariate_normal(mean=np.zeros(self.n),
cov=sigmadrawnew)
mcmc_Dforecast[:, :, i - self.ndrwasdiscard] = Y[-self.hz:, :]
# store the draws of the hyperparameters
mcmc_lambda = P[self.ndrwasdiscard:, 0] # Standard Minnesota prior
mcmc_psi = None
mcmc_theta = None
mcmc_miu = None
if self.mnpsi:
# diagonal elements of the scale matrix of the IW prior on the residual variance
mcmc_psi = P[self.ndrwasdiscard:, 1:self.n + 1] # columns 1..n of P hold the psi draws
if self.sur:
# std of sur prior
mcmc_theta = P[self.ndrwasdiscard:, self.n + 1]
if self.noc:
# std of noc prior
mcmc_miu = P[self.ndrwasdiscard:, self.n + 2]
else: # self.sur == 0
if self.noc:
# std of noc prior
mcmc_miu = P[self.ndrwasdiscard:, self.n + 1]
else: # self.mnpsi == 0
if self.sur:
# std of sur prior
mcmc_theta = P[self.ndrwasdiscard:, 1]
if self.noc:
# std of noc prior
mcmc_miu = P[self.ndrwasdiscard:, 2]
else: # self.sur == 0
if self.noc:
# std of noc prior
mcmc_miu = P[self.ndrwasdiscard:, 1]
if self.mnalpha:
# Lag-decaying parameter of the MN prior
mcmc_alpha = P[self.ndrwasdiscard:, -1]
self.mcmc_alpha = mcmc_alpha
mcmc_accrate = np.mean((mcmc_lambda[1:] != mcmc_lambda[:-1]))
# Save the chains as attributes
self.mcmc_beta = mcmc_beta
self.mcmc_sigma = mcmc_sigma
self.mcmc_dforecast = mcmc_Dforecast
self.mcmc_lambda = mcmc_lambda
self.mcmc_psi = mcmc_psi
self.mcmc_theta = mcmc_theta
self.mcmc_miu = mcmc_miu
self.mcmc_accrate = mcmc_accrate
def _logmlvar_formin(self, par):
"""
This function computes the log-posterior (or the logML if hyperpriors=0),
the posterior mode of the coefficients and the covariance matrix of the
residuals of the BVAR of Giannone, Lenza and Primiceri (2012)
"""
# The following avoids the warning "referenced before assignment"
theta = None
miu = None
# hyperparameters
lambda_ = self.lambda_min + (self.lambda_max - self.lambda_min) / (1 + np.exp(-par[0]))
d = self.n + 2
if not self.mnpsi:
psi = self.SS * (d - self.n - 1)
if self.sur:
theta = self.theta_min + (self.theta_max - self.theta_min) / (1 + np.exp(-par[1]))
if self.noc:
miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-par[2]))
else:
if self.noc:
miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-par[1]))
else:
psi = self.psi_min + (self.psi_max - self.psi_min) / (1 + np.exp(-par[1:self.n + 1]))
if self.sur:
theta = self.theta_min + (self.theta_max - self.theta_min) / (1 + np.exp(-par[self.n + 1]))
if self.noc:
miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-par[self.n + 2]))
else:
if self.noc:
miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-par[self.n + 1]))
if not self.mnalpha:
alpha = 2
else: # self.mnalpha == 1
alpha = self.alpha_min + (self.alpha_max - self.alpha_min) / (1 + np.exp(-par[-1]))
# Setting up the priors
omega = np.zeros(self.k)
omega[0] = self.vc
for i in range(1, self.lags + 1):
omega[1 + (i - 1) * self.n: 1 + i * self.n] = \
(d - self.n - 1) * (lambda_ ** 2) * (1 / (i ** alpha)) / psi
# Prior scale matrix for the covariance of the shocks
PSI = np.diag(psi)
# dummy observations if sur and / or noc = 1
Td = 0
xdsur = np.array([]).reshape((0, self.k))
ydsur = np.array([]).reshape((0, self.n))
xdnoc = np.array([]).reshape((0, self.k))
ydnoc = np.array([]).reshape((0, self.n))
y = self.y.copy()
x = self.x.copy()
T = self.T
if self.sur:
xdsur = (1 / theta) * np.tile(self.y0, (1, self.lags))
xdsur = np.hstack((np.array([[1 / theta]]), xdsur))
ydsur = (1 / theta) * self.y0
y = np.vstack((y, ydsur))
x = np.vstack((x, xdsur))
Td = Td + 1
if self.noc:
ydnoc = (1 / miu) * np.diag(self.y0)
# Set to zero the prior mean on the first own lag for variables selected in the vector pos
if self.pos is not None:
ydnoc[self.pos, self.pos] = 0
xdnoc = (1 / miu) * np.tile(np.diag(self.y0), (1, self.lags))
xdnoc = np.hstack((np.zeros((self.n, 1)), xdnoc))
y = np.vstack((y, ydnoc))
x = np.vstack((x, xdnoc))
Td = Td + self.n
T = T + Td
# ===== OUTPUT ===== #
# Minnesota prior mean
b = np.zeros((self.k, self.n))
diagb = np.ones(self.n)
# Set to zero the prior mean on the first own lag for variables selected in the vector pos
if self.pos is not None:
diagb[self.pos] = 0
b[1:self.n + 1, :] = np.diag(diagb)
# posterior mode of the VAR coefficients
matA = x.T @ x + np.diag(1 / omega)
matB = x.T @ y + np.diag(1 / omega) @ b
betahat = np.linalg.solve(matA, matB) # np.linalg.solve is more efficient than inverting a large matrix
# VAR residuals
epshat = y - x @ betahat
# Posterior mode of the covariance matrix
sigmahat = (epshat.T @ epshat + PSI + (betahat - b).T @ np.diag(1 / omega) @ (betahat - b))
sigmahat = sigmahat / (T + d + self.n + 1)
# logML
aaa = np.diag(np.sqrt(omega)) @ x.T @ x @ np.diag(np.sqrt(omega))
bbb = np.diag(1 / np.sqrt(psi)) @ (epshat.T @ epshat + (betahat - b).T @ np.diag(1/omega) @
(betahat-b)) @ np.diag(1 / np.sqrt(psi))
eigaaa = np.linalg.eig(aaa)[0].real
eigaaa[eigaaa < 1e-12] = 0
eigaaa = eigaaa + 1
eigbbb = np.linalg.eig(bbb)[0].real
eigbbb[eigbbb < 1e-12] = 0
eigbbb = eigbbb + 1
logML = - self.n * T * np.log(np.pi) / 2
logML = logML + sum(gammaln((T + d - np.arange(self.n)) / 2) - gammaln((d - np.arange(self.n)) / 2))
logML = logML - T * sum(np.log(psi)) / 2
logML = logML - self.n * sum(np.log(eigaaa)) / 2
logML = logML - (T + d) * sum(np.log(eigbbb)) / 2
if self.sur or self.noc:
yd = np.vstack((ydsur, ydnoc))
xd = np.vstack((xdsur, xdnoc))
# prior mode of the VAR coefficients
betahatd = b
# VAR residuals at the prior mode
epshatd = yd - xd @ betahatd
aaa = np.diag(np.sqrt(omega)) @ xd.T @ xd @ np.diag(np.sqrt(omega))
bbb = np.diag(1 / np.sqrt(psi)) @ (epshatd.T @ epshatd + (betahatd - b).T @ np.diag(1 / omega) @
(betahatd - b)) @ np.diag(1 / np.sqrt(psi))
eigaaa = np.linalg.eig(aaa)[0].real
eigaaa[eigaaa < 1e-12] = 0
eigaaa = eigaaa + 1
eigbbb = np.linalg.eig(bbb)[0].real
eigbbb[eigbbb < 1e-12] = 0
eigbbb = eigbbb + 1
# normalizing constant
norm = - self.n * Td * np.log(np.pi) / 2
norm = norm + sum(gammaln((Td + d - np.arange(self.n)) / 2) - gammaln((d - np.arange(self.n)) / 2))
norm = norm - Td * sum(np.log(psi)) / 2
norm = norm - self.n * sum(np.log(eigaaa)) / 2
norm = norm - (T + d) * sum(np.log(eigbbb)) / 2
logML = logML - norm
if self.hyperpriors:
logML = logML + self._log_gammma_pdf(x=lambda_,
k=self.priorcoef.loc['lambda', 'r_k'],
theta=self.priorcoef.loc['lambda', 'r_theta'])
if self.sur:
logML = logML + self._log_gammma_pdf(x=theta,
k=self.priorcoef.loc['theta', 'r_k'],
theta=self.priorcoef.loc['theta', 'r_theta'])
if self.noc:
logML = logML + self._log_gammma_pdf(x=miu,
k=self.priorcoef.loc['miu', 'r_k'],
theta=self.priorcoef.loc['miu', 'r_theta'])
if self.mnpsi:
toadd = self._log_invgammma_to_pdf(x=psi / (d - self.n - 1),
alpha=self.priorcoef.loc['alpha', 'PSI'],
beta=self.priorcoef.loc['beta', 'PSI'])
logML = logML + sum(toadd)
return logML, betahat, sigmahat
def _logmlvar_formcmc(self, par):
"""
This function computes the log-posterior (or the logML if hyperpriors=0),
and draws from the posterior distribution of the coefficients and of the
covariance matrix of the residuals of the BVAR of Giannone, Lenza and
Primiceri (2012)
"""
# hyperparameters
lambda_ = par[0]
d = self.n + 2
theta = self.theta_min
miu = self.miu_min
if not self.mnpsi:
psi = self.SS * (d - self.n - 1)
if self.sur:
theta = par[1]
if self.noc:
miu = par[2]
else: # if self.sur == 0
if self.noc:
miu = par[1]
else:
psi = par[1:self.n + 1]
if self.sur:
theta = par[self.n + 1]
if self.noc:
miu = par[self.n + 2]
else:
if self.noc:
miu = par[self.n + 1]
if not self.mnalpha:
alpha = 2
else:
alpha = par[-1]
# Check if parameters are outside of parameter space and, if so, return a very low value of the posterior
cond_lower_bound = np.any([lambda_ < self.lambda_min,
np.any(psi < self.psi_min),
theta < self.theta_min,
miu < self.miu_min,
alpha < self.alpha_min])
cond_upper_bound = np.any([lambda_ > self.lambda_max,
np.any(psi > self.psi_max),
theta > self.theta_max,
miu > self.miu_max])
if cond_lower_bound or cond_upper_bound:
logML = -10e15
betadraw = None
drawSIGMA = None
return logML, betadraw, drawSIGMA
else:
# Priors
omega = np.zeros(self.k)
omega[0] = self.vc
for i in range(1, self.lags + 1):
omega[1 + (i - 1) * self.n: 1 + i * self.n] = \
((d - self.n - 1) * (lambda_ ** 2) * (1 / (i ** alpha))) / psi
# Prior scale matrix for the covariance of the shocks
PSI = np.diag(psi)
Td = 0
xdsur = np.array([]).reshape((0, self.k))
ydsur = np.array([]).reshape((0, self.n))
xdnoc = np.array([]).reshape((0, self.k))
ydnoc = np.array([]).reshape((0, self.n))
# dummy observations if sur and / or noc = 1
y = self.y.copy()
x = self.x.copy()
T = self.T
if self.sur:
xdsur = (1 / theta) * np.tile(self.y0, (1, self.lags))
xdsur = np.hstack((np.array([[1 / theta]]), xdsur))
ydsur = (1 / theta) * self.y0
y = np.vstack((y, ydsur))
x = np.vstack((x, xdsur))
Td = Td + 1
if self.noc:
ydnoc = (1 / miu) * np.diag(self.y0)
# Set to zero the prior mean on the first own lag for variables selected in the vector pos
if self.pos is not None:
    ydnoc[self.pos, self.pos] = 0
xdnoc = (1 / miu) * np.tile(np.diag(self.y0), (1, self.lags))
xdnoc = np.hstack((np.zeros((self.n, 1)), xdnoc))
y = np.vstack((y, ydnoc))
x = np.vstack((x, xdnoc))
Td = Td + self.n
# ===== Output =====
# Minnesota prior mean
b = np.zeros((self.k, self.n))
diagb = np.ones(self.n)
# Set to zero the prior mean on the first own lag for variables selected in the vector pos
if self.pos is not None:
    diagb[self.pos] = 0
b[1:self.n + 1, :] = np.diag(diagb)
# self.b = b
# posterior mode of the VAR coefficients
matA = x.T @ x + np.diag(1 / omega)
matB = x.T @ y + np.diag(1 / omega) @ b
betahat = np.linalg.solve(matA, matB) # np.linalg.solve is more efficient than inverting a large matrix
# VAR residuals
epshat = y - x @ betahat
# logMl
T = T + Td
aaa = np.diag(np.sqrt(omega)) @ x.T @ x @ np.diag(np.sqrt(omega))
bbb = np.diag(1 / np.sqrt(psi)) @ (epshat.T @ epshat + (betahat - b).T @ np.diag(1 / omega) @
(betahat - b)) @ np.diag(1 / np.sqrt(psi))
eigaaa = np.linalg.eig(aaa)[0].real
eigaaa[eigaaa < 1e-12] = 0
eigaaa = eigaaa + 1
eigbbb = np.linalg.eig(bbb)[0].real
eigbbb[eigbbb < 1e-12] = 0
eigbbb = eigbbb + 1
logML = - self.n * T * np.log(np.pi) / 2
logML = logML + sum(gammaln((T + d - np.arange(self.n)) / 2) - gammaln((d - np.arange(self.n)) / 2))
logML = logML - T * sum(np.log(psi)) / 2
logML = logML - self.n * sum(np.log(eigaaa)) / 2
logML = logML - (T + d) * sum(np.log(eigbbb)) / 2
# More terms for logML in case of more priors
if self.sur or self.noc:
yd = np.vstack((ydsur, ydnoc))
xd = np.vstack((xdsur, xdnoc))
# prior mode of the VAR coefficients
betahatd = b
# VAR residuals at the prior mode
epshatd = yd - xd @ betahatd
aaa = np.diag(np.sqrt(omega)) @ xd.T @ xd @ np.diag(np.sqrt(omega))
bbb = np.diag(1 / np.sqrt(psi)) @ (epshatd.T @ epshatd + (betahatd - b).T @ np.diag(1 / omega) @
(betahatd - b)) @ np.diag(1 / np.sqrt(psi))
eigaaa = np.linalg.eig(aaa)[0].real
eigaaa[eigaaa < 1e-12] = 0
eigaaa = eigaaa + 1
eigbbb = np.linalg.eig(bbb)[0].real
eigbbb[eigbbb < 1e-12] = 0
eigbbb = eigbbb + 1
# normalizing constant
norm = - self.n * Td * np.log(np.pi) / 2
norm = norm + sum(gammaln((Td + d - np.arange(self.n)) / 2) - gammaln((d - np.arange(self.n)) / 2))
norm = norm - Td * sum(np.log(psi)) / 2
norm = norm - self.n * sum(np.log(eigaaa)) / 2
norm = norm - (T + d) * sum(np.log(eigbbb)) / 2
logML = logML - norm
if self.hyperpriors:
logML = logML + self._log_gammma_pdf(x=lambda_,
k=self.priorcoef.loc['lambda', 'r_k'],
theta=self.priorcoef.loc['lambda', 'r_theta'])
if self.sur:
logML = logML + self._log_gammma_pdf(x=theta,
k=self.priorcoef.loc['theta', 'r_k'],
theta=self.priorcoef.loc['theta', 'r_theta'])
if self.noc:
logML = logML + self._log_gammma_pdf(x=miu,
k=self.priorcoef.loc['miu', 'r_k'],
theta=self.priorcoef.loc['miu', 'r_theta'])
if self.mnpsi:
toadd = self._log_invgammma_to_pdf(x=psi / (d - self.n - 1),
alpha=self.priorcoef.loc['alpha', 'PSI'],
beta=self.priorcoef.loc['beta', 'PSI'])
logML = logML + sum(toadd)
# takes a draw from the posterior of SIGMA and beta, if draw is on
draw = self.mcmcfcast or self.mcmcstorecoef
if not draw:
betadraw = None
drawSIGMA = None
else:
S = PSI + epshat.T @ epshat + (betahat - b).T @ np.diag(1 / omega) @ (betahat - b)
E, V = np.linalg.eig(S)
Sinv = V @ np.diag(1 / np.abs(E)) @ V.T
eta = np.random.multivariate_normal(mean=np.zeros(self.n),
cov=Sinv,
size=T+d)
drawSIGMA = np.linalg.solve(eta.T @ eta, np.eye(self.n))
cholSIGMA = self._cholred((drawSIGMA + drawSIGMA.T) / 2)
cholZZinv = self._cholred(np.linalg.solve(x.T @ x + np.diag(1 / omega), np.eye(self.k)))
betadraw = betahat + cholZZinv.T @ np.random.normal(size=betahat.shape) @ cholSIGMA
return logML, betadraw, drawSIGMA
@staticmethod
def _gamma_coef(mode, sd):
k = (2 + mode ** 2 / sd ** 2 + np.sqrt((4 + mode ** 2 / sd ** 2) * mode ** 2 / sd ** 2)) / 2
theta = np.sqrt(sd ** 2 / k)
return k, theta
@staticmethod
def _log_gammma_pdf(x, k, theta):
r = (k - 1) * np.log(x) - x / theta - k * np.log(theta) - gammaln(k)
return r
@staticmethod
def _log_invgammma_to_pdf(x, alpha, beta):
r = alpha * np.log(beta) - (alpha + 1) * np.log(x) - beta * (1 / x) - gammaln(alpha)
return r
@staticmethod
def _cholred(s):
d, v = np.linalg.eig((s + s.T) / 2)
d = d.real
scale = np.diag(s).mean() * 1e-12
J = d > scale
C = np.zeros(s.shape)
C[J, :] = (v[:, J] @ np.diag(d[J] ** 0.5)).T
return C
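# --- Illustrative sketch (not part of the original module) ---
# BVARGLP._gamma_coef matches a Gamma(k, theta) distribution to a requested mode
# and standard deviation (the hyperprior calibration used in _set_priors). The
# helper below is an assumed, self-contained check: for a Gamma(k, theta) the
# mode is (k - 1) * theta and the std is sqrt(k) * theta, so the returned pair
# should reproduce the inputs up to floating-point error.
def _example_gamma_coef_check(mode=0.2, sd=0.4):
    k, theta = BVARGLP._gamma_coef(mode, sd)
    return (k - 1) * theta, np.sqrt(k) * theta  # ~ (mode, sd)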
class OLS1(object):
"""
This is a simple OLS regression with a leaner, simpler layout
"""
def __init__(self, y, x):
self.x = x
self.y = y
nobsy = y.shape[0]
nobs, nvar = x.shape
assert nobsy == nobs, 'x and y must have the same number of observations'
self.nobs = nobs
self.nvar = nvar
self.XX = x.T @ x
self.invXX = np.linalg.inv(self.XX)
self.bhatols = self.invXX @ (x.T @ y)
self.yhatols = x @ self.bhatols
self.resols = y - self.yhatols
self.sig2hatols = (self.resols.T @ self.resols) / (nobs - nvar)
self.sigbhatols = self.sig2hatols * self.invXX
self.r2 = np.var(self.yhatols) / np.var(y)
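# --- Illustrative sketch (not part of the original module) ---
# OLS1 is the helper used in _regressor_matrix_ols to get the AR(1) residual
# variance of each series. The function below is an assumed usage example on a
# synthetic AR(1) process: it builds the [constant, lagged value] regressor
# matrix by hand and reads off bhatols and sig2hatols.
def _example_ols1_ar1(n_obs=200, rho=0.8, seed=0):
    rng = np.random.default_rng(seed)
    y = np.zeros(n_obs)
    for t in range(1, n_obs):
        y[t] = rho * y[t - 1] + rng.normal()  # AR(1) with unit-variance shocks
    x_reg = np.hstack((np.ones((n_obs - 1, 1)), y[:-1].reshape((-1, 1))))
    ar1 = OLS1(y[1:], x_reg)
    return ar1.bhatols, ar1.sig2hatols  # slope estimate ~ rho, residual variance ~ 1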
class CRBVAR(object):
def __init__(self, data, lags, hz=24, vc=10e6, stationary_prior=None, crit=1e-16,
hyperpriors=True, mnpsi=True, mnalpha=False, sur=True, noc=True,
fcast=False, mcmc=False, ndraws=20000, ndrawsdiscard=None, mcmcconst=1,
mcmcfcast=True, mcmcstorecoef=True, verbose=False, resample_method='full'):
"""
This class implements the "Cube-Root" Bayesian VAR from Climadomo, Giannone, Lenza, Monti and Sokol (2020).
The main idea of the models is to use the BVARGLP class to estimate a quarterly VAR and "monthlize" it for
a state-space model capable of dealing with missing data and mixed frequancy data.
This class only accepts data with at leaset one monthly time series. Quarterly variable are allowed but
must be in the same pandas.DataFrame with a monthly index.
@param hyperpriors: False = no priors on hyperparameters
True = reference priors on hyperparameters (default)
[NOTE: hyperpriors on psi calibrated for data expressed in
4 x logs, such as 4 x log(GDP). Thus if interest rate is in
percentage, divide by 100]
@param vc: prior variance in the MN prior for the coefficients multiplying
the constant term (Default: vc=10e6)
@param stationary_prior: names of the variables that enter the VAR in first
differences and for which one might want to set the prior mean
on the coefficient on the first own lag in the MN prior and the
prior mean of the sum-of-coefficients prior to 0 (instead of
the typical 1)
@param mnpsi: False = diagonal elements of the scale matrix of the IW prior on
the covariance of the residuals NOT treated as
hyperparameters (set to the residual variance of an AR(1))
True = diagonal elements of the scale matrix of the IW prior on
the covariance of the residuals treated as hyperparameters (default)
@param mnalpha: False = Lag-decaying parameter of the MN prior set to 2 and
NOT treated as hyperparameter (default)
True = Lag-decaying parameter of the MN prior treated as
hyperparameter
@param sur: False = single-unit-root prior is OFF
True = single-unit-root prior is ON and its std is treated as an
hyperparameter (default)
@param noc: False = no-cointegration (sum-of coefficients) prior is OFF
True = no-cointegration (sum-of coefficients) is ON and its std is
treated as an hyperparameter (default)
@param fcast: False = does not generate forecasts at the posterior mode
True = generates forecasts at the posterior mode (default)
@param hz: number of periods for which it generates forecasts (default: hz=24)
@param mcmc: False = does not run the MCMC (default)
True = runs the MCMC after the maximization
@param ndraws: number of draws in the MCMC (default: Ndraws=20000)
@param ndrawsdiscard: number of draws initially discarded to allow convergence
in the MCMC (default=Ndraws/2)
@param mcmcconst: scaling constant for the MCMC (should be calibrated to achieve
an acceptance rate of approx 25%) (default: MCMCconst=1)
@param mcmcfcast: False = does not generate forecasts when running the MCMC
True = generates forecasts while running the MCMC
(for each draw of the hyperparameters the code takes a
draw of the VAR coefficients and shocks, and generates
forecasts at horizons hz) (default).
@param mcmcstorecoef: False = does not store the MCMC draws of the VAR
coefficients and residual covariance matrix
True = stores the MCMC draws of the VAR coefficients and
residual covariance matrix (default)
@param verbose: Prints relevant information during the estimation.
@param crit: precision for convergence criteria.
@param resample_method: 'full' only includes quarters that have all of its data available.
'last' uses the last observation available for each quarter.
"""
assert data.index.inferred_freq == 'M', "input 'data' must be monthly and recognized by pandas."
self.data = data
if resample_method == 'full':
self.data_quarterly = self._get_quarterly_df()
elif resample_method == 'last':
self.data_quarterly = data.resample('Q').last().dropna()
else:
raise NotImplementedError('resample method not implemented')
self.lags = lags
self.hyperpriors = hyperpriors
self.vc = vc
self.stationary_prior = stationary_prior
self.mnalpha = mnalpha
self.mnpsi = mnpsi
self.sur = sur
self.noc = noc
self.fcast = fcast
self.hz = hz
self.mcmc = mcmc
self.ndraws = ndraws
self.ndrwasdiscard = int(ndraws/2) if ndrawsdiscard is None else ndrawsdiscard
self.mcmccosnt = mcmcconst
self.mcmcfcast = mcmcfcast
self.mcmcstorecoef = mcmcstorecoef
self.verbose = verbose
self.crit = crit
self.bvar_quarterly = BVARGLP(data=self.data_quarterly,
lags=lags,
hyperpriors=hyperpriors,
vc=vc,
stationary_prior=stationary_prior,
mnpsi=mnpsi,
mnalpha=mnalpha,
sur=sur,
noc=noc,
fcast=fcast,
hz=hz,
mcmc=mcmc,
ndraws=ndraws,
ndrawsdiscard=ndrawsdiscard,
mcmcconst=mcmcconst,
mcmcfcast=mcmcfcast,
mcmcstorecoef=mcmcstorecoef,
verbose=verbose,
crit=crit)
betahat = self.bvar_quarterly.betahat
sigmahat = self.bvar_quarterly.sigmahat
k, n = betahat.shape
_, _, _, aa, _, qq, c2, c1, CC, _, _, _ = self._build_monthly_ss(betahat, sigmahat)
qqKF = np.zeros((n * lags, n * lags))
qqKF[:n, :n] = qq.real
# Next line is just a weird reshaping of the starting state
initX = np.flip(self.data_quarterly.iloc[:lags].values, axis=0).T.reshape(-1, 1, order='F').reshape(-1)
initV = np.eye(initX.shape[0]) * 1e-7
kf = KalmanFilter(transition_matrices=aa,
transition_offsets=c2,
transition_covariance=qqKF,
observation_matrices=CC,
observation_offsets=c1,
observation_covariance=np.zeros((n, n)),
initial_state_mean=initX,
initial_state_covariance=initV)
# Data format for Kalman Filter
kf_data = ma.masked_invalid(self.data.values)
self.logLik = kf.loglikelihood(kf_data)
new_index = pd.date_range(start=self.data.index[0], periods=self.data.shape[0] + hz, freq='M')
__version__ = "1.4"
import csv
import os # for linesep
import pandas as pd
import numpy as np
from numpy import array as arr
import h5py as h5
from inspect import signature
import uncertainties as unc
import uncertainties.unumpy as unp
from warnings import warn
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit as fit
import scipy.optimize as opt
import scipy.special as special
import scipy.interpolate as interp
import warnings
import MatplotlibPlotters as mp
import PhysicsConstants as mc
import Miscellaneous as misc
from Miscellaneous import what
from copy import copy, deepcopy
from fitters import ( #cython_poissonian as poissonian,
poissonian as poissonian,
FullBalisticMotExpansion, LargeBeamMotExpansion, exponential_saturation )
from fitters.Gaussian import double as double_gaussian, gaussian_2d, arb_2d_sum, bump
import MainAnalysis as ma
import AtomThreshold
import ThresholdOptions
import ExpFile as exp
from ExpFile import ExpFile, dataAddress
# from .TimeTracker import TimeTracker
import PictureWindow as pw
import TransferAnalysisOptions as tao
import scipy.ndimage as ndimage
import scipy.ndimage.filters as filters
from statsmodels.stats.proportion import proportion_confint as confidenceInterval
import imageio
import matplotlib as mpl
import matplotlib.cm
from IPython.display import Image, HTML, display
def softwareBinning(binningParams, rawData):
if binningParams is not None:
sb = binningParams
if len(np.array(rawData).shape) == 3:
if not ((rawData.shape[1]/sb[0]).is_integer()):
raise ValueError('Vertical size ' + str(rawData.shape[1]) + ' not divisible by binning parameter ' + str(sb[0]))
if not ((rawData.shape[2]/sb[1]).is_integer()):
raise ValueError('Horizontal size ' + str(rawData.shape[2]) + ' not divisible by binning parameter ' + str(sb[1]))
rawData = rawData.reshape(rawData.shape[0], rawData.shape[1]//sb[0], sb[0], rawData.shape[2]//sb[1], sb[1]).sum(4).sum(2)
elif len(np.array(rawData).shape) == 2:
if not ((rawData.shape[0]/sb[0]).is_integer()):
raise ValueError('Vertical size ' + str(rawData.shape[0]) + ' not divisible by binning parameter ' + str(sb[0]))
if not ((rawData.shape[1]/sb[1]).is_integer()):
raise ValueError('Horizontal size ' + str(rawData.shape[1]) + ' not divisible by binning parameter ' + str(sb[1]))
rawData = rawData.reshape(rawData.shape[0]//sb[0], sb[0], rawData.shape[1]//sb[1], sb[1]).sum(3).sum(1)
else:
raise ValueError('Raw data must either 2 or 3 dimensions')
return rawData
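# --- Illustrative sketch (not part of the original module) ---
# softwareBinning sums camera counts over sb[0] x sb[1] blocks. The assumed
# example below bins a single 4x6 test image by (2, 3); each element of the
# 2x2 result is the sum of a 2x3 block of the input, so total counts are conserved.
def _example_software_binning():
    pic = np.arange(24).reshape((4, 6))
    binned = softwareBinning((2, 3), pic)
    return binned, pic.sum() == binned.sum()  # (2x2 array, True)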
def windowImage(image, window):
if len(np.array(image).shape) == 2:
return image[window[0]:window[1], window[2]:window[3]]
else:
return image[:,window[0]:window[1], window[2]:window[3]]
def makeVid(pics, gifAddress, videoType, fileAddress=None, dur=1, lim=None, includeCount=True, lowLim=None,
finLabels=[], finTxt="Atom Reservoir Depleted", vidMap='inferno', maxMult=1, offset=0,
resolutionMult=1):
infernoMap = [mpl.cm.inferno(i)[:-1] for i in range(256)]
viridisMap = [mpl.cm.viridis(i)[:-1] for i in range(256)]
magmaMap = [mpl.cm.magma(i)[:-1] for i in range(256)]
hotMap = [mpl.cm.hot(i)[:-1] for i in range(256)]
cividisMap = [mpl.cm.cividis(i)[:-1] for i in range(256)]
if vidMap == 'inferno':
vidMap = infernoMap
if vidMap == 'viridis':
vidMap = viridisMap
if vidMap == 'cividis':
vidMap = cividisMap
# global count
# select subsection
if lim is None:
lim = len(pics)
if lowLim is None:
lowLim = 0
pics = pics[lowLim:lim]
# normalize to rgb scale
pics = pics - min(pics.flatten())
pics = np.uint16(pics / max(pics.flatten()) * 256 * maxMult)
pics = arr([[[int(elem) for elem in row] for row in pic] for pic in pics])
pics = arr(pics-min(pics.flatten()) - offset)
pics = [[[vidMap[elem] if elem < 256 and elem >= 0 else vidMap[255] if elem >= 256 else vidMap[0]
for elem in row] for row in pic] for pic in pics]
images = []
sequenceCount = 1
offset = 0
for picCount, pic in enumerate(pics):
fig = plt.figure()
fig.set_size_inches([9,9])
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
plt.grid(False)
ax.imshow(pic, aspect='equal')
if includeCount:
ax.text(-0.1, 0.1, str(picCount+1-offset), color='white', fontsize=40)
if picCount+1 in finLabels:
ax.text(1.5, 14, finTxt, color='r', fontsize=40)
name = "temp"+str(picCount+1)+".png"
plt.savefig(name)
images.append(imageio.imread(name))
if picCount+1 in finLabels:
sequenceCount += 1
offset = picCount+1
for _ in range(4):
images.append(imageio.imread(name))
plt.close('all')
# make bigger
pics = [np.repeat(np.repeat(pic, resolutionMult, axis=0), resolutionMult, axis=1) for pic in pics]
imageio.mimsave(gifAddress, images, format=videoType, duration=dur)
def collapseImage(im, avg=True):
vAvg = np.zeros(len(im[0]))
for r in im:
vAvg += r
if avg:
vAvg /= len(im)
hAvg = np.zeros(len(im))
for c in misc.transpose(im):
hAvg += c
if avg:
hAvg /= len(im[0])
return hAvg, vAvg
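# --- Illustrative sketch (not part of the original module) ---
# collapseImage reduces a 2D picture to a row profile (hAvg, one entry per row,
# averaged over columns) and a column profile (vAvg, one entry per column,
# averaged over rows). The assumed example below just shows the output shapes
# on a 3x5 test image.
def _example_collapse_image():
    im = np.arange(15).reshape((3, 5)).astype(float)
    hAvg, vAvg = collapseImage(im)
    return np.shape(hAvg), np.shape(vAvg)  # ((3,), (5,))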
def jeffreyInterval(m,num):
# alpha = 1-0.6827 gives the standard "1 sigma" (68.27%) interval.
i1, i2 = confidenceInterval(round(m*num), num, method='jeffreys', alpha=1-0.6827)
return (m - i1, i2 - m)
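# --- Illustrative sketch (not part of the original module) ---
# jeffreyInterval returns the (lower, upper) distances from the measured
# fraction m to the ends of the ~68% Jeffreys binomial confidence interval.
# The assumed example below is for 45 successes out of 100 shots; both error
# bars come out near the simple binomial estimate sqrt(m*(1-m)/num) ~ 0.05.
def _example_jeffrey_interval(m=0.45, num=100):
    lowErr, highErr = jeffreyInterval(m, num)
    return lowErr, highErr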
def findImageMaxima(im, neighborhood_size=20, threshold=1):
data_max = filters.maximum_filter(im, neighborhood_size)
maxima = (im == data_max)
data_min = filters.minimum_filter(im, neighborhood_size)
diff = ((data_max - data_min) > threshold)
maxima[diff == 0] = 0
labeled, num_objects = ndimage.label(maxima)
slices = ndimage.find_objects(labeled)
x, y = [], []
for dy,dx in slices:
x_center = (dx.start + dx.stop - 1)/2
x.append(x_center)
y_center = (dy.start + dy.stop - 1)/2
y.append(y_center)
print('Found ' + str(len(x)) + ' Maxima.')
return [p for p in zip([int(x_) for x_ in x],[int(y_) for y_ in y])]
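# --- Illustrative sketch (not part of the original module) ---
# findImageMaxima locates local maxima via a maximum/minimum filter and a
# prominence threshold. The assumed example below plants two bright pixels in
# an otherwise flat image and recovers their (x, y) coordinates; the
# neighborhood_size is kept small so the two peaks are not merged.
def _example_find_maxima():
    im = np.zeros((40, 40))
    im[10, 12] = 50
    im[30, 25] = 40
    return findImageMaxima(im, neighborhood_size=5, threshold=10)  # ~[(12, 10), (25, 30)]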
def fitManyGaussianImage(im, numGauss, neighborhood_size=20, threshold=1, direct=True, widthGuess=1):
"""
Maxima finding is based on the answer to this question:
https://stackoverflow.com/questions/9111711/get-coordinates-of-local-maxima-in-2d-array-above-certain-value
"""
maximaLocs = findImageMaxima(im, neighborhood_size=neighborhood_size, threshold=threshold)
if len(maximaLocs) != numGauss:
raise ValueError("ERROR: didn't find the right number of maxima!")
guess = [min(im.flatten())]
for loc in maximaLocs:
guess += [im[loc[1],loc[0]], loc[0], loc[1], widthGuess, widthGuess]
xpts = np.arange(len(im[0]))
ypts = np.arange(len(im))
X,Y = np.meshgrid(xpts,ypts)
zpts = arb_2d_sum.f((X,Y), *guess).reshape(X.shape)
f, ax = plt.subplots(1,5,figsize=(20,10))
ax[0].imshow(im)
ax[0].set_title('Orig')
ax[1].imshow(zpts)
ax[1].set_title('Guess')
ax[2].imshow(im-zpts)
ax[2].set_title('Guess-Diff')
optParam, optCov = opt.curve_fit(arb_2d_sum.f, (X,Y), im.flatten(), p0=guess)
zpts_fit = arb_2d_sum.f((X,Y), *optParam).reshape(X.shape)
ax[3].imshow(zpts_fit)
ax[3].set_title('Fit')
ax[4].imshow(im-zpts_fit)
ax[4].set_title('Fit-Diff')
return optParam
def temperatureAnalysis( data, magnification, temperatureGuess=100e-6, **standardImagesArgs ):
res = ma.standardImages(data, scanType="Time(ms)", majorData='fits', fitPics=True, manualAccumulation=True, quiet=True, **standardImagesArgs)
(key, rawData, dataMinusBg, dataMinusAvg, avgPic, pictureFitParams, fitCov, plottedData, v_params, v_errs, h_params, h_errs, intRawData) = res
# convert to meters, convert from sigma to waist
waists = 2 * mc.baslerScoutPixelSize * np.sqrt((pictureFitParams[:, 3]**2+pictureFitParams[:, 4]**2)/2) * magnification
# waists_1D = 2 * mc.baslerScoutPixelSize * np.sqrt((v_params[:, 2]**2+h_params[:, 2]**2)/2) * magnification
waists_1D = 2 * mc.baslerScoutPixelSize * v_params[:, 2] * magnification
# convert to s
times = key / 1000
temp, fitVals, fitCov = calcBallisticTemperature(times, waists / 2, guess = [*LargeBeamMotExpansion.guess()[:-1], temperatureGuess])
temp_1D, fitVals_1D, fitCov_1D = calcBallisticTemperature(times, waists_1D / 2, guess = [*LargeBeamMotExpansion.guess()[:-1], temperatureGuess])
return ( temp, fitVals, fitCov, times, waists, rawData, pictureFitParams, key, plottedData, dataMinusBg, v_params, v_errs, h_params, h_errs,
waists_1D, temp_1D, fitVals_1D, fitCov_1D )
def motFillAnalysis( dataSetNumber, motKey, exposureTime, window=pw.PictureWindow(), sidemotPower=2.05, diagonalPower=8, motRadius=8 * 8e-6,
imagingLoss=0.8, detuning=10e6, **standardImagesArgs ):
res = ma.standardImages(dataSetNumber, key=motKey, scanType="time (s)", window=window, quiet=True, **standardImagesArgs)
motKey, rawData = res[0], res[1]
intRawData = integrateData(rawData)
try:
fitParams, pcov = opt.curve_fit( exponential_saturation.f, motKey, intRawData,
p0=[np.min(intRawData) - np.max(intRawData), 1 / 2, np.max(intRawData)] )
except RuntimeError:
print('MOT # Fit failed!')
# probably failed because of a bad guess. Show the user the guess fit to help them debug.
fitParams = [np.min(intRawData) - np.max(intRawData), 1 / 2, np.max(intRawData)]
pcov = np.zeros((len(fitParams), len(fitParams)))
fitErr = np.sqrt(np.diag(pcov))
motNum, fluorescence = computeMotNumber(sidemotPower, diagonalPower, motRadius, exposureTime, imagingLoss, -fitParams[0],
detuning=detuning)
return rawData, intRawData, motNum, fitParams, fluorescence, motKey, fitErr
def getTodaysTemperatureData():
path = dataAddress + 'Temperature_Data.csv'
df = pd.read_csv(path, header=None, sep=',| ', engine='python')
return df
def Temperature(show=True):
df = getTodaysTemperatureData()
legends = ['1: Master Computer', '2: B236', '3: Auxiliary Table', '4: Main Exp. (Near Ion Pump)']
xpts = [x[:5] for x in df[1]]
if not show:
return xpts, df
fig = plt.figure(figsize=(30,15))
ax1 = fig.add_subplot(2,1,1)
ax2 = fig.add_subplot(2,1,2, sharex=ax1)
ax1.clear()
for ind, leg in zip(np.arange(3,13,3), legends):
pltx, data = [], []
for num, dp in enumerate(df[ind]):
try:
data.append(float(dp))
pltx.append(xpts[num])
except ValueError:
print('Bad Value!', dp, xpts[num])
pass
ax1.plot(pltx, data, label=leg)
ax1.legend(loc='upper center', bbox_to_anchor=(0.5,1.2),ncol=4, fontsize=10)
ax1.set_ylabel('Temperature (C)')
plt.setp(ax1.get_xticklabels(), visible=False)
ax2.clear()
for ind, leg in zip(np.arange(4,14,3), legends):
pltx, data = [], []
for num, dp in enumerate(df[ind]):
try:
data.append(float(dp))
pltx.append(xpts[num])
except ValueError:
print('Bad Value!', dp, xpts[num])
ax2.plot(pltx, data, label=leg)
ax2.set_ylabel('Humidity (%)')
incr = int(len(xpts)/20)+1
ax2.set_xticks(xpts[::incr])
plt.xlabel('Time (hour:minute)')
plt.xticks(rotation=75);
return xpts, df, ax1, ax2
def splitData(data, picsPerSplit, picsPerRep, runningOverlap=0):
data = np.reshape(data, (picsPerSplit, int(data.shape[1]/picsPerSplit), data.shape[2], data.shape[3]))
return data, int(picsPerSplit/picsPerRep), np.arange(0,int(data.shape[1]))
def parseRearrangeInfo(addr, limitedMoves=-1):
moveList = []
readyForAtomList = False
with open(addr) as centerLog:
for i, line in enumerate(centerLog):
# this number depends on the size of the target matrix. it is height + 2.
if i < 12:
continue
txt = line.split(' ')
if txt[0] == 'Rep':
moveList.append({'Rep': txt[2]})
continue
if txt[0] == 'Moves:\n':
continue
if txt[0] == 'Source:':
moveList[-1]['Source'] = []
for loc in txt[1:]:
if loc != ';' and loc != '\n':
moveList[-1]['Source'].append(int(loc[:-1]))
continue
if txt[0] == 'Target' and txt[1] == 'Location:':
moveList[-1]['Target-Location'] = txt[2] + ',' + txt[3]
moveList[-1]['Moves'] = []
continue
if not readyForAtomList:
if len(moveList[-1]['Moves']) >= limitedMoves and limitedMoves != -1:
continue
moveList[-1]['Moves'].append({'Flashed': bool(int(txt[1])), 'Direction': txt[2]})
moveList[-1]['Moves'][-1]['Atoms'] = []
readyForAtomList = True
continue
if len(txt) != 1:
if len(moveList[-1]['Moves']) >= limitedMoves+1 and limitedMoves != -1:
continue
moveList[-1]['Moves'][-1]['Atoms'].append((txt[0], txt[1]))
else:
# this blank line happens between moves.
readyForAtomList = False
return moveList
def handleKeyModifications(hdf5Key, numVariations, keyInput=None, keyOffset=0, groupData=False, keyConversion=None, keySlice=None ):
"""
keySlice: mostly for handling the case of two concurrent variables that are varying the same, so it's not quite a multidimensional
slice but I need to specify which value to use for the x-axis etc.
"""
key = None
key = hdf5Key if keyInput is None else keyInput
if key is None:
key = arr([0]) if numVariations == 1 else arr([])
if groupData:
key = [0]
if len(key.shape) == 1:
key -= keyOffset
if keyConversion is not None:
key = [keyConversion.f(k) for k in key]
#keyName += "; " + keyConversion.units()
if len(key) != numVariations:
raise ValueError("ERROR: The Length of the key doesn't match the data found. "
"Did you want to use a transfer-based function instead of a population-based function? Key:",
len(key), "vars:", numVariations)
if keySlice is not None:
key = key[:,keySlice]
return key
def modFitFunc(sign, hBiasIn, vBiasIn, depthIn, *testBiases):
newDepths = extrapolateModDepth(sign, hBiasIn, vBiasIn, depthIn, testBiases)
if newDepths is None:
return 1e9
return np.std(newDepths)
def genAvgDiscrepancyImage(data, shape, locs):
"""
generates an image and determines color mins and maxes to
make the mean white on a normal diverging colormap.
"""
me = np.mean(data)
pic = np.ones(shape) * me
for i, loc in enumerate(locs):
pic[loc[0], loc[1]] = data[i]
mi = min(pic.flatten())
ma = max(pic.flatten())
if me - mi > ma - me:
vmin = mi
vmax = 2*me - mi
else:
vmin = 2*me-ma
vmax = ma
return pic, vmin, vmax
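# --- Illustrative sketch (not part of the original module) ---
# genAvgDiscrepancyImage paints per-site values onto a grid and picks symmetric
# color limits so the mean sits at the center (white) of a diverging colormap.
# The assumed example below places three survival-like values on a 2x2 grid;
# unfilled sites default to the mean, and (vmin + vmax) / 2 equals that mean.
def _example_discrepancy_image():
    vals = [0.90, 0.95, 0.85]
    locs = [(0, 0), (0, 1), (1, 0)]
    pic, vmin, vmax = genAvgDiscrepancyImage(vals, (2, 2), locs)
    return pic, (vmin + vmax) / 2  # second element ~ np.mean(vals)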
def getBetterBiases(prevDepth, prev_V_Bias, prev_H_Bias, sign=1, hFreqs=None, vFreqs=None, hPhases=None, vPhases=None):
for d in prevDepth.flatten():
if d < 0:
print('ERROR: This function cannot currently deal with negative arguments.')
print('Assuming that (', prev_V_Bias[0],',',prev_V_Bias[-1], ') is the bias of the (highest, lowest)-frequency row')
print('Assuming that (', prev_H_Bias[0],',',prev_H_Bias[-1], ') is the bias of the (lowest, highest)-frequency column')
print('Please note that if using the outputted centers from Survival(), then you need to reshape the data'
' into a 2D numpy array correctly to match the ordering of the V and H biases. This is normally done'
' via a call to np.reshape() and a transpose to match the comments above.')
print('Sign Argument should be -1 if numbers are pushout resonance locations.')
if type(prevDepth) is not type(arr([])) or type(prevDepth[0]) is not type(arr([])):
raise TypeError('ERROR: previous depth array must be 2D numpy array')
result, modDepth = extrapolateEveningBiases(prev_H_Bias, prev_V_Bias, prevDepth, sign=sign);
new_H_Bias = result['x'][:len(prev_H_Bias)]
new_V_Bias = result['x'][len(prev_H_Bias):]
print('Horizontal Changes')
for prev, new in zip(prev_H_Bias, new_H_Bias):
print(misc.round_sig(prev,4), '->', misc.round_sig(new,4), str(misc.round_sig(100 * (new - prev) / prev, 2)) + '%')
print('Vertical Changes')
for prev, new in zip(prev_V_Bias, new_V_Bias):
print(misc.round_sig(prev,4), '->', misc.round_sig(new,4), str(misc.round_sig(100 * (new - prev) / prev, 2)) + '%')
print('Previous Depth Relative Variation:', misc.round_sig(np.std(prevDepth),4), '/',
misc.round_sig(np.mean(prevDepth),4), '=', misc.round_sig(100 * np.std(prevDepth)/np.mean(prevDepth)), '%')
print('Expected new Depth Relative Variation:', misc.round_sig(100*np.std(modDepth)/np.mean(prevDepth),4),'%')
print('New Vertical Biases \n[',end='')
for v in new_V_Bias:
print(v, ',', end=' ')
#print(']\nNew Horizontal Biases \n[', end='')
#for h in new_H_Bias:
# print(h, ',', end=' ')
#print(']\n')
print(']\nNew Horizontal Biases \n[ ', end='')
for h in new_H_Bias:
print(h, ' ', end=' ')
print(']\n')
if hFreqs is None:
return
if not (len(new_H_Bias) == len(hFreqs) == len(hPhases)):
raise ValueError('Lengths of horizontal data dont match')
if not (len(new_V_Bias) == len(vFreqs) == len(vPhases)):
raise ValueError('Lengths of vertical data dont match')
with open('\\\\jilafile.colorado.edu\\scratch\\regal\\common\\LabData\\Quantum Gas Assembly\\Code_Files\\New-Depth-Evening-Config.txt','w') as file:
file.write('HORIZONTAL:\n')
for f, b, p in zip(hFreqs, new_H_Bias, hPhases):
file.write(str(f) + '\t' + str(b) + '\t' + str(p) + '\n')
file.write('VERTICAL:\n')
for f, b, p in zip(vFreqs, reversed(new_V_Bias), vPhases):
file.write(str(f) + '\t' + str(b) + '\t' + str(p) + '\n')
def extrapolateEveningBiases(hBiasIn, vBiasIn, depthIn, sign=1):
"""
depth in is some measure of the trap depth which is assumed to be roughly linear with the trap depth. It need not be in the right units.
"""
# normalize biases
hBiasIn /= np.sum(hBiasIn)
vBiasIn /= np.sum(vBiasIn)
guess = np.concatenate((hBiasIn, vBiasIn))
f = lambda g: modFitFunc(sign, hBiasIn, vBiasIn, depthIn, *g, )
result = opt.minimize(f, guess)
return result, extrapolateModDepth(sign, hBiasIn, vBiasIn, depthIn, result['x'])
def extrapolateModDepth(sign, hBiasIn, vBiasIn, depthIn, testBiases):
"""
assumes that hBiasIn and vBiasIn are normalized.
This function extrapolates what the depth of each tweezer should be based on the
current depths and current biases. Basically, it assumes that if you change the bias by x%,
then the depth for every atom in that row/column will change by x%.
"""
hBiasTest = testBiases[:len(hBiasIn)]
if len(hBiasTest) > 1:
for b in hBiasTest:
if b <= 0 or b > 1:
return None
vBiasTest = testBiases[len(hBiasIn):len(hBiasIn) + len(vBiasIn)]
if len(vBiasTest) > 1:
for b in vBiasTest:
if b <= 0 or b > 1:
return None
# normalize tests
hBiasTest /= np.sum(hBiasTest)
vBiasTest /= np.sum(vBiasTest)
modDepth = deepcopy(depthIn)
for rowInc, _ in enumerate(depthIn):
dif = (vBiasTest[rowInc] - vBiasIn[rowInc])/vBiasIn[rowInc]
modDepth[rowInc] = modDepth[rowInc] * (1- sign * dif)
for colInc, _ in enumerate(misc.transpose(depthIn)):
dif = (hBiasTest[colInc] - hBiasIn[colInc])/hBiasIn[colInc]
modDepth[:, colInc] = modDepth[:, colInc] * (1-sign * dif)
return modDepth
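# --- Illustrative sketch (not part of the original module) ---
# extrapolateModDepth predicts a new tweezer-depth map when the row (vertical)
# and column (horizontal) biases are rescaled: a fractional bias change on a
# row/column rescales every depth in that row/column by the same fraction, with
# the direction set by `sign`. The assumed 2x2 example below shifts horizontal
# power between the two columns and returns the rescaled depth map.
def _example_mod_depth():
    hBias = np.array([0.5, 0.5])
    vBias = np.array([0.5, 0.5])
    depth = np.array([[1.0, 0.8], [1.0, 0.8]])
    newHBias = np.array([0.45, 0.55])  # move some power toward the second column
    # with sign=1 this returns [[1.1, 0.72], [1.1, 0.72]]
    return extrapolateModDepth(1, hBias, vBias, depth, np.concatenate((newHBias, vBias)))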
def fitWithModule(module, key, vals, errs=None, guess=None, getF_args=[None], maxfev=2000):
# this also works with class objects which have all the required member functions.
key = arr(key)
xFit = (np.linspace(min(key), max(key), 1000) if len(key.shape) == 1 else np.linspace(min(misc.transpose(key)[0]),
max(misc.transpose(key)[0]), 1000))
fitNom = fitStd = fitValues = fitErrs = fitCovs = fitGuess = rSq = None
from numpy.linalg import LinAlgError
try:
fitF = module.getF(*getF_args) if hasattr(module, 'getF') else module.f
fitF_unc = module.getF_unc(*getF_args) if hasattr(module, 'getF_unc') else module.f_unc
if len(key) < len(signature(fitF).parameters) - 1:
raise RuntimeError('Not enough data points to constrain a fit!')
guessUsed = guess if guess is not None else module.guess(key,vals)
fitValues, fitCovs = opt.curve_fit(fitF, key, vals, p0=guessUsed, maxfev=maxfev)
fitErrs = np.sqrt(np.diag(fitCovs))
corr_vals = unc.correlated_values(fitValues, fitCovs)
fitUncObject = fitF_unc(xFit, *corr_vals)
fitNom = unp.nominal_values(fitUncObject)
fitStd = unp.std_devs(fitUncObject)
fitFinished = True
fitGuess = fitF(xFit, *guessUsed)
residuals = vals - fitF(key, *fitValues)
ss_res = np.sum(residuals**2)
ss_tot = np.sum((vals-np.mean(vals))**2)
rSq = 1 - (ss_res / ss_tot)
except (RuntimeError, LinAlgError, ValueError) as e:
fitF = module.getF(*getF_args) if hasattr(module, 'getF') else module.f
fitF_unc = module.getF_unc(*getF_args) if hasattr(module, 'getF_unc') else module.f_unc
warn('Data Fit Failed! ' + str(e))
guessUsed = guess if guess is not None else module.guess(key, vals)
print('Fit failed. key:', key, 'values:', vals, 'guess used:', guessUsed)
fitValues = module.guess(key, vals)
fitNom = fitF(xFit, *fitValues)
fitFinished = False
fitGuess = fitF(xFit, *guessUsed)
fitInfo = {'x': xFit, 'nom': fitNom, 'std': fitStd, 'vals': fitValues, 'errs': fitErrs, 'cov': fitCovs, 'guess': fitGuess, 'R-Squared': rSq}
return fitInfo, fitFinished
def combineData(data, key):
"""
combines similar key value data entries. data will be in order that unique key items appear in key.
For example, if key = [1,3,5,3,7,1], returned key and corresponding data will be newKey = [1, 3, 5, 7]
:param data:
:param key:
:return:
"""
items = {}
newKey = []
newData = []
for elem in key:
if str(elem) not in items:
indexes = [i for i, x in enumerate(key) if x == elem]
# don't get it again
items[str(elem)] = "!"
newKey.append(elem)
newItem = np.zeros((data.shape[1], data.shape[2]))
# average together the corresponding data.
for index in indexes:
newItem += data[index]
newItem /= len(indexes)
newData.append(newItem)
return arr(newData), arr(newKey)
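# --- Illustrative sketch (not part of the original module) ---
# combineData averages together the pictures whose key values repeat, keeping
# the order in which each unique key first appears. The assumed example below
# uses a stack of four 2x2 "pictures" with key [1, 3, 1, 3]; the result is two
# averaged pictures and the key [1, 3].
def _example_combine_data():
    data = np.array([np.full((2, 2), v) for v in [0., 10., 2., 20.]])
    key = [1, 3, 1, 3]
    newData, newKey = combineData(data, key)
    return newKey, newData[:, 0, 0]  # [1, 3] and averages [1., 15.]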
def fitPic(picture, showFit=True, guessSigma_x=1, guessSigma_y=1, guess_x=None, guess_y=None, fitF=gaussian_2d.f_notheta, guessOffset=None, extraGuess=None):
"""
Fit an individual picture with a 2d gaussian, and fit the horizontal and vertical averages with 1d gaussians
"""
pos = arr(np.unravel_index(np.argmax(picture), picture.shape))
pos[1] = guess_x if guess_x is not None else pos[1]
pos[0] = guess_y if guess_x is not None else pos[0]
pic = picture.flatten()
x = np.linspace(0, picture.shape[1], picture.shape[1])
y = np.linspace(0, picture.shape[0], picture.shape[0])
X, Y = np.meshgrid(x, y)
### 2D Fit
initial_guess = [(np.max(pic) - np.min(pic)), pos[1], pos[0], guessSigma_x, guessSigma_y, np.min(pic) if guessOffset is None else guessOffset]
# for fitting functions with more arguments
if extraGuess is not None:
initial_guess += extraGuess
try:
print('fitting...')
popt, pcov = opt.curve_fit(fitF, (X, Y), pic, p0=initial_guess)#, epsfcn=0.01, ftol=0)
except RuntimeError:
popt = np.zeros(len(initial_guess))
pcov = np.zeros((len(initial_guess), len(initial_guess)))
warn('2D Gaussian Picture Fitting Failed!')
### Vertical (i.e. collapse in the vertical direction) Average Fit
vAvg = np.zeros(len(picture[0]))
for r in picture:
vAvg += r
vAvg /= len(picture)
vGuess = [np.max(vAvg) - np.min(vAvg), x[np.argmax(vAvg)], guessSigma_x, np.min(vAvg)]
try:
popt_v, pcov_v = opt.curve_fit(bump.f, x, vAvg, vGuess)
except RuntimeError:
popt_v = np.zeros(len(vGuess))
pcov_v = np.zeros((len(vGuess), len(vGuess)))
warn('Vertical Average Picture Fitting Failed!')
### Horizontal Average Fit
hAvg = np.zeros(len(picture))
for c in misc.transpose(picture):
hAvg += c
hAvg /= len(picture[0])
hGuess = [np.max(hAvg) - np.min(hAvg), y[np.argmax(hAvg)], guessSigma_y, np.min(hAvg)]
try:
popt_h, pcov_h = opt.curve_fit(bump.f, y, hAvg, hGuess)
except RuntimeError:
popt_h = np.zeros(len(hGuess))
pcov_h = np.zeros((len(hGuess), len(hGuess)))
warn('Horizontal Average Picture Fitting Failed!')
if showFit:
print(fitF)
data_fitted = fitF((X,Y), *popt)
fig, axs = plt.subplots(1, 3)
plt.grid(False)
im = axs[0].imshow(picture, origin='lower')#, extent=(x.min(), x.max(), y.min(), y.max()))
data_fitted = data_fitted.reshape(picture.shape[0],picture.shape[1])
axs[0].contour(x, y, data_fitted, 4, colors='w', alpha=0.2)
mp.addAxColorbar(fig, axs[0], im)
axs[0].set_title('Raw Data')
im = axs[1].imshow( data_fitted, origin='lower')
mp.addAxColorbar(fig, axs[1], im)
axs[1].set_title('Fit')
im = axs[2].imshow( picture - data_fitted, origin='lower' )
mp.addAxColorbar(fig, axs[2], im)
axs[2].contour(x, y, data_fitted, 4, colors='w', alpha=0.2)
axs[2].set_title('Residuals')
return initial_guess, popt, np.sqrt(np.diag(pcov)), popt_v, np.sqrt(np.diag(pcov_v)), popt_h, np.sqrt(np.diag(pcov_h))
def fitPictures(pictures, dataRange, guessSigma_x=1, guessSigma_y=1, quiet=False, firstIsGuide=True):
"""
fit an array of pictures with gaussians
if firstIsGuide is true then use the fit from the first pic as the guide for the next pictures.
:param pictures:
:param dataRange:
:param guessSigma_x:
:param guessSigma_y:
:return:
"""
fitParameters, fitErrors, vParams, vErrs, hParams, hErrs = [[] for _ in range(6)]
count = 0
warningHasBeenThrown = False
if not quiet:
print('fitting picture Number...')
for picInc, picture in enumerate(pictures):
if not quiet:
print(picInc, ',', end='')
if count not in dataRange:
parameters, errors = [np.zeros(7) for _ in range(2)]
v_param, v_err, h_param, h_err = [np.zeros(4) for _ in range(4)]
else:
try:
if firstIsGuide and picInc != 0:
# amplitude, xo, yo, sigma_x, sigma_y, theta, offset
_, parameters, errors, v_param, v_err, h_param, h_err = fitPic(picture, showFit=False,
guess_x = fitParameters[0][1], guess_y = fitParameters[0][2],
guessSigma_x=fitParameters[0][3], guessSigma_y=fitParameters[0][4])
else:
_, parameters, errors, v_param, v_err, h_param, h_err = fitPic(picture, showFit=False,
guessSigma_x=guessSigma_x, guessSigma_y=guessSigma_y)
except RuntimeError:
if not warningHasBeenThrown:
print("Warning! Not all picture fits were able to fit the picture signal to a 2D Gaussian.\n"
"When the fit fails, the fit parameters are all set to zero.")
warningHasBeenThrown = True
parameters, errors = [np.zeros(7) for _ in range(2)]
v_param, v_err, h_param, h_err = [np.zeros(4) for _ in range(4)]
# append things regardless of whether the fit succeeds or not in order to keep things the right length.
fitParameters.append(parameters)
fitErrors.append(errors)
vParams.append(v_param)
vErrs.append(v_err)
hParams.append(h_param)
hErrs.append(h_err)
count += 1
return np.array(fitParameters), np.array(fitErrors), np.array(vParams), np.array(vErrs), np.array(hParams), np.array(hErrs)
def fitDoubleGaussian(binCenters, binnedData, fitGuess, quiet=False):
try:
fitVals, fitCovNotUsed = opt.curve_fit( lambda x, a1, a2, a3, a4, a5, a6:
double_gaussian.f(x, a1, a2, a3, a4, a5, a6, 0),
binCenters, binnedData, fitGuess )
except opt.OptimizeWarning as err:
if not quiet:
print('Double-Gaussian Fit Failed! (Optimization Warning)', err)
fitVals = (0, 0, 0, 0, 0, 0)
except RuntimeError as err:
if not quiet:
print('Double-Gaussian Fit Failed! (Runtime error)', err)
fitVals = (0, 0, 0, 0, 0, 0)
return [*fitVals,0]
def fitGaussianBeamWaist(data, key, wavelength):
# expects waists as inputs
initial_guess = [min(data.flatten()), key[int(3*len(key)/4)]]
try:
# fix the wavelength
# beamWaistExpansion(z, w0, wavelength)
popt, pcov = fit(lambda x, a, b: beamWaistExpansion(x, a, b, wavelength), key, data, p0=initial_guess)
except RuntimeError:
popt, pcov = [0, 0]
warn('Fit Failed!')
return popt, pcov
# #############################
# ### Analyzing machine outputs
def load_SRS_SR780(fileAddress):
"""
from a TXT file from the SRS, returns the frequencies (the [0] element) and the powers (the [1] element)
"""
data = pd.read_csv(fileAddress, delimiter=',', header=None)
return data[0], data[1]
def load_HP_4395A(fileAddress):
"""
Analyzing HP 4395A Spectrum & Network Analyzer Data
"""
data = pd.read_csv(fileAddress, delimiter='\t', header=11)
return data["Frequency"], data["Data Trace"]
def load_RSA_6114A(fileLocation):
"""
return xData, yData, yUnits, xUnits
"""
lines = []
count = 0
yUnits = ""
xUnits = ""
xPointNum, xStart, xEnd = [0, 0, 0]
with open(fileLocation) as file:
for line in iter(file.readline, ''):
count += 1
# 18 lines to skip.
if count == 11:
yUnits = str(line[line[:].index('>')+1:line[1:].index('<')+1])
continue
elif count == 12:
xUnits = str(line[line[:].index('>')+1:line[1:].index('<')+1])
continue
elif count == 16:
xPointNum = float(line[line[:].index('>')+1:line[1:].index('<')+1])
continue
elif count == 17:
xStart = float(line[line[:].index('>')+1:line[1:].index('<')+1])
continue
elif count == 18:
xEnd = float(line[line[:].index('>')+1:line[1:].index('<')+1])
continue
elif count <= 18:
continue
try:
lines.append(line[line[:].index('>')+1:line[1:].index('<')+1])
except ValueError:
pass
yData = np.float64(arr(lines))
    xData = np.linspace(xStart, xEnd, int(xPointNum))
return xData, yData, yUnits, xUnits
# ##########################
# ### Some AOM Optimizations
def getOptimalAomBiases(minX, minY, spacing, widthX, widthY):
# This function has been deprecated. It was only valid for the old Gooch and Housego aoms.
return "FUNCTION-DEPRECATED"
"""
:param minX:
:param minY:
:param spacing:
:param widthX:
:param widthY:
:return:
"""
# these calibrations were taken on 9/11/2017\n",
# At Vertical Frequency = 80 MHz. \n",
horFreq = [70, 75, 65, 67.5, 72.5, 80, 85, 90, 95, 60, 50, 55, 45, 62.5, 57.5, 52.5]
powerInRail = [209, 197, 180, 198, 205, 186, 156, 130, 72.5, 181, 109, 179, 43.5, 174, 182, 165]
#def orderData(data, key, keyDim=None, otherDimValues=None):
powerInRail, horFreq, _ = orderData(powerInRail, horFreq)
relativeHorPowerInRail = arr(powerInRail) / max(powerInRail) * 100
horAomCurve = interp.InterpolatedUnivariateSpline(horFreq, relativeHorPowerInRail)
# at horizontal freq of 70MHz\n",
vertFreq = [80, 82.5, 77.5, 75, 85, 90, 95, 100, 105, 70, 65, 60, 55, 50, 52.5, 57.5, 62.5]
vertPowerInRail = [206, 204, 202, 201, 197, 184, 145, 126, 64, 193, 185, 140, 154, 103, 141, 140, 161]
vertPowerInRail, vertFreq, _ = orderData(vertPowerInRail, vertFreq)
relativeVertPowerInRail = arr(vertPowerInRail) / max(vertPowerInRail) * 100
#vertAomCurve = interp.interp1d(vertFreq, relativeVertPowerInRail)
vertAomCurve = interp.InterpolatedUnivariateSpline(vertFreq, relativeVertPowerInRail)
xFreqs = [minX + num * spacing for num in range(widthX)]
#xAmps = arr([100 / base_interp(xFreq, horFreq, relativeHorPowerInRail) for xFreq in xFreqs])
xAmps = arr([100 / horAomCurve(xFreq) for xFreq in xFreqs])
xAmps /= np.sum(xAmps) / len(xAmps)
yFreqs = [minY + num * spacing for num in range(widthY)]
#yAmps = arr([100 / base_interp(yFreq, vertFreq, relativeVertPowerInRail) for yFreq in yFreqs])
yAmps = arr([100 / vertAomCurve(yFreq) for yFreq in yFreqs])
yAmps /= np.sum(yAmps) / len(yAmps)
#yAmps = [100 / vertAomCurve(yFreq) for yFreq in yFreqs]
return xFreqs, xAmps, yFreqs, yAmps
def maximizeAomPerformance(horCenterFreq, vertCenterFreq, spacing, numTweezersHor, numTweezersVert, iterations=10, paperGuess=True, metric='max',
vertAmps=None, horAmps=None, carrierFre=255):
"""
computes the amplitudes and phases to maximize the AOM performance.
:param horCenterFreq:
:param vertCenterFreq:
:param spacing:
:param numTweezersHor:
:param numTweezersVert:
:param iterations:
:return:
"""
horFreqs = [horCenterFreq - spacing * (numTweezersHor - 1) / 2.0 + i * spacing for i in range(numTweezersHor )]
    vertFreqs = [vertCenterFreq - spacing * (numTweezersVert - 1) / 2.0 + i * spacing for i in range(numTweezersVert)]
actualHFreqs = 255 - arr(horFreqs)
actualVFreqs = 255 - arr(vertFreqs)
if vertAmps is None:
vertAmps = np.ones(numTweezersVert)
if horAmps is None:
horAmps = np.ones(numTweezersHor)
def calcWaveCos(xPts, phases, freqs, amps):
volts = np.zeros(len(xPts))
phases += [0]
for phase, freq, amp in zip(phases, freqs, amps):
volts += amp * np.cos(2*np.pi*freq * 1e6 * xPts + phase)
return volts
def calcWave(xPts, phases, freqs, amps):
volts = np.zeros(len(xPts))
phases += [0]
for phase, freq, amp in zip(phases, freqs, amps):
volts += amp * np.sin(2*np.pi*freq * 1e6 * xPts + phase)
return volts
def getXMetric(phases):
x = np.linspace(0, 3e-6, 20000)
if metric=='max':
return max(abs(calcWave(x, phases, actualHFreqs, horAmps)))
elif metric == 'std':
return np.std(calcWave(x, phases, actualHFreqs, horAmps))
def getYMetric(phases):
x = np.linspace(0, 3e-6, 20000)
if metric=='max':
return max(abs(calcWave(x, phases, actualVFreqs, vertAmps)))
elif metric == 'std':
return np.std(calcWave(x, phases, actualVFreqs, vertAmps))
xBounds = [(0, 2 * mc.pi) for _ in range(numTweezersHor-1)]
#
if paperGuess:
xGuess = arr([np.pi * i**2/numTweezersHor for i in range(numTweezersHor-1)])
else:
xGuess = arr([0 for _ in range(numTweezersHor-1)])
minimizer_kwargs = dict(method="L-BFGS-B", bounds=xBounds)
xPhases = opt.basinhopping(getXMetric, xGuess, minimizer_kwargs=minimizer_kwargs, niter=iterations, stepsize=0.2)
xPhases = list(xPhases.x) + [0]
print('horFreqs', horFreqs)
print('horAmps', horAmps)
print('Hor-Phases:', [misc.round_sig_str(x,10) for x in xPhases])
if paperGuess:
yGuess = arr([np.pi * i**2/numTweezersVert for i in range(numTweezersVert-1)])
else:
yGuess = arr([0 for _ in range(numTweezersVert-1)])
yBounds = [(0, 2 * mc.pi) for _ in range(numTweezersVert-1)]
minimizer_kwargs = dict(method="L-BFGS-B", bounds=yBounds)
yPhases = opt.basinhopping(getYMetric, yGuess, minimizer_kwargs=minimizer_kwargs, niter=iterations, stepsize=0.2)
yPhases = list(yPhases.x) + [0]
for i, xp in enumerate(yPhases):
yPhases[i] = misc.round_sig(xp, 10)
print('vertFreqs', vertFreqs)
print('vertAmps', vertAmps)
print('Vert-Phases:', [misc.round_sig_str(y,10) for y in yPhases])
xpts = np.linspace(0, 1e-6, 10000)
ypts_x = calcWave(xpts, xPhases, actualHFreqs, horAmps)
yptsOrig = calcWaveCos(xpts, arr([0 for _ in range(numTweezersHor)]), actualHFreqs, horAmps)
plt.title('X-Axis')
plt.plot(xpts, ypts_x, ':', label='X-Optimization')
plt.plot(xpts, yptsOrig, ':', label='X-Worst-Case')
plt.legend()
plt.figure()
yptsOrig = calcWave(xpts, arr([0 for _ in range(numTweezersVert)]), actualVFreqs, vertAmps)
ypts_y = calcWaveCos(xpts, yPhases, actualVFreqs, vertAmps)
plt.title('Y-Axis')
plt.plot(xpts, ypts_y, ':', label='Y-Optimization')
plt.plot(xpts, yptsOrig, ':', label='Y-Worst-Case')
plt.legend()
return xpts, ypts_x, ypts_y,
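# Hypothetical usage sketch (parameters are illustrative only, not calibrated values): a 10x10
# tweezer grid centered at 80 MHz on both axes with 1 MHz spacing, optimizing the peak-voltage
# ('max') metric. Calling this prints the optimized phases and produces the comparison plots.
def _maximizeAomPerformanceExample():
    return maximizeAomPerformance(80, 80, 1, 10, 10, iterations=20, metric='max')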
def integrateData(pictures):
"""
:param pictures:
:return:
"""
    if len(pictures.shape) == 3:
        # a stack of pictures: one integrated (summed) value per picture
        integratedData = np.array([np.sum(pic) for pic in pictures], dtype=float)
    else:
        # a single picture: sum over all pixels
        integratedData = np.sum(pictures)
    return integratedData
def beamIntensity(power, waist, radiusOfInterest=0):
"""
computes the average beam intensity, in mW/cm^2, of a beam over some radius of interest.
:param power: power of the laser beam, in mW
:param waist: waist of the laser beam, in cm.
:param radiusOfInterest: the radius of interest. In the case that this is << waist, the equation below
reduces to a simpler, more commonly referenced form. The literal math gives 0/0 though, so I
include the reduced form.
"""
if radiusOfInterest == 0:
return 2 * power / (mc.pi * waist ** 2)
else:
return power * (1 - np.exp(-2 * radiusOfInterest ** 2 / waist ** 2)) / (mc.pi * radiusOfInterest ** 2)
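# Quick numerical sanity check (illustrative only, not used elsewhere): for a radius of interest
# much smaller than the waist, the enclosed-power form above approaches the familiar peak-intensity
# form 2*P/(pi*w^2). The numbers below are hypothetical.
def _beamIntensityCheck(power=1.0, waist=0.1):
    smallRadius = waist / 1000
    simpleForm = 2 * power / (np.pi * waist ** 2)
    fullForm = power * (1 - np.exp(-2 * smallRadius ** 2 / waist ** 2)) / (np.pi * smallRadius ** 2)
    # the two forms agree to much better than a percent when the radius is << waist
    return abs(simpleForm - fullForm) / simpleForm < 0.01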
def computeBaslerGainDB(rawGain):
"""
Gain (NOT currently used in fluorescence calc...)
"""
    G_c = 20 * np.log10((658 + 110)/(658 - 110))  # reference offset at the minimum raw gain (110), so raw gain 110 maps to 0 dB
if 110 <= rawGain <= 511:
gainDB = 20 * np.log10((658 + rawGain)/(658 - rawGain)) - G_c
elif 511 <= rawGain <= 1023:
gainDB = 0.0354 * rawGain - G_c
else:
gainDB = None
        warn('raw gain out of range! gainDB set to None.')
return gainDB
def computeScatterRate(totalIntensity, D2Line_Detuning):
"""
    Computes the rate of photons scattering off of a single atom. From Steck, equation 48.
Assumes 2-Level approximation, good for near resonant light since the near-resonant transition
will be dominant.
Assumes D2 2 to 3' transition.
:param totalIntensity: the total intensity (from all beams) shining on the atoms.
:param D2Line_Detuning: the detuning, in Hz, of the light shining on the atoms from the D2 transition.
"""
isat = mc.Rb87_I_Sat_ResonantIsotropic_2_to_3
rate = (mc.Rb87_D2Gamma / 2) * (totalIntensity / isat) / (1 + 4 * (D2Line_Detuning / mc.Rb87_D2Gamma) ** 2
+ totalIntensity / isat)
return rate
def computeFlorescence(greyscaleReading, imagingLoss, imagingLensDiameter, imagingLensFocalLength, exposure ):
"""
    TODO: incorporate gain into the calculation; it currently assumes gain = X1. Need to check the proper
    conversion from the Basler software; I'd expect a power conversion, i.e. a factor of 20.
    Fluorescence
:param greyscaleReading:
:param imagingLoss:
:param imagingLensDiameter:
:param imagingLensFocalLength:
:param exposure:
:return:
"""
term1 = greyscaleReading * mc.cameraConversion / (mc.h * mc.c / mc.Rb87_D2LineWavelength)
term2 = 1 * imagingLoss * (imagingLensDiameter**2 / (16 * imagingLensFocalLength**2)) * exposure
fluorescence = term1 / term2
return fluorescence
# mot radius is in cm
def computeMotNumber(sidemotPower, diagonalPower, motRadius, exposure, imagingLoss, greyscaleReading, detuning=10e6):
"""
:param sidemotPower: power in the sidemot beam, in mW. Code Assumes 3.3mm sidemot waist
:param diagonalPower: power in an individual diagonal mot beam, in mW
:param motRadius: the approximate radius of the MOT. Used as a higher order part of the calculation which takes into
account the spread of the intensity of the beams over the finite size of the MOT. Less needed for
big MOT beams.
:param exposure: exposure time of the camera, in seconds.
:param imagingLoss: Approximate amount of light lost in the imaging line due to mirrors efficiency, filter
efficiency, etc.
:param greyscaleReading: the integrated greyscale count reading from the camera.
===
The mot number is determined via the following formula:
MOT # = (Scattered Light Collected) / (Scattered light predicted per atom)
    Here the sideMOT power is given in mW (assuming a 3.3 mm waist) and the main MOT beam diameter is very
    roughly one inch. motRadius uses the sigma of the MOT size, which should not matter much as long as it is
    small enough. Exposure is in seconds. imagingLoss is typically 0.8, accounting for the line filter.
    greyscaleReading is the integrated grayscale count with 4x4 binning on the Basler camera, assuming the gain
    is set to 260, which is unity gain for the Basler.
"""
# in cm
sidemotWaist = .33 / (2 * np.sqrt(2))
# in cm
diagonalWaist = 2.54 / 2
# intensities
sidemotIntensity = beamIntensity(sidemotPower, sidemotWaist, motRadius)
diagonalIntensity = beamIntensity(diagonalPower, diagonalWaist, motRadius)
totalIntensity = sidemotIntensity + 2 * diagonalIntensity
rate = computeScatterRate(totalIntensity, detuning)
imagingLensDiameter = 2.54
imagingLensFocalLength = 10
fluorescence = computeFlorescence(greyscaleReading, imagingLoss, imagingLensDiameter, imagingLensFocalLength,
exposure)
motNumber = fluorescence / rate
return motNumber, fluorescence
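# Hypothetical usage sketch documenting the expected units (powers in mW, radius in cm, exposure in
# seconds); the numbers below are made up for illustration and are not calibrated values.
def _motNumberExample():
    return computeMotNumber(sidemotPower=0.5, diagonalPower=3.0, motRadius=0.1,
                            exposure=0.05, imagingLoss=0.8, greyscaleReading=2e6)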
def calcBallisticTemperature(times, sizeSigmas, guess = LargeBeamMotExpansion.guess(), sizeErrors=None):
""" Small wrapper around a fit
expects time in s, sigma in m
return temp, vals, cov
"""
warnings.simplefilter("error", opt.OptimizeWarning)
try:
fitVals, fitCovariances = opt.curve_fit(LargeBeamMotExpansion.f, times, sizeSigmas, p0=guess, sigma = sizeErrors)
temperature = fitVals[2]
except opt.OptimizeWarning as error:
warn('Mot Temperature Expansion Fit Failed!' + str(error))
try:
fitValsTemp, fitCovTemp = opt.curve_fit(lambda t,x,y: LargeBeamMotExpansion.f(t, x, 0, y), times, sizeSigmas, p0=[guess[0], guess[2]], sigma = sizeErrors)
temperature = fitValsTemp[1]
fitVals = [fitValsTemp[0], 0, fitValsTemp[1]]
fitCovariances = np.zeros((len(guess),len(guess)))
fitCovariances[0,0] = fitCovTemp[0,0]
fitCovariances[2,0] = fitCovTemp[1,0]
fitCovariances[0,2] = fitCovTemp[0,1]
fitCovariances[2,2] = fitCovTemp[1,1]
except opt.OptimizeWarning:
fitVals = np.zeros(len(guess))
fitCovariances = np.zeros((len(guess), len(guess)))
temperature = 0
warn('Restricted Mot Temperature Expansion Fit Failed Too with optimize error!')
except RuntimeError:
fitVals = np.zeros(len(guess))
fitCovariances = np.zeros((len(guess), len(guess)))
temperature = 0
warn('Mot Temperature Expansion Fit Failed with Runtime error!')
except RuntimeError:
fitVals = np.zeros(len(guess))
fitCovariances = np.zeros((len(guess), len(guess)))
temperature = 0
warn('Mot Temperature Expansion Fit Failed!')
warnings.simplefilter("default", opt.OptimizeWarning)
return temperature, fitVals, fitCovariances
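# Hypothetical usage sketch: expansion times in seconds and fitted MOT sigmas in meters, as the
# docstring requires. The synthetic numbers below are for illustration only.
def _ballisticTemperatureExample():
    times = np.array([1, 2, 3, 4, 5]) * 1e-3
    sigmas = np.array([0.50, 0.55, 0.62, 0.71, 0.80]) * 1e-3
    return calcBallisticTemperature(times, sigmas)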
def orderData(data, key, keyDim=None, otherDimValues=None):
"""
return arr(data), arr(key), arr(otherDimValues)
"""
zipObj = (zip(key, data, otherDimValues) if otherDimValues is not None else zip(key, data))
if keyDim is not None:
key, data, otherDimValues = list(zip(*sorted(zipObj, key=lambda x: x[0][keyDim])))
# assuming 2D
count = 0
for val in key:
if val[keyDim] == key[0][keyDim]:
count += 1
majorKeySize = int(len(key) / count)
tmpKey = arr(key[:])
tmpVals = arr(data[:])
tmpKey.resize([majorKeySize, count, 2])
tmpVals.resize([majorKeySize, count, arr(data).shape[1], arr(data).shape[2], arr(data).shape[3]])
finKey = []
finData = []
for k, d in zip(tmpKey, tmpVals):
k1, d1 = list(zip(*sorted(zip(k, d), key=lambda x: x[0][int(not keyDim)])))
for k2, d2 in zip(k1, d1):
finKey.append(arr(k2))
finData.append(arr(d2))
return arr(finData), arr(finKey), arr(otherDimValues)
else:
if otherDimValues is None:
key, data = list(zip(*sorted(zipObj, key=lambda x: x[0])))
else:
key, data, otherDimValues = list(zip(*sorted(zipObj, key=lambda x: x[0])))
return arr(data), arr(key), arr(otherDimValues)
def groupMultidimensionalData(key, varyingDim, atomLocations, survivalData, survivalErrs, loadingRate):
"""
Normally my code takes all the variations and looks at different locations for all those variations.
    In the multi-dim case, there are multiple variations for the same primary key value. I need to
split up those multiple variations.
"""
if len(key.shape) == 1:
# no grouping needed
return (key, atomLocations, survivalErrs, survivalData, loadingRate,
[None for _ in range(len(key)*len(atomLocations))])
# make list of unique indexes for each dimension
uniqueSecondaryAxisValues = []
newKey = []
for keyValNum, secondaryValues in enumerate(misc.transpose(key)):
if keyValNum == varyingDim:
for val in secondaryValues:
if val not in newKey:
newKey.append(val)
continue
uniqueSecondaryAxisValues.append([])
for val in secondaryValues:
if val not in uniqueSecondaryAxisValues[-1]:
uniqueSecondaryAxisValues[-1].append(val)
extraDimValues = 1
for i, dim in enumerate(uniqueSecondaryAxisValues):
extraDimValues *= len(dim)
newLoadingRate, newTransferData, newErrorData, locationsList, otherDimsList = [[] for _ in range(5)]
allSecondaryDimVals = arr(uniqueSecondaryAxisValues).flatten()
# iterate through all locations
for loc, locData, locErrs, locLoad in zip(atomLocations, survivalData, survivalErrs, loadingRate):
newData = locData[:]
newErr = locErrs[:]
newLoad = locLoad[:]
newData.resize(int(len(locData)/extraDimValues), extraDimValues)
newData = misc.transpose(newData)
newErr.resize(int(len(locData)/extraDimValues), extraDimValues)
newErr = misc.transpose(newErr)
newLoad.resize(int(len(locData)/extraDimValues), extraDimValues)
newLoad = misc.transpose(newLoad)
# iterate through all extra dimensions in the locations
secondIndex = 0
for val, err, load in zip(newData, newErr, newLoad):
newTransferData.append(val)
newErrorData.append(err)
newLoadingRate.append(load)
locationsList.append(loc)
otherDimsList.append(allSecondaryDimVals[secondIndex])
secondIndex += 1
return (arr(newKey), arr(locationsList), arr(newErrorData), arr(newTransferData), arr(newLoadingRate),
arr(otherDimsList))
def getFitsDataFrame(fits, fitModules, avgFit):
uniqueModules = set(fitModules)
    fitDataFrames = [pd.DataFrame() for _ in uniqueModules]
# Copyright © 2019 <NAME>
"""
Tests for the variable/column cleaning with variable dropping on equality.
"""
from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
import unittest
# Tests for:
from ...row_filter import RowFilter
class CleanDropIfEqualTests(unittest.TestCase):
"""
Tests for the ``preprocess._clean_variables`` module dropping rows based on "=="
"""
@staticmethod
def test_drop_if_equal_1():
"""
Test that no rows are dropped if equality conditions are not met.
"""
_table_1 = DataFrame({"a": [1.0, 2.0, 3.0], "b": [2.0, 3.0, 4.0]})
_cleanings = [{"operator": "drop_if_equal", "columns": ["a"], "value": 5.0}]
_rf = RowFilter(_table_1)
_rf.filter(_cleanings)
assert_frame_equal(_table_1, _rf.frame)
@staticmethod
def test_drop_if_equal_2():
"""
Test that a single row is dropped
"""
_table_2 = DataFrame({"a": [1.0, 2.0, 3.0], "b": [2.0, 3.0, 4.0]})
_cleanings = [{"operator": "drop_if_equal", "columns": ["a"], "value": 1.0}]
_expected = DataFrame({"a": [2.0, 3.0], "b": [3.0, 4.0]})
_rf = RowFilter(_table_2)
_rf.filter(_cleanings)
# TODO: There might be a bug in how pandas checks indexes, this is a hack:
_expected.index = _rf.frame.index
assert_frame_equal(_expected, _rf.frame)
@staticmethod
def test_drop_if_equal_3():
"""
Test that no rows are dropped even if conditions would be met in a different row.
"""
        _table_3 = DataFrame({"a": [1.0, 2.0, 3.0], "b": [2.0, 3.0, 4.0]})
#! /usr/bin python
#------------------------------------------------------------------------------
# PROGRAM: plot_ncc_stripes_multiaxis.py
#------------------------------------------------------------------------------
# Version 0.2
# 11 October, 2021
# <NAME>
# https://patternizer.github.io
# patternizer AT gmail DOT com
# michael DOT a DOT taylor AT uea DOT ac DOT uk
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# IMPORT PYTHON LIBRARIES
#------------------------------------------------------------------------------
# Dataframe libraries:
import numpy as np
import pandas as pd
import xarray as xr
# Datetime libraries:
from datetime import datetime
import nc_time_axis
import cftime
from cftime import num2date, DatetimeNoLeap
# Plotting libraries:
import matplotlib
#matplotlib.use('agg')
import matplotlib.pyplot as plt; plt.close('all')
import matplotlib.colors as mcolors
import matplotlib.gridspec as gridspec
from matplotlib.cm import ScalarMappable
from matplotlib import rcParams
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# Statistics libraries:
from scipy import stats
# Silence library version notifications
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# SETTINGS:
#------------------------------------------------------------------------------
fontsize = 14
cbar_max = 6.0
barwidthfraction = 1.0
use_dark_theme = True
use_overlay_colorbar = True
plot_climate_bars = True
#projectionstr = 'RCP3pd'
#projectionstr = 'RCP45'
#projectionstr = 'RCP6'
#projectionstr = 'RCP85'
#projectionstr = 'SSP119'
#projectionstr = 'SSP126'
#projectionstr = 'SSP245'
#projectionstr = 'SSP370'
projectionstr = 'SSP585'
baselinestr = 'baseline_1851_1900'
#baselinestr = 'baseline_1961_1990'
#baselinestr = 'baseline_1971_2000'
titlestr = 'Global mean anomaly, 65 Myr ( < 2015) - 2200 CE: ' + projectionstr
pathstr = 'DATA/'
pages2kstr = 'PAGES2k.txt'
hadcrut5str = 'HadCRUT5.csv'
fairstr = 'fair' + '_' + projectionstr.lower() + '.csv'
lovarstr = 'variability_realisation0.txt'
hivarstr = 'variability_realisation1.txt'
fairstr = 'fair' + '_' + projectionstr.lower() + '.csv'
paleostr = 'paleo_data_compilation.xls'
pages2k_file = pathstr + pages2kstr
hadcrut5_file = pathstr + hadcrut5str
fair_file = pathstr + fairstr
lo_var_file = pathstr + lovarstr
hi_var_file = pathstr + hivarstr
paleo_file = pathstr + paleostr
ipcc_rgb_txtfile = np.loadtxt("DATA/temp_div.txt") # IPCC AR6 temp div colormap file
cmap = mcolors.LinearSegmentedColormap.from_list('colormap', ipcc_rgb_txtfile) # ipcc_colormap
#cmap = plt.cm.get_cmap('RdBu_r')
#cmap = plt.cm.get_cmap('bwr')
#------------------------------------------------------------------------------
# DARK THEME
#------------------------------------------------------------------------------
if use_dark_theme == True:
matplotlib.rcParams['text.usetex'] = False
rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['Avant Garde', 'Lucida Grande', 'Verdana', 'DejaVu Sans' ]
plt.rc('text',color='white')
plt.rc('lines',color='white')
plt.rc('patch',edgecolor='white')
plt.rc('grid',color='lightgray')
plt.rc('xtick',color='white')
plt.rc('ytick',color='white')
plt.rc('axes',labelcolor='white')
plt.rc('axes',facecolor='black')
plt.rc('axes',edgecolor='lightgray')
plt.rc('figure',facecolor='black')
plt.rc('figure',edgecolor='black')
plt.rc('savefig',edgecolor='black')
plt.rc('savefig',facecolor='black')
else:
matplotlib.rcParams['text.usetex'] = False
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Avant Garde', 'Lucida Grande', 'Verdana', 'DejaVu Sans' ]
plt.rc('text',color='black')
plt.rc('lines',color='black')
plt.rc('patch',edgecolor='black')
plt.rc('grid',color='lightgray')
plt.rc('xtick',color='black')
plt.rc('ytick',color='black')
plt.rc('axes',labelcolor='black')
plt.rc('axes',facecolor='white')
plt.rc('axes',edgecolor='black')
plt.rc('figure',facecolor='white')
plt.rc('figure',edgecolor='white')
plt.rc('savefig',edgecolor='white')
plt.rc('savefig',facecolor='white')
# Calculate current time
now = datetime.now()
currentdy = str(now.day).zfill(2)
currentmn = str(now.month).zfill(2)
currentyr = str(now.year)
titletime = str(currentdy) + '/' + currentmn + '/' + currentyr
#------------------------------------------------------------------------------
# METHODS
#------------------------------------------------------------------------------
def adjust_spines(ax, spines):
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', 5))
else:
spine.set_color('none')
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks([])
#-----------------------------------------------------------------------------
# LOAD: PAGES2k (via <NAME> with thanks) --> df_pages2k
# NB: convert time to year.decimal
#-----------------------------------------------------------------------------
# FORMAT:
# Year CE | raw instrumental target data | reconstruction ensemble 50th | 2.5th | 97.5th percentiles |
# 31-year butterworth filtered instrumental target data | 31-year butterworth filtered reconstruction 50th |
# 2.5th | 97.5th percentiles
nheader = 5
f = open(pages2k_file)
lines = f.readlines()
years = [] # [0001,2000]
obs = []
for i in range(nheader,len(lines)):
words = lines[i].split()
year = words[0].zfill(4)
val = (len(words)-1)*[None]
for j in range(len(val)):
try: val[j] = float(words[j+1])
except:
pass
years.append(year)
obs.append(val)
f.close()
obs = np.array(obs)
t_pages2k = xr.cftime_range(start=years[0], periods=len(years), freq='A', calendar='gregorian')[0:1850]
ts_pages2k_instr = pd.to_numeric(obs[:,1][0:1850], errors='coerce')
ts_pages2k_recon = pd.to_numeric(obs[:,5][0:1850], errors='coerce')
#ts_pages2k = np.append(ts_pages2k_recon[0:-36],ts_pages2k_instr[-36:],axis=None)
ts_pages2k = ts_pages2k_recon
df_pages2k = pd.DataFrame()
df_pages2k['t_pages2k'] = t_pages2k.year.astype(float)
df_pages2k['ts_pages2k'] = ts_pages2k
#-----------------------------------------------------------------------------
# LOAD: HadCRUT5 (via <NAME>orn and UKMO with thanks) --> df_hadcrut5
# NB: convert time to year.decimal
#-----------------------------------------------------------------------------
hadcrut5 = pd.read_csv(hadcrut5_file)
t_hadcrut5_monthly = xr.cftime_range(start='1850', periods=len(hadcrut5), freq='MS', calendar='noleap')
ts_hadcrut5_monthly = hadcrut5['Anomaly (deg C)'].values
df_hadcrut5 = pd.DataFrame()
df_hadcrut5['t_hadcrut5'] = t_hadcrut5_monthly.year.astype(float) + t_hadcrut5_monthly.month.astype(float)/12.0
df_hadcrut5['ts_hadcrut5'] = ts_hadcrut5_monthly
years = np.unique(t_hadcrut5_monthly.year)
yearly = []
SD = []
for yyyy in years:
year_data = df_hadcrut5[np.floor(df_hadcrut5['t_hadcrut5']).astype('int') == yyyy]['ts_hadcrut5']
yearly_mean = np.nanmean(year_data)
yearly_SD = np.nanstd(year_data)
yearly.append(yearly_mean)
SD.append(yearly_SD)
df_hadcrut5_yearly = pd.DataFrame()
df_hadcrut5_yearly['t_hadcrut5'] = years.astype('float')
df_hadcrut5_yearly['ts_hadcrut5'] = yearly
df_hadcrut5_yearly['ts_hadcrut5_SD'] = SD
df_hadcrut5_yearly = df_hadcrut5_yearly[ (df_hadcrut5_yearly.t_hadcrut5 >= 1851) & (df_hadcrut5_yearly.t_hadcrut5 <= 2020) ]
#-----------------------------------------------------------------------------
# LOAD: FaIR v1.6.3 projections (constrained by HadCRUT5-analysis) --> df_fair
# NB: convert time to year.decimal
#-----------------------------------------------------------------------------
fair = pd.read_csv(fair_file,index_col=0)
df_fair = pd.DataFrame()
df_fair['t_fair'] = fair.Year.values.astype('float')
#-----------------------------------------------------------------------------
# LOAD: internal variability for FaIR v1.6.4 projections calculated by Tim from the instrumental record.
#-----------------------------------------------------------------------------
nheader = 0
f_lo = open(lo_var_file)
f_hi = open(hi_var_file)
lines_lo = f_lo.readlines()
lines_hi = f_hi.readlines()
years = []
obs_lo = []
obs_hi = []
for i in range(nheader,180):
words_lo = lines_lo[i].split()
words_hi = lines_hi[i].split()
year = int(words_lo[0].zfill(4)) + 671 # 1350 --> 2021 offset
val_lo = (len(words_lo)-1)*[None]
val_hi = (len(words_hi)-1)*[None]
for j in range(len(val_lo)):
try:
val_lo[j] = float(words_lo[j+1])
val_hi[j] = float(words_hi[j+1])
except:
pass
years.append(year)
obs_lo.append(val_lo)
obs_hi.append(val_hi)
f_lo.close()
f_hi.close()
obs_lo = np.array(obs_lo).ravel()
obs_hi = np.array(obs_hi).ravel()
df_variability = pd.DataFrame({'lo_var':obs_lo, 'hi_var':obs_hi}, index=years)
if (projectionstr == 'SSP119') | (projectionstr == 'SSP126') | (projectionstr == 'SSP245') | (projectionstr == 'RCP3pd') | (projectionstr == 'RCP45'):
df_fair['ts_fair'] = fair.Global.values + df_variability.lo_var.values
elif (projectionstr == 'SSP370') | (projectionstr == 'SSP585') | (projectionstr == 'RCP6') | (projectionstr == 'RCP85'):
df_fair['ts_fair'] = fair.Global.values + df_variability.hi_var.values
#-----------------------------------------------------------------------------
# LOAD: geological anomalies: 65.5229 Myr ( before 2015 )
# NB: paleo_file has copy-paste of "data_compilation" sheet values from All_palaeotemps.xlsx
#
#-----------------------------------------------------------------------------
#import xlrd
#workbook = xlrd.open_workbook(paleo_file)
#worksheet = workbook.sheet_by_index(0) # first sheet in workbook
#ncols = worksheet.utter_max_cols
#nrows = worksheet.utter_max_rows
xl = pd.ExcelFile(paleo_file)
df_xl = xl.parse('Sheet1',header=2)
# FORMAT:
# Royer et al (2004) Friedrich et al (2012) & Hansen et al (2013) Zachos et al (2008) & Hansen et al (2013) Lisiecki and Raymo (2005) & Hansen et al (2013) EPICA Dome C, Antarctica (x 0.5) NGRIP, Greenland & Johnsen et al (1989) (x 0.5) Marcott et al (2013) Berkeley Earth land-ocean IPCC AR5 RCP8.5
# Age My Royer / Veizer (x 2.0) Royer / Veizer - CO₂ from proxies (x 2.0) Low High Axis [] Age My Age ky before 2015 δ18O Tdo Ts T anomaly Age My Age ky before 2015 δ18O Tdo Ts T anomaly Age My Age ky before 2015 δ18O Tdo Ts T anomaly Age ky before 2015 T T global Age ky before 2015 δ18O Ts T anomaly T global Age ky before 2015 T 1σ Decade Age ky before 2015 T average Year Age ky before 2015 T
t_epica = df_xl.iloc[:,28] * -1.0e3 + 2015.0
ts_epica = df_xl.iloc[:,30]
t_lisiecki = df_xl.iloc[:,22] * -1.0e3 + 2015.0
ts_lisiecki = df_xl.iloc[:,26]
t_zachos = df_xl.iloc[:,15] * -1.0e3 + 2015.0
ts_zachos = df_xl.iloc[:,19]
df_epica = pd.DataFrame()
df_epica['t'] = t_epica
df_epica['ts'] = ts_epica
df_epica_sorted = df_epica.sort_values(by=['t']).dropna().reset_index(drop=True)
df_epica = df_epica_sorted[ (df_epica_sorted.t <= 0) ]
df_lisiecki = pd.DataFrame()
df_lisiecki['t'] = t_lisiecki
df_lisiecki['ts'] = ts_lisiecki
df_lisiecki_sorted = df_lisiecki.sort_values(by=['t']).dropna().reset_index(drop=True)
df_lisiecki = df_lisiecki_sorted[ (df_lisiecki_sorted.t <= 0) ]
df_zachos = pd.DataFrame()
import numpy as np, pandas as pd
from scipy import stats
"""
Notes on Analysis:
- we have mean, se, & worst on radius, texture, perimeter, area, smoothness, compactness,
concavity, concave points, symmetry, fractal dimensions
First preprocessing step: normalize each column using its z-score
"""
print("Import data")
df = pd.read_csv("data.csv")
print("get z-score for each")
new_data = {
'id': df['id'],
'diagnosis': df['diagnosis']
}
print("create new attributes by z-score")
features = set(df.columns.to_list()) - set(["id", "diagnosis"])
for f in features:
zscores = stats.zscore(df[f])
new_data[f] = np.array(zscores > 0, dtype=int)
print("export processed data")
ndf = pd.DataFrame.from_dict(new_data)
from flask import Flask, request, render_template
import pandas as pd
import numpy as np
import plotly
import plotly.express as px
import json
import plotly.io as pio
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from sklearn.model_selection import train_test_split
import sklearn.metrics
data = pd.read_csv("stonks.csv")
import numpy as np
import pandas as pd
__all__ = ["readLinkagesByLineFile"]
def readLinkagesByLineFile(linkages_file,
linkage_id_start=1,
column_mapping={
"obs_id": "obs_id",
"linkage_id" : "linkage_id"
}):
"""
Reads a file that contains linkages where each linkage is written in terms of its
observations line by line.
Example:
137541512 137543165 137615070 137620728 138216303 138216866 138221227
137541512 137543165 137615070 137620728 138216303 138216866 138221227 144513728 144533645
137541512 137543165 137615070 137620728 138216303 138216866 138221227 144513728 144533645 146991832 147084549
137541512 137543165 137615070 137620728 138216303 138216866 138221227 144514371 144534274
137541512 137543165 137615070 137620728 142747928 142763154
137541512 137543165 137615070 137620728 142748009 142763229
137541512 137543165 137615070 137620728 142748009 142763229 144513839 144533746
137541512 137543165 137615070 137620728 142748120 142763338
137541512 137543165 137615070 137620728 142748305 142763529
137541512 137543165 137615070 137620728 142748337 142763570
Parameters
----------
linkages_file : str
Path the linkages file that needs to be converted.
    linkage_id_start : int, optional
Number at which to start the linkage ID count.
[Default = 1]
column_mapping : dict, optional
The mapping of columns in linkages_file to internally used names.
Needs the following: "linkage_id" : ..., "obs_id" : ... .
[Default = {'obs_id' : 'obs_id',
'linkage_id' : 'linkage_id'}]
Returns
-------
linkage_members : `~pandas.DataFrame`
DataFrame with two columns: the linkage ID and a second column with one row
per observation ID.
"""
# Read initial file
linkages = | pd.read_table(linkages_file, header=None, names=[column_mapping["obs_id"]]) | pandas.read_table |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# to save report:
# clone the following repo: https://github.com/ihuston/jupyter-hide-code-html
# run in terminal: jupyter nbconvert --to html --template jupyter-hide-code-html/clean_output.tpl path/to/CGR_16S_Microbiome_QC_Report.ipynb
# name the above file NP###_pipeline_run_folder_QC_report.html and place it in the directory with the pipeline output
# for version control:
# Kernel > Restart & Clear Output
# run in terminal: jupyter nbconvert --to script CGR_16S_Microbiome_QC_Report.ipynb
# add/commit CGR_16S_Microbiome_QC_Report.ipynb AND CGR_16S_Microbiome_QC_Report.py to git
# # CGR 16S Microbiome QC Report
# <!-- <div id="toc_container"> -->
# <h2>Table of Contents</h2>
# <ul class="toc_list">
# <a href="#1 General-analysis-information">1 General analysis information</a><br>
# <ul>
# <a href="#1.1 Project-directory">1.1 Project directory</a><br>
# <a href="#1.2 Project-directory-contents">1.2 Project directory contents</a><br>
# <a href="#1.3 Parameters">1.3 Parameters</a><br>
# <a href="#1.4 Dependency-versions">1.4 Dependency versions</a><br>
# </ul>
# <a href="#2 Samples-included-in-the-project">2 Samples included in the project<br>
# <a href="#3 QC-checks">3 QC checks</a><br>
# <ul>
# <a href="#3.1 Read-trimming">3.1 Read trimming</a><br>
# <a href="#3.2 Proportion-of-non-bacterial-reads">3.2 Proportion of non-bacterial reads<br>
# <ul>
# <a href="#3.2.1 Proportion-of-non-bacterial-reads-per-sample-type">3.2.1 Proportion of non-bacterial reads per sample type<br>
# </ul>
# <a href="#3.3 Sequencing-depth-distribution-per-flow-cell">3.3 Sequencing distribution per flow cell</a><br>
# <a href="#3.4 Read-counts-after-filtering-in-blanks-vs.-study-samples">3.4 Read counts after filtering in blanks vs. study samples</a><br>
# <a href="#3.5 Sequential-sample--and-feature-based-filters">3.5 Sequential sample- and feature-based filters</a><br>
# <a href="#3.6 Biological-replicates">3.6 Biological replicates</a><br>
# <a href="#3.7 QC-samples">3.7 QC samples</a><br>
# </ul>
# <a href="#4 Rarefaction-threshold">4 Rarefaction threshold</a><br>
# <a href="#5 Alpha-diversity">5 Alpha diversity</a><br>
# <a href="#6 Beta-diversity">6 Beta diversity</a><br>
# <ul>
# <a href="#6.1 Bray-Curtis">6.1 Bray-Curtis</a><br>
# <a href="#6.2 Jaccard">6.2 Jaccard</a><br>
# <a href="#6.3 Weighted-UniFrac">6.3 Weighted UniFrac</a><br>
# <a href="#6.4 Unweighted-UniFrac">6.4 Unweighted UniFrac</a><br>
# </ul>
# </ul>
# In[ ]:
# allow user definition of column headers for certain things, eg sample type?
# <h2 id="1 General-analysis-information">1 General analysis information</h2>
# <h3 id="1.1 Project-directory">1.1 Project directory</h3>
# All production microbiome projects are located in `/DCEG/Projects/Microbiome/Analysis/`. There is a parent folder named with the project ID; that folder contains the [bioinformatic pipeline](https://github.com/NCI-CGR/QIIME_pipeline) runs for that project and a `readme` summarizing the changes between each run.
#
# - The initial run (always named `<datestamp>_initial_run`) is used for some QC checks and to evaluate parameter settings.
# - The second run implements additional read trimming and excludes water blanks, no-template controls, and QC samples (e.g. robogut or artificial colony samples). (NOTE: pick one of intentional dups?)
# - Additional runs are performed for study-specific reasons which are summarized in the `readme`.
# <br><br>
#
# __The project and pipeline run described in this report is located here:__
# In[ ]:
proj_dir='/DCEG/Projects/Microbiome/Analysis/Project_NP0453_MB2_and_3/20201020_dev_test'
ref_db='silva-132-99-515-806-nb-classifier'
# In[ ]:
get_ipython().run_line_magic('cd', '{proj_dir}')
# The contents of the `readme`, at the time of report generation:
# In[ ]:
get_ipython().system('cat ../README')
# <h3 id="1.2 Project-directory-contents">1.2 Project directory contents</h3>
# In[ ]:
get_ipython().system('ls')
# <h3 id="1.3 Parameters">1.3 Parameters</h3>
# In[ ]:
get_ipython().system('cat *.y[a]*ml')
# <h3 id="1.4 Dependency-versions">1.4 Dependency versions</h3>
# In[ ]:
get_ipython().system('cat $(ls -t Q2_wrapper.sh.o* | head -n1)')
# <h2 id="2 Samples-included-in-the-project">2 Samples included in the project</h2>
# The tables below show the count of samples grouped by metadata provided in the manifest.
# In[ ]:
from IPython.display import display
import os.path
get_ipython().run_line_magic('matplotlib', 'inline')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
import glob
from skbio.stats.ordination import pcoa
from skbio import DistanceMatrix
sns.set(style="whitegrid")
# In[ ]:
manifest = pd.read_csv(glob.glob('*.txt')[0],sep='\t',index_col=0)
manifest.columns = map(str.lower, manifest.columns)
manifest = manifest.dropna(how='all', axis='columns')
manifest.columns = manifest.columns.str.replace(' ', '') # remove once cleaning is implemented in the pipeline
# In[ ]:
if len(manifest['run-id'].astype(str).str.split('_',n=2,expand=True).columns) > 1:
manifest['Sequencer'] = (manifest['run-id'].astype(str).str.split('_',n=2,expand=True))[1]
else:
print("Can not infer sequencer ID from run ID.")
if 'sourcepcrplate' in manifest.columns:
manifest['PCR_plate'] = (manifest['sourcepcrplate'].str.split('_',n=1,expand=True))[0]
else:
print("Source PCR Plate column not detected in manifest.")
# should probably save this file, or even better, include in original manifest prior to analysis....
# In[ ]:
m = manifest.drop(columns=['externalid','sourcepcrplate','project-id','extractionbatchid','fq1','fq2'],errors='ignore')
# when do we want to drop extraction ID? in this case, it's all unique values for QC samples and NaNs for study samples
# possibly look for (# unique values == # non-nan values) instead of always dropping
for i in m.columns:
display(m[i].value_counts().rename_axis(i).to_frame('Number of samples'))
# <h2 id="3 QC-checks">3 QC checks</h2>
# <h3 id="3.1 Read-trimming">3.1 Read trimming</h3>
# The trimming parameters for the initial pipeline run (`<datestamp>_initial_run`) are set to 0 (no trimming). For subsequent runs, trimming parameters are set based on the read quality plots (not shown here; please browse `import_and_demultiplex/<runID>.qzv` using [QIIME's viewer](https://view.qiime2.org/) for quality plots). For this run, trimming parameters (also found in the config) are as follows:
# In[ ]:
get_ipython().system('grep -A4 "dada2_denoise" *.y[a]*ml')
# <h3 id="3.2 Proportion-of-non-bacterial-reads">3.2 Proportion of non-bacterial reads</h3>
# After error correction, chimera removal, removal of phiX sequences, and the four-step filtering defined above, the remaining reads are used for taxonomic classification. We are performing classification with a naive Bayes classifier trained on the SILVA 99% OTUs database that includes only the V4 region (defined by the 515F/806R primer pair). This data is located at `taxonomic_classification/silva-132-99-515-806-nb-classifier/barplots.qzv`. Please use [QIIME's viewer](https://view.qiime2.org/) for a more detailed interactive plot.
#
# The plots below show the "level 1" taxonomic classification. The first set of plots show relative abundances; the second show absolute. Plots are split into sets of ~500 samples per plot.
#
# Note that reads are being classified using a database of predominantly bacterial sequences, so human reads, for example, will generally be in the "Unclassified" category rather than "Eukaryota." Non-bacterial reads can indicate host (human) or other contamination.
# In[ ]:
get_ipython().system('unzip -q -d taxonomic_classification/rpt_silva taxonomic_classification/{ref_db}/barplots.qzv')
# In[ ]:
f = glob.glob('taxonomic_classification/rpt_silva/*/data/level-1.csv')
df_l1 = pd.read_csv(f[0])
df_l1 = df_l1.rename(columns = {'index':'Sample'})
df_l1 = df_l1.set_index('Sample')
df_l1 = df_l1.select_dtypes(['number']).dropna(axis=1, how='all')
df_l1_rel = df_l1.div(df_l1.sum(axis=1), axis=0) * 100
# In[ ]:
def split_df(df, max_rows = 500):
split_dfs = list()
rows = df.shape[0]
n = rows % max_rows
last_rows = True
for i in range(0, rows, max_rows):
# if the last remainder of the rows is less than half the max value,
# just combine it with the second-to-last plot
# otherwise it looks weird
if i in range(rows-max_rows*2,rows-max_rows) and n <= (max_rows // 2):
split_dfs.append(df.iloc[i:i+max_rows+n])
last_rows = False
elif last_rows:
split_dfs.append(df.iloc[i:i+max_rows])
return split_dfs
# need to split very large datasets so rendering doesn't get weird
# In[ ]:
df_list = split_df(df_l1)
df_rel_list = split_df(df_l1_rel)
# In[ ]:
for i in df_rel_list:
plt.figure(dpi=200)
pal = sns.color_palette("Accent")
ax = i.sort_values('D_0__Bacteria').plot.bar(stacked=True, color=pal, figsize=(60,7), width=1, edgecolor='white', ax=plt.gca())
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.5),ncol=4,fontsize=52)
ax.set_ylabel('Relative frequency (%)',fontsize=52)
ax.set_title('Taxonomic classification, level 1',fontsize=52)
ax.set_yticklabels(ax.get_yticks(), size=40)
plt.show()
# In[ ]:
for i in df_list:
plt.figure(dpi=200)
pal = sns.color_palette("Accent")
ax = i.sort_values('D_0__Bacteria').plot.bar(stacked=True, color=pal, figsize=(60,7), width=1, edgecolor='white', ax=plt.gca())
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.5),ncol=4,fontsize=52)
ax.set_ylabel('Absolute frequency',fontsize=52)
ax.set_title('Taxonomic classification, level 1',fontsize=52)
ax.set_yticklabels(ax.get_yticks(), size=40)
plt.show()
# <h4 id="3.2.1 Proportion-of-non-bacterial-reads-per-sample-type">3.2.1 Proportion of non-bacterial reads per sample type</h4>
# This section highlights non-bacterial reads in various sub-populations included in the study (e.g. study samples, robogut or artificial control samples, and blanks). This can be helpful with troubleshooting if some samples unexpectedly have a high proportion of non-bacterial reads.
# In[ ]:
def plot_level_1_subpops(samples,pop):
plt.rcParams["xtick.labelsize"] = 12
n = -0.5
r = 90
ha = "center"
f = 12
if len(samples) < 30:
plt.rcParams["xtick.labelsize"] = 40
n = -0.8
r = 40
ha = "right"
f = 40
df = df_l1_rel[df_l1_rel.index.isin(samples)]
for i in split_df(df):
plt.figure(dpi=200)
pal = sns.color_palette("Accent")
ax = i.sort_values('D_0__Bacteria').plot.bar(stacked=True, color=pal, figsize=(60,7), width=1, edgecolor='white', ax=plt.gca())
ax.legend(loc='upper center', bbox_to_anchor=(0.5, n),ncol=4,fontsize=52)
ax.set_ylabel('Relative frequency (%)',fontsize=52)
ax.set_xlabel('Sample',fontsize=f)
ax.set_title('Taxonomic classification, level 1, ' + pop + ' samples only',fontsize=52)
ax.set_yticklabels(ax.get_yticks(), size = 40)
ax.set_xticklabels(ax.get_xticklabels(), rotation=r, ha=ha)
plt.show()
# In[ ]:
if 'sampletype' in manifest.columns:
for i in manifest['sampletype'].unique():
l = list(manifest[manifest['sampletype'].str.match(i)].index)
plot_level_1_subpops(l,i)
else:
print("No Sample Type column detected in manifest.")
# ## Non-bacterial read removal
# Best practices indicate we should filter these reads regardless of the degree to which we observe them. The plots below show the "level 1" classification after removal of non-bacterial reads and reads without a phylum classification.
#
# This data is located at `taxonomic_classification_bacteria_only/silva-132-99-515-806-nb-classifier/barplots.qzv`. Please use [QIIME's viewer](https://view.qiime2.org/) for a more detailed interactive plot.
# In[ ]:
get_ipython().system('unzip -q -d taxonomic_classification_bacteria_only/rpt_silva taxonomic_classification_bacteria_only/{ref_db}/barplots.qzv')
# In[ ]:
f = glob.glob('taxonomic_classification_bacteria_only/rpt_silva/*/data/level-1.csv')
df_l1b = pd.read_csv(f[0])
df_l1b = df_l1b.rename(columns = {'index':'Sample'})
df_l1b = df_l1b.set_index('Sample')
df_l1b = df_l1b.select_dtypes(['number']).dropna(axis=1, how='all')
df_l1b_rel = df_l1b.div(df_l1b.sum(axis=1), axis=0) * 100
# In[ ]:
for i in split_df(df_l1b_rel):
plt.figure(dpi=200)
plt.rcParams["xtick.labelsize"] = 12
pal = sns.color_palette("Accent")
ax = i.sort_values('D_0__Bacteria').plot.bar(stacked=True, color=pal, figsize=(60,7), width=1, edgecolor='white', ax=plt.gca())
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.5),ncol=4,fontsize=52)
ax.set_ylabel('Relative frequency (%)',fontsize=52)
ax.set_xlabel('Sample',fontsize=12)
ax.set_title('Taxonomic classification, level 1',fontsize=52)
ax.set_yticklabels(ax.get_yticks(), size=40)
plt.show()
# In[ ]:
for i in split_df(df_l1b):
plt.figure(dpi=200)
pal = sns.color_palette("Accent")
plt.rcParams["xtick.labelsize"] = 12
ax = i.sort_values('D_0__Bacteria').plot.bar(stacked=True, color=pal, figsize=(60,7), width=1, edgecolor='white', ax=plt.gca())
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.5),ncol=4,fontsize=52)
ax.set_ylabel('Absolute frequency',fontsize=52)
ax.set_xlabel('Sample',fontsize=12)
ax.set_title('Taxonomic classification, level 1',fontsize=52)
ax.set_yticklabels(ax.get_yticks(), size=40)
ax.set_xticklabels(ax.get_xticklabels(), rotation=90, ha="center", size=12)
plt.show()
# <h3 id="3.3 Sequencing-depth-distribution-per-flow-cell">3.3 Sequencing depth distribution per flow cell</h3>
# Per-sample read depths are recorded in `import_and_demultiplex/<runID>.qzv`. Those values are plotted below, excluding NTC and water blanks. Distributions per flow cell should be similar if the flow cells contained the same number of non-blank samples. If a flow cell contains fewer samples, each sample will have a greater number of reads, so that the total number of reads produced per flow cell remains approximately the same.
# In[ ]:
get_ipython().run_cell_magic('bash', '', 'cd import_and_demultiplex\nfor i in *qzv; do unzip -q $i -d "rpt_${i%.*}"; done\nfor i in rpt_*/*/data/per-sample-fastq-counts.csv; do j=${i%%/*}; k=${j#"rpt_"}; awk -v var="$k" \'BEGIN{FS=",";OFS="\\t"}$1!~/Sample name/{print $1,$2,var}\' $i >> t; done\ncat <(echo -e "Sample_name\\tSequence_count\\tRun_ID") t > rpt_vertical_per-sample-fastq-counts.csv\nrm t\ncd ..')
# In[ ]:
df_depth = pd.read_csv('import_and_demultiplex/rpt_vertical_per-sample-fastq-counts.csv',sep='\t')
search_values = ['Water','NTC']
df_depth_no_blanks = df_depth[~df_depth.Sample_name.str.contains('|'.join(search_values ),case=False)]
plt.figure(dpi=100)
sns.set(style="whitegrid")
ax = sns.boxplot(x="Run_ID",y="Sequence_count",data=df_depth_no_blanks)
ax.set_xticklabels(ax.get_xticklabels(),rotation=40,ha="right")#,fontsize=8)
ax.axes.set_title("Sequencing depth distribution per flow cell",fontsize=12)
# ax.tick_params(labelsize=8)
plt.show()
# <h3 id="3.4 Read-counts-after-filtering-in-blanks-vs.-study-samples">3.4 Read counts after filtering in blanks vs. study samples</h3>
# Per-sample read depths at each filtering step are recorded in `denoising/stats/<runID>.qzv`. The plots below show the mean for each category; error bars indicate the 95% confidence interval.
#
# NTC blanks are expected to have near-zero read depths, and represent false positives introduced by sequencing reagents.
#
# Water blanks are expected to have read depths that are at least one to two orders of magnitude lower than the average study sample depth. They represent the relatively low level of taxa that may be detected in the water used in the lab.
# In[ ]:
get_ipython().run_cell_magic('bash', '', 'cd denoising/stats/\nfor i in *qzv; do unzip -q $i -d "rpt_${i%.*}"; done\nfor i in rpt_*/*/data/metadata.tsv; do dos2unix -q $i; j=${i%%/*}; k=${j#"rpt_"}; awk -v var="$k" \'BEGIN{FS=OFS="\\t"}NR>2{print $0,var}\' $i >> t; done\ncat <(echo -e "sample-id\\tinput\\tfiltered\\tdenoised\\tmerged\\tnon-chimeric\\tflow_cell") t > rpt_denoising_stats.tsv\nrm t\ncd ../..')
# In[ ]:
df_stats = pd.read_csv('denoising/stats/rpt_denoising_stats.tsv',sep='\t')
df_stats = df_stats.set_index('sample-id')
# In[ ]:
def plot_read_counts(samples,pop):
plt.figure(dpi=100)
sns.set(style="whitegrid")
ax = sns.barplot(data=df_stats[df_stats.index.isin(samples)]).set_title('Number of reads in ' + pop + ' samples')
plt.show()
# In[ ]:
if 'sampletype' in manifest.columns:
for i in manifest['sampletype'].unique():
l = list(manifest[manifest['sampletype'].str.match(i)].index)
plot_read_counts(l,i)
else:
print("No Sample Type column detected in manifest.")
# The table below shows the 30 samples with the lowest non-chimeric read counts. This information may be helpful in identifying problematic samples and determining a minimum read threshold for sample inclusion. Note that low-depth study samples will be excluded from diversity analysis based on the sampling depth threshold selected (discussed in the following section).
# In[ ]:
if 'externalid' in manifest.columns:
display(df_stats.join(manifest[['externalid']])[['externalid','input','filtered','denoised','merged','non-chimeric']].sort_values(['non-chimeric']).head(30))
else:
display(df_stats[['input','filtered','denoised','merged','non-chimeric']].sort_values(['non-chimeric']).head(30))
# <h3 id="3.5 Sequential-sample--and-feature-based-filters">3.5 Sequential sample- and feature-based filters</h3>
# We remove samples and features based on the parameters defined in the config. For this run, filtering parameters are as follows:
# In[ ]:
get_ipython().system('grep "min_num_" *.y[a]*ml')
# Four sequential filtering steps are applied as follows:
# 1. Remove any samples with reads below the defined threshold
# 2. Remove any features with reads below the defined threshold
# 3. Remove any features that occur in fewer samples than the defined threshold
# 4. Remove any samples that contain fewer features than the defined threshold
#
# Filtering is propagated through to sequence tables as well.
#
# For this run, filtering resulted in the following counts:
# In[ ]:
get_ipython().system('unzip -q -d read_feature_and_sample_filtering/feature_tables/rpt_1 read_feature_and_sample_filtering/feature_tables/1_remove_samples_with_low_read_count.qzv')
get_ipython().system('unzip -q -d read_feature_and_sample_filtering/feature_tables/rpt_2 read_feature_and_sample_filtering/feature_tables/2_remove_features_with_low_read_count.qzv')
get_ipython().system('unzip -q -d read_feature_and_sample_filtering/feature_tables/rpt_3 read_feature_and_sample_filtering/feature_tables/3_remove_features_with_low_sample_count.qzv')
get_ipython().system('unzip -q -d read_feature_and_sample_filtering/feature_tables/rpt_4 read_feature_and_sample_filtering/feature_tables/4_remove_samples_with_low_feature_count.qzv')
# In[ ]:
get_ipython().system('echo "Feature counts:"')
get_ipython().system('echo "no_filtering" $(grep -cv "^#" denoising/feature_tables/feature-table.from_biom.txt)')
get_ipython().system('echo "remove_samples_with_low_read_count" $(wc -l read_feature_and_sample_filtering/feature_tables/rpt_1/*/data/feature-frequency-detail.csv | cut -d\' \' -f1)')
get_ipython().system('echo "remove_features_with_low_read_count" $(wc -l read_feature_and_sample_filtering/feature_tables/rpt_2/*/data/feature-frequency-detail.csv | cut -d\' \' -f1)')
get_ipython().system('echo "remove_features_with_low_sample_count" $(wc -l read_feature_and_sample_filtering/feature_tables/rpt_3/*/data/feature-frequency-detail.csv | cut -d\' \' -f1)')
get_ipython().system('echo "remove_samples_with_low_feature_count" $(wc -l read_feature_and_sample_filtering/feature_tables/rpt_4/*/data/feature-frequency-detail.csv | cut -d\' \' -f1)')
# In[ ]:
get_ipython().system('echo "Sample counts:"')
get_ipython().system('echo "no_filtering" $(grep -m1 "^#OTU" denoising/feature_tables/feature-table.from_biom.txt | tr "\\t" "\\n" | grep -cv "^#")')
get_ipython().system('echo "remove_samples_with_low_read_count" $(wc -l read_feature_and_sample_filtering/feature_tables/rpt_1/*/data/sample-frequency-detail.csv | cut -d\' \' -f1)')
get_ipython().system('echo "remove_features_with_low_read_count" $(wc -l read_feature_and_sample_filtering/feature_tables/rpt_2/*/data/sample-frequency-detail.csv | cut -d\' \' -f1)')
get_ipython().system('echo "remove_features_with_low_sample_count" $(wc -l read_feature_and_sample_filtering/feature_tables/rpt_3/*/data/sample-frequency-detail.csv | cut -d\' \' -f1)')
get_ipython().system('echo "remove_samples_with_low_feature_count" $(wc -l read_feature_and_sample_filtering/feature_tables/rpt_4/*/data/sample-frequency-detail.csv | cut -d\' \' -f1)')
# <h3 id="3.6 Biological-replicates">3.6 Biological replicates</h3>
# Paired duplicates, for the purposes of this pipeline, are defined by an identical "ExternalID." The taxonomic classifications (using the SILVA 99% OTUs database) at levels 2 through 7 are compared across each pair and evaluated using cosine similarity. The closer the cosine similarity value is to 1, the more similar the vectors are. Note that this comparison uses the taxonomic classification prior to removal of non-bacterial reads.
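# 
# (For reference, the cosine similarity between two abundance vectors $A$ and $B$ is $\frac{A \cdot B}{\lVert A \rVert \, \lVert B \rVert}$: identical relative profiles give 1, and profiles with no shared taxa give 0.)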
# In[ ]:
manifest_no_blanks = manifest[~manifest.index.str.contains('|'.join(['Water','NTC']),case=False)]
if 'externalid' in manifest_no_blanks.columns:
dup1_sample = list(manifest_no_blanks[manifest_no_blanks.duplicated(subset='externalid', keep='first')].sort_values('externalid').index)
dup2_sample = list(manifest_no_blanks[manifest_no_blanks.duplicated(subset='externalid', keep='last')].sort_values('externalid').index)
l = dup1_sample + dup2_sample
else:
print("No External ID column detected in manifest.")
# In[ ]:
def compare_replicates(f,l):
df = pd.read_csv(f[0])
df = df.rename(columns = {'index':'Sample'})
df = df.set_index('Sample')
df_dups = df[df.index.isin(l)]
df_dups = df_dups.select_dtypes(['number']).dropna(axis=1, how='all')
return df_dups
# In[ ]:
from scipy.spatial.distance import cosine
# In[ ]:
ids_list = []
if 'externalid' in manifest_no_blanks.columns:
for a, b in zip(dup1_sample, dup2_sample):
ids = [manifest.loc[a,'externalid'], a, b]
ids_list.append(ids)
df_cosine = pd.DataFrame(ids_list, columns=['externalid','replicate_1','replicate_2'])
levels = [2,3,4,5,6,7]
for n in levels:
cos_list = []
f = glob.glob('taxonomic_classification/rpt_silva/*/data/level-' + str(n) + '.csv')
df_dups = compare_replicates(f, l)
for a, b in zip(dup1_sample, dup2_sample):
cos_list.append(1 - cosine(df_dups.loc[a,],df_dups.loc[b,]))
df_cosine['level_' + str(n)] = cos_list
display(df_cosine)
# In[ ]:
if 'externalid' in manifest_no_blanks.columns:
if (df_cosine.drop(columns=['externalid','replicate_1','replicate_2']) < 0.99 ).any().any():
print("Some biological replicates have cosine similarity below 0.99.")
else:
print("At all levels of taxonomic classification, the biological replicate samples have cosine similarity of at least 0.99.")
# <h3 id="3.7 QC-samples">3.7 QC samples</h3>
# If robogut and/or artificial colony samples are included in the analysis, then the distributions of relative abundances in each sample at classification levels 2 through 6 are shown here. This illustrates the variability between samples within each QC population with regard to taxonomic classification. Note that this section uses the taxonomic classification prior to removal of non-bacterial reads.
# In[ ]:
ac_samples = []
rg_samples = []
if 'sampletype' in manifest.columns:
ac_samples = list(manifest[manifest['sampletype'].str.lower().isin(['artificialcolony','artificial colony'])].index)
rg_samples = list(manifest[manifest['sampletype'].str.lower().isin(['robogut'])].index)
else:
print("No Sample Type column detected in manifest.")
# In[ ]:
def plot_rel_abundances_in_QCs(samples,qc_pop):
levels = [2,3,4,5,6]
for n in levels:
f = glob.glob('taxonomic_classification/rpt_silva/*/data/level-' + str(n) + '.csv')
df = pd.read_csv(f[0],index_col=0)
df = df[df.index.isin(samples)]
df = df.select_dtypes(['number']).dropna(axis=1, how='all').loc[:,~(df==0.0).all(axis=0)]
df_rel = df.div(df.sum(axis=1), axis=0) * 100
plt.figure(dpi=150)
ax = df_rel.boxplot()
ax.set_xticklabels(ax.get_xticklabels(),rotation=90,fontsize=8)
ax.set_title('Distribution of relative abundances in ' + qc_pop + ', level ' + str(n))
plt.show()
# In[ ]:
if ac_samples:
plot_rel_abundances_in_QCs(ac_samples,'artificial colony')
else:
print("No artificial colony samples were included in this pipeline run.")
# In[ ]:
if rg_samples:
plot_rel_abundances_in_QCs(rg_samples,'robogut')
else:
print("No robogut samples were included in this pipeline run.")
# <h2 id="4 Rarefaction-threshold">4 Rarefaction threshold</h2>
# QIIME randomly subsamples the reads per sample, without replacement, up to the sampling depth parameter. Samples with reads below the sampling depth are excluded from analysis. A higher sampling depth will include more reads overall, but will also exclude more samples.
#
# Our default sampling depth is 10,000, which is the setting for the initial pipeline run (`<datestamp>_initial_run`). The information provided in this section may be used to fine tune the sampling depth for subsequent runs.
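# 
# (Illustration: rarefying to a depth $d$ amounts to drawing $d$ reads per sample without replacement, e.g. `np.random.choice(sample_reads, size=d, replace=False)`; any sample with fewer than $d$ reads cannot be subsampled and is therefore excluded.)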
# In[ ]:
get_ipython().system('unzip -q -d bacteria_only/feature_tables/rpt_merged_{ref_db}_qzv bacteria_only/feature_tables/{ref_db}/merged.qzv')
# In[ ]:
df_features_per_samples = pd.read_csv(glob.glob('bacteria_only/feature_tables/rpt_merged_' + ref_db + '_qzv/*/data/sample-frequency-detail.csv')[0],sep=",",header=None,index_col=0)
if 'externalid' in manifest.columns:
df_features_per_samples = df_features_per_samples.join(manifest[['externalid']]).set_index('externalid')
sample_ttl = len(df_features_per_samples.index)
feature_ttl = df_features_per_samples[1].sum()
blank_ttl = len(df_features_per_samples[df_features_per_samples.index.str.contains('Water|NTC',case=False)])
values = [5000,10000,15000,20000,25000,30000,35000,40000]
samples = []
features = []
blanks = []
ids = []
for n in values:
    df_temp = df_features_per_samples[df_features_per_samples[1] > n]
    l = df_features_per_samples[df_features_per_samples[1] <= n].index.to_list()
    l.sort()
    ids.append(l)
    samples_left = len(df_temp.index)
    blanks_left = len(df_temp[df_temp.index.str.contains('Water|NTC',case=False)])
    samples.append(samples_left/sample_ttl * 100)
    features.append((samples_left * n)/feature_ttl * 100)
    if blank_ttl != 0:
        blanks.append(blanks_left/blank_ttl * 100)
    else:
        blanks.append("NA")
df_rarify = pd.DataFrame(list(zip(values, samples, features, ids, blanks)),columns=['Sampling_depth','Percent_retained_samples','Percent_retained_seqs','Samples_excluded','Percent_retained_blanks'])
df_rarify = df_rarify.set_index('Sampling_depth')
pd.set_option('display.max_colwidth', 0)
df_rarify[['Samples_excluded','Percent_retained_samples','Percent_retained_blanks']]
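# Note that Percent_retained_seqs is an estimate rather than an exact count: after rarefaction every retained sample contributes exactly the sampling depth, so the value is computed as (retained samples x depth) / (total features) x 100. With hypothetical numbers, if 90 of 100 samples exceed a depth of 10,000 and the run contains 3,000,000 total features, the estimate is 90 * 10,000 / 3,000,000 * 100 = 30%.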
# In[ ]:
df_rarify_tidy = df_rarify.reset_index().drop(columns=['Samples_excluded','Percent_retained_blanks']).melt(id_vars='Sampling_depth')
df_rarify_tidy.columns = ['Sampling_depth','Var','Percent_retained']
df_rarify_tidy['Var'] = df_rarify_tidy['Var'].str.replace('Percent_retained_s','S')
plt.figure(dpi=120)
plt.rcParams["xtick.labelsize"] = 12
ax = sns.lineplot(x="Sampling_depth", y="Percent_retained", hue="Var",data=df_rarify_tidy)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles=handles[1:], labels=labels[1:], loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
# For this pipeline run, the rarefaction depth was set in the config file as follows:
# In[ ]:
get_ipython().system('grep "sampling_depth" *.y[a]*ml')
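# The matched line is expected to read something like `sampling_depth: 10000` (shown with the documented default for illustration; the value for any given run comes from that run's config file).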
# <h2 id="5 Alpha-diversity">5 Alpha diversity</h2>
# Alpha diversity measures within-sample diversity, i.e. the richness and evenness of the species observed in a single sample.
#
# The rarefaction curves below show the observed diversity as a function of sampling depth (the number of reads drawn per sample). The various plots are stratified by the metadata available in the manifest. The curves are expected to grow rapidly as common species are identified, then plateau as only the rarest species remain to be sampled. The rarefaction threshold discussed above should fall within the plateau of the rarefaction curves.
#
# This report provides the following alpha diversity metrics:
# - __Observed OTUs:__ the number of distinct features (OTUs) observed in each sample
# - __Shannon diversity index:__ calculates richness and diversity using a natural logarithm; accounts for both the abundance and the evenness of the taxa present; more sensitive to species richness than to evenness (a short worked sketch follows this list)
# - __Faith's phylogenetic diversity:__ a measure of biodiversity that incorporates the phylogenetic distance between species as the sum of the branch lengths spanning the observed taxa
#
# Note that both phylogenetic tree construction and alpha diversity analysis are performed after non-bacterial read exclusion.
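# As a minimal worked sketch (illustrative only, using a hypothetical count vector; QIIME computes these metrics internally and the logarithm base is a convention), the first two metrics reduce to:
# In[ ]:
import numpy as np
counts = np.array([40, 30, 20, 10])        # hypothetical feature counts for one sample
observed_otus = int((counts > 0).sum())    # number of distinct features observed
p = counts / counts.sum()                  # relative abundances
shannon = float(-(p * np.log(p)).sum())    # Shannon index using a natural logarithm
print(observed_otus, round(shannon, 3))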
# In[ ]:
get_ipython().system('unzip -q -d diversity_core_metrics/{ref_db}/rpt_rarefaction diversity_core_metrics/{ref_db}/rarefaction.qzv')
# In[ ]:
def format_alpha_data(metric, csv):
    df = pd.read_csv(csv,index_col=0)
    df.columns = map(str.lower, df.columns)
    depth_cols = [col for col in df.columns if 'depth-' in col]
    non_depth_cols = [col for col in df.columns if 'depth-' not in col]
    depths = list(set([i.split('_', 1)[0] for i in depth_cols]))
    iters = list(set([i.split('_', 1)[1] for i in depth_cols]))
    df_melt1 = pd.DataFrame()
    df_melt2 = pd.DataFrame()
    # first pass: for each depth, gather its iteration columns so each row is one (sample, depth)
    for d in depths:
        df_temp = df.filter(regex=d+'_')
        df_temp.columns = iters
        df_temp = pd.concat([df_temp,df[non_depth_cols]],axis=1)
        df_temp['depth'] = int(d.split('-')[1])
        df_melt1 = pd.concat([df_melt1,df_temp],axis=0)
    non_depth_cols.append('depth')
    # second pass: stack the iteration columns so each row holds a single (sample, depth, iteration) value of the metric
    for i in iters:
        df_temp = df_melt1.filter(regex='^' + i + '$')
        df_temp.columns = [metric]
        df_temp = pd.concat([df_temp,df_melt1[non_depth_cols]],axis=1)
        df_temp['iteration'] = int(i.split('-')[1])
        df_melt2 = pd.concat([df_melt2,df_temp],axis=0)
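# For orientation, the rarefaction CSV consumed by this function has one row per sample and wide columns following a depth-<d>_<label>-<i> pattern (the exact suffix label is not important to the parsing above), plus any manifest metadata columns. A tiny hypothetical example of that shape:
# In[ ]:
example_rarefaction = pd.DataFrame(
    {'depth-10_iter-1': [5, 7],
     'depth-10_iter-2': [6, 8],
     'depth-20_iter-1': [9, 11],
     'sampletype': ['stool', 'robogut']},
    index=['sample_a', 'sample_b'])   # hypothetical sample IDs, counts, and metadata
display(example_rarefaction)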