| prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars) |
---|---|---|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2022 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import pytest
import gc
import copy
import numpy as np
import pandas as pd
try:
import geopandas as gpd
import shapely.geometry
GEOPANDAS_INSTALLED = True
except ImportError:
GEOPANDAS_INSTALLED = False
from pandapower.auxiliary import get_indices
import pandapower as pp
import pandapower.networks
import pandapower.control
import pandapower.timeseries
class MemoryLeakDemo:
"""
Dummy class to demonstrate memory leaks
"""
def __init__(self, net):
self.net = net
        # interestingly, if "self" is just an attribute of net, there are no problems
# if "self" is saved in a DataFrame, it causes a memory leak
net['memory_leak_demo'] = pd.DataFrame(data=[self], columns=['object'])
class MemoryLeakDemoDF:
"""
Dummy class to demonstrate memory leaks
"""
def __init__(self, df):
self.df = df
# if "self" is saved in a DataFrame, it causes a memory leak
df.loc[0, 'object'] = self
class MemoryLeakDemoDict:
"""
Dummy class to demonstrate memory leaks
"""
def __init__(self, d):
self.d = d
d['object'] = self
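# Hedged sketch (an assumption, not the actual pandapower helper): the tests below rely
# on pp.toolbox.get_gc_objects_dict(), which can be thought of as a per-type census of
# all objects currently tracked by the garbage collector, roughly like this.
def _gc_objects_by_type_sketch():
    import collections
    return collections.Counter(type(obj) for obj in gc.get_objects())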
def test_get_indices():
a = [i + 100 for i in range(10)]
lookup = {idx: pos for pos, idx in enumerate(a)}
lookup["before_fuse"] = a
    # first, the plain case without fused buses - nothing special happens here
# after fuse
result = get_indices([102, 107], lookup, fused_indices=True)
assert np.array_equal(result, [2, 7])
# before fuse
result = get_indices([2, 7], lookup, fused_indices=False)
assert np.array_equal(result, [102, 107])
# Same setup EXCEPT we have fused buses now (bus 102 and 107 are fused)
lookup[107] = lookup[102]
# after fuse
result = get_indices([102, 107], lookup, fused_indices=True)
assert np.array_equal(result, [2, 2])
# before fuse
result = get_indices([2, 7], lookup, fused_indices=False)
assert np.array_equal(result, [102, 107])
def test_net_deepcopy():
net = pp.networks.example_simple()
net.line_geodata.loc[0, 'coords'] = [[0, 1], [1, 2]]
net.bus_geodata.loc[0, ['x', 'y']] = 0, 1
pp.control.ContinuousTapControl(net, tid=0, vm_set_pu=1)
ds = pp.timeseries.DFData(pd.DataFrame(data=[[0, 1, 2], [3, 4, 5]]))
pp.control.ConstControl(net, element='load', variable='p_mw', element_index=[0], profile_name=[0], data_source=ds)
net1 = copy.deepcopy(net)
assert not net1.controller.object.at[1].data_source is ds
assert not net1.controller.object.at[1].data_source.df is ds.df
assert not net1.line_geodata.coords.at[0] is net.line_geodata.coords.at[0]
if GEOPANDAS_INSTALLED:
for tab in ('bus_geodata', 'line_geodata'):
if tab == 'bus_geodata':
geometry = net[tab].apply(lambda x: shapely.geometry.Point(x.x, x.y), axis=1)
else:
geometry = net[tab].coords.apply(shapely.geometry.LineString)
net[tab] = gpd.GeoDataFrame(net[tab], geometry=geometry)
net1 = net.deepcopy()
assert isinstance(net1.line_geodata, gpd.GeoDataFrame)
assert isinstance(net1.bus_geodata, gpd.GeoDataFrame)
assert isinstance(net1.bus_geodata.geometry.iat[0], shapely.geometry.Point)
assert isinstance(net1.line_geodata.geometry.iat[0], shapely.geometry.LineString)
def test_memory_leaks():
net = pp.networks.example_simple()
# first, test to check that there are no memory leaks
types_dict1 = pp.toolbox.get_gc_objects_dict()
num = 3
for _ in range(num):
net_copy = copy.deepcopy(net)
        # each net copy gets exactly one controller
pp.control.ContinuousTapControl(net_copy, tid=0, vm_set_pu=1)
gc.collect()
types_dict2 = pp.toolbox.get_gc_objects_dict()
assert types_dict2[pandapower.auxiliary.pandapowerNet] - types_dict1[pandapower.auxiliary.pandapowerNet] == 1
assert types_dict2[pandapower.control.ContinuousTapControl] - types_dict1.get(
pandapower.control.ContinuousTapControl, 0) == 1
def test_memory_leaks_demo():
net = pp.networks.example_simple()
# first, test to check that there are no memory leaks
types_dict1 = pp.toolbox.get_gc_objects_dict()
# now, demonstrate how a memory leak occurs
# emulates the earlier behavior before the fix with weakref
num = 3
for _ in range(num):
net_copy = copy.deepcopy(net)
MemoryLeakDemo(net_copy)
# demonstrate how the garbage collector doesn't remove the objects even if called explicitly
gc.collect()
types_dict2 = pp.toolbox.get_gc_objects_dict()
assert types_dict2[pandapower.auxiliary.pandapowerNet] - types_dict1[pandapower.auxiliary.pandapowerNet] == num
assert types_dict2[MemoryLeakDemo] - types_dict1.get(MemoryLeakDemo, 0) == num
def test_memory_leaks_no_copy():
types_dict0 = pp.toolbox.get_gc_objects_dict()
num = 3
for _ in range(num):
net = pp.create_empty_network()
        # each newly created net gets exactly one controller
pp.control.ConstControl(net, 'sgen', 'p_mw', 0)
gc.collect()
types_dict1 = pp.toolbox.get_gc_objects_dict()
assert types_dict1[pandapower.control.ConstControl] - types_dict0.get(pandapower.control.ConstControl, 0) == 1
assert types_dict1[pandapower.auxiliary.pandapowerNet] - types_dict0.get(pandapower.auxiliary.pandapowerNet, 0) <= 1
def test_memory_leak_no_copy_demo():
types_dict1 = pp.toolbox.get_gc_objects_dict()
# now, demonstrate how a memory leak occurs
# emulates the earlier behavior before the fix with weakref
num = 3
for _ in range(num):
net = pp.networks.example_simple()
MemoryLeakDemo(net)
# demonstrate how the garbage collector doesn't remove the objects even if called explicitly
gc.collect()
types_dict2 = pp.toolbox.get_gc_objects_dict()
assert types_dict2[pandapower.auxiliary.pandapowerNet] - \
types_dict1.get(pandapower.auxiliary.pandapowerNet, 0) >= num-1
assert types_dict2[MemoryLeakDemo] - types_dict1.get(MemoryLeakDemo, 0) == num
def test_memory_leak_df():
types_dict1 = pp.toolbox.get_gc_objects_dict()
num = 3
for _ in range(num):
df = pd.DataFrame()
MemoryLeakDemoDF(df)
gc.collect()
types_dict2 = pp.toolbox.get_gc_objects_dict()
assert types_dict2[MemoryLeakDemoDF] - types_dict1.get(MemoryLeakDemoDF, 0) == num
def test_memory_leak_dict():
types_dict1 = pp.toolbox.get_gc_objects_dict()
num = 3
for _ in range(num):
d = dict()
MemoryLeakDemoDict(d)
gc.collect()
types_dict2 = pp.toolbox.get_gc_objects_dict()
assert types_dict2[MemoryLeakDemoDict] - types_dict1.get(MemoryLeakDemoDict, 0) <= 1
def test_create_trafo_characteristics():
net = pp.networks.example_multivoltage()
# test 2 modes, multiple index and single index, for 2w trafo
pp.control.create_trafo_characteristics(net, "trafo", [1], 'vk_percent', [[-2,-1,0,1,2]], [[2,3,4,5,6]])
assert "characteristic" in net
assert "tap_dependent_impedance" in net.trafo.columns
assert net.trafo.tap_dependent_impedance.dtype == np.bool_
assert net.trafo.tap_dependent_impedance.at[1]
assert not net.trafo.tap_dependent_impedance.at[0]
assert "vk_percent_characteristic" in net.trafo.columns
assert net.trafo.at[1, 'vk_percent_characteristic'] == 0
assert pd.isnull(net.trafo.at[0, 'vk_percent_characteristic'])
assert net.trafo.vk_percent_characteristic.dtype == pd.Int64Dtype()
assert "vkr_percent_characteristic" not in net.trafo.columns
pp.control.create_trafo_characteristics(net, "trafo", 1, 'vkr_percent', [-2,-1,0,1,2], [1.323,1.324,1.325,1.326,1.327])
assert len(net.characteristic) == 2
assert "vkr_percent_characteristic" in net.trafo.columns
assert net.trafo.at[1, 'vkr_percent_characteristic'] == 1
assert pd.isnull(net.trafo.at[0, 'vkr_percent_characteristic'])
assert net.trafo.vkr_percent_characteristic.dtype == | pd.Int64Dtype() | pandas.Int64Dtype |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Country data of B.1.1.7 occurrence.
Function: get_country_data().
@author: @hk_nien
"""
import re
from pathlib import Path
import pandas as pd
import datetime
import numpy as np
def _ywd2date(ywd):
"""Convert 'yyyy-Www-d' string to date (12:00 on that day)."""
twelvehours = | pd.Timedelta('12 h') | pandas.Timedelta |
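# Hedged sketch (an assumption - the original function body is truncated above at the
# pd.Timedelta call): an ISO 'yyyy-Www-d' string can be parsed with the %G/%V/%u
# strptime directives and shifted to midday, roughly like this.
def _ywd2date_sketch(ywd):
    """Convert 'yyyy-Www-d' (ISO year-week-day) to a timestamp at 12:00 on that day."""
    import datetime
    import pandas as pd
    day = datetime.datetime.strptime(ywd, '%G-W%V-%u')
    return pd.Timestamp(day) + pd.Timedelta('12 h')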
"""
2021.05 Redesign
_______________________________________________________________________________________________________________________
Original colloidspy design was just a series of functions that were not tied together in any meaningful way.
These functions each had their own error handling and type checking to make sure it would run smoothly, however they
were not very structured.
This version redesigns the architecture to an object-oriented style.
- Adam
"""
import os
from pathlib import Path
import numpy as np
import pandas as pd
import cv2
import skimage.io as io
import skimage.filters as filters
from skimage.util import img_as_ubyte
from skimage.feature import peak_local_max
from skimage.segmentation import watershed
from scipy import ndimage, stats
import matplotlib.pyplot as plt
from matplotlib.widgets import RectangleSelector
# %matplotlib qt
from tqdm import tqdm
def load(path, conserve_memory=True):
stack = io.ImageCollection(path, conserve_memory=conserve_memory)
if len(np.shape(stack)) == 4:
from skimage.color import rgb2gray
if np.shape(stack)[-1] == 4:
from skimage.color import rgba2rgb
files = stack.files
stack = [rgb2gray(rgba2rgb(stack[i])) for i in range(len(stack))]
else:
files = stack.files
stack = [rgb2gray(stack[i]) for i in range(len(stack))]
return img_as_ubyte(stack), files
else:
return img_as_ubyte(stack), stack.files
def view_particles(img, particles, min_area=0, fill=False, weight=0):
if 'bool' in str(type(img[0][0])):
clusters = cv2.cvtColor(np.zeros(img.shape, np.uint8), cv2.COLOR_GRAY2RGB)
else:
clusters = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
for particle in np.unique(particles):
# if the label is zero, we are examining the 'background', so ignore it
if particle == 0:
continue
# otherwise, allocate memory for the label region and draw it on the mask
mask = np.zeros(img.shape, np.uint8)
mask[particles == particle] = 255
# detect contours in the mask and grab the largest one
try:
cnts, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
except ValueError:
ct_im, cnts, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if cv2.contourArea(cnts[0]) < min_area:
continue
if fill:
cv2.drawContours(clusters, cnts, 0, (255, 255, 255), -1)
cv2.drawContours(clusters, cnts, 0, (255, 0, 0), weight)
else:
cv2.drawContours(clusters, cnts, 0, (255, 0, 0), weight)
return clusters
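# Hedged helper sketch (an assumption - not part of the original module): the try/except
# around cv2.findContours above exists because OpenCV 3 returns three values while
# OpenCV 4 returns two; a small wrapper can hide that difference.
def _find_contours_compat(mask):
    import cv2
    result = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    return result[0] if len(result) == 2 else result[1]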
class CspyStack(np.ndarray):
"""
    Class to hold raw and processed images and their respective particle data.
Attributes:
cropped - image ROI's, set with add_cropped()
binary_otsu - ROI's binarized with otsu threshold, set with add_otsu()
binary_loc - ROI's binarized with local threshold, set with add_local_threshold()
binary_hyst - ROI's binarized with hysteresis threshold, set with add_hysteresis_threshold()
cleaned - Cleaned binary ROI's, set with add_cleaned(BinaryStack) where BinaryStack is the set of binary
images chosen earlier with otsu, local, or hysteresis threshold methods
particles - ROI where the pixels of all particles are assigned a unique integer labeled by particle
particle_data - pandas dataframes of particle data in each image
crop_coords - (x,y) coordinates of image ROI
"""
def __new__(cls, input_array, filenames=None, cropped=None, crop_coords=None, binary_otsu=None, binary_loc=None,
binary_hyst=None, cleaned=None, particles=None, particle_data=None):
obj = np.asarray(input_array).view(cls)
obj.filenames = filenames
obj.cropped = cropped
obj.crop_coords = crop_coords
obj.binary_otsu = binary_otsu
obj.binary_loc = binary_loc
obj.binary_hyst = binary_hyst
obj.cleaned = cleaned
obj.particles = particles
obj.particle_data = particle_data
return obj
def __array_finalize__(self, obj):
if obj is None:
return
self.filenames = getattr(obj, 'filenames', None)
self.cropped = getattr(obj, 'cropped', None)
self.crop_coords = getattr(obj, 'crop_coords', None)
self.binary_otsu = getattr(obj, 'binary_otsu', None)
self.binary_loc = getattr(obj, 'binary_loc', None)
self.binary_hyst = getattr(obj, 'binary_hyst', None)
self.cleaned = getattr(obj, 'cleaned', None)
self.particles = getattr(obj, 'particles', None)
self.particle_data = getattr(obj, 'particle_data', None)
def add_cropped(self, cropall=True):
"""
        Interactive image cropper, built with matplotlib.
Select ROI from top-left to bottom-right with mouse, press any key to accept selection except q and r
Sets CspyStack.cropped attribute
:param cropall: True - Same ROI is applied to all images in stack. False - crop all images individually
:return: nothing
"""
def line_select_callback(eclick, erelease):
x1, y1 = eclick.xdata, eclick.ydata
x2, y2 = erelease.xdata, erelease.ydata
while erelease is None:
pass
print("(%3.2f, %3.2f) --> (%3.2f, %3.2f)" % (x1, y1, x2, y2))
print(" The button you used were: %s %s" % (eclick.button, erelease.button))
self.crop_coords.append(((int(x1), int(y1)), (int(x2), int(y2))))
def toggle_selector(event):
print(' Key pressed.')
if event.key in ['Q', 'q'] and toggle_selector.RS.active:
print(' RectangleSelector deactivated.')
toggle_selector.RS.set_active(False)
if event.key in ['A', 'a'] and not toggle_selector.RS.active:
print(' RectangleSelector activated.')
toggle_selector.RS.set_active(True)
def select_roi(img):
fig, current_ax = plt.subplots()
plt.title("Select ROI, press any key to continue.")
plt.imshow(img, cmap='gray') # show the first image in the stack
toggle_selector.RS = RectangleSelector(current_ax, line_select_callback,
drawtype='box', useblit=True,
button=[1, 3], # don't use middle button
minspanx=5, minspany=5,
spancoords='pixels',
interactive=True)
plt.connect('key_press_event', toggle_selector)
plt.show()
# # some IDE's (pycharm, jupyter) move on to plt.close(fig) instead of waiting for the callback to finish
# # if that is the case, uncomment the next three lines
keyboardClick = False
while not keyboardClick:
keyboardClick = plt.waitforbuttonpress()
plt.close(fig)
self.crop_coords = []
if len(self.shape) == 2:
img = self
select_roi(img)
# Crop all images in the stack
# Numpy's axis convention is (y,x) or (row,col)
tpleft = [self.crop_coords[0][0][1], self.crop_coords[0][0][0]]
btmright = [self.crop_coords[0][1][1], self.crop_coords[0][1][0]]
self.cropped = CspyStack(self[tpleft[0]:btmright[0], tpleft[1]:btmright[1]])
elif len(self.shape) == 3:
if cropall:
img = self[0]
select_roi(img)
tpleft = [self.crop_coords[0][0][1], self.crop_coords[0][0][0]]
btmright = [self.crop_coords[0][1][1], self.crop_coords[0][1][0]]
self.cropped = CspyStack([self[i][tpleft[0]:btmright[0], tpleft[1]:btmright[1]]
for i in range(len(self))])
else:
cropped_imgs = []
for i in range(len(self)):
img = self[i]
select_roi(img)
tpleft = [self.crop_coords[i][0][1], self.crop_coords[i][0][0]]
btmright = [self.crop_coords[i][1][1], self.crop_coords[i][1][0]]
cropped_imgs.append(self[i][tpleft[0]:btmright[0], tpleft[1]:btmright[1]])
self.cropped = CspyStack(cropped_imgs)
else:
raise Exception("TypeError in add_cropped - is the stack greyscale?")
def add_otsu(self, nbins=256):
"""
Adds attribute binary_otsu from CspyStack.cropped if available. Otherwise, uses raw stack.
:param nbins: number of bins in image histogram
:return: nothing
"""
if self.cropped is None:
if len(self.shape) == 3:
binary = []
for i in tqdm(range(len(self)), desc='Applying otsu threshold to CspyStack.binary_otsu', leave=True):
otsu = filters.threshold_otsu(self[i], nbins=nbins)
binary.append(img_as_ubyte(self[i] > otsu))
self.binary_otsu = CspyStack(binary)
elif len(self.shape) == 2:
print('Applying otsu threshold to CspyStack.binary_otsu')
otsu = filters.threshold_otsu(self, nbins=nbins)
self.binary_otsu = CspyStack(img_as_ubyte(self > otsu))
else:
raise Exception
else:
if len(self.shape) == 3:
binary = []
for i in tqdm(range(len(self)), desc='Applying otsu threshold to CspyStack.binary_otsu', leave=True):
otsu = filters.threshold_otsu(self.cropped[i], nbins=nbins)
binary.append(img_as_ubyte(self.cropped[i] > otsu))
self.binary_otsu = CspyStack(binary)
elif len(self.shape) == 2:
print('Applying otsu threshold to CspyStack.binary_otsu')
otsu = filters.threshold_otsu(self.cropped, nbins=nbins)
self.binary_otsu = CspyStack(img_as_ubyte(self.cropped > otsu))
else:
raise Exception('TypeError: shape of images not correct. Is stack greyscale?')
def add_local_threshold(self, block_size=71, offset=5, cutoff=0, **kwargs):
"""
Adds attribute binary_loc from CspyStack.cropped if available. Otherwise, uses raw stack.
See https://scikit-image.org/docs/stable/api/skimage.filters.html#skimage.filters.threshold_local
:param block_size: Odd size of pixel neighborhood which is used to calculate the threshold value
:param offset: Constant subtracted from weighted mean of neighborhood to calculate the local threshold value.
:param cutoff: lowest pixel value in gaussian image to be considered in threshold. Useful if large black areas
are showing up as white areas in the thresholded image
:param kwargs: other kwargs from skimage threshold_local() function
:return:
"""
if self.cropped is None:
if len(self.shape) == 3:
binary = []
for i in tqdm(range(len(self)), desc='Applying local threshold to CspyStack.binary_loc', leave=True):
local_thresh = filters.threshold_local(self[i], block_size=block_size, offset=offset, **kwargs)
low_val_flags = local_thresh < cutoff
local_thresh[low_val_flags] = 255
binary.append(img_as_ubyte(self[i] > local_thresh))
self.binary_loc = CspyStack(binary)
elif len(self.shape) == 2:
print('Applying local threshold to CspyStack.binary_loc')
local_thresh = filters.threshold_local(self, block_size=block_size, offset=offset, **kwargs)
low_val_flags = local_thresh < cutoff
local_thresh[low_val_flags] = 255
self.binary_loc = CspyStack(img_as_ubyte(self > local_thresh))
else:
raise Exception('TypeError: shape of images not correct. Is stack greyscale?')
else:
if len(self.shape) == 3:
binary = []
for i in tqdm(range(len(self)), desc='Applying local threshold to CspyStack.binary_loc', leave=True):
local_thresh = filters.threshold_local(self.cropped[i], block_size=block_size, offset=offset, **kwargs)
low_val_flags = local_thresh < cutoff
local_thresh[low_val_flags] = 255
binary.append(img_as_ubyte(self.cropped[i] > local_thresh))
self.binary_loc = CspyStack(binary)
elif len(self.shape) == 2:
print('Applying local threshold to CspyStack.binary_loc')
local_thresh = filters.threshold_local(self.cropped, block_size=block_size, offset=offset, **kwargs)
low_val_flags = local_thresh < cutoff
local_thresh[low_val_flags] = 255
self.binary_loc = CspyStack(img_as_ubyte(self.cropped > local_thresh))
else:
raise Exception('TypeError: shape of images not correct. Is stack greyscale?')
def add_hysteresis_threshold(self, low=20, high=150):
"""
Adds attribute binary_hyst from CspyStack.cropped if available, otherwise uses raw stack.
See https://scikit-image.org/docs/dev/auto_examples/filters/plot_hysteresis.html
        Pixels above the high threshold are always considered part of a particle; pixels between the low and high
        values only count as part of a particle if they are connected to a pixel above the high threshold.
        :param low: lowest pixel value to be considered as part of a potential particle
        :param high: pixel values above this threshold are always considered part of a particle
:return: nothing
"""
if self.cropped is None:
if len(self.shape) == 3:
binary = []
for i in tqdm(range(len(self)),
desc='Applying hysteresis threshold to CspyStack.binary_hyst', leave=True):
binary.append(filters.apply_hysteresis_threshold(self[i], low=low, high=high))
self.binary_hyst = CspyStack(binary)
elif len(self.shape) == 2:
print('Applying hysteresis threshold to CspyStack.binary_hyst')
self.binary_hyst = CspyStack(filters.apply_hysteresis_threshold(self, low=low, high=high))
else:
raise Exception('TypeError: shape of images not correct. Is stack greyscale?')
else:
if len(self.shape) == 3:
binary = []
for i in tqdm(range(len(self)),
desc='Applying hysteresis threshold to CspyStack.binary_hyst', leave=True):
binary.append(filters.apply_hysteresis_threshold(self.cropped[i], low=low, high=high))
self.binary_hyst = CspyStack(binary)
elif len(self.shape) == 2:
print('Applying hysteresis threshold to CspyStack.binary_hyst')
self.binary_hyst = CspyStack(filters.apply_hysteresis_threshold(self.cropped, low=low, high=high))
else:
raise Exception('TypeError: shape of images not correct. Is stack greyscale?')
def add_cleaned(self, bin_stack):
"""
Adds attribute CspyStack.cleaned
        Removes small, roughly single-pixel imperfections, leaving only the larger features.
Uses a binary opening and closing algorithm
:param bin_stack: binary images to clean. Use one of the three binary image attributes (otsu, loc, hyst)
:return:
"""
if len(self.shape) == 3:
self.cleaned = []
for i in tqdm(range(len(self)), desc='Adding cleaned to CspyStack.cleaned', leave=True):
self.cleaned.append(img_as_ubyte(ndimage.binary_closing(ndimage.binary_opening(bin_stack[i]))))
elif len(self.shape) == 2:
print('Adding cleaned to CspyStack.cleaned')
self.cleaned = img_as_ubyte(ndimage.binary_closing(ndimage.binary_opening(bin_stack)))
else:
raise Exception
def find_particles(self, bin_stack, min_distance=7):
"""
Adds attribute CspyStack.particles
Applies a watershed algorithm to the binary stack (preferably cleaned) to identify distinct particles.
Pixels are assigned an integer, unique by particle, where 0 is the background
:param bin_stack: stack of binary images with particles to detect
:param min_distance: minimum distance in pixels between particle centers
:return: nothing
"""
if type(bin_stack) == list or len(bin_stack.shape) == 3:
particles = []
for i in tqdm(range(len(bin_stack)), desc='Adding detected particles to CspyStack.particles', leave=True):
distance = ndimage.distance_transform_edt(bin_stack[i])
local_max = peak_local_max(distance, min_distance=min_distance, labels=bin_stack[i])
local_max_mask = np.zeros(distance.shape, dtype=bool)
local_max_mask[tuple(local_max.T)] = True
markers = ndimage.label(local_max_mask)[0]
labels = watershed(-distance, markers, mask=bin_stack[i])
particles.append(labels)
self.particles = CspyStack(particles)
elif len(bin_stack.shape) == 2:
print('Adding detected particles to CspyStack.particles')
distance = ndimage.distance_transform_edt(bin_stack)
local_max = peak_local_max(distance, min_distance=min_distance, labels=bin_stack)
local_max_mask = np.zeros(distance.shape, dtype=bool)
local_max_mask[tuple(local_max.T)] = True
markers = ndimage.label(local_max_mask)[0]
labels = watershed(-distance, markers, mask=bin_stack)
self.particles = CspyStack(labels)
else:
raise Exception
def analyze_particles(self, particles, min_area=0):
"""
Adds attribute CspyStack.particle_data
:param particles: stack of watershed images where particles are labeled by a unique integer
:param min_area: minimum particle area to be considered a particle
:return: list of pandas dataframes of particle data, or single dataframe if only one image was passed
"""
def single(particles, min_area):
# clusters = np.zeros(image.shape, np.uint8)
clusters = np.zeros(particles.shape, np.uint8)
cl_area = []
cl_perimeter = []
cl_center = []
cl_circularity = []
defect_len_avg = []
defect_len_std = []
defect_len_min = []
defect_len_max = []
for particle in np.unique(particles):
if particle == 0:
continue
# mask = np.zeros(image.shape, np.uint8)
mask = np.zeros(particles.shape, np.uint8)
mask[particles == particle] = 255
try:
cnts, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
except ValueError:
                # older versions of opencv return three objects instead of two
ct_im, cnts, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if cv2.contourArea(cnts[0]) < min_area:
continue
cv2.drawContours(clusters, cnts, 0, 255, -1)
cl_area.append(cv2.contourArea(cnts[0]))
cl_perimeter.append(cv2.arcLength(cnts[0], 1))
M = cv2.moments(cnts[0], 0)
if cl_area[-1] != 0:
cl_center.append(tuple([int(M['m10'] / M['m00']), int(M['m01'] / M['m00'])]))
cl_circularity.append(4 * np.pi * (cv2.contourArea(cnts[0])) / (cv2.arcLength(cnts[0], 1) ** 2))
else:
cl_center.append("None")
cl_circularity.append("None")
# find the convex hull of the particle, and extract the defects
cnt = cnts[0]
hull = cv2.convexHull(cnt, returnPoints=False)
# dhull = cv2.convexHull(cnt, returnPoints=True)
defects = cv2.convexityDefects(cnt, hull)
pt_defects = []
if defects is not None:
for j in range(defects.shape[0]):
s, e, f, d = defects[j, 0]
# start = tuple(cnt[s][0])
# end = tuple(cnt[e][0])
far = tuple(cnt[f][0])
tfar = tuple(map(int, far)) # current(?) v of opencv-python doesnt play well with np.int32
# store the length from the defects to the hull
pt_defects.append(cv2.pointPolygonTest(cnt, tfar, True))
# store the mean and stdev of the defect length
# if there are no defects, just store 0
if len(pt_defects) == 0:
defect_len_avg.append(0)
defect_len_std.append(0)
defect_len_min.append(0)
defect_len_max.append(0)
else:
defect_len_avg.append(np.mean(pt_defects))
defect_len_std.append(np.std(pt_defects))
defect_len_min.append(min(pt_defects))
defect_len_max.append(max(pt_defects))
cluster_data = {'Area': cl_area,
'Perimeter': cl_perimeter,
'Center': cl_center,
'Circularity': cl_circularity,
'Average Defect Length': defect_len_avg,
'Stdev of Defect Length': defect_len_std,
'Min Defect Length': defect_len_min,
'Max Defect Length': defect_len_max
}
cluster_df = | pd.DataFrame(cluster_data) | pandas.DataFrame |
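# Hedged standalone sketch (synthetic image, not data from the module above): the
# find_particles() step follows the classic distance-transform + watershed recipe -
# distance map, local maxima as markers, watershed on the negated distance.
import numpy as np
from scipy import ndimage
from skimage.feature import peak_local_max
from skimage.segmentation import watershed

demo_binary = np.zeros((40, 40), dtype=np.uint8)
demo_binary[5:20, 5:20] = 1
demo_binary[15:35, 15:35] = 1          # two overlapping blobs
demo_distance = ndimage.distance_transform_edt(demo_binary)
demo_peaks = peak_local_max(demo_distance, min_distance=7, labels=demo_binary)
demo_markers = np.zeros(demo_distance.shape, dtype=bool)
demo_markers[tuple(demo_peaks.T)] = True
demo_labels = watershed(-demo_distance, ndimage.label(demo_markers)[0], mask=demo_binary)
# demo_labels: 0 = background, 1..N = the separated particles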
import pandas as pd
from selenium import webdriver
from bs4 import BeautifulSoup as bs
from selenium.webdriver.common.keys import Keys
import time
import sys
"""--------- Construction is Running ------------"""
"""--------- Developed by <NAME> ------------"""
# start find Chrome webdriver path location
class AutoBrowser:
# end find Chrome webdriver path location
def __init__(self,driver):
self.driver=driver
self.driver.get("https://www.linkedin.com") # start open an url link
self.driver.implicitly_wait(10)# start waitting until all data is loaded properlly
self.browser_settings()
self.collect_login_data()
# end of initialization panel
def browser_settings(self):
# start setup browser settings
options= webdriver.ChromeOptions()
options.add_argument('headless')
options.add_argument('window-size=500*320')
# end setup browser settings
def collect_login_data(self):
# start login to the linkedin
# start collecting login information
self.email = self.driver.find_element_by_css_selector('input[class=login-email]')
self.password = self.driver.find_element_by_css_selector('input[class=login-password]')
self.login = self.driver.find_element_by_css_selector('input[type="submit"]')
# end collecting login information
self.login_proccess()
def login_proccess(self):
# set user name and password then click login
self.email.send_keys('linkedin user email here')
self.password.send_keys('<PASSWORD>')
self.login.click()
# end login proccess into the linkedin
        # wait until all data is loaded properly
self.driver.implicitly_wait(10)
#self.goto_nav()
self.myNetwork()
def myNetwork(self):
#try:
self.driver.get("https://www.linkedin.com/sales/search/people")
self.driver.implicitly_wait(10)
self.driver.find_element_by_css_selector("div[data-test-filter-code='GE']").click()
self.driver.implicitly_wait(15)
time.sleep(15)
self.myNetwork_filter()
def myNetwork_filter(self):
current_url_=self.driver.current_url
names=[]
urls=[]
titles=[]
linkedin_profiles=[]
counter=1
while True:
            # loop while result pages remain
pr_link=[]
Geography=self.driver.page_source
for profile_link in bs(Geography,"html.parser").find_all("a",{"class":'ember-view'}):
if ("%3D%3D"in profile_link["href"]) and ("NAME_SEARCH" in profile_link["href"]):
p_link = ("https://www.linkedin.com"+profile_link['href'])
if (p_link in pr_link) or ("similar-leads" in p_link):
pass
else:
pr_link.append("https://www.linkedin.com"+profile_link['href'])
for link_ in pr_link:
self.driver.get(link_)
self.driver.implicitly_wait(15)
time.sleep(5) # via this goto profiles of every persons
                # do something on each user's profile or scrape data from it
try:
name=bs(self.driver.page_source,"lxml").find("span",{"class":"profile-topcard-person-entity__name"}).text
names.append(name)
except:
print("name problem")
names.append("Not Found")
try:
title_with_company=bs(self.driver.page_source,"lxml").find("dd",{"class":"mt2"}).text
titles.append(title_with_company)
except:
titles.append("not found")
try:
website=bs(self.driver.page_source,"lxml").find("dd",{"class":"mv2 nowrap-ellipsis mv2 Sans-14px-black-60%"})
urls.append(website.text)
except:
urls.append("not found")
try:
linkedin_pro = bs(self.driver.page_source,"lxml").find("a",{"class":"view-linkedin profile-topcard-actions__overflow-item link-without-visited-state Sans-14px-black-90%-bold block p2"})
linkedin_profiles.append(linkedin_pro)
except:
linkedin_profiles.append("not found")
print("-----------------------------")
try:
self.driver.get(current_url_)
self.driver.implicitly_wait(15)
self.driver.find_element_by_css_selector("[class=search-results__pagination-next-button]").click()
time.sleep(10)
current_url_ = self.driver.current_url
print("goto next page")
except:
data_list={"names":names,"titles":titles,"urls":urls,"linkedin profiles":linkedin_profiles}
writer = pd.ExcelWriter('linkedin_leads.xlsx', engine='xlsxwriter')
dataframe= pd.DataFrame(data_list)
self.driver.quit()
sys.exit()
pr_link=[]
if counter==10:
break
counter+=1
data_list={"names":names,"titles":titles,"urls":urls,"linkedin profiles":linkedin_profiles}
writer = | pd.ExcelWriter('linkedin_leads.xlsx', engine='xlsxwriter') | pandas.ExcelWriter |
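# Hedged sketch (an assumption - the row above is cut off at the pd.ExcelWriter call):
# a dict of equal-length lists like data_list is typically written to Excel as below.
# The file name mirrors the one used above; the xlsxwriter package must be installed.
import pandas as pd

demo_rows = {"names": ["a"], "titles": ["t"], "urls": ["u"], "linkedin profiles": ["p"]}
with pd.ExcelWriter('linkedin_leads.xlsx', engine='xlsxwriter') as demo_writer:
    pd.DataFrame(demo_rows).to_excel(demo_writer, sheet_name='leads', index=False)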
from database.database import db
from database.tables.price import StockPrice
from tqdm import tqdm
import datetime as dt
import numpy as np
from numpy.linalg import inv
import pandas as pd
import scipy
import pickle
from pandas_datareader import data
class Black_Litterman(object):
def __init__(self, selected_tickers):
self.selected_tickers = selected_tickers
def read_stock_file(self, tick):
# Use the Flask-SQLAlchemy to query our data from database
stock_data = StockPrice.find_all_by_query(comp=tick)
date_ = []
high = []
low = []
open_ = []
adj_close = []
vol = []
# Store/Split the data into train & test dataframe
for row in stock_data:
date = dt.datetime.strptime(str(row.date), '%Y-%m-%d')
date_.append(date)
high.append(row.high)
low.append(row.low)
open_.append(row.open_)
adj_close.append(row.adj_close)
vol.append(row.vol)
df = pd.DataFrame({
'date': date_,
'high': high,
'low': low,
'open': open_,
'adj_close': adj_close,
'vol': vol
})
df.set_index('date', inplace=True)
# split dataframe into train & test part
train_df, test_df = df['2012-01-01': '2016-12-31'], df['2017-01-01': '2020-06-30']
self.test_df = test_df
return train_df, test_df
def prepare_input(self):
# 1. Collect user selected tickers' stock price
all_data_df = pd.DataFrame({})
time_step = 180
for ticker in self.selected_tickers:
train_df, test_df = self.read_stock_file(ticker)
# axis=0 -> combine vertically
dataset_total = pd.concat([train_df, test_df], axis=0)
inputs = dataset_total[len(dataset_total)-len(test_df)-time_step:]['adj_close']
all_data_df[ticker] = inputs
# 2. Prepare main markowitz inputs
# 2-1. price_df -> price_list
prices_list = []
for i in range(time_step, len(all_data_df)):
price_t = all_data_df[i-time_step:i].T
prices_list.append(price_t)
# 2-2. get market capitalization
end = dt.datetime(2016, 12, 31)
market_caps = list(data.get_quote_yahoo(self.selected_tickers, end)['marketCap'])
return prices_list, market_caps
def assets_historical_returns_covariances(self, prices_list):
all_exp_returns = []
all_covars = []
for prices in prices_list:
prices = np.array(prices)
rows, cols = prices.shape
returns = np.empty([rows, cols - 1])
for r in range(rows):
for c in range(cols-1):
p0, p1 = prices[r, c], prices[r, c+1]
returns[r, c] = (p1/p0) - 1
# calculate returns
exp_returns = np.array([])
for r in range(rows):
exp_returns = np.append(exp_returns, np.mean(returns[r]))
# calculate covariances
covars = np.cov(returns)
# annualize returns, covariances
exp_returns = (1 + exp_returns) ** (365.25) - 1
covars = covars * (365.25)
all_exp_returns.append(exp_returns)
all_covars.append(covars)
return all_exp_returns, all_covars
def portfolioMean(self, W, R):
return sum(R * W)
def portfolioVar(self, W, C):
return np.dot(np.dot(W, C), W)
def portfolioMeanVar(self, W, R, C):
return self.portfolioMean(W, R), self.portfolioVar(W, C)
def solve_weights(self, R, C, rf):
def fitness(W, R, C, rf):
mean, var = self.portfolioMeanVar(W, R, C)
sharp_ratio = (mean - rf) / np.sqrt(var) # sharp ratio
return 1 / sharp_ratio
n = len(R)
W = np.ones([n]) / n
b_ = [(0., 1.) for i in range(n)]
c_ = ({'type': 'eq', 'fun': lambda W: sum(W) - 1.})
optimized = scipy.optimize.minimize(fitness, W, (R, C, rf), method='SLSQP', constraints=c_, bounds=b_)
return optimized.x
def calculate_reverse_pi(self, W, all_R, all_C):
reverse_pi = []
rf = 0.015
for i in range(len(all_R)):
mean, var = self.portfolioMeanVar(W, all_R[i], all_C[i])
lmb = (mean - rf) / var # Calculate risk aversion
temp = np.dot(lmb, all_C[i])
Pi = np.dot(temp, W)
reverse_pi.append(Pi)
return reverse_pi
def get_all_weights(self):
prices_list, W = self.prepare_input() # W: market capitalization
all_R, all_C = self.assets_historical_returns_covariances(prices_list)
BL_reverse_pi = self.calculate_reverse_pi(W, all_R, all_C)
with open('./model/predict_dict_1.pkl', 'rb') as f:
all_predicted_Q_1 = pickle.load(f)
with open('./model/predict_dict_2.pkl', 'rb') as f:
all_predicted_Q_2 = pickle.load(f)
with open('./model/predict_dict_3.pkl', 'rb') as f:
all_predicted_Q_3 = pickle.load(f)
# get predicted return
all_predicted_return = pd.DataFrame({})
for comp in self.selected_tickers:
if comp in all_predicted_Q_1.keys():
comp_return = []
comp_predicted_Q = all_predicted_Q_1[comp][0]
for i in range(1, len(comp_predicted_Q)):
curr = comp_predicted_Q[i]
prev = comp_predicted_Q[i-1]
comp_return.append(np.log(curr) - np.log(prev))
all_predicted_return[comp] = comp_return
elif comp in all_predicted_Q_2.keys():
comp_return = []
comp_predicted_Q = all_predicted_Q_2[comp][0]
for i in range(1, len(comp_predicted_Q)):
curr = comp_predicted_Q[i]
prev = comp_predicted_Q[i-1]
comp_return.append(np.log(curr) - np.log(prev))
all_predicted_return[comp] = comp_return
else:
comp_return = []
comp_predicted_Q = all_predicted_Q_3[comp][0]
for i in range(1, len(comp_predicted_Q)):
curr = comp_predicted_Q[i]
prev = comp_predicted_Q[i-1]
comp_return.append(np.log(curr) - np.log(prev))
all_predicted_return[comp] = comp_return
all_Q_hat_GRU = [i.reshape(-1, 1) for i in all_predicted_return.values]
BL_reverse_pi = BL_reverse_pi[1:]
all_R = all_R[1:]
all_C = all_C[1:]
# get black-litterman weights
all_weights = []
for i in tqdm(range(len(all_R))):
tau = 0.025
rf = 0.015
P = np.identity(len(self.selected_tickers))
Pi = BL_reverse_pi[i]
C = all_C[i]
R = all_R[i]
Q = all_Q_hat_GRU[i]
# Calculate omega - uncertainty matrix about views
omega = np.dot(np.dot(np.dot(tau, P), C), np.transpose(P))
# Calculate equilibrium excess returns with views incorporated
sub_a = inv(np.dot(tau, C))
sub_b = np.dot(np.dot(np.transpose(P), inv(omega)), P)
sub_c = np.dot(inv(np.dot(tau, C)), Pi).reshape(-1, 1)
sub_d = np.dot(np.dot(np.transpose(P), inv(omega)), Q)
Pi_adj = np.dot(inv(sub_a + sub_b), (sub_c + sub_d)).squeeze()
weight = self.solve_weights(Pi_adj+rf, C, rf)
all_weights.append(weight)
self.weights = all_weights
weights = np.clip(np.around(np.array(all_weights) * 100, 2), 0, 100)
transposed_weights = weights.transpose().tolist()
return transposed_weights
def get_backtest_result(self):
# get testing section stock price data
log_return_df = | pd.DataFrame({}) | pandas.DataFrame |
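# Hedged toy example (made-up numbers, not outputs of the class above): the posterior
# returns computed in get_all_weights() follow the standard Black-Litterman blend
#   Pi_adj = [(tau*C)^-1 + P' Omega^-1 P]^-1 [(tau*C)^-1 Pi + P' Omega^-1 Q]
import numpy as np
from numpy.linalg import inv

tau = 0.025
C = np.array([[0.04, 0.01], [0.01, 0.09]])   # toy covariance matrix
Pi = np.array([0.05, 0.07])                  # equilibrium excess returns
P = np.identity(2)                           # absolute views on both assets
Q = np.array([[0.06], [0.05]])               # the views themselves
omega = tau * P @ C @ P.T
lhs = inv(tau * C) + P.T @ inv(omega) @ P
rhs = (inv(tau * C) @ Pi).reshape(-1, 1) + P.T @ inv(omega) @ Q
Pi_adj = (inv(lhs) @ rhs).squeeze()          # blended expected returns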
import pytest
from mapping import mappings
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
from pandas.tseries.offsets import BDay
@pytest.fixture
def dates():
return pd.Series(
[TS('2016-10-20'), TS('2016-11-21'), TS('2016-12-20')],
index=['CLX16', 'CLZ16', 'CLF17']
)
def test_not_in_roll_one_generic_static_roller(dates):
dt = dates.iloc[0]
contract_dates = dates.iloc[0:2]
sd, ed = (dt + BDay(-8), dt + BDay(-7))
timestamps = pd.date_range(sd, ed, freq='b')
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
trans = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
midx = pd.MultiIndex.from_product([timestamps, ['CLX16']])
midx.names = ['date', 'contract']
cols = pd.Index([0], name='generic')
wts_exp = pd.DataFrame([1.0, 1.0], index=midx, columns=cols)
# with DatetimeIndex
wts = mappings.roller(timestamps, contract_dates,
mappings.static_transition, transition=trans)
assert_frame_equal(wts, wts_exp)
# with tuple
wts = mappings.roller(tuple(timestamps), contract_dates,
mappings.static_transition, transition=trans)
assert_frame_equal(wts, wts_exp)
def test_not_in_roll_one_generic_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_non_numeric_column_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([["CL1"], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [("CL1", 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_finished_roll_pre_expiry_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-2)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-9, -8]
transition = pd.DataFrame([[1.0, 0.0], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLZ16', 1.0, ts)]
assert wts == wts_exp
def test_not_in_roll_one_generic_filtering_front_contracts_static_transition(dates): # NOQA
contract_dates = dates.iloc[0:2]
ts = dates.iloc[1] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLZ16', 1.0, ts)]
assert wts == wts_exp
def test_roll_with_holiday(dates):
contract_dates = dates.iloc[-2:]
ts = pd.Timestamp("2016-11-17")
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
holidays = [np.datetime64("2016-11-18")]
    # the holiday moves the roll schedule up one business day, since Friday is
    # excluded as a trading day
wts = mappings.static_transition(ts, contract_dates, transition,
holidays)
wts_exp = [(0, 'CLZ16', 0.5, ts), (0, 'CLF17', 0.5, ts)]
assert wts == wts_exp
def test_not_in_roll_one_generic_zero_weight_back_contract_no_contract_static_transition(dates): # NOQA
contract_dates = dates.iloc[0:1]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_aggregate_weights():
ts = pd.Timestamp("2015-01-01")
wts_list = [(0, 'CLX16', 1.0, ts), (1, 'CLZ16', 1.0, ts)]
wts = mappings.aggregate_weights(wts_list)
idx = pd.MultiIndex.from_product([[ts], ["CLX16", "CLZ16"]],
names=["date", "contract"])
cols = pd.Index([0, 1], name="generic")
wts_exp = pd.DataFrame([[1.0, 0], [0, 1.0]], index=idx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_aggregate_weights_drop_date():
ts = pd.Timestamp("2015-01-01")
wts_list = [(0, 'CLX16', 1.0, ts), (1, 'CLZ16', 1.0, ts)]
wts = mappings.aggregate_weights(wts_list, drop_date=True)
idx = pd.Index(["CLX16", "CLZ16"], name="contract")
cols = pd.Index([0, 1], name="generic")
wts_exp = pd.DataFrame([[1.0, 0], [0, 1.0]], index=idx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_static_bad_transitions(dates):
contract_dates = dates.iloc[[0]]
ts = dates.iloc[0] + | BDay(-8) | pandas.tseries.offsets.BDay |
import os
import sys
import pandas as pd
files = []
path_files = ""
list_diff_ocu = []
count_two = 0
def more_events(val, file_nine, lane_dupli=False):
global count_two
if lane_dupli:
count_occurrences = 0
for i, r in val.iterrows():
count_occurrences += (r.e - r.d) + 1
count_two += count_occurrences
occurrences_found = int(open(path_files + file_nine, 'r').read()
.count(val.head(1).iloc[0]['combined']))
difference = occurrences_found - count_occurrences
if difference != 0:
list_diff_ocu.append(val.head(1).iloc[0]['combined'] + ": "
+ str(difference))
else:
count_occurrences = (val.iloc[0]['e'].astype(int) -
val.iloc[0]['d'].astype(int)) + 1
count_two += count_occurrences
occurrences_found = int(open(path_files + file_nine, 'r').read()
.count(val.iloc[0]['combined']))
difference = occurrences_found - count_occurrences
if difference != 0:
list_diff_ocu.append(val.iloc[0]['combined'] + ": "
+ str(difference))
def open_files():
global path_files
#path_files = "/home/geezylucas/Documentos/Python3/floorfiles/"
path_files = sys.argv[1] + '\\'
list_files = [f for f in os.listdir(path_files)]
if len(list_files) < 2:
raise ValueError(
'should be only one 9A or 2A file in the current directory')
global files
files = list(filter(lambda x: x.endswith("2A")
or x.endswith("9A"), list_files))
def calculate(file_two, file_nine):
my_data = pd.read_csv(path_files + file_two, decimal=",", skiprows=1,
usecols=(4, 5, 6, 8, 9), names=["a", "b", "c", "d", "e"])
df = pd.DataFrame(my_data)
indexs = df[(df['d'].astype(int) == 0) & (df['e'].astype(int) == 0)].index
df.drop(index=indexs, inplace=True)
df["combined"] = df["a"].astype(str) + "," \
+ df["b"].astype(str) + "," + df["c"]
df_occur = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import assert_frame_equal
from woodwork.logical_types import (
Boolean,
Categorical,
Double,
Integer,
NaturalLanguage
)
from evalml.pipelines.components import Imputer
@pytest.fixture
def imputer_test_data():
return pd.DataFrame({
"categorical col": pd.Series(["zero", "one", "two", "zero", "three"], dtype='category'),
"int col": [0, 1, 2, 0, 3],
"object col": ["b", "b", "a", "c", "d"],
"float col": [0.0, 1.0, 0.0, -2.0, 5.],
"bool col": [True, False, False, True, True],
"categorical with nan": pd.Series([np.nan, "1", np.nan, "0", "3"], dtype='category'),
"int with nan": [np.nan, 1, 0, 0, 1],
"float with nan": [0.0, 1.0, np.nan, -1.0, 0.],
"object with nan": ["b", "b", np.nan, "c", np.nan],
"bool col with nan": pd.Series([True, np.nan, False, np.nan, True], dtype='boolean'),
"all nan": [np.nan, np.nan, np.nan, np.nan, np.nan],
"all nan cat": pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan], dtype='category')
})
def test_invalid_strategy_parameters():
with pytest.raises(ValueError, match="Valid impute strategies are"):
Imputer(numeric_impute_strategy="not a valid strategy")
with pytest.raises(ValueError, match="Valid categorical impute strategies are"):
Imputer(categorical_impute_strategy="mean")
def test_imputer_default_parameters():
imputer = Imputer()
expected_parameters = {
'categorical_impute_strategy': 'most_frequent',
'numeric_impute_strategy': 'mean',
'categorical_fill_value': None,
'numeric_fill_value': None
}
assert imputer.parameters == expected_parameters
@pytest.mark.parametrize("categorical_impute_strategy", ["most_frequent", "constant"])
@pytest.mark.parametrize("numeric_impute_strategy", ["mean", "median", "most_frequent", "constant"])
def test_imputer_init(categorical_impute_strategy, numeric_impute_strategy):
imputer = Imputer(categorical_impute_strategy=categorical_impute_strategy,
numeric_impute_strategy=numeric_impute_strategy,
categorical_fill_value="str_fill_value",
numeric_fill_value=-1)
expected_parameters = {
'categorical_impute_strategy': categorical_impute_strategy,
'numeric_impute_strategy': numeric_impute_strategy,
'categorical_fill_value': 'str_fill_value',
'numeric_fill_value': -1
}
expected_hyperparameters = {
"categorical_impute_strategy": ["most_frequent"],
"numeric_impute_strategy": ["mean", "median", "most_frequent"]
}
assert imputer.name == "Imputer"
assert imputer.parameters == expected_parameters
assert imputer.hyperparameter_ranges == expected_hyperparameters
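# Hedged usage sketch (an assumption based on the usual evalml component interface,
# which is not shown in the visible part of this file):
#   imputer = Imputer(numeric_impute_strategy="median")
#   imputer.fit(X_train, y_train)
#   X_imputed = imputer.transform(X_train, y_train)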
def test_numeric_only_input(imputer_test_data):
X = imputer_test_data[["int col", "float col",
"int with nan", "float with nan", "all nan"]]
y = | pd.Series([0, 0, 1, 0, 1]) | pandas.Series |
# -*- coding: utf-8 -*-
"""
author: zengbin93
email: <EMAIL>
create_dt: 2021/12/13 21:43
describe: 概念、行业、指数等股票聚类板块感应器
三个问题:
1)如何找出引领大盘的概念、行业、指数
2)板块内股票相比于板块走势,划分强中弱
3)根据指数强弱进行账户总仓位控制
"""
import os
import traceback
import inspect
from datetime import timedelta, datetime
import pandas as pd
from tqdm import tqdm
from typing import Callable
from czsc import envs
from czsc.utils import WordWriter, io
from czsc.data.ts_cache import TsDataCache, Freq
from czsc.sensors.utils import get_index_beta, generate_signals, turn_over_rate, max_draw_down
class ThsConceptsSensor:
"""
    Input: list of Tonghuashun (THS) concepts; daily bars of the THS concept indices
    Output: the strong THS concepts for every trading day
"""
def __init__(self,
results_path: str,
sdt: str,
edt: str,
dc: TsDataCache,
get_signals: Callable,
get_event: Callable,
ths_index_type='N'):
"""
        :param results_path: path where the results are saved
        :param sdt: start date
        :param edt: end date
        :param dc: data cache object
        :param get_signals: signal-computation function
        :param get_event: event-definition function
        :param ths_index_type: THS index type; N - concept index, I - industry index, S - THS special index
"""
self.name = self.__class__.__name__
self.dc = dc
self.get_signals = get_signals
self.get_event = get_event
self.event = get_event()
self.base_freq = Freq.D.value
self.freqs = [Freq.W.value, Freq.M.value]
self.dc = dc
self.ths_index_type = ths_index_type
self.verbose = envs.get_verbose()
self.cache = dict()
self.results_path = results_path
os.makedirs(self.results_path, exist_ok=True)
self.sdt = sdt
self.edt = edt
self.file_docx = os.path.join(results_path, f'{self.event.name}_{self.ths_index_type}_{sdt}_{edt}.docx')
writer = WordWriter(self.file_docx)
if not os.path.exists(self.file_docx):
writer.add_title(f"同花顺指数({self.ths_index_type})感应器报告")
writer.add_page_break()
writer.add_heading(f"{datetime.now().strftime('%Y-%m-%d %H:%M')} {self.event.name}", level=1)
writer.add_heading("参数配置", level=2)
writer.add_paragraph(f"测试方法描述:{self.event.name}")
writer.add_paragraph(f"测试起止日期:{sdt} ~ {edt}")
writer.add_paragraph(f"信号计算函数:\n{inspect.getsource(self.get_signals)}")
writer.add_paragraph(f"事件具体描述:\n{inspect.getsource(self.get_event)}")
writer.save()
self.writer = writer
self.file_ssd = os.path.join(results_path, f'ths_all_strong_days_{self.ths_index_type}.pkl')
if os.path.exists(self.file_ssd):
self.ssd, self.cache = io.read_pkl(self.file_ssd)
else:
self.ssd = self.get_all_strong_days()
io.save_pkl([self.ssd, self.cache], self.file_ssd)
self.betas = get_index_beta(dc, sdt, edt, freq='D',
indices=['000001.SH', '000016.SH', '000905.SH',
'000300.SH', '399001.SZ', '399006.SZ'])
def get_strong_days(self, ts_code, name):
"""获取单个概念的强势日期
:param ts_code: 同花顺概念代码
:param name: 同花顺概念名称
:return:
"""
dc = self.dc
event = self.event
sdt = self.sdt
edt = self.edt
start_date = pd.to_datetime(sdt) - timedelta(days=3000)
bars = dc.ths_daily(ts_code=ts_code, start_date=start_date, end_date=edt, raw_bar=True)
n_bars = dc.ths_daily(ts_code=ts_code, start_date=start_date, end_date=edt, raw_bar=False)
nb_dicts = {row['trade_date'].strftime("%Y%m%d"): row for row in n_bars.to_dict("records")}
def __strategy(symbol):
return {"symbol": symbol, "base_freq": '日线', "freqs": ['周线', '月线'],
"get_signals": self.get_signals}
signals = generate_signals(bars, sdt, __strategy)
results = []
for s in signals:
m, f = event.is_match(s)
if m:
res = {'ts_code': ts_code, 'name': name, 'reason': f}
nb_info = nb_dicts.get(s['dt'].strftime("%Y%m%d"), None)
if not nb_info:
print(f"not match nb info: {nb_info}")
res.update(nb_info)
results.append(res)
df_res = | pd.DataFrame(results) | pandas.DataFrame |
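# Hedged usage sketch (placeholders only; building the TsDataCache and the signal/event
# callables is project-specific and not shown here, so they are left abstract):
#   dc = ...  # a czsc TsDataCache instance
#   sensor = ThsConceptsSensor(results_path='results/ths', sdt='20200101', edt='20211213',
#                              dc=dc, get_signals=my_get_signals, get_event=my_get_event,
#                              ths_index_type='N')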
__all__ = ['class_error', 'groupScatter', 'linear_spline', 'lm', 'mae',
'plotPrediction', 'plot_hist', 'r2', 'statx', 'winsorize',]
import riptable as rt
import numpy as np
from .rt_enum import TypeRegister
from .rt_fastarray import FastArray
from .rt_numpy import zeros
# extra classes
import pandas as pd
from bokeh.plotting import output_notebook, figure, show
from bokeh.models import Label
#TODO: Organize the functions in here better
#TODO: Add documentation
#TODO: Replace pandas dependence with display util
def statx(X):
if not isinstance(X, np.ndarray):
X = np.array(X)
pVals = [0.1, 1, 10, 25, 50, 75, 90, 99, 99.9]
pValNames = ['min', '0.1%', '1%', '10%', '25%', '50%', '75%', '90%','99%','99.9%' , 'max' , 'Mean', 'StdDev', 'Count', 'NaN_Count']
filt = np.isfinite(X)
X_sub = X[filt]
vals = np.percentile(X_sub,pVals)
vals =np.insert(vals,0,np.min(X_sub))
vals =np.append(vals,np.max(X_sub))
vals = np.append(vals,np.mean(X_sub))
vals = np.append(vals,np.std(X_sub))
validcount = np.sum(filt)
# plain count
vals = np.append(vals, X.size)
#nancount
vals = np.append(vals, np.sum(np.isnan(X)))
out = pd.DataFrame({'Stat' : pValNames ,'Value' : vals})
return out
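# Hedged usage sketch (random data, not from the original source):
#   rng = np.random.default_rng(0)
#   print(statx(rng.normal(size=1000)))   # DataFrame of percentiles, mean, stddev and counts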
#NOTE: people might prefer name clip/bound?
def winsorize(Y, lb, ub):
out = np.maximum(np.minimum(Y, ub), lb)
return out
def plot_hist(Y, bins):
df = | pd.DataFrame({'Y': Y}) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pytest
from hypothesis import assume, given
from pandas.testing import assert_frame_equal
from janitor.testing_utils.strategies import (
categoricaldf_strategy,
df_strategy,
)
def test_case_when_1():
"""Test case_when function."""
df = pd.DataFrame(
{
"a": [0, 0, 1, 2, "hi"],
"b": [0, 3, 4, 5, "bye"],
"c": [6, 7, 8, 9, "wait"],
}
)
expected = pd.DataFrame(
{
"a": [0, 0, 1, 2, "hi"],
"b": [0, 3, 4, 5, "bye"],
"c": [6, 7, 8, 9, "wait"],
"value": ["x", 0, 8, 9, "hi"],
}
)
result = df.case_when(
((df.a == 0) & (df.b != 0)) | (df.c == "wait"),
df.a,
(df.b == 0) & (df.a == 0),
"x",
df.c,
column_name="value",
)
assert_frame_equal(result, expected)
def test_len_args(dataframe):
"""Raise ValueError if `args` length is less than 3."""
with pytest.raises(ValueError, match="three arguments are required"):
dataframe.case_when(dataframe.a < 10, "less_than_10", column_name="a")
def test_args_even(dataframe):
"""Raise ValueError if `args` length is even."""
with pytest.raises(ValueError, match="`default` argument is missing"):
dataframe.case_when(
dataframe.a < 10,
"less_than_10",
dataframe.a == 5,
"five",
column_name="a",
)
def test_column_name(dataframe):
"""Raise TypeError if `column_name` is not a string."""
with pytest.raises(TypeError):
dataframe.case_when(
dataframe.a < 10,
"less_than_10",
dataframe.a,
column_name=("a",),
)
@given(df=df_strategy())
def test_default_ndim(df):
"""Raise ValueError if `default` ndim > 1."""
with pytest.raises(ValueError):
df.case_when(df.a < 10, "less_than_10", df, column_name="a")
@given(df=df_strategy())
def test_default_length(df):
"""Raise ValueError if `default` length != len(df)."""
assume(len(df) > 10)
with pytest.raises(
ValueError,
match=(
"length of the `default` argument should be equal to the length of"
" the DataFrame"
),
):
df.case_when(
df.a < 10,
"less_than_10",
df.loc[:5, "a"],
column_name="a",
)
@given(df=df_strategy())
def test_error_multiple_conditions(df):
"""Raise ValueError for multiple conditions."""
with pytest.raises(ValueError):
df.case_when(df.a < 10, "baby", df.a + 5, "kid", df.a, column_name="a")
@given(df=df_strategy())
def test_case_when_condition_callable(df):
"""Test case_when for callable."""
result = df.case_when(
lambda df: df.a < 10, "baby", "bleh", column_name="bleh"
)
expected = np.where(df.a < 10, "baby", "bleh")
expected = df.assign(bleh=expected)
assert_frame_equal(result, expected)
@given(df=df_strategy())
def test_case_when_condition_eval(df):
"""Test case_when for callable."""
result = df.case_when("a < 10", "baby", "bleh", column_name="bleh")
expected = np.where(df.a < 10, "baby", "bleh")
expected = df.assign(bleh=expected)
assert_frame_equal(result, expected)
@given(df=df_strategy())
def test_case_when_replacement_callable(df):
"""Test case_when for callable."""
result = df.case_when(
"a > 10", lambda df: df.a + 10, lambda df: df.a * 2, column_name="bleh"
)
expected = np.where(df.a > 10, df.a + 10, df.a * 2)
expected = df.assign(bleh=expected)
assert_frame_equal(result, expected)
@given(df=categoricaldf_strategy())
def test_case_when_default_list(df):
"""
Test case_when for scenarios where `default` is list-like,
but not a Pandas or numpy object.
"""
default = range(len(df))
result = df.case_when(
"numbers > 1", lambda df: df.numbers + 10, default, column_name="bleh"
)
expected = np.where(df.numbers > 1, df.numbers + 10, default)
expected = df.assign(bleh=expected)
assert_frame_equal(result, expected)
@given(df=categoricaldf_strategy())
def test_case_when_default_index(df):
"""Test case_when for scenarios where `default` is an index."""
default = range(len(df))
result = df.case_when(
"numbers > 1",
lambda df: df.numbers + 10,
| pd.Index(default) | pandas.Index |
import pysam
import pandas as pd
import numpy as np
import re
import os
import sys
import collections
import scipy
from scipy import stats
import statsmodels
from statsmodels.stats.multitest import fdrcorrection
try:
from . import global_para
except ImportError:
import global_para
try:
from .consensus_seq import *
except ImportError:
from consensus_seq import *
try:
from .math_stat import *
except ImportError:
from math_stat import *
def f_0cdna():
    # if no cDNA is detected, report a message to the user and exit
global_para.logger.info("Program finished successfully")
global_para.logger.info("No cDNA detected. Exiting.")
exit(0)
def f_if_0cdna(obj):
if len(obj) == 0:
f_0cdna()
def f_warning_merge_region(df_region):
# df_region = df_region_stat_bed_merge.copy()
df_region['diff'] =abs( df_region.start - df_region.end)
df_region_diff = df_region[df_region['diff']>10000]
del df_region_diff['diff']
if len(df_region_diff)>0:
global_para.logger.warning("%d extreme long regions are detected (>10 kb), please check results carefully"%len(df_region_diff))
global_para.logger.info(df_region_diff)
def read_gene_model(gtf_gene_unique_file):
# load gene model into a dataframe
print('Loading gene model table')
dict_type = {
"seqname":"str",
"start":"int64",
"end":"int64",
"gene_id":"str",
"gene_name":"str",
"transcript_id":"str",
"exon_flank_start20":"str",
"exon_flank_end20":"str",
"is_exon_boundary_start":"str",
"is_exon_boundary_end":"str",
"exon_boundary_start_nearseq20":"str",
"exon_boundary_end_nearseq20":"str"}
df_gene_exon_unique = pd.read_csv(gtf_gene_unique_file, sep = '\t',header = 0)
df_gene_exon_unique = df_gene_exon_unique.astype(dict_type)
# convert all sequences to uppercase
df_gene_exon_unique['exon_flank_start20'] = df_gene_exon_unique['exon_flank_start20'].str.upper()
df_gene_exon_unique['exon_flank_end20'] = df_gene_exon_unique['exon_flank_end20'].str.upper()
df_gene_exon_unique['exon_boundary_start_nearseq20'] = df_gene_exon_unique['exon_boundary_start_nearseq20'].str.upper()
df_gene_exon_unique['exon_boundary_end_nearseq20'] = df_gene_exon_unique['exon_boundary_end_nearseq20'].str.upper()
df_gene_exon_unique = df_gene_exon_unique.fillna('')
print('Loaded %d exons\n'%(len(df_gene_exon_unique)))
return(df_gene_exon_unique)
def check_bam_index(genome_bam_file):
    ## check the index of the bam file; if missing, generate one.
print('Checking index of input bam file')
if os.path.exists(genome_bam_file + '.bai') or os.path.exists(re.sub('bam$','bai',genome_bam_file)):
print('Index file exists')
else:
print('file is not indexed, now generating index')
pysam.index(genome_bam_file)
print('Index file created\n')
return
def f_overlap_reference(genome_bam_file,df_gene_exon):
    # find the reference (chromosome) names shared by the input bam and the gene model
bam_genome = pysam.AlignmentFile(genome_bam_file,'rb')
reference_bam = bam_genome.references
bam_genome.close()
reference_exon = df_gene_exon.seqname.unique().tolist()
overlap_reference = [x for x in reference_bam if x in reference_exon]
if len(overlap_reference)==0: global_para.logger.error('chromosome names are not matched between gene model and bam file'); exit(1)
df_gene_exon = df_gene_exon.query('seqname in @overlap_reference')
return df_gene_exon
def f_close_exon_merge(df_transcript_exon):
df_transcript_exon = df_transcript_exon.sort_values(['transcript_id','start'])
df_transcript_exon = df_transcript_exon.reset_index(drop = True)
df_transcript_exon['start_next'] = df_transcript_exon.groupby(['transcript_id'])['start'].shift(-1)
df_transcript_exon['dis_exon'] = abs(df_transcript_exon['end'] - df_transcript_exon['start_next'])
df_transcript_exon_close = df_transcript_exon.query('dis_exon<@global_para.exon_distance')
list_transcript = df_transcript_exon_close.transcript_id.unique().tolist()
if len(list_transcript) >0:
list_df_transcript_merge = []
for transcript_id in list_transcript:
sub_df = df_transcript_exon.query('transcript_id==@transcript_id')
sub_df_new = f_df_1transcript_merge(sub_df)
list_df_transcript_merge.append(sub_df_new)
df_transcript_exon_close_new = pd.concat(list_df_transcript_merge)
df_transcript_exon_noclose = df_transcript_exon.query('transcript_id not in @list_transcript')
        df_transcript_exon_new = pd.concat([df_transcript_exon_close_new, df_transcript_exon_noclose])  # api: pandas.concat
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import matplotlib as mat
import numpy as np
import pandas as pd
from matplotlib.axes._base import _process_plot_format
from pandas.core.dtypes.inference import is_list_like
from pandas.io.formats.printing import pprint_thing
from pyspark.pandas.plot import (
TopNPlotBase,
SampledPlotBase,
HistogramPlotBase,
BoxPlotBase,
unsupported_function,
KdePlotBase,
)
if LooseVersion(pd.__version__) < LooseVersion("0.25"):
from pandas.plotting._core import (
_all_kinds,
BarPlot as PandasBarPlot,
BoxPlot as PandasBoxPlot,
HistPlot as PandasHistPlot,
MPLPlot as PandasMPLPlot,
PiePlot as PandasPiePlot,
AreaPlot as PandasAreaPlot,
LinePlot as PandasLinePlot,
BarhPlot as PandasBarhPlot,
ScatterPlot as PandasScatterPlot,
KdePlot as PandasKdePlot,
)
else:
from pandas.plotting._matplotlib import (
BarPlot as PandasBarPlot,
BoxPlot as PandasBoxPlot,
HistPlot as PandasHistPlot,
PiePlot as PandasPiePlot,
AreaPlot as PandasAreaPlot,
LinePlot as PandasLinePlot,
BarhPlot as PandasBarhPlot,
ScatterPlot as PandasScatterPlot,
KdePlot as PandasKdePlot,
)
from pandas.plotting._core import PlotAccessor
from pandas.plotting._matplotlib.core import MPLPlot as PandasMPLPlot
_all_kinds = PlotAccessor._all_kinds
class PandasOnSparkBarPlot(PandasBarPlot, TopNPlotBase):
def __init__(self, data, **kwargs):
super().__init__(self.get_top_n(data), **kwargs)
def _plot(self, ax, x, y, w, start=0, log=False, **kwds):
self.set_result_text(ax)
return ax.bar(x, y, w, bottom=start, log=log, **kwds)
class PandasOnSparkBoxPlot(PandasBoxPlot, BoxPlotBase):
def boxplot(
self,
ax,
bxpstats,
notch=None,
sym=None,
vert=None,
whis=None,
positions=None,
widths=None,
patch_artist=None,
bootstrap=None,
usermedians=None,
conf_intervals=None,
meanline=None,
showmeans=None,
showcaps=None,
showbox=None,
showfliers=None,
boxprops=None,
labels=None,
flierprops=None,
medianprops=None,
meanprops=None,
capprops=None,
whiskerprops=None,
manage_ticks=None,
        # manage_xticks is for compatibility with matplotlib < 3.1.0.
        # Remove this when the minimum matplotlib version is 3.1.0.
manage_xticks=None,
autorange=False,
zorder=None,
precision=None,
):
def update_dict(dictionary, rc_name, properties):
"""Loads properties in the dictionary from rc file if not already
in the dictionary"""
rc_str = "boxplot.{0}.{1}"
if dictionary is None:
dictionary = dict()
for prop_dict in properties:
dictionary.setdefault(prop_dict, mat.rcParams[rc_str.format(rc_name, prop_dict)])
return dictionary
# Common property dictionaries loading from rc
flier_props = [
"color",
"marker",
"markerfacecolor",
"markeredgecolor",
"markersize",
"linestyle",
"linewidth",
]
default_props = ["color", "linewidth", "linestyle"]
boxprops = update_dict(boxprops, "boxprops", default_props)
whiskerprops = update_dict(whiskerprops, "whiskerprops", default_props)
capprops = update_dict(capprops, "capprops", default_props)
medianprops = update_dict(medianprops, "medianprops", default_props)
meanprops = update_dict(meanprops, "meanprops", default_props)
flierprops = update_dict(flierprops, "flierprops", flier_props)
if patch_artist:
boxprops["linestyle"] = "solid"
boxprops["edgecolor"] = boxprops.pop("color")
# if non-default sym value, put it into the flier dictionary
# the logic for providing the default symbol ('b+') now lives
# in bxp in the initial value of final_flierprops
# handle all of the `sym` related logic here so we only have to pass
# on the flierprops dict.
if sym is not None:
# no-flier case, which should really be done with
# 'showfliers=False' but none-the-less deal with it to keep back
# compatibility
if sym == "":
# blow away existing dict and make one for invisible markers
flierprops = dict(linestyle="none", marker="", color="none")
# turn the fliers off just to be safe
showfliers = False
# now process the symbol string
else:
# process the symbol string
# discarded linestyle
_, marker, color = _process_plot_format(sym)
# if we have a marker, use it
if marker is not None:
flierprops["marker"] = marker
# if we have a color, use it
if color is not None:
# assume that if color is passed in the user want
# filled symbol, if the users want more control use
# flierprops
flierprops["color"] = color
flierprops["markerfacecolor"] = color
flierprops["markeredgecolor"] = color
# replace medians if necessary:
if usermedians is not None:
if len(np.ravel(usermedians)) != len(bxpstats) or np.shape(usermedians)[0] != len(
bxpstats
):
raise ValueError("usermedians length not compatible with x")
else:
# reassign medians as necessary
for stats, med in zip(bxpstats, usermedians):
if med is not None:
stats["med"] = med
if conf_intervals is not None:
if np.shape(conf_intervals)[0] != len(bxpstats):
err_mess = "conf_intervals length not compatible with x"
raise ValueError(err_mess)
else:
for stats, ci in zip(bxpstats, conf_intervals):
if ci is not None:
if len(ci) != 2:
raise ValueError("each confidence interval must " "have two values")
else:
if ci[0] is not None:
stats["cilo"] = ci[0]
if ci[1] is not None:
stats["cihi"] = ci[1]
should_manage_ticks = True
if manage_xticks is not None:
should_manage_ticks = manage_xticks
if manage_ticks is not None:
should_manage_ticks = manage_ticks
if LooseVersion(mat.__version__) < LooseVersion("3.1.0"):
extra_args = {"manage_xticks": should_manage_ticks}
else:
extra_args = {"manage_ticks": should_manage_ticks}
artists = ax.bxp(
bxpstats,
positions=positions,
widths=widths,
vert=vert,
patch_artist=patch_artist,
shownotches=notch,
showmeans=showmeans,
showcaps=showcaps,
showbox=showbox,
boxprops=boxprops,
flierprops=flierprops,
medianprops=medianprops,
meanprops=meanprops,
meanline=meanline,
showfliers=showfliers,
capprops=capprops,
whiskerprops=whiskerprops,
zorder=zorder,
**extra_args,
)
return artists
def _plot(self, ax, bxpstats, column_num=None, return_type="axes", **kwds):
bp = self.boxplot(ax, bxpstats, **kwds)
if return_type == "dict":
return bp, bp
elif return_type == "both":
return self.BP(ax=ax, lines=bp), bp
else:
return ax, bp
def _compute_plot_data(self):
colname = self.data.name
spark_column_name = self.data._internal.spark_column_name_for(self.data._column_label)
data = self.data
# Updates all props with the rc defaults from matplotlib
self.kwds.update(PandasOnSparkBoxPlot.rc_defaults(**self.kwds))
# Gets some important kwds
showfliers = self.kwds.get("showfliers", False)
whis = self.kwds.get("whis", 1.5)
labels = self.kwds.get("labels", [colname])
# This one is pandas-on-Spark specific to control precision for approx_percentile
precision = self.kwds.get("precision", 0.01)
# # Computes mean, median, Q1 and Q3 with approx_percentile and precision
col_stats, col_fences = BoxPlotBase.compute_stats(data, spark_column_name, whis, precision)
# # Creates a column to flag rows as outliers or not
outliers = BoxPlotBase.outliers(data, spark_column_name, *col_fences)
# # Computes min and max values of non-outliers - the whiskers
whiskers = BoxPlotBase.calc_whiskers(spark_column_name, outliers)
if showfliers:
fliers = BoxPlotBase.get_fliers(spark_column_name, outliers, whiskers[0])
else:
fliers = []
# Builds bxpstats dict
stats = []
item = {
"mean": col_stats["mean"],
"med": col_stats["med"],
"q1": col_stats["q1"],
"q3": col_stats["q3"],
"whislo": whiskers[0],
"whishi": whiskers[1],
"fliers": fliers,
"label": labels[0],
}
stats.append(item)
self.data = {labels[0]: stats}
def _make_plot(self):
bxpstats = list(self.data.values())[0]
ax = self._get_ax(0)
kwds = self.kwds.copy()
for stats in bxpstats:
if len(stats["fliers"]) > 1000:
stats["fliers"] = stats["fliers"][:1000]
ax.text(
1,
1,
"showing top 1,000 fliers only",
size=6,
ha="right",
va="bottom",
transform=ax.transAxes,
)
ret, bp = self._plot(ax, bxpstats, column_num=0, return_type=self.return_type, **kwds)
self.maybe_color_bp(bp)
self._return_obj = ret
labels = [l for l, _ in self.data.items()]
labels = [pprint_thing(l) for l in labels]
if not self.use_index:
labels = [pprint_thing(key) for key in range(len(labels))]
self._set_ticklabels(ax, labels)
@staticmethod
def rc_defaults(
notch=None,
vert=None,
whis=None,
patch_artist=None,
bootstrap=None,
meanline=None,
showmeans=None,
showcaps=None,
showbox=None,
showfliers=None,
**kwargs
):
# Missing arguments default to rcParams.
if whis is None:
whis = mat.rcParams["boxplot.whiskers"]
if bootstrap is None:
bootstrap = mat.rcParams["boxplot.bootstrap"]
if notch is None:
notch = mat.rcParams["boxplot.notch"]
if vert is None:
vert = mat.rcParams["boxplot.vertical"]
if patch_artist is None:
patch_artist = mat.rcParams["boxplot.patchartist"]
if meanline is None:
meanline = mat.rcParams["boxplot.meanline"]
if showmeans is None:
showmeans = mat.rcParams["boxplot.showmeans"]
if showcaps is None:
showcaps = mat.rcParams["boxplot.showcaps"]
if showbox is None:
showbox = mat.rcParams["boxplot.showbox"]
if showfliers is None:
showfliers = mat.rcParams["boxplot.showfliers"]
return dict(
whis=whis,
bootstrap=bootstrap,
notch=notch,
vert=vert,
patch_artist=patch_artist,
meanline=meanline,
showmeans=showmeans,
showcaps=showcaps,
showbox=showbox,
showfliers=showfliers,
)
class PandasOnSparkHistPlot(PandasHistPlot, HistogramPlotBase):
def _args_adjust(self):
if is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
def _compute_plot_data(self):
self.data, self.bins = HistogramPlotBase.prepare_hist_data(self.data, self.bins)
def _make_plot(self):
# TODO: this logic is similar with KdePlot. Might have to deduplicate it.
# 'num_colors' requires to calculate `shape` which has to count all.
# Use 1 for now to save the computation.
colors = self._get_colors(num_colors=1)
stacking_id = self._get_stacking_id()
output_series = HistogramPlotBase.compute_hist(self.data, self.bins)
for (i, label), y in zip(enumerate(self.data._internal.column_labels), output_series):
ax = self._get_ax(i)
kwds = self.kwds.copy()
label = pprint_thing(label if len(label) > 1 else label[0])
kwds["label"] = label
style, kwds = self._apply_style_colors(colors, kwds, i, label)
if style is not None:
kwds["style"] = style
kwds = self._make_plot_keywords(kwds, y)
artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds)
self._add_legend_handle(artists[0], label, index=i)
@classmethod
def _plot(cls, ax, y, style=None, bins=None, bottom=0, column_num=0, stacking_id=None, **kwds):
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(bins) - 1)
base = np.zeros(len(bins) - 1)
bottom = bottom + cls._get_stacked_values(ax, stacking_id, base, kwds["label"])
# Since the counts were computed already, we use them as weights and just generate
# one entry for each bin
n, bins, patches = ax.hist(bins[:-1], bins=bins, bottom=bottom, weights=y, **kwds)
cls._update_stacker(ax, stacking_id, n)
return patches
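# A minimal sketch of the weights trick used in _plot above (illustrative numbers,
# independent of Spark): with bin edges [0, 1, 2, 3] and precomputed counts
# [5, 2, 7], calling ax.hist([0, 1, 2], bins=[0, 1, 2, 3], weights=[5, 2, 7])
# draws the same bars as histogramming the raw values, so only the per-bin counts
# ever need to leave the distributed DataFrame.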
class PandasOnSparkPiePlot(PandasPiePlot, TopNPlotBase):
def __init__(self, data, **kwargs):
super().__init__(self.get_top_n(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super()._make_plot()
class PandasOnSparkAreaPlot(PandasAreaPlot, SampledPlotBase):
def __init__(self, data, **kwargs):
super().__init__(self.get_sampled(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super()._make_plot()
class PandasOnSparkLinePlot(PandasLinePlot, SampledPlotBase):
def __init__(self, data, **kwargs):
super().__init__(self.get_sampled(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super()._make_plot()
class PandasOnSparkBarhPlot(PandasBarhPlot, TopNPlotBase):
def __init__(self, data, **kwargs):
super().__init__(self.get_top_n(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super()._make_plot()
class PandasOnSparkScatterPlot(PandasScatterPlot, TopNPlotBase):
def __init__(self, data, x, y, **kwargs):
super().__init__(self.get_top_n(data), x, y, **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super()._make_plot()
class PandasOnSparkKdePlot(PandasKdePlot, KdePlotBase):
def _compute_plot_data(self):
self.data = KdePlotBase.prepare_kde_data(self.data)
def _make_plot(self):
# 'num_colors' requires to calculate `shape` which has to count all.
# Use 1 for now to save the computation.
colors = self._get_colors(num_colors=1)
stacking_id = self._get_stacking_id()
sdf = self.data._internal.spark_frame
for i, label in enumerate(self.data._internal.column_labels):
# 'y' is a Spark DataFrame that selects one column.
y = sdf.select(self.data._internal.spark_column_for(label))
ax = self._get_ax(i)
kwds = self.kwds.copy()
label = pprint_thing(label if len(label) > 1 else label[0])
kwds["label"] = label
style, kwds = self._apply_style_colors(colors, kwds, i, label)
if style is not None:
kwds["style"] = style
kwds = self._make_plot_keywords(kwds, y)
artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds)
self._add_legend_handle(artists[0], label, index=i)
def _get_ind(self, y):
return KdePlotBase.get_ind(y, self.ind)
@classmethod
def _plot(
cls, ax, y, style=None, bw_method=None, ind=None, column_num=None, stacking_id=None, **kwds
):
y = KdePlotBase.compute_kde(y, bw_method=bw_method, ind=ind)
lines = PandasMPLPlot._plot(ax, ind, y, style=style, **kwds)
return lines
_klasses = [
PandasOnSparkHistPlot,
PandasOnSparkBarPlot,
PandasOnSparkBoxPlot,
PandasOnSparkPiePlot,
PandasOnSparkAreaPlot,
PandasOnSparkLinePlot,
PandasOnSparkBarhPlot,
PandasOnSparkScatterPlot,
PandasOnSparkKdePlot,
]
_plot_klass = {getattr(klass, "_kind"): klass for klass in _klasses}
_common_kinds = {"area", "bar", "barh", "box", "hist", "kde", "line", "pie"}
_series_kinds = _common_kinds.union(set())
_dataframe_kinds = _common_kinds.union({"scatter", "hexbin"})
_pandas_on_spark_all_kinds = _common_kinds.union(_series_kinds).union(_dataframe_kinds)
def plot_pandas_on_spark(data, kind, **kwargs):
if kind not in _pandas_on_spark_all_kinds:
raise ValueError("{} is not a valid plot kind".format(kind))
from pyspark.pandas import DataFrame, Series
if isinstance(data, Series):
if kind not in _series_kinds:
return unsupported_function(class_name="pd.Series", method_name=kind)()
return plot_series(data=data, kind=kind, **kwargs)
elif isinstance(data, DataFrame):
if kind not in _dataframe_kinds:
return unsupported_function(class_name="pd.DataFrame", method_name=kind)()
return plot_frame(data=data, kind=kind, **kwargs)
def plot_series(
data,
kind="line",
ax=None, # Series unique
figsize=None,
use_index=True,
title=None,
grid=None,
legend=False,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
label=None,
secondary_y=False, # Series unique
**kwds
):
"""
Make plots of Series using matplotlib / pylab.
Each plot kind has a corresponding method on the
``Series.plot`` accessor:
``s.plot(kind='line')`` is equivalent to
``s.plot.line()``.
Parameters
----------
data : Series
kind : str
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
ax : matplotlib axes object
If not passed, uses gca()
figsize : a tuple (width, height) in inches
use_index : boolean, default True
Use index as ticks for x axis
title : string or list
Title to use for the plot. If a string is passed, print the string at
the top of the figure. If a list is passed and `subplots` is True,
print each item in the list above the corresponding subplot.
grid : boolean, default None (matlab style default)
Axis grid lines
legend : False/True/'reverse'
Place legend on axis subplots
style : list or dict
matplotlib line style per column
logx : boolean, default False
Use log scaling on x axis
logy : boolean, default False
Use log scaling on y axis
loglog : boolean, default False
Use log scaling on both x and y axes
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal plots)
fontsize : int, default None
Font size for xticks and yticks
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
colorbar : boolean, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
table : boolean, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data will
be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for
detail.
xerr : same types as yerr.
label : label argument to provide to plot
secondary_y : boolean or sequence of ints, default False
If True then y-axis will be on the right
mark_right : boolean, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend
**kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
"""
# function copied from pandas.plotting._core
# so it calls modified _plot below
import matplotlib.pyplot as plt
if ax is None and len(plt.get_fignums()) > 0:
with plt.rc_context():
ax = plt.gca()
        ax = PandasMPLPlot._get_ax_layer(ax)  # api: pandas.plotting._matplotlib.core.MPLPlot._get_ax_layer
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: -all
# comment_magics: true
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.8
# kernelspec:
# display_name: 'Python 3.8.12 64-bit (''mapping_parenting_tech'': conda)'
# language: python
# name: python3
# ---
# %%
from mapping_parenting_tech.utils import (
play_store_utils as psu,
text_preprocessing_utils as tpu,
)
from mapping_parenting_tech import PROJECT_DIR, logging
from pathlib import Path
from tqdm import tqdm
from datetime import datetime
import tomotopy as tp
import altair as alt
import pandas as pd
import numpy as np
import csv
INPUT_DATA = PROJECT_DIR / "inputs/data/play_store"
OUTPUT_DATA = PROJECT_DIR / "outputs/data"
REVIEWS_DATA = PROJECT_DIR / "outputs/data/app_reviews"
# %%
# Read in the ids of the relevant apps (as manually id'd by Nesta staff)
app_info = pd.read_csv(INPUT_DATA / "relevant_app_ids.csv")
app_clusters = app_info["cluster"].unique().tolist()
# %%
# load the reviews for the relevant apps
app_reviews = psu.load_some_app_reviews(app_info["appId"].to_list())
# add the cluster that each app is in to the reviews
app_reviews = app_reviews.merge(app_info, on="appId")
# what have we got...?
app_reviews.shape
# %%
# get a subset of reviews - here it's those written in the last year about apps in the
# cluster 'Numeracy development'
target_reviews = app_reviews.loc[
(app_reviews["at"] >= pd.to_datetime("2021-02-01"))
# & (app_reviews["cluster"] == "Numeracy development")
]
target_reviews.shape
# %%
# if we want to visualise the distribution of the reviews, group the apps by their id and cluster
# and count the number of reviews for each app
app_review_counts = target_reviews.groupby(["appId", "cluster"]).agg(
review_count=("reviewId", "count"),
)
# reset the index so we just have one
app_review_counts.reset_index(inplace=True)
# %%
# plot the data
# x-axis groups clusters together and assigns a 'jitter' value to randomly distribute apps w/in the cluster horizontally
# y-axis is simply the number of reviews for an app
stripplot = (
alt.Chart(app_review_counts, width=75)
.mark_circle(size=20)
.encode(
x=alt.X(
"jitter:Q",
title=None,
axis=alt.Axis(values=[0], ticks=True, grid=False, labels=False),
scale=alt.Scale(),
),
y=alt.Y(
"review_count:Q",
# scale=alt.Scale(range=(0,10000)),
),
color=alt.Color("cluster:N", legend=None),
column=alt.Column(
"cluster:N",
header=alt.Header(
labelAngle=-90,
titleOrient="top",
labelOrient="bottom",
labelAlign="right",
labelPadding=3,
),
),
tooltip=["appId"],
)
.transform_calculate(
# Generate Gaussian jitter with a Box-Muller transform
jitter="sqrt(-2*log(random()))*cos(2*PI*random())"
)
.configure_facet(spacing=0)
.configure_view(stroke=None)
)
stripplot
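# %%
# Quick numeric check of the Box-Muller jitter used in the chart above (plain
# numpy, illustrative only): sqrt(-2*log(u1))*cos(2*pi*u2) maps two uniform draws
# onto a standard normal, which is what spreads apps horizontally within a cluster.
_u1 = np.random.uniform(1e-12, 1.0, 1000)
_u2 = np.random.uniform(0.0, 1.0, 1000)
_jitter = np.sqrt(-2 * np.log(_u1)) * np.cos(2 * np.pi * _u2)
print(round(_jitter.mean(), 2), round(_jitter.std(), 2))  # approximately 0 and 1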
# %%
# Now let's look at the distributions of reviews for each cluster
app_review_counts.groupby("cluster").agg(
total_reviews=("review_count", "sum"),
mean=("review_count", "mean"),
median=("review_count", np.median),
).sort_values("total_reviews")
# %% [markdown]
# ## Topic modelling
# %%
# get a subset of reviews
tp_reviews = target_reviews[target_reviews.cluster == "Drawing and colouring"]
tp_reviews.shape
# %% [markdown]
# **NB** Following code **should not** be executed unless reviews need to be pre-processed. This step, once complete, should save the reviews for later use
#
# **Ignore all that follows, up until the next markdown cell**
# %%
# preprocess the review text ready for topic modelling
# this will take some time for 10,000s of reviews...
tp_reviews.loc[:, "preprocessed_review"] = tpu.get_preprocessed_documents(
[str(review) for review in tp_reviews["content"].to_list()]
)
# %%
# get reviews that have 5 tokens or more
tp_reviews = tp_reviews[tp_reviews.preprocessed_review.str.count(" ") + 1 > 4]
tp_reviews.shape
# %%
# save processed reviews
tp_reviews.to_csv(OUTPUT_DATA / "pre_processed_drawing_app_reviews.csv")
# %%
# load reviews into a dict: {cluster: [review_1, review_2 ... review_n]
raw_reviews_by_cluster = dict()
for cluster in tqdm(app_clusters):
raw_reviews_by_cluster[cluster] = tp_reviews[
tp_reviews.cluster == cluster
].content.to_list()
# %%
# set up a Corpus which we'll load our documents (reviews) into
corpus = tp.utils.Corpus()
# %%
# tokenize the reviews and add to corpus
for review in tp_reviews.preprocessed_review.to_list():
doc = tpu.simple_tokenizer(review)
corpus.add_doc(doc)
# %%
# initialise the model and confirm the number of documents in it
n_topics = 20  # assumed value; n_topics is not defined earlier in this notebook
model = tp.LDAModel(k=n_topics, corpus=corpus)
print(len(model.docs))
# %%
# Set up a function to help explore the variables we need to train the model - at what point does log likelihood and/or coherence converge?
def test_model(
corpus,
n_topics: int = 20,
max_iters: int = 1000,
iter_step: int = 50,
seed: int = None,
) -> int:
model = tp.LDAModel(k=n_topics, corpus=corpus)
model_scores = []
for i in range(0, max_iters, iter_step):
model.train(iter=iter_step)
ll_per_word = model.ll_per_word
c_v = tp.coherence.Coherence(model, coherence="c_v")
model_scores.append(
{
"N_TOPICS": n_topics,
"N_ITER": i,
"LOG_LIKELIHOOD": ll_per_word,
"COHERENCE": c_v.get_score(),
"SEED": seed,
}
)
model_scores = pd.DataFrame(model_scores)
return model_scores[model_scores["COHERENCE"] == model_scores["COHERENCE"].max()]
# %%
iters = []
for j in tqdm(range(5, 25), position=0):
for i in range(100, 1500, 250):
iters.append(test_model(corpus, n_topics=j, max_iters=1050, seed=250))
model_scores = pd.concat(iters)
# %%
# model_scores = model_scores.sort_values("COHERENCE", ascending=False)
# model_scores.head()
model_scores.plot.scatter("N_ITER", "LOG_LIKELIHOOD")
# %%
print(
f"Number of unique words: {len(model.used_vocabs):,}",
f"\nTotal number of tokens: {model.num_words:,}",
)
# %%
def print_topic_words(model, top_n=5):
for k in range(model.k):
top_words = model.get_topic_words(topic_id=k, top_n=top_n)
top_words = [f"{tup[0]} ({tup[1]:.04f}%)" for tup in top_words]
print(f"Topic #{k}:", f"\n+ {', '.join(top_words)}")
# %%
print_topic_words(model)
# %%
word_dist = model.get_topic_word_dist(topic_id=2)
pd.Series(word_dist, index=model.used_vocabs).sort_values(ascending=False).head(15)
# %%
topic_sizes = model.get_count_by_topics()
print("Number of words per topic:")
for k in range(0, 5):
print(f"+ Topic #{k}: {topic_sizes[k]:,} words")
# %%
print("Topic proportion across the corpus:")
for k in range(0, 5):
print(f"+ Topic #{k}: {topic_sizes[k] / model.num_words:0.2f}%")
# %%
print_topic_words(best_p)
# %%
n_topic_range = range(5, 35)
n_iters = 1050
coherence_scores = []
for k in tqdm(n_topic_range):
_model = tp.LDAModel(k=k, corpus=corpus, seed=250)
_model.train(iter=n_iters)
coherence = tp.coherence.Coherence(_model, coherence="c_v")
coherence_scores.append({"N_TOPICS": k, "COHERENCE_SCORE": coherence.get_score()})
# %%
coherence_scores = pd.DataFrame(coherence_scores)  # api: pandas.DataFrame
"""
HWRF_Rainfall_cron.py
-- cron job script for HWRF data
"""
import sys, os
import yaml
import requests
from bs4 import BeautifulSoup
import logging
import subprocess
from osgeo import gdal
import pandas as pd
import geopandas as gpd
import csv
import json
import rasterio
from rasterio.mask import mask
import numpy as np
from rasterio import Affine
import math
from shapely.geometry import Point
import shutil
import zipfile
def load_config(onetime=''):
"""load configuration file """
with open("HWRF_config.yml", "r") as ymlfile:
cfg = yaml.safe_load(ymlfile)
global hosturl
hosturl = cfg['HWRF']['host']
folderprefix = cfg['datalocation']['folderprefix']
folderprefix = os.path.abspath(folderprefix) + os.path.sep
global HWRFsummary
HWRFsummary = folderprefix + cfg['datalocation']['HWRFsummary'] + os.path.sep
global HWRFimage
HWRFimage = folderprefix + cfg['datalocation']['HWRFimage'] + os.path.sep
global HWRFoutput
HWRFoutput = folderprefix + cfg['datalocation']['HWRFoutput']
global HWRFraw
HWRFraw = folderprefix + cfg['datalocation']['HWRFraw'] + os.path.sep
global flood_HWRF
flood_HWRF= folderprefix + cfg['datalocation']['flood_HWRF'] + os.path.sep
global flooddata
flooddata= folderprefix + cfg['datalocation']['flooddata'] + os.path.sep
# set up logging file
logging.basicConfig(filename = cfg['datalocation']['loggingfile'], format='%(asctime)s %(message)s', level=logging.INFO)
def check_status(adate):
""" check if a give date is processed"""
processed_list = os.listdir(HWRFsummary)
processed = any(adate in x for x in processed_list)
return processed
def generate_procesing_list():
""" generate the processing list"""
reqs = requests.get(hosturl)
soup = BeautifulSoup(reqs.text,"html.parser")
datelist = {}
for link in soup.find_all('a'):
fstr = link.string
if (fstr[:5] == 'hwrf.'):
a_entry = fstr.split('.')[1]
a_entry = a_entry.replace("/","")
if check_status(a_entry):
continue
datelist[a_entry] = hosturl + fstr
return datelist
def HWRF_download(hwrfurl):
""" download rainfall data"""
reqs = requests.get(hwrfurl)
soup = BeautifulSoup(reqs.text,"html.parser")
ascii_list = []
for link in soup.find_all('a'):
fstr = link.string
if "rainfall.ascii" in fstr:
if not os.path.exists(HWRFraw + fstr):
wgetcmd = "wget " + hwrfurl + fstr + " -P " + HWRFraw
subprocess.call(wgetcmd, shell=True)
ascii_list.append(fstr)
return ascii_list
def process_rain(adate,TC_Rain):
"""process rainfall data"""
## VRT template to read the csv
vrt_template="""<OGRVRTDataSource>
<OGRVRTLayer name='{}'>
<SrcDataSource>{}</SrcDataSource>
<GeometryType>wkbPoint</GeometryType>
<GeometryField encoding="PointFromColumns" x="lon" y="lat" z="Z"/>
</OGRVRTLayer>
</OGRVRTDataSource>"""
## Read each text file and create the separate tiff file
for i in TC_Rain:
with open(i,'r') as f:
variable=csv.reader(f, delimiter=' ')
row_count=1
for row in variable:
if row_count == 1:
while ('' in row):
row.remove('')
XLC=float(row[0])
XRC=float(row[1])
YBC=float(row[2])
YTC=float(row[3])
res=float(row[4])
nrows=float(row[5])
ncol=float(row[6])
row_count = row_count + 1
df = (pd.read_table(i, skiprows=1, delim_whitespace=True, names=('lat', 'lon', 'Z'))).fillna(-999)
        df = df.sort_values(by=["lat", "lon"], ascending=[False, True])
df=df[['lon','lat','Z']]
df = df[df.lon >= XLC]
df = df[df.lon <= XRC]
df = df[df.lat >= YBC]
df = df[df.lat <= YTC]
df = df[df.Z > 0]
df.to_csv(i.replace(".ascii",".csv"),index=False, sep=" ")
with open(i.replace(".ascii",".vrt"),"w") as g:
g.write(vrt_template.format(i.replace(".ascii",""),i.replace(".ascii",".csv")))
g.close()
r=gdal.Rasterize(i.replace(".ascii",".tiff"),i.replace(".ascii",".vrt"),outputSRS="EPSG:4326",xRes=res, yRes=res,attribute="Z", noData=-999)
r=None
os.remove(i.replace(".ascii",".csv"))
    ## merge all tiff files and delete the individual tiff, vrt and ascii files
TC_Rain_tiff=[]
for i in TC_Rain:
TC_Rain_tiff.append(i.replace(".ascii",".tiff"))
filename="hwrf."+ adate +"rainfall.vrt"
raintiff = filename.replace(".vrt",".tiff")
vrt = gdal.BuildVRT(filename, TC_Rain_tiff)
gdal.Translate(raintiff, vrt)
vrt=None
# no need
#gdalcmd = "gdal_translate -of GTiff " + filename + " " + raintiff
#subprocess.call(gdalcmd, shell=True)
# create a zipfile
zip_file="hwrf."+ adate +"rainfall.zip"
with zipfile.ZipFile(zip_file, 'w',zipfile.ZIP_DEFLATED) as zipObj:
for i in TC_Rain_tiff:
asfile = i.replace(".tiff",".ascii")
zipObj.write(asfile)
for i in TC_Rain_tiff:
os.remove(i)
os.remove(i.replace(".tiff",".ascii"))
os.remove(i.replace(".tiff",".vrt"))
return raintiff
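# For illustration (hypothetical storm file name): while process_rain runs, an input
# such as "hwrf.18L.2021091806.rainfall.ascii" is converted to
# "hwrf.18L.2021091806.rainfall.csv" and wrapped in a VRT like
#   <OGRVRTDataSource>
#     <OGRVRTLayer name='hwrf.18L.2021091806.rainfall'>
#       <SrcDataSource>hwrf.18L.2021091806.rainfall.csv</SrcDataSource>
#       <GeometryType>wkbPoint</GeometryType>
#       <GeometryField encoding="PointFromColumns" x="lon" y="lat" z="Z"/>
#     </OGRVRTLayer>
#   </OGRVRTDataSource>
# so gdal.Rasterize can treat each CSV row as a point whose burn value is Z.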
def HWRF_extract_by_mask(mask_json,tiff):
"""extract by each watershed"""
with rasterio.open(tiff) as src:
try:
out_image, out_transform = mask(src, [mask_json['features'][0]['geometry']], crop=True)
except ValueError as e:
#'Input shapes do not overlap raster.'
#print(e)
src = None
# return empty dataframe
            return pd.DataFrame()  # api: pandas.DataFrame
'''
author: <NAME> || @slothfulwave612
Python module for i/o operations on the dataset.
'''
## import necessary packages/modules
import os
import numpy as np
import pandas as pd
from pandas.io.json import json_normalize
import json
import math
import multiprocessing
from tqdm.auto import tqdm, trange
import statsmodels.api as sm
def get_competition(path):
'''
Function for getting data about all the competitions.
Argument:
path -- str, path to competition.json file.
Returns:
comp_df -- pandas dataframe, all competition data.
'''
## load the json file
comp_data = json.load(open(path))
## make pandas dataframe
comp_df = pd.DataFrame(comp_data)
return comp_df
def flatten_json(sub_str):
'''
Function to take out values from nested dictionary present in
the json file, so to make a representable dataframe.
    ---> This piece of code was found on Stack Overflow <---
Argument:
sub_str -- substructure defined in the json file.
Returns:
flattened out information.
'''
out = {}
def flatten(x, name=''):
if type(x) is dict:
for a in x:
flatten(x[a], name + a + '_')
elif type(x) is list:
i = 0
for a in x:
flatten(a, name + str(i) + '_')
i += 1
else:
out[name[:-1]] = x
flatten(sub_str)
return out
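# Quick illustrative check of flatten_json (made-up input, not real StatsBomb data):
# nested dict keys are joined with "_" and list items contribute their index.
_example_nested = {"home_team": {"home_team_id": 217, "managers": [{"name": "Pep"}]}}
assert flatten_json(_example_nested) == {
    "home_team_home_team_id": 217,
    "home_team_managers_0_name": "Pep",
}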
def get_matches(comp_id, season_id, path):
'''
Function for getting match-data for a given competition
Arguments:
comp_id -- int, the competition id.
season_id -- int, the season id.
path -- str, path to .json file containing match data.
Returns:
match_df -- pandas dataframe, containing all the matches
'''
## loading up the data from json file
match_data = json.load(open(path, encoding='utf8'))
## flattening the json file
match_flatten = [flatten_json(x) for x in match_data]
## creating a dataframe
match_df = pd.DataFrame(match_flatten)
match_df_cols = list(match_df.columns)
## renaming the dataframe
for i in range(len(match_df_cols)):
if match_df_cols[i].count('away_team') == 2:
## for away_team columns
match_df_cols[i] = match_df_cols[i][len('away_team_'):]
elif match_df_cols[i].count('_0') == 1:
## for _0 columns
match_df_cols[i] = match_df_cols[i].replace('_0', '')
elif match_df_cols[i].count('competition') == 2:
## for competition columns
match_df_cols[i] = match_df_cols[i][len('competition_'):]
elif match_df_cols[i].count('home_team') == 2:
## for away_team columns
match_df_cols[i] = match_df_cols[i][len('home_team_'):]
elif match_df_cols[i].count('season') == 2:
## for away_team columns
match_df_cols[i] = match_df_cols[i][len('season_'):]
match_df.columns = match_df_cols
return match_df
def make_event_df(match_id, path):
'''
Function for making event dataframe.
Argument:
match_id -- int, the required match id for which event data will be constructed.
path -- str, path to .json file containing event data.
Returns:
df -- pandas dataframe, the event dataframe for the particular match.
'''
## read in the json file
event_json = json.load(open(path, encoding='utf-8'))
## normalize the json data
df = json_normalize(event_json, sep='_')
return df
def full_season_events(match_df, match_ids, path, comp_name=None, leave=True, shot="basic"):
'''
Function to make event dataframe for a full season.
Arguments:
match_df -- pandas dataframe, containing match-data.
match_id -- list, list of match id.
path -- str, path to directory where .json file is listed.
e.g. '../input/Statsbomb/data/events'
comp_name -- str, competition name + season name, default: None.
leave -- keeps all traces of the progressbar upon termination of iteration.
Returns:
event_df -- pandas dataframe, containing event data for the whole season.
'''
## init an empty dataframe
event_df = pd.DataFrame()
if comp_name == None:
t = match_ids
else:
t = tqdm(match_ids, desc=f'Grabbing data for {comp_name}', position=0, leave=leave)
for match_id in t:
## .json file
temp_path = path + f'/{match_id}.json'
temp_df = make_event_df(match_id, temp_path)
event_df = pd.concat([event_df, temp_df], sort=False)
if shot == "basic":
return event_df.loc[event_df['type_name'] == 'Shot']
elif shot == "intermediate":
return intermediate_dataset(event_df)
elif shot == "advance":
return intermediate_dataset(event_df, adv=True)
def multiple_season_event_df(comp_name, comp_id, season_ids, path_match, path_season, shot):
'''
Function for making event dataframe having multile seasons
for the same competition.
Arguments:
comp_name -- str, competition name + season
comp_id -- int, competition id.
season_ids -- list, list containing season ids.
path_match -- str, path to .json file containing match data.
path_season -- str, path to directory where .json file is listed.
e.g. '../input/Statsbomb/data/events'
Returns:
event_df -- pandas dataframe, containing event of multiple seasons.
'''
## init an empty dataframe
event_df = pd.DataFrame()
## making the event-dataframe
for season_id in tqdm(season_ids, desc=f'Grabbing data for {comp_name}', leave=True):
## add season id to path-match
team_path_match = path_match + f'/{comp_id}/{season_id}.json'
## make a match dataframe for a particular season
match_df = get_matches(comp_id, season_id, team_path_match)
## list all the match ids
match_ids = list(match_df['match_id'].unique())
comp_name_ = match_df['competition_name'].unique()[0] + '-' + match_df['season_name'].unique()[0]
## create the event dataframe for the whole season
temp_df = full_season_events(match_df, match_ids, path_season, comp_name=comp_name_, leave=False, shot=shot)
## add competition
temp_df["comp_name"] = comp_name_
## concat the dataframes
event_df = pd.concat([event_df, temp_df], sort=False)
## make final dataframe
event_df = event_df.reset_index(drop=True)
return event_df
def goal(value):
'''
Function to output 1: if goal or 0: otherwise.
Arguments:
value -- str, shot-outcome-name.
Returns:
0 or 1 -- 0 means no goal 1 means goal.
'''
if value == 'Goal':
return 1
else:
return 0
def body_part(value):
'''
Function to output: Head -- if it is a header,
Foot -- if it is right/left foot,
Other -- if any other body part
'''
if value == "Left Foot" or value == "Right Foot":
return "Foot"
else:
return value
def change_dims(old_value, old_min, old_max, new_min, new_max):
'''
Function for changing the coordinates to our pitch dimensions.
Arguments:
old_value, old_min, old_max, new_min, new_max -- float values.
Returns:
new_value -- float value(the coordinate value either x or y).
'''
## calculate the value
new_value = ( (old_value - old_min) / (old_max - old_min) ) * (new_max - new_min) + new_min
return new_value
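# Worked example (values chosen for illustration): the StatsBomb x range 0-120 is
# rescaled here onto a 104-unit pitch, so the halfway line x=60 maps to 52.
assert change_dims(60, 0, 120, 0, 104) == 52.0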
def coordinates_x(value):
'''
Return x coordinate
'''
value_x = change_dims(value[0], 0, 120, 0, 104)
return value_x
def coordinates_y(value):
'''
    Return the rescaled y coordinate, flipped as 80 - y
    '''
    value_y = change_dims(80 - value[1], 0, 80, 0, 68)
return value_y
def distance_bw_coordinates(x1, y1, x2=104.0, y2=34.0):
'''
Function for calculating the distance between shot location
and the goal post.
Arguments:
x1, y1 -- float, the x and y coordinate for shot location.
x2, y2 -- float, the x and y coordinate for the goal post location.(default for Statsbomb defined goal-post)
'''
diff_sqr_x = (x2 - x1)**2
diff_sqr_y = (y2 - y1)**2
    distance = math.sqrt(diff_sqr_x + diff_sqr_y)  ## euclidean distance
return distance
def post_angle(x, y, g1_x=104, g1_y=30.34, g2_x=104, g2_y=37.66):
'''
Function to calculate the post angle.
Arguments:
x -- float, x coordinate from where the shot was taken.
y -- float, y coordinate from where the shot was taken.
g1 and g2 are the coordinates of the two woodwork, default values
specifying the woodwork coordinate for Statsbomb data.
Returns:
angle -- float, the angle in degrees.
'''
if x == 104 and (30.34 <= y <= 37.66):
return 180
if x == 104 and (y > 37.66 or y < 30.34):
return 0
## calculating the three sides of the triangle.
A_dis = distance_bw_coordinates(x, y, g1_x, g1_y)
B_dis = distance_bw_coordinates(x, y, g2_x, g2_y)
C_dis = distance_bw_coordinates(g1_x, g1_y, g2_x, g2_y)
## using cosine law
value = ((A_dis**2) + (B_dis**2) - (C_dis**2)) / (2 * A_dis * B_dis)
angle = np.degrees(np.arccos(value))
return angle
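# Small sanity sketch (illustrative coordinates): a central shot from roughly the
# penalty spot sees the 7.32-unit goal mouth at about 34 degrees, a shot on the
# goal line between the posts sees 180 degrees, and one outside the posts sees 0.
assert 30 < post_angle(92, 34) < 40
assert post_angle(104, 34) == 180
assert post_angle(104, 20) == 0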
def create_result_df(df, length, col):
'''
Function to create a result dataframe(statsbomb_xg vs predicted_xg).
Arguments:
df -- pandas dataframe.
length -- int, length of the dataframe.
col -- str, column name for predicted xG value.
Returns:
result -- pandas dataframe containing statsbomb_xg and predicted_xg as columns.
'''
## fetch all the player names
players = df.loc[df['target'] == 1, 'player_name'].value_counts()[:length].index
## init a dictionary
result_dict = {
'player_name': [],
'shots': [],
'goals': [],
'statsbomb_xg': [],
'predicted_xg': []
}
## calculate required values
for player in players:
## total number of shots taken by a player
shots = len(df.loc[(df['player_name'] == player)])
## total number of goals scored by a player
goals = len(df.loc[
(df['player_name'] == player) &
(df['target'] == 1)
])
## aggregated statsbomb-xG-value for a player
stats_xg = df.loc[
(df['player_name'] == player),
'shot_statsbomb_xg'
].sum()
## aggregated predicted-xG-value for a player
pred_xg = df.loc[
(df['player_name'] == player),
col
].sum()
## append result to result_dict
result_dict['player_name'].append(player)
result_dict['shots'].append(shots)
result_dict['goals'].append(goals)
result_dict['statsbomb_xg'].append(stats_xg)
result_dict['predicted_xg'].append(pred_xg)
## create pandas dataframe
    result = pd.DataFrame(result_dict)  # api: pandas.DataFrame
import os
import sys
import subprocess
import datetime
import fire
import pickle
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from xgboost import XGBClassifier
AIP_MODEL_DIR = os.environ["AIP_MODEL_DIR"]
def train_evaluate(training_dataset_path, validation_dataset_path,max_depth,n_estimators):
    df_train = pd.read_csv(training_dataset_path)  # api: pandas.read_csv
import structurelearning
import pandas as pd
import math
from operator import mul
from functools import reduce
import itertools
from collections import defaultdict
import numpy as np
from collections import namedtuple
def variableElimination(index, observations, model):
#print('Starting Variable Elimination!')
adj_matrix, cpt_list = model
if not observations[0]:
return cpt_list[index]
observed_indices = [o_id for o_id, o_value in observations]
# Local CPT instantiated by evidence
factors_cpts = factorize(adj_matrix, observations, cpt_list)
reduced_factors = factors_cpts.copy()
# Eliminate irrelevant variables, i.e. those which are not successors to any of the observed or queried variable
indices_to_keep = get_all_ancestors(index, observed_indices, adj_matrix)
# Eliminate all hidden variables (i.e. except target and observed)
for hidden_var in range(len(cpt_list)):
if hidden_var not in indices_to_keep:
# ignore all irrelevant variable
for key, value in reduced_factors.items():
if key.var == hidden_var:
del reduced_factors[key]
break
continue
if hidden_var != index and hidden_var not in observed_indices:
reduced_factors = eliminate_variable(hidden_var, reduced_factors)
# Join all remaining factors
if len(reduced_factors) != 1:
#print('Only observed and target left:', reduced_factors)
reduced_factors, reduced_cpt = pointwise_product(index, reduced_factors, reduced_factors.copy())
#print('Variable elimination finished!')
final_cpt = normalize(reduced_cpt)
# print('Final cpt:', final_cpt)
# print('Final factors:', reduced_factors)
return final_cpt
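# Usage sketch (hypothetical model, assuming `adj_matrix` and `cpt_list` were built
# by the structure-learning step): querying variable 1 given that variable 0 was
# observed in state 0 would look like
#   posterior = variableElimination(1, [(0, 0)], (adj_matrix, cpt_list))
# and returns the normalised distribution over variable 1's states after all
# relevant hidden variables have been summed out.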
def get_all_ancestors(index, observed_indices, adj_matrix):
all_present_vars = observed_indices
all_present_vars.append(index)
ancestors = []
    adj_matrix_transposed = pd.DataFrame(adj_matrix)  # api: pandas.DataFrame
from datetime import date, datetime, timedelta
from dateutil import tz
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, Timestamp, date_range
import pandas._testing as tm
class TestDatetimeIndex:
def test_setitem_with_datetime_tz(self):
# 16889
# support .loc with alignment and tz-aware DatetimeIndex
mask = np.array([True, False, True, False])
idx = date_range("20010101", periods=4, tz="UTC")
df = DataFrame({"a": np.arange(4)}, index=idx).astype("float64")
result = df.copy()
result.loc[mask, :] = df.loc[mask, :]
tm.assert_frame_equal(result, df)
result = df.copy()
result.loc[mask] = df.loc[mask]
tm.assert_frame_equal(result, df)
idx = date_range("20010101", periods=4)
df = DataFrame({"a": np.arange(4)}, index=idx).astype("float64")
result = df.copy()
result.loc[mask, :] = df.loc[mask, :]
tm.assert_frame_equal(result, df)
result = df.copy()
result.loc[mask] = df.loc[mask]
tm.assert_frame_equal(result, df)
def test_indexing_with_datetime_tz(self):
# GH#8260
# support datetime64 with tz
idx = Index(date_range("20130101", periods=3, tz="US/Eastern"), name="foo")
dr = date_range("20130110", periods=3)
df = DataFrame({"A": idx, "B": dr})
df["C"] = idx
df.iloc[1, 1] = pd.NaT
df.iloc[1, 2] = pd.NaT
# indexing
result = df.iloc[1]
expected = Series(
[Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT],
index=list("ABC"),
dtype="object",
name=1,
)
tm.assert_series_equal(result, expected)
result = df.loc[1]
expected = Series(
[Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT],
index=list("ABC"),
dtype="object",
name=1,
)
tm.assert_series_equal(result, expected)
# indexing - fast_xs
df = DataFrame({"a": date_range("2014-01-01", periods=10, tz="UTC")})
result = df.iloc[5]
expected = Series(
[Timestamp("2014-01-06 00:00:00+0000", tz="UTC")], index=["a"], name=5
)
tm.assert_series_equal(result, expected)
result = df.loc[5]
tm.assert_series_equal(result, expected)
# indexing - boolean
result = df[df.a > df.a[3]]
expected = df.iloc[4:]
tm.assert_frame_equal(result, expected)
# indexing - setting an element
df = DataFrame(
data=pd.to_datetime(["2015-03-30 20:12:32", "2015-03-12 00:11:11"]),
columns=["time"],
)
df["new_col"] = ["new", "old"]
df.time = df.set_index("time").index.tz_localize("UTC")
v = df[df.new_col == "new"].set_index("time").index.tz_convert("US/Pacific")
# trying to set a single element on a part of a different timezone
# this converts to object
df2 = df.copy()
df2.loc[df2.new_col == "new", "time"] = v
expected = Series([v[0], df.loc[1, "time"]], name="time")
tm.assert_series_equal(df2.time, expected)
v = df.loc[df.new_col == "new", "time"] + pd.Timedelta("1s")
df.loc[df.new_col == "new", "time"] = v
tm.assert_series_equal(df.loc[df.new_col == "new", "time"], v)
def test_consistency_with_tz_aware_scalar(self):
        # xref gh-12938
# various ways of indexing the same tz-aware scalar
df = Series([Timestamp("2016-03-30 14:35:25", tz="Europe/Brussels")]).to_frame()
df = pd.concat([df, df]).reset_index(drop=True)
expected = Timestamp("2016-03-30 14:35:25+0200", tz="Europe/Brussels")
result = df[0][0]
assert result == expected
result = df.iloc[0, 0]
assert result == expected
result = df.loc[0, 0]
assert result == expected
result = df.iat[0, 0]
assert result == expected
result = df.at[0, 0]
assert result == expected
result = df[0].loc[0]
assert result == expected
result = df[0].at[0]
assert result == expected
def test_indexing_with_datetimeindex_tz(self):
# GH 12050
# indexing on a series with a datetimeindex with tz
index = date_range("2015-01-01", periods=2, tz="utc")
ser = Series(range(2), index=index, dtype="int64")
# list-like indexing
for sel in (index, list(index)):
# getitem
tm.assert_series_equal(ser[sel], ser)
# setitem
result = ser.copy()
result[sel] = 1
            expected = Series(1, index=index)  # api: pandas.Series
import pandas as pd
import numpy as np
import pickle
import shap
from lightgbm import LGBMClassifier
def get_new_prediction(bus_line, hour, month, day, bus_carrying_cap, city, temp, pressure, bus_age, total_rain):
'''
This function calculates new predictions for a given bus line, hour, month, day, bus carrying capacity, bus age
(years), city, temperature (degrees celcius), pressure (kPA) and rain (mm). Assumes that a file named
final_fitted.pickle is in the results/ml_model directory.
This is solely for use in the interactive report so the user can dynamically generate a graph
as needed by querying results from the model. Arguments are fed to this function via. user
selected input in the report.
Parameters:
bus_line: A str that represents one of the bus lines in the Greater Vancouver area.
hour: An integer 0-23 representing a particular hour of the day.
month: An integer 1-12 representing a particular month of the year.
day: A str (Mon, Tue, Wed, Thu, Fri, Sat, Sun) that represents a particular day
of the week.
bus_carrying_cap: An integer representing the carrying capacity of a bus.
city: A str representing the city of interest.
temp: A float representing the temperature in degrees celsius.
pressure: A float representing the atmospheric pressure in kPa
bus_age: An integer representing the bus age in years.
total_rain: A float representing the total rain in mm.
Returns:
dict
A dictionary with keys shap, predicted, and column_names containing the
SHAP scores (numpy array), predicted 0/1
scores (numpy array), and column names used in the model fit (list).
'''
shuttles = ["23", "31", "42", "68", "103", "105", "109", "131", "132", "146",
"147", "148", "157", "169", "170", "171", "172", "173", "174", "175", "180", "181",
"182", "184", "185", "186", "187", "189", "215", "227", "251", "252", "256", "262",
"280", "281", "282", "310", "322", "360", "361", "362", "363", "370", "371", "372",
"373", "412", "413", "414", "416", "560", "561", "562", "563", "564", "609", "614",
"616", "617", "618", "619", "719", "722", "733", "741", "743", "744", "745", "746", "748", "749"]
# The values that are held constant: just use the means/modes
new_data = pd.DataFrame({
'hour': pd.Series(hour, dtype='int'),
'day_of_week': pd.Series(day, dtype='str'),
'bus_age': pd.Series(bus_age, dtype='float'),
'bus_carry_capacity': pd.Series(bus_carrying_cap if bus_carrying_cap != "NA" else np.nan, dtype='float'),
'line_no': pd.Series(bus_line, dtype='str'),
'city': pd.Series(city, dtype='str'),
'pressure': pd.Series(pressure, dtype='float'),
'rel_hum': pd.Series(93, dtype='float'),
'elev': pd.Series(2.5, dtype='float'),
'temp': pd.Series(temp, dtype='float'),
'visib': pd.Series(48.3, dtype='float'),
'wind_dir': pd.Series(0, dtype='float'),
'wind_spd': pd.Series(2, dtype='float'),
'total_precip': pd.Series(total_rain, dtype='float'),
'total_rain': pd.Series(total_rain, dtype='float'),
        'total_snow': pd.Series(0, dtype='float')  # api: pandas.Series
# -*- coding: utf-8 -*-
"""
Input file loading and caching system.
"""
import itertools
import json
import os
import pathlib
import re
import secrets
import sys
from typing import Dict, List, Optional, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.interpolate as scpi
import scipy.spatial.transform as scpt
import seaborn as sns
import swifter
from perceptree.common.cache import update_dict_recursively
from perceptree.common.configuration import Config
from perceptree.common.configuration import Configurable
from perceptree.common.graph_saver import GraphSaver
from perceptree.common.logger import Logger
from perceptree.common.logger import ParsingBar
from perceptree.common.logger import LoadingBar
from perceptree.common.math import carthesian_to_spherical
from perceptree.common.math import spherical_to_carthesian
from perceptree.common.util import dict_of_lists
from perceptree.common.util import parse_bool_string
from perceptree.common.util import tuple_array_to_numpy
from perceptree.data.treeio import TreeFile
from perceptree.data.treeio import TreeImage
from perceptree.data.treeio import TreeStatistic
class BaseDataLoader(Logger):
"""
Input file loading and caching system.
"""
@staticmethod
def _create_empty_scores() -> pd.DataFrame:
""" Create empty results dataframe as per _compile_scores return. """
return pd.DataFrame({
"tree_id": int(),
"tree_variant_id": int(),
"tree_jod": int(),
"tree_jod_low": float(),
"tree_jod_high": float(),
"tree_jod_var": float()
}, index=[ ]).set_index([ "tree_id", "tree_variant_id" ])
@staticmethod
def _generate_reduced_scores(full_scores: pd.DataFrame) -> pd.DataFrame:
""" Generate reduced scores from given full scores. """
return full_scores.reset_index() \
.drop([ "tree_variant_id" ], axis=1) \
.set_index([ "tree_id" ])
@staticmethod
def _create_empty_results() -> pd.DataFrame:
""" Create empty results dataframe as per _extract_results return. """
return pd.DataFrame({
"index": int(),
"first_tree_id": int(),
"first_tree_variant_id": int(),
"first_view_id": int(),
"first_view_variant_id": int(),
"second_tree_id": int(),
"second_tree_variant_id": int(),
"second_view_id": int(),
"second_view_variant_id": int(),
"worker_id": str(),
"choice": int()
}, index=[ ]).set_index("index")
@staticmethod
def _generate_reduced_results(full_results: pd.DataFrame) -> pd.DataFrame:
""" Generate reduced results from given full results. """
return full_results.reset_index() \
.drop([ "first_tree_variant_id", "first_view_variant_id", "second_tree_variant_id", "second_view_variant_id" ], axis=1) \
.set_index([ "index" ])
@staticmethod
def _create_empty_view_catalogue() -> pd.DataFrame:
""" Create empty results dataframe as per _view_catalogue return. """
return pd.DataFrame({
"tree_id": int(),
"tree_variant_id": int(),
"view_id": int(),
"view_variant_id": int(),
"view_type": str(),
"path": str(),
"json_path": str(),
"data": object()
}, index=[ ]).set_index(["tree_id", "view_id", "view_variant_id", "view_type"])
@staticmethod
def _generate_reduced_view_catalogue(full_view_catalogue: pd.DataFrame) -> pd.DataFrame:
""" Generate reduced view catalogue from given full view catalogue. """
return full_view_catalogue.reset_index() \
.drop([ "tree_variant_id", "view_variant_id", "json_path" ], axis=1) \
.set_index([ "tree_id", "view_id", "view_type" ])
@staticmethod
def _create_empty_tree_catalogue() -> pd.DataFrame:
""" Create empty results dataframe as per _tree_catalogue return. """
return pd.DataFrame({
"tree_id": int(),
"tree_variant_id": int(),
"path": str(),
"json_path": str(),
"data": object()
}, index=[ ]).set_index(["tree_id", "tree_variant_id"])
@staticmethod
def _generate_reduced_tree_catalogue(full_tree_catalogue: pd.DataFrame) -> pd.DataFrame:
""" Generate reduced tree catalogue from given full tree catalogue. """
return full_tree_catalogue.reset_index() \
.drop([ "tree_variant_id", "json_path" ], axis=1) \
.set_index([ "tree_id" ])
@staticmethod
def _create_empty_tree_data() -> dict:
""" Create empty results dict as per _tree_data return. """
return { }
@staticmethod
def _create_empty_available_features() -> dict:
""" Create empty results dict as per _available_features return. """
return { }
@staticmethod
def _create_empty_dataset_path() -> str:
""" Create empty results str as per _dataset_path return. """
return ""
@staticmethod
def _create_empty_dataset_meta() -> dict:
""" Create empty results dict as per _dataset_meta return. """
return { "unique_id": "EMPTY" }
@staticmethod
def _create_empty_indexed_scores() -> pd.DataFrame:
""" Create empty results dataframe as per _index_scores return. """
return pd.DataFrame({
"tree_id": int(),
"tree_variant_id": int(),
"view_id": int(),
"view_variant_id": int(),
"jod": float(),
"jod_low": float(),
"jod_high": float(),
"jod_var": float()
}, index=[ ]).set_index(["tree_id", "tree_variant_id", "view_id", "view_variant_id"])
@staticmethod
def _generate_reduced_scores_indexed(full_scores_indexed: pd.DataFrame) -> pd.DataFrame:
""" Generate reduced indexed scores from given full indexed scores. """
return full_scores_indexed.reset_index() \
.drop([ "tree_variant_id", "view_variant_id" ], axis=1) \
.set_index([ "tree_id", "view_id" ])
@staticmethod
def _create_empty_spherical_indexed_scores() -> pd.DataFrame:
""" Create empty results dataframe as per _spherical_scores_indexed return. """
return pd.DataFrame({
"tree_id": int(),
"tree_variant_id": int(),
"view_id": int(),
"view_variant_id": int(),
"jod": float(),
"jod_low": float(),
"jod_high": float(),
"jod_var": float()
}, index=[ ]).set_index(["tree_id", "tree_variant_id", "view_id", "view_variant_id"])
def _index_scores(self, scores: pd.DataFrame) -> pd.DataFrame:
"""
Create indexed score data-frame, where -1st view is
for the complete tree.
:param scores: Input scores data-frame.
:return: Returns data-frame indexed by ("tree_id", "view_id"),
where view_id == -1 contains data for the whole tree. Result
contains following columns:
* tree_id, view_id - Integer index for unique tree/view.
* jod, jod_low, jod_high, jod_var - JOD properties.
"""
self.__l.info(f"Indexing {len(scores)} scores...")
if len(scores) <= 0:
self.__l.info(f"Input scores are empty, returning empty frame!")
return BaseDataLoader._create_empty_indexed_scores()
def convert_row(row):
view_count = (len(row) // 4) - 1
return pd.DataFrame(([ {
"tree_id": row["tree_id"],
"tree_variant_id": row["tree_variant_id"],
"view_id": -1,
"view_variant_id": 0,
# TODO - Add support for tree and view variants.
"jod": row["tree_jod"],
"jod_low": row["tree_jod_low"],
"jod_high": row["tree_jod_high"],
"jod_var": row["tree_jod_var"],
} ] if "tree_jod" in row else [ ])
+
([ {
"tree_id": row["tree_id"],
"tree_variant_id": row["tree_variant_id"],
"view_id": view_idx,
"view_variant_id": 0,
# TODO - Add support for tree and view variants.
"jod": row[f"view{view_idx}_jod"],
"jod_low": row[f"view{view_idx}_jod_low"],
"jod_high": row[f"view{view_idx}_jod_high"],
"jod_var": row[f"view{view_idx}_jod_var"],
} for view_idx in range(view_count) ]))
scores_indexed = pd.concat([ convert_row(row) for index, row in scores.reset_index().iterrows() ])
scores_indexed["tree_id"] = scores_indexed["tree_id"].astype("int64")
scores_indexed["tree_variant_id"] = scores_indexed["tree_variant_id"].astype("int64")
scores_indexed.set_index(["tree_id", "tree_variant_id", "view_id", "view_variant_id"], inplace=True)
self.__l.info(f"\tIndexing complete, resulting in {len(scores_indexed)} records.")
return scores_indexed
def _check_view_tree_catalogue(self, base_path: str,
view_catalogue: pd.DataFrame,
tree_catalogue: pd.DataFrame,
scores_indexed: pd.DataFrame) -> bool:
"""
Check whether all necessary view are accounted for and
present in the data-set.
:param base_path: Base path where the data-set exists.
:param view_catalogue: Catalogue containing all information
about the views.
:param tree_catalogue: Catalogue containing all information
about trees.
:param scores_indexed: Indexed scores for trees and views.
:return: Returns True if all necessary views are present.
"""
self.__l.info(f"Checking view catalogue with {len(view_catalogue)} views...")
tree_count = len(view_catalogue.index.unique(level=0))
tree_variant_count = len(view_catalogue.index.unique(level=1))
view_count = len(view_catalogue.index.unique(level=2))
view_variant_count = len(view_catalogue.index.unique(level=3))
view_type_count = len(view_catalogue.index.unique(level=4))
expected_view_count = tree_count * tree_variant_count * view_count * view_variant_count * view_type_count
if len(view_catalogue) != expected_view_count:
self.__l.warning(f"\tView catalogue does not contain all expected "
f"views ({len(view_catalogue)} / {expected_view_count})!")
#return False
# Check views:
if len(view_catalogue) < 1000:
for index, view in view_catalogue.iterrows():
if not os.path.isfile(f"{base_path}/{view.path}"):
self.__l.warning(f"\tView catalogue contains non-existent view "
f"\"{view.path}\"!")
return False
if view.json_path and not os.path.isfile(f"{base_path}/{view.json_path}"):
self.__l.warning(f"\tView catalogue contains non-existent json description "
f"\"{view.json_path}\"!")
return False
else:
self.__l.warning(f"\tSkipping view catalog checking since it has {len(view_catalogue)} items!")
self.__l.info(f"\tView catalogue successfully checked!")
self.__l.info(f"Checking tree catalogue with {len(tree_catalogue)} trees...")
# Check .tree files:
for index, tree in tree_catalogue.iterrows():
if not os.path.isfile(f"{base_path}/{tree.path}"):
self.__l.warning(f"\tView catalogue contains non-existent tree "
f"\"{tree.path}\"!")
return False
if tree.json_path and not os.path.isfile(f"{base_path}/{tree.json_path}"):
self.__l.warning(f"\tView catalogue contains non-existent json description "
f"\"{tree.json_path}\"!")
return False
self.__l.info(f"\tTree catalogue successfully checked!")
return True
def _prepare_spherical_knots(self, variant_jsons: dict,
tree_scores: pd.DataFrame) -> (dict, np.array):
""" Prepare tree view knot points for spherical interpolation. """
base_view_json = variant_jsons[(0, 0)]
base_height = base_view_json["state"]["camera"]["height"]["base"]
base_distance = base_view_json["state"]["camera"]["distance"]["base"]
origin_pos = np.array([ 0.0, 0.0, 0.0 ])
bottom_pos = np.array([ 0.0, -base_distance, 0.0 ])
top_pos = np.array([ 0.0, base_distance, 0.0 ])
base_pos = np.array([ base_distance, base_height, 0.0 ])
scores = tree_scores.set_index("view_id")
knot_dict = {
view_id: {
"score": scores.loc[view_id].jod,
"pos": scpt.Rotation.from_euler(
"XYZ", variant_json["tree"]["rotation"],
degrees=False
).apply(variant_json["camera"]["pos"])
}
for (view_id, variant_id), variant_json in variant_jsons.items()
if variant_id == 0
}
knot_dict[-3] = {
"score": scores.loc[-1].jod,
"pos": origin_pos
}
knot_dict[-2] = {
"score": scores.loc[-1].jod,
"pos": bottom_pos
}
knot_dict[-1] = {
"score": scores.loc[-1].jod,
"pos": top_pos
}
knots = np.array([
[spherical[1], spherical[2], score["score"]]
for view_id, score in knot_dict.items()
for spherical in [carthesian_to_spherical(score["pos"])]
])
return knot_dict, knots
def _prepare_spherical_lut(self, knots: np.array,
method: str) -> (object, dict):
""" Calculate spherical interpolation look-up table for given knots. """
if method == "rbf":
lut = scpi.Rbf(knots[:, 0], knots[:, 1], knots[:, 2], function="multiquadric")
lut_kws = { }
if method == "wrap_rbf":
def great_circle_distance(u: np.array, v: np.array) -> float:
""" Calculate great circle distance. """
u_lats, v_lats = u[1], v[1]
u_lons, v_lons = u[0], v[0]
delta_lons = np.abs(v_lons - u_lons)
return np.arctan2(
np.sqrt(
(np.cos(v_lats) * np.sin(delta_lons)) ** 2.0 +
(np.cos(u_lats) * np.sin(v_lats) - np.sin(u_lats) * np.cos(v_lats) * np.cos(delta_lons)) ** 2.0
),
np.sin(u_lats) * np.sin(v_lats) + np.cos(u_lats) * np.cos(v_lats) * np.cos(delta_lons)
)
def wrap_around_norm(u: np.array, v: np.array) -> np.array:
return great_circle_distance(u, v)
lut = scpi.Rbf(knots[:, 0], knots[:, 1], knots[:, 2], function="gaussian", norm=wrap_around_norm)
lut_kws = { }
elif method == "smooth":
lut = scpi.SmoothSphereBivariateSpline(knots[:, 0], knots[:, 1], knots[:, 2], s=32.0)
lut_kws = { "grid": False }
elif method == "rect":
orig_resolution = (
np.where(knots[1:, 0] == knots[0, 0])[0][0] + 1,
np.where((knots[1:, 1] - knots[:-1, 1]) > 0.0)[0][0] + 1
)
fit_knots = [
knots[:orig_resolution[0], 0],
knots[::orig_resolution[1], 1],
knots[:, 2].reshape((orig_resolution[0], orig_resolution[1]))
]
fit_knots[0][0] += 0.0001
fit_knots[1][-1] -= 0.0001
lut = scpi.RectSphereBivariateSpline(fit_knots[0], fit_knots[1], fit_knots[2],
pole_continuity=False)
lut_kws = { "grid": False }
elif method == "lsq":
orig_resolution = (
np.where(knots[1:, 0] == knots[0, 0])[0][0] + 1,
np.where((knots[1:, 1] - knots[:-1, 1]) > 0.0)[0][0] + 1
)
fit_knots = [
knots[:orig_resolution[0], 0],
knots[::orig_resolution[1], 1],
knots[:, 2].reshape((orig_resolution[0], orig_resolution[1]))
]
fit_knots[0][0] += 0.0001
fit_knots[0][-1] -= 0.0001
fit_knots[1][0] += 0.0001
fit_knots[1][-1] -= 0.0001
lut = scpi.LSQSphereBivariateSpline(knots[:, 0], knots[:, 1], knots[:, 2], fit_knots[0],
fit_knots[1])
lut_kws = { "grid": False }
        else:
            raise ValueError(f"Unknown spherical interpolation method '{method}'!")
        return lut, lut_kws
def _prepare_spherical_smooth_grid(self, knots: np.array,
lut: object, lut_kws: dict,
resolution: tuple, visualize: bool = False,
visualize_knots: bool = False) -> (np.array, np.array):
""" Calculate smooth grid using provided knot points and a look-up table. """
smooth_grid = np.meshgrid(np.linspace(0.0, np.pi, resolution[0]),
np.linspace(0.0, 2.0 * np.pi, resolution[1]))
smooth_data = np.reshape(lut(smooth_grid[0].ravel(), smooth_grid[1].ravel(), **lut_kws), resolution)
if visualize:
fig = plt.figure(figsize=(8, 4))
ax = fig.add_subplot()
vmin, vmax = np.min(smooth_data), np.max(smooth_data)
cmap = plt.get_cmap("viridis")
ax.imshow(smooth_data, origin="lower", extent=(0.0, np.pi, 0.0, 2.0 * np.pi),
interpolation="nearest", cmap=cmap, vmin=vmin, vmax=vmax)
if visualize_knots:
ax.scatter(knots[:, 0], knots[:, 1], c=knots[:, 2], s=45,
cmap=cmap, vmin=vmin, vmax=vmax)
ax.scatter(knots[:, 0], knots[:, 1], c="red", s=5)
ax.set_xlabel("Phi")
ax.set_ylabel("Theta")
plt.show()
return smooth_data, smooth_grid
def _prepare_spherical_map(self, knots: np.array,
resolutions: List[tuple],
methods: List[str],
visualize_map: bool = False,
visualize_views: bool = False,
visualize_view_count: int = 10) -> (object, dict):
""" Create final look-up table for spherical coordinate views, mapping to scores. """
if len(resolutions) == 0 or len(resolutions) != len(methods):
return None, { }
current_knots = knots
for idx, (resolution, method) in enumerate(zip(resolutions, methods)):
is_first = idx == 0
is_last = idx == len(resolutions) - 1
lut, lut_kws = self._prepare_spherical_lut(
knots=current_knots, method=method
)
if not is_last:
smooth_data, smooth_grid = self._prepare_spherical_smooth_grid(
knots=current_knots, lut=lut, lut_kws=lut_kws,
resolution=resolution,
visualize=visualize_map,
visualize_knots=is_first
)
current_knots = np.array([
[ phi, theta, score ]
for phi, theta, score in
zip(smooth_grid[0].ravel(), smooth_grid[1].ravel(), smooth_data.ravel())
])
if visualize_views:
smooth_data, smooth_grid = self._prepare_spherical_smooth_grid(
knots=current_knots, lut=lut, lut_kws=lut_kws,
resolution=resolutions[-1],
visualize=visualize_map,
visualize_knots=False
)
points = np.array([
spherical_to_carthesian([ 1.0, phi, theta ])
for phi, theta, score in
zip(smooth_grid[0].ravel(), smooth_grid[1].ravel(), smooth_data.ravel())
])
colors = np.array([
score
for phi, theta, score in
zip(smooth_grid[0].ravel(), smooth_grid[1].ravel(), smooth_data.ravel())
])
colors = (colors - np.min(colors)) / (np.max(colors) - np.min(colors))
cmap = plt.get_cmap("viridis")
fig = plt.figure(figsize=(4 * visualize_view_count, 4))
for idx, rotation in enumerate(np.linspace(0.0, 360.0, visualize_view_count + 1)[:-1]):
ax = fig.add_subplot(1, visualize_view_count, idx + 1, projection="3d")
ax.plot_surface(points[:, 0].reshape(smooth_data.shape),
points[:, 1].reshape(smooth_data.shape),
points[:, 2].reshape(smooth_data.shape),
rstride=1, cstride=1,
facecolors=cmap(colors.reshape(smooth_data.shape)))
ax.set_axis_off()
ax.view_init(0, rotation)
plt.show()
return lut, lut_kws
def _prepare_spherical_scores(self, variant_jsons: dict,
tree_scores: pd.DataFrame,
lut: object, lut_kws: dict) -> dict:
""" Calculate interpolated variant scores using look-up table. """
scores = tree_scores.set_index("view_id")
spherical_scores = {
(view_id, variant_id): {
"car_pos": view_pos,
"sph_pos": sph_pos,
"base_score": scores.loc[view_id].jod,
"score": lut(sph_pos[0], sph_pos[1], **lut_kws)
}
for (view_id, variant_id), variant_json in variant_jsons.items()
for view_pos in [
scpt.Rotation.from_euler(
"XYZ", variant_json["tree"]["rotation"],
degrees=False
).apply(variant_json["camera"]["pos"])
]
for sph_pos in [ carthesian_to_spherical(view_pos) ]
}
# Add spherical score for the complete tree.
spherical_scores[( -1, 0 )] = {
"car_pos": np.array([ 0.0, 0.0, 0.0 ]),
"sph_pos": carthesian_to_spherical(np.array([ 0.0, 0.0, 0.0 ])),
"base_score": scores.loc[-1].jod,
"score": scores.loc[-1].jod,
}
return spherical_scores
def _prepare_spherical_indexed_scores(self, base_path: str,
view_catalogue: pd.DataFrame,
tree_catalogue: pd.DataFrame,
scores_indexed: pd.DataFrame) -> pd.DataFrame:
"""
Augment indexed score data-frame with spherical interpolation
for each view/tree variant.
:param base_path: Base path where the data-set exists.
:param view_catalogue: Catalogue containing all information
about the views.
:param tree_catalogue: Catalogue containing all information
about trees.
:param scores_indexed: Indexed scores for trees and views.
:return: Returns data-frame indexed by ("tree_id", "view_id",
"view_variant_id"), where view_id == -1 contains data for
the whole tree. Result contains following columns:
* tree_id, view_id, view_variant_id - Integer index for
unique tree/view and the specific view variant.
* jod, jod_low, jod_high, jod_var - JOD properties.
"""
if len(scores_indexed) <= 0:
self.__l.info(f"Input scores are empty, returning empty spherical scores frame!")
return BaseDataLoader._create_empty_spherical_indexed_scores()
all_views = view_catalogue.reset_index()
all_scores = scores_indexed.reset_index()
tree_ids = scores_indexed.index.unique(level=0)
# TODO - Support tree variants.
tree_variant = 0
spherical_indexed_scores = [ ]
self.__l.info(f"Preparing spherical indexed scores for {len(tree_catalogue)} trees...")
loading_progress = LoadingBar("", max=len(tree_catalogue))
for tree_id in tree_ids:
# Calculate interpolations for each tree.
tree_views = all_views[
(all_views.tree_id == tree_id) &
(all_views.tree_variant_id == tree_variant)
]
tree_scores = all_scores[
(all_scores.tree_id == tree_id) &
(all_scores.tree_variant_id == tree_variant)
]
# Prepare variants and load descriptions.
variant_jsons = { }
variants = set()
for idx, row in tree_views.iterrows():
variants.add(( row.tree_id, row.tree_variant_id, row.view_id, row.view_variant_id ))
if (row.view_id, row.view_variant_id) in variant_jsons or row.json_path == "":
continue
with open(f"{base_path}/{row.json_path}", "r") as jf:
variant_jsons[(row.view_id, row.view_variant_id)] = json.load(jf)
# Add variant for the complete tree.
variants.add(( tree_id, 0, -1, 0 ))
if len(variant_jsons) == 0:
# No variants or missing json descriptions -> Use existing scores.
for variant in variants:
scores = scores_indexed.loc[(variant[0], variant[1], variant[2], variant[3])]
spherical_indexed_scores.append((
variant[0], variant[1], variant[2], variant[3],
# Use the same JOD for variant as the base.
scores.jod,
scores.jod, scores.jod_low, scores.jod_high, scores.jod_var
))
continue
# Sanity check, we should always have at least view, variant with ID (0, 0).
assert((0, 0) in variant_jsons)
# Calculate spherical interpolation map.
knot_dict, knots = self._prepare_spherical_knots(
variant_jsons=variant_jsons, tree_scores=tree_scores
)
lut, lut_kws = self._prepare_spherical_map(
# TODO - Parameterize this by script arguments.
knots=knots,
resolutions=[ (36, 36), (72, 72) ],
methods=[ "wrap_rbf", "rbf" ],
visualize_map=False, visualize_views=False,
visualize_view_count=10
)
# Interpolate variant scores using the spherical map.
spherical_scores = self._prepare_spherical_scores(
variant_jsons=variant_jsons,
tree_scores=tree_scores,
lut=lut, lut_kws=lut_kws
)
# Save results.
for variant in variants:
scores = scores_indexed.loc[(variant[0], variant[1], variant[2], 0)]
new_scores = spherical_scores[(variant[2], variant[3])]
assert(scores.jod == new_scores["base_score"])
spherical_indexed_scores.append((
variant[0], variant[1], variant[2], variant[3],
# Use the new interpolated JOD score.
new_scores["score"],
scores.jod, scores.jod_low, scores.jod_high, scores.jod_var
))
loading_progress.next(1)
loading_progress.finish()
spherical_scores_indexed = pd.DataFrame(
data=spherical_indexed_scores,
columns=(
"tree_id", "tree_variant_id", "view_id", "view_variant_id",
"jod", "base_jod", "jod_low", "jod_high", "jod_var"
)
)
spherical_scores_indexed.set_index(["tree_id", "tree_variant_id", "view_id", "view_variant_id"], inplace=True)
spherical_scores_indexed.sort_index(inplace=True)
self.__l.info(f"\tDone, prepared {len(spherical_indexed_scores)} spherical scores.")
return spherical_scores_indexed
def _load_tree_data(self, base_path: str = "",
tree_catalogue: pd.DataFrame = pd.DataFrame(),
load_node_data: bool = True,
allow_preloaded: bool = False) -> Dict[Tuple[int, int], TreeFile]:
""" Load all tree data files from given catalogue. """
self.__l.info(f"Loading tree data from {len(tree_catalogue)} .tree files...")
tree_data = { }
parsing_progress = ParsingBar("", max=len(tree_catalogue))
for index, tree in tree_catalogue.iterrows():
if allow_preloaded and tree.data is not None:
tree_data[index] = tree.data
else:
tree_data[index] = TreeFile(
file_path=f"{base_path}/{tree.path}",
load_node=load_node_data,
) if tree.path else None
parsing_progress.next(1)
parsing_progress.finish()
self.__l.info(f"\tDone, loaded {len(tree_data)} tree files.")
return tree_data
def _determine_available_features(self,
view_catalogue: pd.DataFrame,
tree_data: Dict[Tuple[int, int], TreeFile],
load_node_data: bool) -> dict:
""" Create a hierarchy of feature names available for use. """
self.__l.info(f"Determining available features...")
available_features = {
"stat": np.unique([
name
for tree in tree_data.values()
if tree is not None and "stats" in tree.dynamic_meta_data
for name, item in tree.dynamic_meta_data["stats"].items()
if TreeStatistic.is_stat_dict(item)
]),
"image": np.unique([
name
for tree in tree_data.values()
if tree is not None and "stats" in tree.dynamic_meta_data and "visual" in tree.dynamic_meta_data["stats"]
for name, item in tree.dynamic_meta_data["stats"]["visual"].items()
if TreeImage.is_image_dict(item)
]),
"other": dict_of_lists([
v.split(".") for v in np.unique([
f"{name}.{element}"
for tree in tree_data.values()
if tree is not None and "stats" in tree.dynamic_meta_data
for name, item in tree.dynamic_meta_data["stats"].items()
if not TreeStatistic.is_stat_dict(item) and name != "visual"
for element in item.keys()
])
]),
"view": view_catalogue.reset_index().view_type.unique(),
# TODO - Detect available skeleton features?
"skeleton": [ "segment", "position", "thickness" ] if load_node_data else [ ]
}
totals = { name: len(features) for name, features in available_features.items() }
self.__l.info(f"\tDone, found { totals } available features.")
return available_features
def _load_empty(self):
""" Load empty data definitions. """
self._full_results = self._create_empty_results()
self._results = self._generate_reduced_results(
full_results=self._full_results
)
self._users = self._create_empty_users()
self._full_scores = self._create_empty_scores()
self._scores = self._generate_reduced_scores(
full_scores=self._full_scores
)
self._full_scores_indexed = self._create_empty_indexed_scores()
self._scores_indexed = self._generate_reduced_scores_indexed(
full_scores_indexed=self._full_scores_indexed
)
self._full_view_catalogue = self._create_empty_view_catalogue()
self._view_catalogue = self._generate_reduced_view_catalogue(
full_view_catalogue=self._full_view_catalogue
)
self._full_tree_catalogue = self._create_empty_tree_catalogue()
self._tree_catalogue = self._generate_reduced_tree_catalogue(
full_tree_catalogue=self._full_tree_catalogue
)
self._spherical_scores_indexed = self._create_empty_spherical_indexed_scores()
self._tree_data = self._create_empty_tree_data()
self._available_features = self._create_empty_available_features()
self._view_base_path = self._create_empty_dataset_path()
self._dataset_meta = self._create_empty_dataset_meta()
def _load_as_dataset(self, dataset_path: str, use_dithered: bool,
use_augment: bool, use_augment_variants: Optional[int]):
""" Load data as a pre-exported data-set. """
results_path = f"{dataset_path}/results.csv"
if not os.path.isfile(results_path):
raise RuntimeError(f"Dataset at \"{dataset_path}\" does not contain results.csv!")
users_path = f"{dataset_path}/users.csv"
if not os.path.isfile(users_path):
raise RuntimeError(f"Dataset at \"{dataset_path}\" does not contain users.csv!")
scores_path = f"{dataset_path}/scores.csv"
if not os.path.isfile(scores_path):
raise RuntimeError(f"Dataset at \"{dataset_path}\" does not contain scores.csv!")
scores_indexed_path = f"{dataset_path}/scores_indexed.csv"
if not os.path.isfile(scores_indexed_path):
self.__l.warning(f"Dataset at \"{dataset_path}\" does not contain scores_indexed.csv, using a dummy!")
dummy_df = BaseDataLoader._create_empty_indexed_scores()
dummy_df.to_csv(scores_indexed_path, sep=";", index=True)
spherical_scores_indexed_path = f"{dataset_path}/spherical_scores_indexed.csv"
if not os.path.isfile(spherical_scores_indexed_path):
self.__l.warning(f"Dataset at \"{dataset_path}\" does not contain spherical_scores_indexed.csv, using a dummy!")
dummy_df = BaseDataLoader._create_empty_spherical_indexed_scores()
dummy_df.to_csv(spherical_scores_indexed_path, sep=";", index=True)
view_catalogue_path = f"{dataset_path}/view_catalogue.csv"
if not os.path.isfile(view_catalogue_path):
raise RuntimeError(f"Dataset at \"{dataset_path}\" does not contain view_catalogue.csv!")
tree_catalogue_path = f"{dataset_path}/tree_catalogue.csv"
if not os.path.isfile(tree_catalogue_path):
raise RuntimeError(f"Dataset at \"{dataset_path}\" does not contain tree_catalogue.csv!")
dataset_meta_path = f"{dataset_path}/dataset_meta.json"
if not os.path.isfile(dataset_meta_path):
raise RuntimeError(f"Dataset at \"{dataset_path}\" does not contain dataset_meta.json!")
results = pd.read_csv(results_path, sep=";")
if "first_view_variant_id" not in results:
# Old-style dataset -> Add new columns:
results["first_tree_variant_id"] = 0
results["first_view_variant_id"] = 0
results["second_tree_variant_id"] = 0
results["second_view_variant_id"] = 0
users = pd.read_csv(users_path, sep=";")
users.set_index(["worker_id"], inplace=True)
scores = pd.read_csv(scores_path, sep=";")
if "tree_variant_id" not in scores:
# Old-style dataset -> Add new columns:
scores["tree_variant_id"] = 0
scores["tree_id"] = scores["tree_id"].astype(int)
scores.set_index(["tree_id", "tree_variant_id"])
view_catalogue = pd.read_csv(view_catalogue_path, sep=";")
view_catalogue["data"] = None
if "view_variant_id" not in view_catalogue:
# Old-style dataset -> Add new columns:
view_catalogue["tree_variant_id"] = 0
view_catalogue["view_variant_id"] = 0
view_catalogue["json_path"] = ""
        view_catalogue.loc[pd.isna(view_catalogue.json_path), "json_path"] = ""
# Import dependencies
from bs4 import BeautifulSoup
import requests
from splinter import Browser
from webdriver_manager.chrome import ChromeDriverManager
import pandas as pd
def init_browser():
# Note: Replace the path with your actual path to the chromedriver
executable_path = {'executable_path': '/usr/local/bin/chromedriver'}
return Browser('chrome', **executable_path, headless=False)
'''
# @NOTE: Replace the path with your actual path to the chromedriver
executable_path = {'executable_path': '/usr/local/bin/chromedriver'}
browser = Browser('chrome', **executable_path, headless=False)'''
def scrape():
scraped_data={}
output=NASA_Mars_News()
scraped_data['mars_news_title']=output[0]
scraped_data['mars_paragraph']=output[1]
scraped_data['mars_image']=NASA_Mars_Image()
scraped_data['mars_facts']=NASA_Mars_Facts()
scraped_data['mars_hemisphere']=NASA_Mars_Hemispheres()
return scraped_data
def NASA_Mars_News():
browser=init_browser()
url = 'https://mars.nasa.gov/news/'
browser.visit(url)
html = browser.html
# create a Beautifulsoup object and parse with 'html.parser'
soup = BeautifulSoup(html,'html.parser')
latest_news_article_title = soup.find("div", class_='list_text').find('a').text
latest_news_article_paragraph = soup.find("div", class_='article_teaser_body').text
output=[latest_news_article_title,latest_news_article_paragraph]
browser.quit()
return output
def NASA_Mars_Image():
browser = init_browser()
url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
browser.visit(url)
html = browser.html
# create a Beautifulsoup object and parse with 'html.parser'
soup = BeautifulSoup(html,'html.parser')
image = soup.find('div',class_='carousel_items')
image_url = image.article['style']
new_image_url = image_url.split(' ')[1].split("(")[1].split(" ' ")[0][1:-3]
featured_image_url = "https://www.jpl.nasa.gov" + new_image_url
browser.quit()
return featured_image_url
def NASA_Mars_Facts():
browser = init_browser()
url = 'https://space-facts.com/mars/'
browser.visit(url)
mars_facts = pd.read_html(url)
    mars_facts_df = pd.DataFrame(mars_facts[0])
import pandas as pd
from sequence.app.random_request import RandomRequestApp
from sequence.kernel.timeline import Timeline
from sequence.topology.node import QuantumRouter, BSMNode
from sequence.topology.topology import Topology
if __name__ == "__main__":
# Experiment params and config
network_config_file = "example/starlight.json"
runtime = 1e15
tl = Timeline(runtime)
tl.seed(1)
network_topo = Topology("network_topo", tl)
network_topo.load_config(network_config_file)
# set memory parameters
MEMO_FREQ = 2e3
MEMO_EXPIRE = 1.3
MEMO_EFFICIENCY = 0.75
MEMO_FIDELITY = 0.9349367588934053
for name, node in network_topo.nodes.items():
if isinstance(node, QuantumRouter):
node.memory_array.update_memory_params("frequency", MEMO_FREQ)
node.memory_array.update_memory_params("coherence_time", MEMO_EXPIRE)
node.memory_array.update_memory_params("efficiency", MEMO_EFFICIENCY)
node.memory_array.update_memory_params("raw_fidelity", MEMO_FIDELITY)
# set detector parameters
DETECTOR_EFFICIENCY = 0.8
DETECTOR_COUNT_RATE = 5e7
DETECTOR_RESOLUTION = 100
for name, node in network_topo.nodes.items():
if isinstance(node, BSMNode):
node.bsm.update_detectors_params("efficiency", DETECTOR_EFFICIENCY)
node.bsm.update_detectors_params("count_rate", DETECTOR_COUNT_RATE)
node.bsm.update_detectors_params("time_resolution", DETECTOR_RESOLUTION)
# set quantum channel parameters
ATTENUATION = 0.0002
QC_FREQ = 1e11
for qc in network_topo.qchannels:
qc.attenuation = ATTENUATION
qc.frequency = QC_FREQ
# set entanglement swapping parameters
SWAP_SUCC_PROB = 0.64
SWAP_DEGRADATION = 0.99
for name, node in network_topo.nodes.items():
if isinstance(node, QuantumRouter):
node.network_manager.protocol_stack[1].set_swapping_success_rate(SWAP_SUCC_PROB)
node.network_manager.protocol_stack[1].set_swapping_degradation(SWAP_DEGRADATION)
nodes_name = []
for name, node in network_topo.nodes.items():
if isinstance(node, QuantumRouter):
nodes_name.append(name)
apps = []
for i, name in enumerate(nodes_name):
app_node_name = name
others = nodes_name[:]
others.remove(app_node_name)
app = RandomRequestApp(network_topo.nodes[app_node_name], others, i,
min_dur=1e13, max_dur=2e13, min_size=10,
max_size=25, min_fidelity=0.8, max_fidelity=1.0)
apps.append(app)
app.start()
tl.init()
tl.run()
for app in apps:
print(app.node.name)
print(" ", len(app.get_wait_time()))
print(" ", app.get_wait_time())
throughput = app.get_throughput()
print(" ", app.reserves)
print(" ", throughput)
initiators = []
responders = []
start_times = []
end_times = []
memory_sizes = []
fidelities = []
wait_times = []
throughputs = []
for node in network_topo.nodes.values():
if isinstance(node, QuantumRouter):
initiator = node.name
reserves = node.app.reserves
_wait_times = node.app.get_wait_time()
_throughputs = node.app.get_all_throughput()
min_size = min(len(reserves), len(_wait_times), len(_throughputs))
reserves = reserves[:min_size]
_wait_times = _wait_times[:min_size]
_throughputs = _throughputs[:min_size]
for reservation, wait_time, throughput in zip(reserves, _wait_times, _throughputs):
responder, s_t, e_t, size, fidelity = reservation
initiators.append(initiator)
responders.append(responder)
start_times.append(s_t)
end_times.append(e_t)
memory_sizes.append(size)
fidelities.append(fidelity)
wait_times.append(wait_time)
throughputs.append(throughput)
log = {"Initiator": initiators, "Responder": responders, "Start_time": start_times, "End_time": end_times,
"Memory_size": memory_sizes, "Fidelity": fidelities, "Wait_time": wait_times, "Throughput": throughputs}
    df = pd.DataFrame(log)
"""
Generic data algorithms. This module is experimental at the moment and not
intended for public consumption
"""
from __future__ import annotations
import operator
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Literal,
Union,
cast,
final,
)
from warnings import warn
import numpy as np
from pandas._libs import (
algos,
hashtable as htable,
iNaT,
lib,
)
from pandas._typing import (
AnyArrayLike,
ArrayLike,
DtypeObj,
Scalar,
TakeIndexer,
npt,
)
from pandas.util._decorators import doc
from pandas.core.dtypes.cast import (
construct_1d_object_array_from_listlike,
infer_dtype_from_array,
sanitize_to_nanoseconds,
)
from pandas.core.dtypes.common import (
ensure_float64,
ensure_object,
ensure_platform_int,
is_array_like,
is_bool_dtype,
is_categorical_dtype,
is_complex_dtype,
is_datetime64_dtype,
is_extension_array_dtype,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import PandasDtype
from pandas.core.dtypes.generic import (
ABCDatetimeArray,
ABCExtensionArray,
ABCIndex,
ABCMultiIndex,
ABCRangeIndex,
ABCSeries,
ABCTimedeltaArray,
)
from pandas.core.dtypes.missing import (
isna,
na_value_for_dtype,
)
from pandas.core.array_algos.take import take_nd
from pandas.core.construction import (
array as pd_array,
ensure_wrapped_if_datetimelike,
extract_array,
)
from pandas.core.indexers import validate_indices
if TYPE_CHECKING:
from pandas._typing import (
NumpySorter,
NumpyValueArrayLike,
)
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
TimedeltaArray,
)
_shared_docs: dict[str, str] = {}
# --------------- #
# dtype access #
# --------------- #
def _ensure_data(values: ArrayLike) -> np.ndarray:
"""
routine to ensure that our data is of the correct
input dtype for lower-level routines
This will coerce:
- ints -> int64
- uint -> uint64
- bool -> uint64 (TODO this should be uint8)
- datetimelike -> i8
- datetime64tz -> i8 (in local tz)
- categorical -> codes
Parameters
----------
values : np.ndarray or ExtensionArray
Returns
-------
np.ndarray
"""
if not isinstance(values, ABCMultiIndex):
# extract_array would raise
values = extract_array(values, extract_numpy=True)
# we check some simple dtypes first
if is_object_dtype(values.dtype):
return ensure_object(np.asarray(values))
elif is_bool_dtype(values.dtype):
if isinstance(values, np.ndarray):
# i.e. actually dtype == np.dtype("bool")
return np.asarray(values).view("uint8")
else:
# i.e. all-bool Categorical, BooleanArray
try:
return np.asarray(values).astype("uint8", copy=False)
except TypeError:
# GH#42107 we have pd.NAs present
return np.asarray(values)
elif is_integer_dtype(values.dtype):
return np.asarray(values)
elif is_float_dtype(values.dtype):
# Note: checking `values.dtype == "float128"` raises on Windows and 32bit
# error: Item "ExtensionDtype" of "Union[Any, ExtensionDtype, dtype[Any]]"
# has no attribute "itemsize"
if values.dtype.itemsize in [2, 12, 16]: # type: ignore[union-attr]
# we dont (yet) have float128 hashtable support
return ensure_float64(values)
return np.asarray(values)
elif is_complex_dtype(values.dtype):
# Incompatible return value type (got "Tuple[Union[Any, ExtensionArray,
# ndarray[Any, Any]], Union[Any, ExtensionDtype]]", expected
# "Tuple[ndarray[Any, Any], Union[dtype[Any], ExtensionDtype]]")
return values # type: ignore[return-value]
# datetimelike
elif needs_i8_conversion(values.dtype):
if isinstance(values, np.ndarray):
values = sanitize_to_nanoseconds(values)
npvalues = values.view("i8")
npvalues = cast(np.ndarray, npvalues)
return npvalues
elif is_categorical_dtype(values.dtype):
values = cast("Categorical", values)
values = values.codes
return values
# we have failed, return object
values = np.asarray(values, dtype=object)
return ensure_object(values)
def _reconstruct_data(
values: ArrayLike, dtype: DtypeObj, original: AnyArrayLike
) -> ArrayLike:
"""
reverse of _ensure_data
Parameters
----------
values : np.ndarray or ExtensionArray
dtype : np.dtype or ExtensionDtype
original : AnyArrayLike
Returns
-------
ExtensionArray or np.ndarray
"""
if isinstance(values, ABCExtensionArray) and values.dtype == dtype:
# Catch DatetimeArray/TimedeltaArray
return values
if not isinstance(dtype, np.dtype):
# i.e. ExtensionDtype
cls = dtype.construct_array_type()
if isinstance(values, cls) and values.dtype == dtype:
return values
values = cls._from_sequence(values)
elif is_bool_dtype(dtype):
values = values.astype(dtype, copy=False)
# we only support object dtypes bool Index
if isinstance(original, ABCIndex):
values = values.astype(object, copy=False)
elif dtype is not None:
if is_datetime64_dtype(dtype):
dtype = np.dtype("datetime64[ns]")
elif is_timedelta64_dtype(dtype):
dtype = np.dtype("timedelta64[ns]")
values = values.astype(dtype, copy=False)
return values
def _ensure_arraylike(values) -> ArrayLike:
"""
ensure that we are arraylike if not already
"""
if not is_array_like(values):
inferred = lib.infer_dtype(values, skipna=False)
if inferred in ["mixed", "string", "mixed-integer"]:
# "mixed-integer" to ensure we do not cast ["ss", 42] to str GH#22160
if isinstance(values, tuple):
values = list(values)
values = construct_1d_object_array_from_listlike(values)
else:
values = np.asarray(values)
return values
_hashtables = {
"complex128": htable.Complex128HashTable,
"complex64": htable.Complex64HashTable,
"float64": htable.Float64HashTable,
"float32": htable.Float32HashTable,
"uint64": htable.UInt64HashTable,
"uint32": htable.UInt32HashTable,
"uint16": htable.UInt16HashTable,
"uint8": htable.UInt8HashTable,
"int64": htable.Int64HashTable,
"int32": htable.Int32HashTable,
"int16": htable.Int16HashTable,
"int8": htable.Int8HashTable,
"string": htable.StringHashTable,
"object": htable.PyObjectHashTable,
}
def _get_hashtable_algo(values: np.ndarray):
"""
Parameters
----------
values : np.ndarray
Returns
-------
htable : HashTable subclass
values : ndarray
"""
values = _ensure_data(values)
ndtype = _check_object_for_strings(values)
htable = _hashtables[ndtype]
return htable, values
def _get_values_for_rank(values: ArrayLike) -> np.ndarray:
if is_categorical_dtype(values):
values = cast("Categorical", values)._values_for_rank()
values = _ensure_data(values)
if values.dtype.kind in ["i", "u", "f"]:
# rank_t includes only object, int64, uint64, float64
dtype = values.dtype.kind + "8"
values = values.astype(dtype, copy=False)
return values
def get_data_algo(values: ArrayLike):
values = _get_values_for_rank(values)
ndtype = _check_object_for_strings(values)
htable = _hashtables.get(ndtype, _hashtables["object"])
return htable, values
def _check_object_for_strings(values: np.ndarray) -> str:
"""
Check if we can use string hashtable instead of object hashtable.
Parameters
----------
values : ndarray
Returns
-------
str
"""
ndtype = values.dtype.name
if ndtype == "object":
# it's cheaper to use a String Hash Table than Object; we infer
# including nulls because that is the only difference between
# StringHashTable and ObjectHashtable
if lib.infer_dtype(values, skipna=False) in ["string"]:
ndtype = "string"
return ndtype
# --------------- #
# top-level algos #
# --------------- #
def unique(values):
"""
Hash table-based unique. Uniques are returned in order
of appearance. This does NOT sort.
Significantly faster than numpy.unique for long enough sequences.
Includes NA values.
Parameters
----------
values : 1d array-like
Returns
-------
numpy.ndarray or ExtensionArray
The return can be:
* Index : when the input is an Index
* Categorical : when the input is a Categorical dtype
* ndarray : when the input is a Series/ndarray
Return numpy.ndarray or ExtensionArray.
See Also
--------
Index.unique : Return unique values from an Index.
Series.unique : Return unique values of Series object.
Examples
--------
>>> pd.unique(pd.Series([2, 1, 3, 3]))
array([2, 1, 3])
>>> pd.unique(pd.Series([2] + [1] * 5))
array([2, 1])
>>> pd.unique(pd.Series([pd.Timestamp("20160101"), pd.Timestamp("20160101")]))
array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')
>>> pd.unique(
... pd.Series(
... [
... pd.Timestamp("20160101", tz="US/Eastern"),
... pd.Timestamp("20160101", tz="US/Eastern"),
... ]
... )
... )
<DatetimeArray>
['2016-01-01 00:00:00-05:00']
Length: 1, dtype: datetime64[ns, US/Eastern]
>>> pd.unique(
... pd.Index(
... [
... pd.Timestamp("20160101", tz="US/Eastern"),
... pd.Timestamp("20160101", tz="US/Eastern"),
... ]
... )
... )
DatetimeIndex(['2016-01-01 00:00:00-05:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
>>> pd.unique(list("baabc"))
array(['b', 'a', 'c'], dtype=object)
An unordered Categorical will return categories in the
order of appearance.
>>> pd.unique(pd.Series(pd.Categorical(list("baabc"))))
['b', 'a', 'c']
Categories (3, object): ['a', 'b', 'c']
>>> pd.unique(pd.Series(pd.Categorical(list("baabc"), categories=list("abc"))))
['b', 'a', 'c']
Categories (3, object): ['a', 'b', 'c']
An ordered Categorical preserves the category ordering.
>>> pd.unique(
... pd.Series(
... pd.Categorical(list("baabc"), categories=list("abc"), ordered=True)
... )
... )
['b', 'a', 'c']
Categories (3, object): ['a' < 'b' < 'c']
An array of tuples
>>> pd.unique([("a", "b"), ("b", "a"), ("a", "c"), ("b", "a")])
array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object)
"""
values = _ensure_arraylike(values)
if is_extension_array_dtype(values.dtype):
# Dispatch to extension dtype's unique.
return values.unique()
original = values
htable, values = _get_hashtable_algo(values)
table = htable(len(values))
uniques = table.unique(values)
uniques = _reconstruct_data(uniques, original.dtype, original)
return uniques
unique1d = unique
def isin(comps: AnyArrayLike, values: AnyArrayLike) -> npt.NDArray[np.bool_]:
"""
Compute the isin boolean array.
Parameters
----------
comps : array-like
values : array-like
Returns
-------
ndarray[bool]
Same length as `comps`.
"""
if not is_list_like(comps):
raise TypeError(
"only list-like objects are allowed to be passed "
f"to isin(), you passed a [{type(comps).__name__}]"
)
if not is_list_like(values):
raise TypeError(
"only list-like objects are allowed to be passed "
f"to isin(), you passed a [{type(values).__name__}]"
)
if not isinstance(values, (ABCIndex, ABCSeries, ABCExtensionArray, np.ndarray)):
values = _ensure_arraylike(list(values))
elif isinstance(values, ABCMultiIndex):
# Avoid raising in extract_array
values = np.array(values)
else:
values = extract_array(values, extract_numpy=True, extract_range=True)
comps = _ensure_arraylike(comps)
comps = extract_array(comps, extract_numpy=True)
if not isinstance(comps, np.ndarray):
# i.e. Extension Array
return comps.isin(values)
elif needs_i8_conversion(comps.dtype):
# Dispatch to DatetimeLikeArrayMixin.isin
return pd_array(comps).isin(values)
elif needs_i8_conversion(values.dtype) and not is_object_dtype(comps.dtype):
# e.g. comps are integers and values are datetime64s
return np.zeros(comps.shape, dtype=bool)
# TODO: not quite right ... Sparse/Categorical
elif needs_i8_conversion(values.dtype):
return isin(comps, values.astype(object))
elif is_extension_array_dtype(values.dtype):
return isin(np.asarray(comps), np.asarray(values))
# GH16012
# Ensure np.in1d doesn't get object types or it *may* throw an exception
# Albeit hashmap has O(1) look-up (vs. O(logn) in sorted array),
# in1d is faster for small sizes
if len(comps) > 1_000_000 and len(values) <= 26 and not is_object_dtype(comps):
# If the values include nan we need to check for nan explicitly
# since np.nan it not equal to np.nan
if isna(values).any():
def f(c, v):
return np.logical_or(np.in1d(c, v), np.isnan(c))
else:
f = np.in1d
else:
# error: List item 0 has incompatible type "Union[Any, dtype[Any],
# ExtensionDtype]"; expected "Union[dtype[Any], None, type, _SupportsDType, str,
# Tuple[Any, Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any,
# Any]]"
# error: List item 1 has incompatible type "Union[Any, ExtensionDtype]";
# expected "Union[dtype[Any], None, type, _SupportsDType, str, Tuple[Any,
# Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]"
# error: List item 1 has incompatible type "Union[dtype[Any], ExtensionDtype]";
# expected "Union[dtype[Any], None, type, _SupportsDType, str, Tuple[Any,
# Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]"
common = np.find_common_type(
[values.dtype, comps.dtype], [] # type: ignore[list-item]
)
values = values.astype(common, copy=False)
comps = comps.astype(common, copy=False)
f = htable.ismember
return f(comps, values)
def factorize_array(
values: np.ndarray,
na_sentinel: int = -1,
size_hint: int | None = None,
na_value=None,
mask: np.ndarray | None = None,
) -> tuple[npt.NDArray[np.intp], np.ndarray]:
"""
Factorize a numpy array to codes and uniques.
This doesn't do any coercion of types or unboxing before factorization.
Parameters
----------
values : ndarray
na_sentinel : int, default -1
size_hint : int, optional
Passed through to the hashtable's 'get_labels' method
na_value : object, optional
A value in `values` to consider missing. Note: only use this
parameter when you know that you don't have any values pandas would
consider missing in the array (NaN for float data, iNaT for
datetimes, etc.).
mask : ndarray[bool], optional
If not None, the mask is used as indicator for missing values
(True = missing, False = valid) instead of `na_value` or
condition "val != val".
Returns
-------
codes : ndarray[np.intp]
uniques : ndarray
"""
hash_klass, values = get_data_algo(values)
table = hash_klass(size_hint or len(values))
uniques, codes = table.factorize(
values, na_sentinel=na_sentinel, na_value=na_value, mask=mask
)
codes = ensure_platform_int(codes)
return codes, uniques
@doc(
values=dedent(
"""\
values : sequence
A 1-D sequence. Sequences that aren't pandas objects are
coerced to ndarrays before factorization.
"""
),
sort=dedent(
"""\
sort : bool, default False
Sort `uniques` and shuffle `codes` to maintain the
relationship.
"""
),
size_hint=dedent(
"""\
size_hint : int, optional
Hint to the hashtable sizer.
"""
),
)
def factorize(
values,
sort: bool = False,
na_sentinel: int | None = -1,
size_hint: int | None = None,
) -> tuple[np.ndarray, np.ndarray | Index]:
"""
Encode the object as an enumerated type or categorical variable.
This method is useful for obtaining a numeric representation of an
array when all that matters is identifying distinct values. `factorize`
is available as both a top-level function :func:`pandas.factorize`,
and as a method :meth:`Series.factorize` and :meth:`Index.factorize`.
Parameters
----------
{values}{sort}
na_sentinel : int or None, default -1
Value to mark "not found". If None, will not drop the NaN
from the uniques of the values.
.. versionchanged:: 1.1.2
{size_hint}\
Returns
-------
codes : ndarray
An integer ndarray that's an indexer into `uniques`.
``uniques.take(codes)`` will have the same values as `values`.
uniques : ndarray, Index, or Categorical
The unique valid values. When `values` is Categorical, `uniques`
is a Categorical. When `values` is some other pandas object, an
`Index` is returned. Otherwise, a 1-D ndarray is returned.
.. note ::
Even if there's a missing value in `values`, `uniques` will
*not* contain an entry for it.
See Also
--------
cut : Discretize continuous-valued array.
unique : Find the unique value in an array.
Examples
--------
These examples all show factorize as a top-level method like
``pd.factorize(values)``. The results are identical for methods like
:meth:`Series.factorize`.
>>> codes, uniques = pd.factorize(['b', 'b', 'a', 'c', 'b'])
>>> codes
array([0, 0, 1, 2, 0]...)
>>> uniques
array(['b', 'a', 'c'], dtype=object)
With ``sort=True``, the `uniques` will be sorted, and `codes` will be
    shuffled so that the relationship is maintained.
>>> codes, uniques = pd.factorize(['b', 'b', 'a', 'c', 'b'], sort=True)
>>> codes
array([1, 1, 0, 2, 1]...)
>>> uniques
array(['a', 'b', 'c'], dtype=object)
Missing values are indicated in `codes` with `na_sentinel`
(``-1`` by default). Note that missing values are never
included in `uniques`.
>>> codes, uniques = pd.factorize(['b', None, 'a', 'c', 'b'])
>>> codes
array([ 0, -1, 1, 2, 0]...)
>>> uniques
array(['b', 'a', 'c'], dtype=object)
Thus far, we've only factorized lists (which are internally coerced to
NumPy arrays). When factorizing pandas objects, the type of `uniques`
will differ. For Categoricals, a `Categorical` is returned.
>>> cat = pd.Categorical(['a', 'a', 'c'], categories=['a', 'b', 'c'])
>>> codes, uniques = pd.factorize(cat)
>>> codes
array([0, 0, 1]...)
>>> uniques
['a', 'c']
Categories (3, object): ['a', 'b', 'c']
Notice that ``'b'`` is in ``uniques.categories``, despite not being
present in ``cat.values``.
For all other pandas objects, an Index of the appropriate type is
returned.
>>> cat = pd.Series(['a', 'a', 'c'])
>>> codes, uniques = pd.factorize(cat)
>>> codes
array([0, 0, 1]...)
>>> uniques
Index(['a', 'c'], dtype='object')
If NaN is in the values, and we want to include NaN in the uniques of the
values, it can be achieved by setting ``na_sentinel=None``.
>>> values = np.array([1, 2, 1, np.nan])
>>> codes, uniques = pd.factorize(values) # default: na_sentinel=-1
>>> codes
array([ 0, 1, 0, -1])
>>> uniques
array([1., 2.])
>>> codes, uniques = pd.factorize(values, na_sentinel=None)
>>> codes
array([0, 1, 0, 2])
>>> uniques
array([ 1., 2., nan])
"""
# Implementation notes: This method is responsible for 3 things
# 1.) coercing data to array-like (ndarray, Index, extension array)
# 2.) factorizing codes and uniques
# 3.) Maybe boxing the uniques in an Index
#
# Step 2 is dispatched to extension types (like Categorical). They are
# responsible only for factorization. All data coercion, sorting and boxing
# should happen here.
if isinstance(values, ABCRangeIndex):
return values.factorize(sort=sort)
values = _ensure_arraylike(values)
original = values
if not isinstance(values, ABCMultiIndex):
values = extract_array(values, extract_numpy=True)
# GH35667, if na_sentinel=None, we will not dropna NaNs from the uniques
# of values, assign na_sentinel=-1 to replace code value for NaN.
dropna = True
if na_sentinel is None:
na_sentinel = -1
dropna = False
if (
isinstance(values, (ABCDatetimeArray, ABCTimedeltaArray))
and values.freq is not None
):
codes, uniques = values.factorize(sort=sort)
if isinstance(original, ABCIndex):
uniques = original._shallow_copy(uniques, name=None)
elif isinstance(original, ABCSeries):
from pandas import Index
uniques = Index(uniques)
return codes, uniques
if not isinstance(values.dtype, np.dtype):
# i.e. ExtensionDtype
codes, uniques = values.factorize(na_sentinel=na_sentinel)
dtype = original.dtype
else:
dtype = values.dtype
values = _ensure_data(values)
na_value: Scalar
if original.dtype.kind in ["m", "M"]:
# Note: factorize_array will cast NaT bc it has a __int__
# method, but will not cast the more-correct dtype.type("nat")
na_value = iNaT
else:
na_value = None
codes, uniques = factorize_array(
values, na_sentinel=na_sentinel, size_hint=size_hint, na_value=na_value
)
if sort and len(uniques) > 0:
uniques, codes = safe_sort(
uniques, codes, na_sentinel=na_sentinel, assume_unique=True, verify=False
)
code_is_na = codes == na_sentinel
if not dropna and code_is_na.any():
# na_value is set based on the dtype of uniques, and compat set to False is
# because we do not want na_value to be 0 for integers
na_value = na_value_for_dtype(uniques.dtype, compat=False)
uniques = np.append(uniques, [na_value])
codes = np.where(code_is_na, len(uniques) - 1, codes)
uniques = _reconstruct_data(uniques, dtype, original)
# return original tenor
if isinstance(original, ABCIndex):
if original.dtype.kind in ["m", "M"] and isinstance(uniques, np.ndarray):
original._data = cast(
"Union[DatetimeArray, TimedeltaArray]", original._data
)
uniques = type(original._data)._simple_new(uniques, dtype=original.dtype)
uniques = original._shallow_copy(uniques, name=None)
elif isinstance(original, ABCSeries):
from pandas import Index
uniques = Index(uniques)
return codes, uniques
def value_counts(
values,
sort: bool = True,
ascending: bool = False,
normalize: bool = False,
bins=None,
dropna: bool = True,
) -> Series:
"""
Compute a histogram of the counts of non-null values.
Parameters
----------
values : ndarray (1-d)
sort : bool, default True
Sort by values
ascending : bool, default False
Sort in ascending order
    normalize : bool, default False
If True then compute a relative histogram
bins : integer, optional
Rather than count values, group them into half-open bins,
convenience for pd.cut, only works with numeric data
dropna : bool, default True
Don't include counts of NaN
Returns
-------
Series
"""
from pandas.core.series import Series
name = getattr(values, "name", None)
if bins is not None:
from pandas.core.reshape.tile import cut
values = Series(values)
try:
ii = cut(values, bins, include_lowest=True)
except TypeError as err:
raise TypeError("bins argument only works with numeric data.") from err
        # count, remove nulls (from the index), and use the bins
result = ii.value_counts(dropna=dropna)
result = result[result.index.notna()]
result.index = result.index.astype("interval")
result = result.sort_index()
# if we are dropna and we have NO values
if dropna and (result._values == 0).all():
result = result.iloc[0:0]
# normalizing is by len of all (regardless of dropna)
counts = np.array([len(ii)])
else:
if is_extension_array_dtype(values):
# handle Categorical and sparse,
result = Series(values)._values.value_counts(dropna=dropna)
result.name = name
counts = result._values
else:
keys, counts = value_counts_arraylike(values, dropna)
result = Series(counts, index=keys, name=name)
if sort:
result = result.sort_values(ascending=ascending)
if normalize:
result = result / counts.sum()
return result
# Called once from SparseArray, otherwise could be private
def value_counts_arraylike(values, dropna: bool):
"""
Parameters
----------
values : arraylike
dropna : bool
Returns
-------
uniques : np.ndarray or ExtensionArray
counts : np.ndarray
"""
values = _ensure_arraylike(values)
original = values
values = _ensure_data(values)
# TODO: handle uint8
keys, counts = htable.value_count(values, dropna)
if needs_i8_conversion(original.dtype):
# datetime, timedelta, or period
if dropna:
msk = keys != iNaT
keys, counts = keys[msk], counts[msk]
res_keys = _reconstruct_data(keys, original.dtype, original)
return res_keys, counts
def duplicated(
values: ArrayLike, keep: Literal["first", "last", False] = "first"
) -> npt.NDArray[np.bool_]:
"""
Return boolean ndarray denoting duplicate values.
Parameters
----------
values : nd.array, ExtensionArray or Series
Array over which to check for duplicate values.
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the first
occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last
occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : ndarray[bool]
"""
values = _ensure_data(values)
return htable.duplicated(values, keep=keep)
def mode(values, dropna: bool = True) -> Series:
"""
Returns the mode(s) of an array.
Parameters
----------
values : array-like
Array over which to check for duplicate values.
dropna : bool, default True
Don't consider counts of NaN/NaT.
Returns
-------
mode : Series
"""
from pandas import Series
from pandas.core.indexes.api import default_index
values = _ensure_arraylike(values)
original = values
# categorical is a fast-path
if is_categorical_dtype(values):
if isinstance(values, Series):
# TODO: should we be passing `name` below?
return Series(values._values.mode(dropna=dropna), name=values.name)
return values.mode(dropna=dropna)
if dropna and needs_i8_conversion(values.dtype):
mask = values.isnull()
values = values[~mask]
values = _ensure_data(values)
npresult = htable.mode(values, dropna=dropna)
try:
npresult = np.sort(npresult)
except TypeError as err:
warn(f"Unable to sort modes: {err}")
result = _reconstruct_data(npresult, original.dtype, original)
# Ensure index is type stable (should always use int index)
return Series(result, index=default_index(len(result)))
def rank(
values: ArrayLike,
axis: int = 0,
method: str = "average",
na_option: str = "keep",
ascending: bool = True,
pct: bool = False,
) -> np.ndarray:
"""
Rank the values along a given axis.
Parameters
----------
values : array-like
Array whose values will be ranked. The number of dimensions in this
array must not exceed 2.
axis : int, default 0
Axis over which to perform rankings.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
The method by which tiebreaks are broken during the ranking.
na_option : {'keep', 'top'}, default 'keep'
The method by which NaNs are placed in the ranking.
- ``keep``: rank each NaN value with a NaN ranking
        - ``top``: replace each NaN with either +/- inf so that they
          are ranked at the top
ascending : bool, default True
Whether or not the elements should be ranked in ascending order.
pct : bool, default False
        Whether or not to display the returned rankings in integer form
(e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1).
"""
is_datetimelike = needs_i8_conversion(values.dtype)
values = _get_values_for_rank(values)
if values.ndim == 1:
ranks = algos.rank_1d(
values,
is_datetimelike=is_datetimelike,
ties_method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
elif values.ndim == 2:
ranks = algos.rank_2d(
values,
axis=axis,
is_datetimelike=is_datetimelike,
ties_method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
else:
raise TypeError("Array with ndim > 2 are not supported.")
return ranks
def checked_add_with_arr(
arr: np.ndarray,
b,
arr_mask: npt.NDArray[np.bool_] | None = None,
b_mask: npt.NDArray[np.bool_] | None = None,
) -> np.ndarray:
"""
Perform array addition that checks for underflow and overflow.
Performs the addition of an int64 array and an int64 integer (or array)
but checks that they do not result in overflow first. For elements that
are indicated to be NaN, whether or not there is overflow for that element
is automatically ignored.
Parameters
----------
arr : array addend.
b : array or scalar addend.
arr_mask : np.ndarray[bool] or None, default None
array indicating which elements to exclude from checking
b_mask : np.ndarray[bool] or None, default None
array or scalar indicating which element(s) to exclude from checking
Returns
-------
sum : An array for elements x + b for each element x in arr if b is
a scalar or an array for elements x + y for each element pair
(x, y) in (arr, b).
Raises
------
OverflowError if any x + y exceeds the maximum or minimum int64 value.
"""
# For performance reasons, we broadcast 'b' to the new array 'b2'
# so that it has the same size as 'arr'.
b2 = np.broadcast_to(b, arr.shape)
if b_mask is not None:
# We do the same broadcasting for b_mask as well.
b2_mask = np.broadcast_to(b_mask, arr.shape)
else:
b2_mask = None
# For elements that are NaN, regardless of their value, we should
# ignore whether they overflow or not when doing the checked add.
if arr_mask is not None and b2_mask is not None:
not_nan = np.logical_not(arr_mask | b2_mask)
elif arr_mask is not None:
not_nan = np.logical_not(arr_mask)
elif b_mask is not None:
not_nan = np.logical_not(b2_mask)
else:
not_nan = np.empty(arr.shape, dtype=bool)
not_nan.fill(True)
# gh-14324: For each element in 'arr' and its corresponding element
# in 'b2', we check the sign of the element in 'b2'. If it is positive,
# we then check whether its sum with the element in 'arr' exceeds
# np.iinfo(np.int64).max. If so, we have an overflow error. If it
    # is negative, we then check whether its sum with the element in
# 'arr' exceeds np.iinfo(np.int64).min. If so, we have an overflow
# error as well.
i8max = lib.i8max
i8min = iNaT
mask1 = b2 > 0
mask2 = b2 < 0
if not mask1.any():
to_raise = ((i8min - b2 > arr) & not_nan).any()
elif not mask2.any():
to_raise = ((i8max - b2 < arr) & not_nan).any()
else:
to_raise = ((i8max - b2[mask1] < arr[mask1]) & not_nan[mask1]).any() or (
(i8min - b2[mask2] > arr[mask2]) & not_nan[mask2]
).any()
if to_raise:
raise OverflowError("Overflow in int64 addition")
return arr + b
def quantile(x, q, interpolation_method="fraction"):
"""
Compute sample quantile or quantiles of the input array. For example, q=0.5
computes the median.
The `interpolation_method` parameter supports three values, namely
`fraction` (default), `lower` and `higher`. Interpolation is done only,
if the desired quantile lies between two data points `i` and `j`. For
`fraction`, the result is an interpolated value between `i` and `j`;
for `lower`, the result is `i`, for `higher` the result is `j`.
Parameters
----------
x : ndarray
Values from which to extract score.
q : scalar or array
Percentile at which to extract score.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
- fraction: `i + (j - i)*fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
        - lower: `i`.
- higher: `j`.
Returns
-------
score : float
Score at percentile.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
x = np.asarray(x)
mask = isna(x)
x = x[~mask]
values = np.sort(x)
def _interpolate(a, b, fraction):
"""
Returns the point at the given fraction between a and b, where
'fraction' must be between 0 and 1.
"""
return a + (b - a) * fraction
def _get_score(at):
if len(values) == 0:
return np.nan
idx = at * (len(values) - 1)
if idx % 1 == 0:
score = values[int(idx)]
else:
if interpolation_method == "fraction":
score = _interpolate(values[int(idx)], values[int(idx) + 1], idx % 1)
elif interpolation_method == "lower":
score = values[np.floor(idx)]
elif interpolation_method == "higher":
score = values[np.ceil(idx)]
else:
raise ValueError(
"interpolation_method can only be 'fraction' "
", 'lower' or 'higher'"
)
return score
if is_scalar(q):
return _get_score(q)
q = np.asarray(q, np.float64)
result = [_get_score(x) for x in q]
return np.array(result, dtype=np.float64)
# --------------- #
# select n #
# --------------- #
class SelectN:
def __init__(self, obj, n: int, keep: str):
self.obj = obj
self.n = n
self.keep = keep
if self.keep not in ("first", "last", "all"):
raise ValueError('keep must be either "first", "last" or "all"')
def compute(self, method: str) -> DataFrame | Series:
raise NotImplementedError
@final
def nlargest(self):
return self.compute("nlargest")
@final
def nsmallest(self):
return self.compute("nsmallest")
@final
@staticmethod
def is_valid_dtype_n_method(dtype: DtypeObj) -> bool:
"""
Helper function to determine if dtype is valid for
nsmallest/nlargest methods
"""
return (
is_numeric_dtype(dtype) and not is_complex_dtype(dtype)
) or needs_i8_conversion(dtype)
class SelectNSeries(SelectN):
"""
Implement n largest/smallest for Series
Parameters
----------
obj : Series
n : int
keep : {'first', 'last'}, default 'first'
Returns
-------
nordered : Series
"""
def compute(self, method: str) -> Series:
from pandas.core.reshape.concat import concat
n = self.n
dtype = self.obj.dtype
if not self.is_valid_dtype_n_method(dtype):
raise TypeError(f"Cannot use method '{method}' with dtype {dtype}")
if n <= 0:
return self.obj[[]]
dropped = self.obj.dropna()
nan_index = self.obj.drop(dropped.index)
if is_extension_array_dtype(dropped.dtype):
# GH#41816 bc we have dropped NAs above, MaskedArrays can use the
# numpy logic.
from pandas.core.arrays import BaseMaskedArray
arr = dropped._values
if isinstance(arr, BaseMaskedArray):
ser = type(dropped)(arr._data, index=dropped.index, name=dropped.name)
result = type(self)(ser, n=self.n, keep=self.keep).compute(method)
return result.astype(arr.dtype)
# slow method
if n >= len(self.obj):
ascending = method == "nsmallest"
return self.obj.sort_values(ascending=ascending).head(n)
# fast method
new_dtype = dropped.dtype
arr = _ensure_data(dropped.values)
if method == "nlargest":
arr = -arr
if is_integer_dtype(new_dtype):
# GH 21426: ensure reverse ordering at boundaries
arr -= 1
elif is_bool_dtype(new_dtype):
# GH 26154: ensure False is smaller than True
arr = 1 - (-arr)
if self.keep == "last":
arr = arr[::-1]
nbase = n
findex = len(self.obj)
narr = len(arr)
n = min(n, narr)
# arr passed into kth_smallest must be contiguous. We copy
# here because kth_smallest will modify its input
kth_val = algos.kth_smallest(arr.copy(order="C"), n - 1)
(ns,) = np.nonzero(arr <= kth_val)
inds = ns[arr[ns].argsort(kind="mergesort")]
if self.keep != "all":
inds = inds[:n]
findex = nbase
if self.keep == "last":
# reverse indices
inds = narr - 1 - inds
return concat([dropped.iloc[inds], nan_index]).iloc[:findex]
class SelectNFrame(SelectN):
"""
Implement n largest/smallest for DataFrame
Parameters
----------
obj : DataFrame
n : int
keep : {'first', 'last'}, default 'first'
columns : list or str
Returns
-------
nordered : DataFrame
"""
def __init__(self, obj, n: int, keep: str, columns):
super().__init__(obj, n, keep)
if not is_list_like(columns) or isinstance(columns, tuple):
columns = [columns]
columns = list(columns)
self.columns = columns
def compute(self, method: str) -> DataFrame:
from pandas.core.api import Int64Index
n = self.n
frame = self.obj
columns = self.columns
for column in columns:
dtype = frame[column].dtype
if not self.is_valid_dtype_n_method(dtype):
raise TypeError(
f"Column {repr(column)} has dtype {dtype}, "
f"cannot use method {repr(method)} with this dtype"
)
def get_indexer(current_indexer, other_indexer):
"""
Helper function to concat `current_indexer` and `other_indexer`
depending on `method`
"""
if method == "nsmallest":
return current_indexer.append(other_indexer)
else:
return other_indexer.append(current_indexer)
# Below we save and reset the index in case index contains duplicates
original_index = frame.index
cur_frame = frame = frame.reset_index(drop=True)
cur_n = n
indexer = Int64Index([])
for i, column in enumerate(columns):
# For each column we apply method to cur_frame[column].
# If it's the last column or if we have the number of
# results desired we are done.
# Otherwise there are duplicates of the largest/smallest
# value and we need to look at the rest of the columns
# to determine which of the rows with the largest/smallest
# value in the column to keep.
series = cur_frame[column]
is_last_column = len(columns) - 1 == i
values = getattr(series, method)(
cur_n, keep=self.keep if is_last_column else "all"
)
if is_last_column or len(values) <= cur_n:
indexer = get_indexer(indexer, values.index)
break
# Now find all values which are equal to
# the (nsmallest: largest)/(nlargest: smallest)
# from our series.
border_value = values == values[values.index[-1]]
# Some of these values are among the top-n
# some aren't.
unsafe_values = values[border_value]
# These values are definitely among the top-n
safe_values = values[~border_value]
indexer = get_indexer(indexer, safe_values.index)
# Go on and separate the unsafe_values on the remaining
# columns.
cur_frame = cur_frame.loc[unsafe_values.index]
cur_n = n - len(indexer)
frame = frame.take(indexer)
# Restore the index on frame
frame.index = original_index.take(indexer)
# If there is only one column, the frame is already sorted.
if len(columns) == 1:
return frame
ascending = method == "nsmallest"
return frame.sort_values(columns, ascending=ascending, kind="mergesort")
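# Illustrative tie-breaking behaviour of SelectNFrame above, which backs the
# public DataFrame.nlargest/nsmallest API (a sketch, not part of the original
# module; assumes pandas is imported as pd):
#   >>> df = pd.DataFrame({"a": [3, 3, 2, 1], "b": [10, 20, 30, 40]})
#   >>> df.nlargest(1, ["a", "b"])    # both rows with a == 3 tie on 'a' ...
#      a   b
#   1  3  20                          # ... so 'b' decides which row is kept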
# ---- #
# take #
# ---- #
def take(
arr,
indices: TakeIndexer,
axis: int = 0,
allow_fill: bool = False,
fill_value=None,
):
"""
Take elements from an array.
Parameters
----------
arr : array-like or scalar value
Non array-likes (sequences/scalars without a dtype) are coerced
to an ndarray.
indices : sequence of int or one-dimensional np.ndarray of int
Indices to be taken.
axis : int, default 0
The axis over which to select values.
allow_fill : bool, default False
How to handle negative values in `indices`.
* False: negative values in `indices` indicate positional indices
from the right (the default). This is similar to :func:`numpy.take`.
* True: negative values in `indices` indicate
missing values. These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
fill_value : any, optional
Fill value to use for NA-indices when `allow_fill` is True.
This may be ``None``, in which case the default NA value for
the type (``self.dtype.na_value``) is used.
For multi-dimensional `arr`, each *element* is filled with
`fill_value`.
Returns
-------
ndarray or ExtensionArray
Same type as the input.
Raises
------
IndexError
When `indices` is out of bounds for the array.
ValueError
When the indexer contains negative values other than ``-1``
and `allow_fill` is True.
Notes
-----
When `allow_fill` is False, `indices` may be whatever dimensionality
is accepted by NumPy for `arr`.
When `allow_fill` is True, `indices` should be 1-D.
See Also
--------
numpy.take : Take elements from an array along an axis.
Examples
--------
>>> from pandas.api.extensions import take
With the default ``allow_fill=False``, negative numbers indicate
positional indices from the right.
>>> take(np.array([10, 20, 30]), [0, 0, -1])
array([10, 10, 30])
Setting ``allow_fill=True`` will place `fill_value` in those positions.
>>> take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True)
array([10., 10., nan])
>>> take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True,
... fill_value=-10)
array([ 10, 10, -10])
"""
if not is_array_like(arr):
arr = np.asarray(arr)
indices = np.asarray(indices, dtype=np.intp)
if allow_fill:
# Pandas style, -1 means NA
validate_indices(indices, arr.shape[axis])
result = take_nd(
arr, indices, axis=axis, allow_fill=True, fill_value=fill_value
)
else:
# NumPy style
result = arr.take(indices, axis=axis)
return result
# ------------ #
# searchsorted #
# ------------ #
def searchsorted(
arr: ArrayLike,
value: NumpyValueArrayLike | ExtensionArray,
side: Literal["left", "right"] = "left",
sorter: NumpySorter = None,
) -> npt.NDArray[np.intp] | np.intp:
"""
Find indices where elements should be inserted to maintain order.
.. versionadded:: 0.25.0
Find the indices into a sorted array `arr` (a) such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `arr` would be preserved.
Assuming that `arr` is sorted:
====== ================================
`side` returned index `i` satisfies
====== ================================
    left ``arr[i-1] < value <= arr[i]``
    right ``arr[i-1] <= value < arr[i]``
====== ================================
Parameters
----------
    arr : np.ndarray, ExtensionArray, Series
Input array. If `sorter` is None, then it must be sorted in
ascending order, otherwise `sorter` must be an array of indices
that sort it.
value : array-like or scalar
Values to insert into `arr`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
        index, return either 0 or N (where N is the length of `arr`).
sorter : 1-D array-like, optional
Optional array of integer indices that sort array a into ascending
order. They are typically the result of argsort.
Returns
-------
array of ints or int
If value is array-like, array of insertion points.
If value is scalar, a single integer.
See Also
--------
numpy.searchsorted : Similar method from NumPy.
"""
if sorter is not None:
sorter = ensure_platform_int(sorter)
if (
isinstance(arr, np.ndarray)
and is_integer_dtype(arr.dtype)
and (is_integer(value) or is_integer_dtype(value))
):
# if `arr` and `value` have different dtypes, `arr` would be
# recast by numpy, causing a slow search.
# Before searching below, we therefore try to give `value` the
# same dtype as `arr`, while guarding against integer overflows.
iinfo = np.iinfo(arr.dtype.type)
value_arr = np.array([value]) if is_scalar(value) else np.array(value)
if (value_arr >= iinfo.min).all() and (value_arr <= iinfo.max).all():
# value within bounds, so no overflow, so can convert value dtype
# to dtype of arr
dtype = arr.dtype
else:
dtype = value_arr.dtype
if is_scalar(value):
# We know that value is int
value = cast(int, dtype.type(value))
else:
value = pd_array(cast(ArrayLike, value), dtype=dtype)
elif not (
is_object_dtype(arr) or is_numeric_dtype(arr) or is_categorical_dtype(arr)
):
# E.g. if `arr` is an array with dtype='datetime64[ns]'
# and `value` is a pd.Timestamp, we may need to convert value
arr = ensure_wrapped_if_datetimelike(arr)
# Argument 1 to "searchsorted" of "ndarray" has incompatible type
# "Union[NumpyValueArrayLike, ExtensionArray]"; expected "NumpyValueArrayLike"
return arr.searchsorted(value, side=side, sorter=sorter) # type: ignore[arg-type]
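# Illustrative effect of the dtype-matching fast path above (a sketch, not part
# of the original module):
#   >>> arr = np.arange(100, dtype=np.int8)
#   >>> searchsorted(arr, 50)      # 50 fits in int8 -> value cast to arr's dtype
#   50
#   >>> searchsorted(arr, 10_000)  # does not fit -> value keeps its own dtype
#   100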
# ---- #
# diff #
# ---- #
_diff_special = {"float64", "float32", "int64", "int32", "int16", "int8"}
def diff(arr, n: int, axis: int = 0, stacklevel: int = 3):
"""
    Compute the difference over n periods, analogous to s - s.shift(n).
Parameters
----------
arr : ndarray or ExtensionArray
n : int
number of periods
axis : {0, 1}
axis to shift on
stacklevel : int, default 3
The stacklevel for the lost dtype warning.
Returns
-------
shifted
"""
n = int(n)
na = np.nan
dtype = arr.dtype
is_bool = is_bool_dtype(dtype)
if is_bool:
op = operator.xor
else:
op = operator.sub
if isinstance(dtype, PandasDtype):
# PandasArray cannot necessarily hold shifted versions of itself.
arr = arr.to_numpy()
dtype = arr.dtype
if not isinstance(dtype, np.dtype):
# i.e ExtensionDtype
if hasattr(arr, f"__{op.__name__}__"):
if axis != 0:
raise ValueError(f"cannot diff {type(arr).__name__} on axis={axis}")
return op(arr, arr.shift(n))
else:
warn(
"dtype lost in 'diff()'. In the future this will raise a "
"TypeError. Convert to a suitable dtype prior to calling 'diff'.",
FutureWarning,
stacklevel=stacklevel,
)
arr = np.asarray(arr)
dtype = arr.dtype
is_timedelta = False
if needs_i8_conversion(arr.dtype):
dtype = np.int64
arr = arr.view("i8")
na = iNaT
is_timedelta = True
elif is_bool:
# We have to cast in order to be able to hold np.nan
dtype = np.object_
elif is_integer_dtype(dtype):
# We have to cast in order to be able to hold np.nan
# int8, int16 are incompatible with float64,
# see https://github.com/cython/cython/issues/2646
if arr.dtype.name in ["int8", "int16"]:
dtype = np.float32
else:
dtype = np.float64
orig_ndim = arr.ndim
if orig_ndim == 1:
# reshape so we can always use algos.diff_2d
arr = arr.reshape(-1, 1)
# TODO: require axis == 0
dtype = np.dtype(dtype)
out_arr = np.empty(arr.shape, dtype=dtype)
na_indexer = [slice(None)] * 2
na_indexer[axis] = slice(None, n) if n >= 0 else slice(n, None)
out_arr[tuple(na_indexer)] = na
if arr.dtype.name in _diff_special:
# TODO: can diff_2d dtype specialization troubles be fixed by defining
# out_arr inside diff_2d?
algos.diff_2d(arr, out_arr, n, axis, datetimelike=is_timedelta)
else:
# To keep mypy happy, _res_indexer is a list while res_indexer is
# a tuple, ditto for lag_indexer.
_res_indexer = [slice(None)] * 2
_res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n)
res_indexer = tuple(_res_indexer)
_lag_indexer = [slice(None)] * 2
_lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None)
lag_indexer = tuple(_lag_indexer)
out_arr[res_indexer] = op(arr[res_indexer], arr[lag_indexer])
if is_timedelta:
out_arr = out_arr.view("timedelta64[ns]")
if orig_ndim == 1:
out_arr = out_arr[:, 0]
return out_arr
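# Illustrative dtype handling of diff() above (a sketch, not part of the
# original module):
#   >>> diff(np.array([True, False, True, True]), 1)       # bool -> xor, object output
#   array([nan, True, True, False], dtype=object)
#   >>> diff(np.array([1, 2, 4], dtype=np.int8), 1).dtype  # int8 upcast to float32
#   dtype('float32')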
# --------------------------------------------------------------------
# Helper functions
# Note: safe_sort is in algorithms.py instead of sorting.py because it is
# low-dependency, is used in this module, and used private methods from
# this module.
def safe_sort(
values,
codes=None,
na_sentinel: int = -1,
assume_unique: bool = False,
verify: bool = True,
) -> np.ndarray | tuple[np.ndarray, np.ndarray]:
"""
Sort ``values`` and reorder corresponding ``codes``.
``values`` should be unique if ``codes`` is not None.
Safe for use with mixed types (int, str), orders ints before strs.
Parameters
----------
values : list-like
Sequence; must be unique if ``codes`` is not None.
codes : list_like, optional
Indices to ``values``. All out of bound indices are treated as
"not found" and will be masked with ``na_sentinel``.
na_sentinel : int, default -1
Value in ``codes`` to mark "not found".
Ignored when ``codes`` is None.
assume_unique : bool, default False
When True, ``values`` are assumed to be unique, which can speed up
the calculation. Ignored when ``codes`` is None.
verify : bool, default True
Check if codes are out of bound for the values and put out of bound
codes equal to na_sentinel. If ``verify=False``, it is assumed there
are no out of bound codes. Ignored when ``codes`` is None.
.. versionadded:: 0.25.0
Returns
-------
ordered : ndarray
Sorted ``values``
new_codes : ndarray
Reordered ``codes``; returned when ``codes`` is not None.
Raises
------
TypeError
* If ``values`` is not list-like or if ``codes`` is neither None
nor list-like
* If ``values`` cannot be sorted
ValueError
* If ``codes`` is not None and ``values`` contain duplicates.
"""
if not is_list_like(values):
raise TypeError(
"Only list-like objects are allowed to be passed to safe_sort as values"
)
if not isinstance(values, (np.ndarray, ABCExtensionArray)):
# don't convert to string types
dtype, _ = infer_dtype_from_array(values)
# error: Argument "dtype" to "asarray" has incompatible type "Union[dtype[Any],
# ExtensionDtype]"; expected "Union[dtype[Any], None, type, _SupportsDType, str,
# Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], List[Any],
# _DTypeDict, Tuple[Any, Any]]]"
values = np.asarray(values, dtype=dtype) # type: ignore[arg-type]
sorter = None
if (
not is_extension_array_dtype(values)
and lib.infer_dtype(values, skipna=False) == "mixed-integer"
):
ordered = _sort_mixed(values)
else:
try:
sorter = values.argsort()
ordered = values.take(sorter)
except TypeError:
# Previous sorters failed or were not applicable, try `_sort_mixed`
# which would work, but which fails for special case of 1d arrays
# with tuples.
if values.size and isinstance(values[0], tuple):
ordered = _sort_tuples(values)
else:
ordered = _sort_mixed(values)
# codes:
if codes is None:
return ordered
if not is_list_like(codes):
raise TypeError(
"Only list-like objects or None are allowed to "
"be passed to safe_sort as codes"
)
codes = ensure_platform_int(np.asarray(codes))
if not assume_unique and not len(unique(values)) == len(values):
raise ValueError("values should be unique if codes is not None")
if sorter is None:
# mixed types
hash_klass, values = get_data_algo(values)
t = hash_klass(len(values))
t.map_locations(values)
sorter = ensure_platform_int(t.lookup(ordered))
if na_sentinel == -1:
# take_nd is faster, but only works for na_sentinels of -1
order2 = sorter.argsort()
new_codes = take_nd(order2, codes, fill_value=-1)
if verify:
mask = (codes < -len(values)) | (codes >= len(values))
else:
mask = None
else:
reverse_indexer = np.empty(len(sorter), dtype=np.int_)
reverse_indexer.put(sorter, np.arange(len(sorter)))
# Out of bound indices will be masked with `na_sentinel` next, so we
# may deal with them here without performance loss using `mode='wrap'`
new_codes = reverse_indexer.take(codes, mode="wrap")
mask = codes == na_sentinel
if verify:
mask = mask | (codes < -len(values)) | (codes >= len(values))
if mask is not None:
np.putmask(new_codes, mask, na_sentinel)
return ordered, ensure_platform_int(new_codes)
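# Illustrative round trip for safe_sort() above (a sketch, not part of the
# original module): safe_sort(np.array([10, 30, 20]), codes=np.array([0, 1, 2, 5]))
# returns the values sorted as [10, 20, 30] and the codes remapped to
# [0, 2, 1, -1] -- each code still points at its original value, and the
# out-of-bounds code 5 is replaced by the na_sentinel (-1).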
def _sort_mixed(values) -> np.ndarray:
"""order ints before strings in 1d arrays, safe in py3"""
str_pos = np.array([isinstance(x, str) for x in values], dtype=bool)
nums = np.sort(values[~str_pos])
strs = np.sort(values[str_pos])
return np.concatenate([nums, np.asarray(strs, dtype=object)])
def _sort_tuples(values: np.ndarray) -> np.ndarray:
"""
    Convert array of tuples (1d) to array of arrays (2d).
We need to keep the columns separately as they contain different types and
nans (can't use `np.sort` as it may fail when str and nan are mixed in a
column as types cannot be compared).
"""
from pandas.core.internals.construction import to_arrays
from pandas.core.sorting import lexsort_indexer
arrays, _ = to_arrays(values, None)
    indexer = lexsort_indexer(arrays, orders=True)
# dataset.py
import audformat
#import audb
import pandas as pd
import ast
import os
from random import sample
from util import Util
from plots import Plots
import glob_conf
import configparser
import os.path
class Dataset:
""" Class to represent datasets"""
name = '' # An identifier for the dataset
config = None # The configuration
db = None # The database object
df = None # The whole dataframe
df_train = None # The training split
df_test = None # The evaluation split
def __init__(self, name):
"""Constructor setting up name and configuration"""
self.name = name
self.target = glob_conf.config['DATA']['target']
self.util = Util()
self.plot = Plots()
self.limit = int(self.util.config_val_data(self.name, 'limit', 0))
def _get_tables(self):
tables = []
targets = self.util.config_val_data(self.name, 'target_tables', False)
if targets:
target_tables = ast.literal_eval(targets)
tables += target_tables
files = self.util.config_val_data(self.name, 'files_tables', False)
if files:
files_tables = ast.literal_eval(files)
tables += files_tables
tests = self.util.config_val_data(self.name, 'test_tables', False)
if tests:
test_tables = ast.literal_eval(tests)
tables += test_tables
trains = self.util.config_val_data(self.name, 'train_tables', False)
if trains:
train_tables = ast.literal_eval(trains)
tables += train_tables
return tables
def load(self):
"""Load the dataframe with files, speakers and task labels"""
self.util.debug(f'{self.name}: loading ...')
store = self.util.get_path('store')
store_file = f'{store}{self.name}.pkl'
if os.path.isfile(store_file):
self.util.debug(f'{self.name}: reusing previously stored file {store_file}')
self.df = pd.read_pickle(store_file)
got_target = self.target in self.df
got_gender = 'gender' in self.df
got_speaker = 'speaker' in self.df
self.is_labeled = got_target
self.util.debug(f'{self.name}: loaded with {self.df.shape[0]} '\
f'samples: got targets: {got_target}, got speakers: {got_speaker}, '\
f'got sexes: {got_gender}')
return
root = self.util.config_val_data(self.name, '', '')
self.util.debug(f'{self.name}: loading from {root}')
try:
db = audformat.Database.load(root)
except FileNotFoundError:
self.util.error( f'{self.name}: no database found at {root}')
tables = self._get_tables()
self.util.debug(f'{self.name}: loading tables: {tables}')
#db = audb.load(root, )
# map the audio file paths
db.map_files(lambda x: os.path.join(root, x))
# the dataframes (potentially more than one) with at least the file names
df_files = self.util.config_val_data(self.name, 'files_tables', '[\'files\']')
df_files_tables = ast.literal_eval(df_files)
# The label for the target column
self.col_label = self.util.config_val_data(self.name, 'label', self.target)
df, got_target, got_speaker, got_gender = self._get_df_for_lists(db, df_files_tables)
if False in {got_target, got_speaker, got_gender}:
try :
# There might be a separate table with the targets, e.g. emotion or age
df_targets = self.util.config_val_data(self.name, 'target_tables', f'[\'{self.target}\']')
df_target_tables = ast.literal_eval(df_targets)
df_target, got_target2, got_speaker2, got_gender2 = self._get_df_for_lists(db, df_target_tables)
got_target = got_target2 or got_target
got_speaker = got_speaker2 or got_speaker
got_gender = got_gender2 or got_gender
if got_target2:
df[self.target] = df_target[self.target]
if got_speaker2:
df['speaker'] = df_target['speaker']
if got_gender2:
df['gender'] = df_target['gender']
except audformat.core.errors.BadKeyError:
pass
try:
# for experiments that do separate sex models
s = glob_conf.config['DATA']['sex']
df = df[df.gender==s]
except KeyError:
pass
if got_target:
# remember the target in case they get labelencoded later
df['class_label'] = df[self.target]
df.is_labeled = got_target
self.df = df
self.db = db
self.util.debug(f'{self.name}: loaded data with {df.shape[0]} '\
f'samples: got targets: {got_target}, got speakers: {got_speaker}, '\
f'got sexes: {got_gender}')
if self.util.config_val_data(self.name, 'value_counts', False):
if not got_gender or not got_speaker:
self.util.error('can\'t plot value counts if no speaker or gender is given')
else:
self.plot.describe_df(self.name, df, self.target, f'{self.name}_distplot.png')
self.is_labeled = got_target
self.df.is_labeled = self.is_labeled
# Perform some filtering if desired
required = self.util.config_val_data(self.name, 'required', False)
if required:
pre = self.df.shape[0]
self.df = self.df[self.df[required].notna()]
post = self.df.shape[0]
self.util.debug(f'{self.name}: kept {post} samples with {required} (from {pre}, filtered {pre-post})')
samples_per_speaker = self.util.config_val_data(self.name, 'max_samples_per_speaker', False)
if samples_per_speaker:
pre = self.df.shape[0]
self.df = self._limit_speakers(self.df, int(samples_per_speaker))
post = self.df.shape[0]
self.util.debug(f'{self.name}: kept {post} samples with {samples_per_speaker} per speaker (from {pre}, filtered {pre-post})')
if self.limit:
pre = self.df.shape[0]
self.df = self.df.head(self.limit)
post = self.df.shape[0]
            self.util.debug(f'{self.name}: limited to {post} samples (from {pre}, filtered {pre-post})')
# store the dataframe
self.df.to_pickle(store_file)
def _get_df_for_lists(self, db, df_files):
got_target, got_speaker, got_gender = False, False, False
df = pd.DataFrame()
for table in df_files:
source_df = db.tables[table].df
# create a dataframe with the index (the filenames)
df_local = pd.DataFrame(index=source_df.index)
# try to get the targets from this dataframe
try:
# try to get the target values
df_local[self.target] = source_df[self.col_label]
got_target = True
except (KeyError, ValueError, audformat.errors.BadKeyError) as e:
pass
try:
# try to get the speaker values
df_local['speaker'] = source_df['speaker']
got_speaker = True
except (KeyError, ValueError, audformat.errors.BadKeyError) as e:
pass
try:
# try to get the gender values
df_local['gender'] = source_df['gender']
got_gender = True
except (KeyError, ValueError, audformat.errors.BadKeyError) as e:
pass
try:
# also it might be possible that the sex is part of the speaker description
df_local['gender'] = db[table]['speaker'].get(map='gender')
got_gender = True
except (ValueError, audformat.errors.BadKeyError) as e:
pass
try:
# same for the target, e.g. "age"
df_local[self.target] = db[table]['speaker'].get(map=self.target)
got_target = True
except (ValueError, audformat.core.errors.BadKeyError) as e:
pass
df = df.append(df_local)
return df, got_target, got_speaker, got_gender
def _limit_speakers(self, df, max=20):
""" limit number of samples per speaker
the samples are selected randomly
"""
df_ret = pd.DataFrame()
for s in df.speaker.unique():
s_df = df[df['speaker'].eq(s)]
if s_df.shape[0] < max:
df_ret = df_ret.append(s_df)
else:
df_ret = df_ret.append(s_df.sample(max))
return df_ret
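    # Illustrative effect of _limit_speakers above (a sketch, not part of the
    # original class): for a dataframe with 30 rows of speaker 's1' and 5 rows
    # of speaker 's2', self._limit_speakers(df, max=20) keeps 20 randomly
    # sampled 's1' rows plus all 5 's2' rows.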
def split(self):
"""Split the datbase into train and development set"""
store = self.util.get_path('store')
storage_test = f'{store}{self.name}_testdf.pkl'
storage_train = f'{store}{self.name}_traindf.pkl'
split_strategy = self.util.config_val_data(self.name,'split_strategy', 'database')
# 'database' (default), 'speaker_split', 'specified', 'reuse'
if os.path.isfile(storage_test) and os.path.isfile(storage_train) and split_strategy != 'speaker_split':
self.util.debug(f'splits: reusing previously stored files {storage_test} and {storage_train}')
self.df_test = pd.read_pickle(storage_test)
self.df_train = pd.read_pickle(storage_train)
return
if split_strategy == 'database':
# use the splits from the database
testdf = self.db.tables[self.target+'.test'].df
traindf = self.db.tables[self.target+'.train'].df
# use only the train and test samples that were not perhaps filtered out by an earlier processing step
self.df_test = self.df.loc[self.df.index.intersection(testdf.index)]
self.df_train = self.df.loc[self.df.index.intersection(traindf.index)]
elif split_strategy == 'train':
self.df_train = self.df
self.df_test = pd.DataFrame()
elif split_strategy == 'test':
self.df_test = self.df
self.df_train = pd.DataFrame()
elif split_strategy == 'specified':
            traindf, testdf = pd.DataFrame(), pd.DataFrame()
from PriceIndices import Indices, MarketHistory
import pandas as pd
import numpy as np
history = MarketHistory()
def get_coin_data(crypto='bitcoin', start_date='20130428', end_date='20200501', save_data=None):
df = history.get_price(crypto, start_date, end_date)
    df_bi = Indices.get_bvol_index(df) # BitMEX Volatility Index
df_bi.drop('price', axis=1, inplace=True)
df_rsi = Indices.get_rsi(df) # Relative Strength Index
df_rsi.drop(['price', 'RS_Smooth', 'RSI_1'], axis=1, inplace=True)
df_sma = Indices.get_simple_moving_average(df) # Simple Moving Average
df_sma.drop(['price'], axis=1, inplace=True)
    df_bb = Indices.get_bollinger_bands(df) # Bollinger Bands
df_bb.drop(['price'], axis=1, inplace=True)
df_ema = Indices.get_exponential_moving_average(df, [20, 50]) # Exponential Moving Average
df_ema.drop(['price'], axis=1, inplace=True)
df_macd = Indices.get_moving_average_convergence_divergence(df) # Moving Average Convergence Divergence
df_macd.drop(['price',], axis=1, inplace=True)
    df = pd.merge(df, df_macd, on='date', how='left')
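# Illustrative usage (a sketch, not part of the original script), assuming the
# function above goes on to merge the remaining indicator frames on 'date' and
# return the combined dataframe (that part is not shown here):
#   btc = get_coin_data(crypto='bitcoin', start_date='20190101', end_date='20200101')
#   btc.head()  # price plus BVOL, RSI, SMA, Bollinger-band, EMA and MACD columns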
# pylint: disable=invalid-name
""" Tests for Evaluation class """
import unittest
import pandas as pd
from ftpvl.evaluation import Evaluation
from ftpvl.processors import MinusOne
from pandas.testing import assert_frame_equal
class TestEvaluation(unittest.TestCase):
"""
Testing by partition:
__init__()
get_df()
defensive copying
get_eval_num()
get_copy()
process(List[Processor])
0, 1, 1+ processors
__add__()
direct add
reverse add
sum
"""
def test_evaluation_get_df_equality(self):
"""
get_df() should return a dataframe with values equal to constructor
argument
"""
df = pd.DataFrame([{"a": 1, "b": 2}, {"a": 3, "b": 4}])
result = Evaluation(df).get_df()
self.assertIsInstance(result, pd.DataFrame)
self.assertTrue(result.equals(df))
def test_evaluation_get_df_defensive_copy(self):
"""
get_df() should return a copy of the constructor argument to prevent
caller from mutating the dataframe
"""
df = pd.DataFrame([{"a": 1, "b": 2}, {"a": 3, "b": 4}])
result = Evaluation(df).get_df()
result["a"][0] = 5 # mutate result
self.assertFalse(result.equals(df))
def test_evaluation_get_eval_id(self):
"""
get_eval_id() should return the eval_id specified when initialized
"""
df = pd.DataFrame([{"a": 1, "b": 2}, {"a": 3, "b": 4}])
result = Evaluation(df, eval_id=1000)
assert result.get_eval_id() == 1000
result = Evaluation(df, eval_id=0)
assert result.get_eval_id() == 0
def test_evaluation_get_copy(self):
"""
get_copy() should return a deep copy of Evaluation
"""
df = pd.DataFrame([{"a": 1, "b": 2}, {"a": 3, "b": 4}])
eval1 = Evaluation(df, eval_id=1000)
result = eval1.get_copy()
assert result.get_eval_id() == 1000
assert_frame_equal(result.get_df(), df)
# change original
df.iloc[0]["a"] = 0
# assert that copy has not changed
assert result.get_df().iloc[0]["a"] == 1
def test_evaluation_process_empty(self):
"""
Calling process() with an empty list of processors should return the
input DF without any changes.
"""
df = pd.DataFrame([{"a": 1, "b": 2}, {"a": 3, "b": 4}])
result = Evaluation(df).process([]).get_df()
self.assertTrue(result.equals(df))
def test_evaluation_process_single(self):
"""
Calling process() with one MinusOne should return a new df that
        subtracts one from every input value, but does not affect the input dataframe.
"""
pipeline = [MinusOne()]
df = pd.DataFrame([{"a": 1, "b": 2}, {"a": 3, "b": 4}])
result = Evaluation(df).process(pipeline).get_df()
expected = pd.DataFrame([{"a": 0, "b": 1}, {"a": 2, "b": 3}])
# check if input eval has been altered
self.assertFalse(result.equals(df))
self.assertFalse(df.equals(expected))
# check if output eval has correct values
self.assertTrue(result.equals(expected))
def test_evaluation_process_multiple(self):
"""
Calling process() with two MinusOne should return a new df that
        subtracts two from every input value, but does not affect the input dataframe.
"""
pipeline = [MinusOne(), MinusOne()]
df = pd.DataFrame([{"a": 1, "b": 2}, {"a": 3, "b": 4}])
result = Evaluation(df).process(pipeline).get_df()
expected = pd.DataFrame([{"a": -1, "b": 0}, {"a": 1, "b": 2}])
# check if input eval has been altered
self.assertFalse(result.equals(df))
self.assertFalse(df.equals(expected))
# check if output eval has correct values
self.assertTrue(result.equals(expected))
def test_evaluation_add(self):
"""
Using the + magic method should return a new Evaluation that consists
of the concatenated Evaluations.
"""
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 3, "b": 4}])
result1 = Evaluation(df1)
df2 = pd.DataFrame([{"a": 5, "b": 6}, {"a": 7, "b": 8}])
result2 = Evaluation(df2)
sum_result = result1 + result2
expected = pd.DataFrame([
{"a": 1, "b": 2},
{"a": 3, "b": 4},
{"a": 5, "b": 6},
{"a": 7, "b": 8}
])
assert_frame_equal(sum_result.get_df(), expected)
assert sum_result.get_eval_id() is None
def test_evaluation_add_different_columns(self):
"""
Using the + magic method should return a new Evaluation that consists
of the concatenated Evaluations, even if columns don't match
"""
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 3, "b": 4}])
result1 = Evaluation(df1)
df2 = pd.DataFrame([{"a": 5}, {"a": 7}])
result2 = Evaluation(df2)
sum_result = result1 + result2
expected = pd.DataFrame([
{"a": 1, "b": 2},
{"a": 3, "b": 4},
{"a": 5},
{"a": 7}
])
assert_frame_equal(sum_result.get_df(), expected)
assert sum_result.get_eval_id() is None
def test_evaluation_add_multiple(self):
"""
Using the sum() built-in function should return a new Evaluation
that consists of concatenated Evaluations
"""
eval1 = Evaluation(pd.DataFrame([{"a": 1, "b": 2}]))
eval2 = Evaluation(pd.DataFrame([{"a": 3, "b": 4}]))
        eval3 = Evaluation(pd.DataFrame([{"a": 5, "b": 6}]))
import pandas as pd
raw_csv_data = pd.read_csv('data/Absenteeism-data.csv')
df = raw_csv_data.copy()
# dropping the ID columns
df = df.drop(['ID'], axis=1)
# print(df.head())
# converting the categorical column reason for absence into dummy columns
reason_columns = pd.get_dummies(df['Reason for Absence'], drop_first=True)
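# Illustrative check (a sketch, not part of the original notebook): with
# drop_first=True, a categorical column with k distinct reasons yields k-1
# indicator columns, the dropped category acting as the baseline.
print(df['Reason for Absence'].nunique(), reason_columns.shape[1])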
#!/usr/bin/env python
import sys
import re
import pandas as pd
import datetime as dt
import numpy as np
def to_time(s):
return dt.datetime.strptime(s, '%Y-%m-%d %H:%M:%S,%f')
def main(filename):
detector_sizes = {}
bits_per_symbol = {}
# Format: (time string, block, uuid, samples, time elapsed)
vals = []
spy_errors = []
with open(filename, "r") as f:
i = 0
redetect = re.compile('([0-9\-]+ [0-9:,]+) :DEBUG: (packet detect) ([a-f0-9]{6}) [\w\s\(\)]+ ([0-9]+) samples in ([0-9\.]+) seconds')
reother = re.compile('([0-9\-]+ [0-9:,]+) :DEBUG: ([a-z2_\ ]+) ([a-f0-9]{6}) [\w\s]+ ([0-9]+) [\w\s]+ ([0-9\.\-e]+) seconds')
respy = re.compile('([0-9\-]+ [0-9:,]+) :DEBUG: ([a-z2_\ ]+) ([a-f0-9]{6}) spy ber ([0-9\.]+) above threshold ([0-9\.]+)')
for line in f:
# Read in initialization info
if i < 25:
s = re.search('DETECTOR ([a-f0-9]{6}): Packet Size = ([0-9]+) samples', line)
if s is not None:
detector_sizes[s.group(1)] = int(s.group(2))
s = re.search('([\w]+ [\w]+) ([a-f0-9]{6}): ([0-9]+) bits per symbol', line)
if s is not None:
bits_per_symbol[s.group(2)] = (int(s.group(3)), s.group(1))
i += 1
# Read runtime logging
det = redetect.search(line)
oth = reother.search(line)
spy = respy.search(line)
if det is not None:
# time, type, uuid, samples, telapsed
vals.append([to_time(det.group(1)), det.group(2), det.group(3),
int(det.group(4)), float(det.group(5))])
elif oth is not None:
# time, type, uuid, bits, telapsed
vals.append([to_time(oth.group(1)), oth.group(2), oth.group(3),
int(oth.group(4)), float(oth.group(5))])
if spy is not None:
# time, type, uuid, BER, cutoff
spy_errors.append([to_time(spy.group(1)), spy.group(2), spy.group(3),
float(spy.group(4)), float(spy.group(5))])
# Print initialization info
print("Detector Packet Sizes")
for k in detector_sizes:
print("\tdetector {} {}".format(k, detector_sizes[k]))
print("Bits Per Symbol")
for k in bits_per_symbol:
print("\t{} \t{} \t{}".format(bits_per_symbol[k][1], k, bits_per_symbol[k][0]))
# Analyze log body
# Processing time info
df = pd.DataFrame(vals, columns=['log_time', 'block', 'uuid', 'samples', 'time'])
vals = []
    for b in pd.unique(df.block):
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import pandas as pd
import re
import util
import os
import entrez as ez
import stats
import parallel
import xgmml
import db
import random
from six.moves import range
import setting
class Cache(object):
DATA_DIR=setting.go['DATA_DIR']
# share by all tax_id
CATEGORY={'LOCAL':None, 'GPDB':None, 'L1k':None}
GO_DESCRIPTION={'LOCAL':None, 'GPDB':None, 'L1k':None}
GO_CATEGORY={'LOCAL':None, 'GPDB':None, 'L1k':None}
# per tax_id
TOTAL_GENE_COUNT={'LOCAL':{}, 'GPDB':{}, 'L1k':{}}
ALL_GENE={'LOCAL':{}, 'GPDB':{}, 'L1k':{}}
CATEGORY_COUNT={'LOCAL':{}, 'GPDB':{}, 'L1k':{}}
GO_GENE_ENRICH={'LOCAL':{}, 'GPDB':{}, 'L1k':{}}
GO_GENE={'LOCAL':{}, 'GPDB':{}, 'L1k':{}}
GENE_GO={'LOCAL':{}, 'GPDB':{}, 'L1k':{}}
N_TRIVIAL=800
@staticmethod
def get(l_use_GPDB=True, tax_id=9606, l_L1k=False):
if tax_id==-9606: tax_id=9606
s_key=Cache.key(l_use_GPDB, l_L1k)
if l_L1k and tax_id!=9606:
util.error_msg('L1k is only for tax_id 9606!')
if tax_id not in Cache.TOTAL_GENE_COUNT[s_key]:
Cache.load(tax_id=tax_id, l_use_GPDB=l_use_GPDB, l_L1k=l_L1k)
#if l_L1k:
# Cache.loadL1k()
#else:
# Cache.load(tax_id=tax_id, l_use_GPDB=l_use_GPDB)
return (Cache.CATEGORY[s_key],
Cache.GO_DESCRIPTION[s_key],
Cache.GO_CATEGORY[s_key],
# per tax_id, above are shared across tax_id
Cache.TOTAL_GENE_COUNT[s_key][tax_id],
Cache.ALL_GENE[s_key][tax_id],
Cache.CATEGORY_COUNT[s_key][tax_id],
Cache.GO_GENE_ENRICH[s_key][tax_id],
Cache.GO_GENE[s_key][tax_id],
Cache.GENE_GO[s_key][tax_id]
)
@staticmethod
def info():
for s_key in ('LOCAL','GPDB', 'L1k'):
print(">Databases: %s" % s_key)
print("CATEGORY=%d" % (0 if Cache.CATEGORY[s_key] is None else len(Cache.CATEGORY[s_key])))
print("GO_DESCRIPTION=%d" % (0 if Cache.GO_DESCRIPTION[s_key] is None else len(Cache.GO_DESCRIPTION[s_key])))
print("GO_CATEGORY=%d" % (0 if Cache.GO_CATEGORY[s_key] is None else len(Cache.GO_CATEGORY[s_key])))
for tax_id in Cache.TOTAL_GENE_COUNT[s_key].keys():
print("TAX_ID=%d (%s)" % (tax_id, ez.Cache.C_TAX_NAME.get(tax_id, "UNKNOWN")))
print("TOTAL_GENE_COUNT=%d" % Cache.TOTAL_GENE_COUNT[s_key][tax_id])
print("ALL_GENE=%d" % len(Cache.ALL_GENE[s_key][tax_id]))
print("CATEGORY_COUNT=%d" % len(Cache.CATEGORY_COUNT[s_key][tax_id]))
print("GO_GENE_ENRICH=%d" % len(Cache.GO_GENE_ENRICH[s_key][tax_id]))
print("GO_GENE=%d" % len(Cache.GO_GENE[s_key][tax_id]))
print("GENE_GO=%d" % len(Cache.GENE_GO[s_key][tax_id]))
print("")
@staticmethod
def unload(tax_id, l_use_GPDB, l_L1k):
if tax_id==-9606: tax_id=9606
s_key=Cache.key(l_use_GPDB, l_L1k)
if tax_id in Cache.TOTAL_GENE_COUNT[s_key]:
del Cache.CATEGORY_COUNT[s_key][tax_id]
del Cache.TOTAL_GENE_COUNT[s_key][tax_id]
del Cache.ALL_GENE[s_key][tax_id]
del Cache.GO_GENE_ENRICH[s_key][tax_id]
del Cache.GO_GENE[s_key][tax_id]
del Cache.GENE_GO[s_key][tax_id]
@staticmethod
def key(l_use_GPDB, l_L1k):
if l_L1k: return "L1k"
return 'GPDB' if l_use_GPDB else 'LOCAL'
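    # Illustrative mapping of key() above (not part of the original source):
    #   Cache.key(l_use_GPDB=True,  l_L1k=False) -> 'GPDB'
    #   Cache.key(l_use_GPDB=False, l_L1k=False) -> 'LOCAL'
    #   Cache.key(l_use_GPDB=False, l_L1k=True)  -> 'L1k'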
@staticmethod
def load(tax_id=9606, l_use_GPDB=True, user_go=None, l_L1k=False):
"""tax_id is None, defaults to 9606, if 0, means load all supported species,
entrez_gene is only used in local mode to accelerate Symbol retrieval"""
if tax_id is None:
            util.error_msg('tax_id must be an int, or 0 means all supported species')
tax_id=abs(tax_id)
s_key=Cache.key(l_use_GPDB, l_L1k=l_L1k)
if tax_id!=0 and tax_id in Cache.TOTAL_GENE_COUNT[s_key]: return
S_tax_id=[]
# performance optimization
if l_L1k: return Cache.loadL1k()
if not l_use_GPDB:
if tax_id not in (0,9606):
util.error_msg('Local database only supports human!')
tax_id=9606
if tax_id in Cache.TOTAL_GENE_COUNT[s_key]: return
S_tax_id=[tax_id]
else:
mydb=db.DB('METASCAPE')
if tax_id>0:
S_tax_id=[tax_id]
else:
t=mydb.from_sql('SELECT DISTINCT tax_id FROM gid2source_id')
S_tax_id=[x for x in t.tax_id.astype(int).tolist() if x not in Cache.TOTAL_GENE_COUNT[s_key]]
if len(S_tax_id)==0: return
s_tax_id=",".join(util.iarray2sarray(S_tax_id))
print("Load %s GO database for tax_id: %s ..." % (s_key, s_tax_id))
if l_use_GPDB:
s_where_L1k="term_category_id>=91" if l_L1k else "term_category_id<91"
if Cache.CATEGORY[s_key] is None:
t=mydb.from_sql("SELECT term_category_id,category_name FROM term_category where "+s_where_L1k)
Cache.CATEGORY[s_key] = {t.ix[i,'term_category_id']:t.ix[i,'category_name'] for i in t.index}
t=mydb.from_sql("SELECT t.term_id GO,term_name AS DESCRIPTION,term_category_id CATEGORY_ID FROM term t where "+s_where_L1k)
X=t.DESCRIPTION.isnull()
if sum(X):
t.ix[X, 'DESCRIPTION']=t.ix[X, 'GO']
#if not util.is_python3():
# t['DESCRIPTION']=t['DESCRIPTION'].apply(lambda x: unicode(x, encoding="ISO-8859-1", errors='ignore')) # L1000 has micro Mol
Cache.GO_DESCRIPTION[s_key]=dict(zip(t.GO, t.DESCRIPTION))
t['CATEGORY_ID']=t['CATEGORY_ID'].astype(int)
Cache.GO_CATEGORY[s_key]={re.sub(r'^\d+_', '', row.GO):int(row.CATEGORY_ID) for row in t.itertuples() }
if tax_id==0:
t=mydb.from_sql("SELECT COUNT(*) as N,tax_id FROM annotation a where a.annotation_type_id=3 AND content='protein-coding' group by tax_id")
else:
t=mydb.sql_in("SELECT COUNT(*) as N,tax_id FROM annotation a where a.annotation_type_id=3 AND content='protein-coding' and tax_id in (", ") group by tax_id", S_tax_id)
Cache.TOTAL_GENE_COUNT[s_key]=dict(zip(t.tax_id, t.N))
if tax_id==0:
t=mydb.from_sql("SELECT term_id GO,gids GENES,tax_id FROM term2gids where "+s_where_L1k)
else:
t=mydb.sql_in("SELECT term_id GO,gids GENES,tax_id FROM term2gids WHERE "+s_where_L1k+" and tax_id in (", ")", S_tax_id)
#tmp=t[t.GO.apply(lambda x: x.startswith('6'))]
#print tmp[:4]
else:
DATA_FILE=setting.go['DATA_FILE']
#TAX_ID,GeneID
t_gene=pd.read_csv(DATA_FILE)
t_gene=t_gene[t_gene.TAX_ID==tax_id]
C_GENE=set(t_gene['GeneID'].astype(str).tolist())
Cache.TOTAL_GENE_COUNT[s_key][tax_id]=len(C_GENE)
if user_go is not None:
if os.path.isfile(user_go):
if user_go.upper().endswith(".CSV"):
t=pd.read_csv(user_go)
else:
t=pd.read_table(user_go)
elif os.path.isfile(Cache.DATA_DIR+"AllAnnotations.tsv"):
t=pd.read_csv(Cache.DATA_DIR+"AllAnnotations.tsv", sep='\t')
if t is None:
util.error_msg('No GO Annotations available.')
#GO TYPE GENES DESCRIPTION
S=util.unique(t.TYPE)
Cache.CATEGORY[s_key] = dict(zip(S, S))
Cache.GO_CATEGORY[s_key]=dict(zip(t.GO, t.TYPE))
Cache.GO_DESCRIPTION[s_key]=dict(zip(t.GO, t.DESCRIPTION))
t['tax_id']=tax_id
for x in S_tax_id:
Cache.ALL_GENE[s_key][x]=set()
Cache.GENE_GO[s_key][x]={}
Cache.GO_GENE[s_key][x]={}
Cache.CATEGORY_COUNT[s_key][x]={}
Cache.GO_GENE_ENRICH[s_key][x]=set()
#sw=util.StopWatch("AAAAAAA")
for tax_id2,t_v in t.groupby('tax_id'):
#t_v=t_v.copy()
GENE_GO={}
GO_GENE={}
GO_GENE_ENRICH=set()
ALL_GENE=set()
CATEGORY_COUNT={}
s_cat=0
S_genes=[ (row.GO, row.GENES.split(",")) for row in t_v.itertuples() ]
if not l_use_GPDB:
S_genes=[ (x, [y for y in Y if (y in C_GENE)]) for x,Y in S_genes ]
GO_GENE={x: set(Y) for x,Y in S_genes if (len(Y)>0 and len(Y)<=Cache.N_TRIVIAL) }
GO_GENE_ENRICH=set(GO_GENE.keys())
if l_use_GPDB:
for x in GO_GENE_ENRICH:
if re.sub(r'^\d+_', '', x) not in Cache.GO_CATEGORY[s_key]:
print(">>>>>>>>>>>>>>>>>>>", x, s_key, re.sub(r'^\d+_', '', x))
exit()
S_cat=[ Cache.GO_CATEGORY[s_key][re.sub(r'^\d+_','', x)] for x in GO_GENE_ENRICH ]
else:
S_cat=[ Cache.GO_CATEGORY[s_key][x] for x in GO_GENE_ENRICH ]
CATEGORY_COUNT=util.unique_count(S_cat)
# reduce is slower
#ALL_GENE=reduce(lambda a,b : a|b, GO_GENE.values())
ALL_GENE=set([x for Y in GO_GENE.values() for x in Y])
#for row in t_v.itertuples():
##for i in t_v.index:
# s_go=row.GO #t_v.ix[i, 'GO']
# S_genes=row.GENES.split(",") #t_v.ix[i, 'GENES'].split(",")
# if not l_use_GPDB:
# ### warning, gene ids not recognized are treated as tax ID 0!!!
# S_genes=[s for s in S_genes if s in C_GENE]
# if len(S_genes)==0: continue
# if len(S_genes)<=Cache.N_TRIVIAL:
# GO_GENE_ENRICH.add(s_go)
# if l_use_GPDB:
# s_cat=Cache.GO_CATEGORY[s_key].get(re.sub(r'^\d+_','',s_go), 0)
# CATEGORY_COUNT[s_cat]=CATEGORY_COUNT.get(s_cat, 0)+1
# GO_GENE[s_go]=set(S_genes)
# ALL_GENE.update(GO_GENE[s_go])
#sw.check("TTTTTTTTT "+str(tax_id))
for k,v in GO_GENE.items():
for s_gene in v:
if s_gene not in GENE_GO:
GENE_GO[s_gene]={k}
else:
GENE_GO[s_gene].add(k)
Cache.ALL_GENE[s_key][tax_id2]=ALL_GENE
Cache.GENE_GO[s_key][tax_id2]=GENE_GO
Cache.TOTAL_GENE_COUNT[s_key][tax_id2]=max(Cache.TOTAL_GENE_COUNT[s_key][tax_id2], len(GENE_GO))
Cache.CATEGORY_COUNT[s_key][tax_id2]=CATEGORY_COUNT
Cache.GO_GENE[s_key][tax_id2]=GO_GENE
Cache.GO_GENE_ENRICH[s_key][tax_id2]=GO_GENE_ENRICH
if l_L1k:
s_path=setting.go['L1000_PATH']
S_gene=util.read_list(s_path+'/L1kAllGenes.txt')
Cache.ALL_GENE[s_key][tax_id]=set(S_gene)
Cache.TOTAL_GENE_COUNT[s_key][tax_id]=len(S_gene)
@staticmethod
def loadL1k():
"""Load L1000 terms"""
sw=util.StopWatch()
print("Loading L1k terms ...")
tax_id=9606
s_key="L1k"
s_path=setting.go['L1000_PATH']
S_gene=util.read_list(s_path+"/L1kAllGenes.txt")
Cache.TOTAL_GENE_COUNT[s_key][tax_id]=len(S_gene)
        t1 = pd.read_csv(s_path+"/Term2Gid_L1000_PhaseI.csv")
import numpy as np
import pandas as pd
ots = pd.read_csv("data/ot.csv",sep=',')
ots=ots.sort_values('Pedido')
xlsx_file = "data/layout.xlsx"
layout = pd.read_excel(xlsx_file, sheet_name="layout")
adyacencia=pd.read_excel(xlsx_file, sheet_name="adyacencia")
cant_ordenes=len(ots['Pedido'].unique())
pasillos=layout['pasillo'].unique()
tiempo_pickeo=20
velocidad=20
lista=[]
for x in range(cant_ordenes):
obj=list(ots.loc[ots["Pedido"]==x+1]["Cod.Prod"])
lista.append(obj)
ordenes=np.array(lista)
l=[]
for pasillo in pasillos:
l.append([pasillo,False, 0])
pasillo_bool = pd.DataFrame(l, columns=["pasillo","ocupado","pickeador"])
import os
import pandas as pd
import sys
sys.path.append('../')
from load_paths import load_box_paths
from datetime import date, timedelta, datetime
from processing_helpers import *
from scenario_sets import *
from data_comparison import load_sim_data
datapath, projectpath, wdir,exe_dir, git_dir = load_box_paths()
today = datetime.today()
mixed_scenarios = True
simdate = "20200506"
plot_first_day = pd.to_datetime('2020/3/1')
plot_last_day = pd.to_datetime('2021/4/1')
channels = ['infected', 'new_detected', 'new_deaths', 'hospitalized', 'critical', 'ventilators']
if mixed_scenarios == False:
sim_path = os.path.join(wdir, 'simulation_output')
plotdir = os.path.join(sim_path, '_plots')
out_dir = os.path.join(projectpath, 'NU_civis_outputs', today.strftime('%Y%m%d'), 'csv')
plot_name = simdate + '_' + stem + '_test'
sim_scenarios_1 = [x for x in os.listdir(os.path.join(wdir, 'simulation_output')) if 'scenario1' in x]
sim_scenarios_2 = [x for x in os.listdir(os.path.join(wdir, 'simulation_output')) if 'scenario2' in x]
sim_scenarios_3 = [x for x in os.listdir(os.path.join(wdir, 'simulation_output')) if 'scenario3' in x]
#sim_scenarios = sim_scenarios[2:] + sim_scenarios[:2] ## workaround to get right order 1-11
sim_scenarios = [sim_scenarios_1, sim_scenarios_2, sim_scenarios_3]
filenames = [ 'nu_ems_endsip_'+ simdate +'.csv' , 'nu_ems_neversip_'+ simdate +'.csv' , 'nu_ems_baseline_'+ simdate +'.csv' ]
if mixed_scenarios == True:
sim_path = os.path.join(wdir, 'simulation_output', simdate + '_mixed_reopening', 'simulations')
plotdir = os.path.join(wdir, 'simulation_output', simdate + '_mixed_reopening', 'plots')
out_dir = os.path.join(wdir, 'simulation_output', simdate + '_mixed_reopening', 'csv')
sim_scenarios, sim_label, intervention_label = def_scenario_set(simdate)
nsets = len(sim_scenarios)
filenames = []
for i in range(1, nsets):
filenames = filenames + ['nu_ems_set' + str(i) + '.csv']
for num, exp_names in enumerate(sim_scenarios):
adf = pd.DataFrame()
for d, exp_name in enumerate(exp_names):
sim_output_path = os.path.join(sim_path, exp_name)
ems = int(exp_name.split('_')[2])
df = pd.read_csv(os.path.join(sim_output_path, 'projection_for_civis.csv'))
#first_day = datetime.strptime(df['startdate'].unique()[0], '%Y-%m-%d')
df = df.rename(columns={"date": "Date",
"infected_median": "Number of Covid-19 infections",
"infected_95CI_lower": "Lower error bound of covid-19 infections",
"infected_95CI_upper": "Upper error bound of covid-19 infections",
"new_symptomatic_median": "Number of Covid-19 symptomatic",
"new_symptomatic_95CI_lower": "Lower error bound of covid-19 symptomatic",
"new_symptomatic_95CI_upper": "Upper error bound of covid-19 symptomatic",
"new_deaths_median": "Number of covid-19 deaths",
"new_deaths_95CI_lower": "Lower error bound of covid-19 deaths",
"new_deaths_95CI_upper": "Upper error bound of covid-19 deaths",
"hospitalized_median": "Number of hospital beds occupied",
"hospitalized_95CI_lower": "Lower error bound of number of hospital beds occupied",
"hospitalized_95CI_upper": "Upper error bound of number of hospital beds occupied",
"critical_median": "Number of ICU beds occupied",
"critical_95CI_lower": "Lower error bound of number of ICU beds occupied",
"critical_95CI_upper": "Upper error bound of number of ICU beds occupied",
"ventilators_median": "Number of ventilators used",
"ventilators_95CI_lower": "Lower error bound of number of ventilators used",
"ventilators_95CI_upper": "Upper error bound of number of ventilators used"})
df['ems'] = ems
df['Date'] = pd.to_datetime(df['Date'])
df = df[(df['Date'] >= plot_first_day) & (df['Date'] <= plot_last_day)]
        adf = pd.concat([adf, df])
import numpy as np
import pandas as pd
from tqdm import tqdm
import os
import sys
import hashlib
def generateId(image_url, prefix, exisiting_ids):
id = f"{prefix}:{hashlib.md5(image_url.encode()).hexdigest()}"
# while id in exisiting_ids:
# print(f"id {id} exists!")
# image_url = image_url + "1"
# id = f"{prefix}:{hashlib.blake2s(image_url.encode()).hexdigest()}"
# print(f"Changed to {id}")
return id
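# Illustrative behaviour (a sketch, not part of the original script): the id is
# a stable, prefix-qualified hash of the image URL, so the same URL always maps
# to the same id. The URL and prefix below are made up for illustration only:
#   generateId("https://example.org/img/1.jpg", "src", set())
#   == "src:" + hashlib.md5("https://example.org/img/1.jpg".encode()).hexdigest()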
def combine(taxonfiles, imagefiles, outputfolder, previousImageList):
tqdm.pandas()
taxa = pd.read_csv(taxonfiles[0])
for i, file in enumerate(taxonfiles):
if i > 0:
taxa = pd.concat([taxa, | pd.read_csv(file) | pandas.read_csv |
import vectorbt as vbt
import numpy as np
import pandas as pd
from numba import njit
from datetime import datetime
import pytest
from vectorbt.signals import nb
seed = 42
day_dt = np.timedelta64(86400000000000)
sig = pd.DataFrame([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]), columns=['a', 'b', 'c'])
ts = pd.Series([1., 2., 3., 2., 1.], index=sig.index)
price = pd.DataFrame({
'open': [10, 11, 12, 11, 10],
'high': [11, 12, 13, 12, 11],
'low': [9, 10, 11, 10, 9],
'close': [10, 11, 12, 11, 10]
})
# ############# accessors.py ############# #
class TestAccessors:
def test_freq(self):
assert sig.vbt.signals.wrapper.freq == day_dt
assert sig['a'].vbt.signals.wrapper.freq == day_dt
assert sig.vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert sig['a'].vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert pd.Series([False, True]).vbt.signals.wrapper.freq is None
assert pd.Series([False, True]).vbt.signals(freq='3D').wrapper.freq == day_dt * 3
assert pd.Series([False, True]).vbt.signals(freq=np.timedelta64(4, 'D')).wrapper.freq == day_dt * 4
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_fshift(self, test_n):
pd.testing.assert_series_equal(sig['a'].vbt.signals.fshift(test_n), sig['a'].shift(test_n, fill_value=False))
np.testing.assert_array_equal(
sig['a'].vbt.signals.fshift(test_n).values,
nb.fshift_1d_nb(sig['a'].values, test_n)
)
pd.testing.assert_frame_equal(sig.vbt.signals.fshift(test_n), sig.shift(test_n, fill_value=False))
def test_empty(self):
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty(5, index=np.arange(10, 15), name='a'),
pd.Series(np.full(5, False), index=np.arange(10, 15), name='a')
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty((5, 3), index=np.arange(10, 15), columns=['a', 'b', 'c']),
pd.DataFrame(np.full((5, 3), False), index=np.arange(10, 15), columns=['a', 'b', 'c'])
)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty_like(sig['a']),
pd.Series(np.full(sig['a'].shape, False), index=sig['a'].index, name=sig['a'].name)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty_like(sig),
pd.DataFrame(np.full(sig.shape, False), index=sig.index, columns=sig.columns)
)
def test_generate(self):
@njit
def choice_func_nb(from_i, to_i, col, n):
if col == 0:
return np.arange(from_i, to_i)
elif col == 1:
return np.full(1, from_i)
else:
return np.full(1, to_i - n)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate(5, choice_func_nb, 1, index=sig['a'].index, name=sig['a'].name),
pd.Series(
np.array([True, True, True, True, True]),
index=sig['a'].index,
name=sig['a'].name
)
)
with pytest.raises(Exception) as e_info:
_ = pd.Series.vbt.signals.generate((5, 2), choice_func_nb, 1)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate((5, 3), choice_func_nb, 1, index=sig.index, columns=sig.columns),
pd.DataFrame(
np.array([
[True, True, False],
[True, False, False],
[True, False, False],
[True, False, False],
[True, False, True]
]),
index=sig.index,
columns=sig.columns
)
)
def test_generate_both(self):
@njit
def entry_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
@njit
def exit_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
temp_int = np.empty((sig.shape[0],), dtype=np.int_)
en, ex = pd.Series.vbt.signals.generate_both(
5, entry_func_nb, exit_func_nb, (temp_int,), (temp_int,),
index=sig['a'].index, name=sig['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, True, False, True]),
index=sig['a'].index,
name=sig['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, True, False]),
index=sig['a'].index,
name=sig['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_both(
(5, 3), entry_func_nb, exit_func_nb, (temp_int,), (temp_int,),
index=sig.index, columns=sig.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[True, True, True],
[False, False, False],
[True, True, True]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[True, True, True],
[False, False, False]
]),
index=sig.index,
columns=sig.columns
)
)
en, ex = pd.Series.vbt.signals.generate_both(
(5,), entry_func_nb, exit_func_nb, (temp_int,), (temp_int,),
index=sig['a'].index, name=sig['a'].name, entry_wait=1, exit_wait=0)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, True, True, True, True]),
index=sig['a'].index,
name=sig['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([True, True, True, True, True]),
index=sig['a'].index,
name=sig['a'].name
)
)
en, ex = pd.Series.vbt.signals.generate_both(
(5,), entry_func_nb, exit_func_nb, (temp_int,), (temp_int,),
index=sig['a'].index, name=sig['a'].name, entry_wait=0, exit_wait=1)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, True, True, True, True]),
index=sig['a'].index,
name=sig['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, True, True, True]),
index=sig['a'].index,
name=sig['a'].name
)
)
def test_generate_exits(self):
@njit
def choice_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
temp_int = np.empty((sig.shape[0],), dtype=np.int_)
pd.testing.assert_series_equal(
sig['a'].vbt.signals.generate_exits(choice_func_nb, temp_int),
pd.Series(
np.array([False, True, False, False, True]),
index=sig['a'].index,
name=sig['a'].name
)
)
pd.testing.assert_frame_equal(
sig.vbt.signals.generate_exits(choice_func_nb, temp_int),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
sig.vbt.signals.generate_exits(choice_func_nb, temp_int, wait=0),
pd.DataFrame(
np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]),
index=sig.index,
columns=sig.columns
)
)
def test_clean(self):
entries = pd.DataFrame([
[True, False, True],
[True, False, False],
[True, True, True],
[False, True, False],
[False, True, True]
], index=sig.index, columns=sig.columns)
exits = pd.Series([True, False, True, False, True], index=sig.index)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(),
pd.DataFrame(
np.array([
[True, False, True],
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries),
pd.DataFrame(
np.array([
[True, False, True],
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits)[1],
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits, entry_first=False)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits, entry_first=False)[1],
pd.DataFrame(
np.array([
[False, True, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries, exits)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries, exits)[1],
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=sig.index,
columns=sig.columns
)
)
with pytest.raises(Exception) as e_info:
_ = pd.Series.vbt.signals.clean(entries, entries, entries)
def test_generate_random(self):
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate_random(
5, n=3, seed=seed, index=sig['a'].index, name=sig['a'].name),
pd.Series(
np.array([False, True, True, False, True]),
index=sig['a'].index,
name=sig['a'].name
)
)
with pytest.raises(Exception) as e_info:
_ = pd.Series.vbt.signals.generate_random((5, 2), n=3)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), n=3, seed=seed, index=sig.index, columns=sig.columns),
pd.DataFrame(
np.array([
[False, False, True],
[True, True, True],
[True, True, False],
[False, True, True],
[True, False, False]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), n=[0, 1, 2], seed=seed, index=sig.index, columns=sig.columns),
pd.DataFrame(
np.array([
[False, False, True],
[False, False, True],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate_random(
5, prob=0.5, seed=seed, index=sig['a'].index, name=sig['a'].name),
pd.Series(
np.array([True, False, False, False, True]),
index=sig['a'].index,
name=sig['a'].name
)
)
with pytest.raises(Exception) as e_info:
_ = pd.Series.vbt.signals.generate_random((5, 2), prob=3)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=0.5, seed=seed, index=sig.index, columns=sig.columns),
pd.DataFrame(
np.array([
[True, True, True],
[False, True, False],
[False, False, False],
[False, False, True],
[True, False, True]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=[0., 0.5, 1.], seed=seed, index=sig.index, columns=sig.columns),
pd.DataFrame(
np.array([
[False, True, True],
[False, True, True],
[False, False, True],
[False, False, True],
[False, False, True]
]),
index=sig.index,
columns=sig.columns
)
)
with pytest.raises(Exception) as e_info:
pd.DataFrame.vbt.signals.generate_random((5, 3))
def test_generate_random_exits(self):
pd.testing.assert_series_equal(
sig['a'].vbt.signals.generate_random_exits(seed=seed),
pd.Series(
np.array([False, False, True, False, True]),
index=sig['a'].index,
name=sig['a'].name
)
)
pd.testing.assert_frame_equal(
sig.vbt.signals.generate_random_exits(seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, False],
[False, False, False],
[True, False, True]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
sig.vbt.signals.generate_random_exits(seed=seed, wait=0),
pd.DataFrame(
np.array([
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, True],
[True, True, False]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_series_equal(
sig['a'].vbt.signals.generate_random_exits(prob=1., seed=seed),
pd.Series(
np.array([False, True, False, False, True]),
index=sig['a'].index,
name=sig['a'].name
)
)
pd.testing.assert_frame_equal(
sig.vbt.signals.generate_random_exits(prob=1., seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
sig.vbt.signals.generate_random_exits(prob=[0., 0.5, 1.], seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[False, False, False]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
sig.vbt.signals.generate_random_exits(prob=1., wait=0, seed=seed),
pd.DataFrame(
np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]),
index=sig.index,
columns=sig.columns
)
)
def test_generate_random_both(self):
# n
en, ex = pd.Series.vbt.signals.generate_random_both(
5, n=2, seed=seed, index=sig['a'].index, name=sig['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, True, False, False]),
index=sig['a'].index,
name=sig['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, False, True]),
index=sig['a'].index,
name=sig['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), n=2, seed=seed, index=sig.index, columns=sig.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[True, True, False],
[False, False, True],
[False, False, False]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[False, True, False],
[True, False, True]
]),
index=sig.index,
columns=sig.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), n=[0, 1, 2], seed=seed, index=sig.index, columns=sig.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[False, False, True],
[False, True, False],
[False, False, False],
[False, False, True],
[False, False, False]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, True],
[False, False, False],
[False, True, False],
[False, False, True]
]),
index=sig.index,
columns=sig.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((2, 3), n=2, seed=seed, entry_wait=1, exit_wait=0)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True]
])
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((3, 3), n=2, seed=seed, entry_wait=0, exit_wait=1)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[False, False, False]
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[True, True, True],
])
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((7, 3), n=2, seed=seed, entry_wait=2, exit_wait=2)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False]
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[True, True, True]
])
)
)
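# Rough uniformity check (interpretation added for clarity): with entry_wait=2 and exit_wait=2,
# each run below places n alternating entry/exit signals of each kind on a length-1000 index.
# Accumulating the interleaved signal positions over 10000 runs, the k-th of the 2*n signals should
# land on average near 1000 * (k + 1) / (2 * n + 1), so the accumulated sums are checked against the
# surrounding bounds 10000 * 1000 * k / (2*n+1) and 10000 * 1000 * (k+2) / (2*n+1).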
n = 10
a = np.full(n * 2, 0.)
for i in range(10000):
en, ex = pd.Series.vbt.signals.generate_random_both(1000, n, entry_wait=2, exit_wait=2)
_a = np.empty((n * 2,), dtype=np.int_)
_a[0::2] = np.flatnonzero(en)
_a[1::2] = np.flatnonzero(ex)
a += _a
greater = a > 10000000 / (2 * n + 1) * np.arange(0, 2 * n)
less = a < 10000000 / (2 * n + 1) * np.arange(2, 2 * n + 2)
assert np.all(greater & less)
# probs
en, ex = pd.Series.vbt.signals.generate_random_both(
5, entry_prob=0.5, exit_prob=1., seed=seed, index=sig['a'].index, name=sig['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, False, False, True]),
index=sig['a'].index,
name=sig['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, False, False]),
index=sig['a'].index,
name=sig['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=0.5, exit_prob=1., seed=seed, index=sig.index, columns=sig.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, True],
[True, False, False]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, True]
]),
index=sig.index,
columns=sig.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=[0., 0.5, 1.], exit_prob=[0., 0.5, 1.],
seed=seed, index=sig.index, columns=sig.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[False, True, True],
[False, False, False],
[False, False, True],
[False, False, False],
[False, False, True]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True],
[False, False, False]
]),
index=sig.index,
columns=sig.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., exit_wait=0,
seed=seed, index=sig.index, columns=sig.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=sig.index,
columns=sig.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=sig.index,
columns=sig.columns
)
)
# none
with pytest.raises(Exception) as e_info:
pd.DataFrame.vbt.signals.generate_random((5, 3))
def test_generate_stop_exits(self):
e = pd.Series([True, False, False, False, False, False])
t = pd.Series([2, 3, 4, 3, 2, 1]).astype(np.float64)
# stop loss
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1),
pd.Series(np.array([False, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True),
pd.Series(np.array([False, False, False, True, False, False]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, first=False),
pd.Series(np.array([False, False, False, True, True, True]))
)
pd.testing.assert_frame_equal(
e.vbt.signals.generate_stop_exits(t.vbt.tile(3), [-0., -0.5, -1.], trailing=True, first=False),
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, False],
[False, True, False]
]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, exit_wait=3),
pd.Series(np.array([False, False, False, False, True, False]))
)
# take profit
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1),
pd.Series(np.array([False, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True),
pd.Series(np.array([False, False, False, True, False, False]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True, first=False),
pd.Series(np.array([False, False, False, True, True, True]))
)
pd.testing.assert_frame_equal(
e.vbt.signals.generate_stop_exits((4 - t).vbt.tile(3), [0., 0.5, 1.], trailing=True, first=False),
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[False, True, True],
[False, True, True]
]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True, exit_wait=3),
pd.Series(np.array([False, False, False, False, True, False]))
)
# iteratively
e = pd.Series([True, True, True, True, True, True])
import csv
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserError
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
import pandas.core.common as com
from pandas.io.common import get_handle
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
MIXED_INT_DTYPES = [
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
]
class TestDataFrameToCSV:
def read_csv(self, path, **kwargs):
params = {"index_col": 0, "parse_dates": True}
params.update(**kwargs)
return read_csv(path, **params)
def test_to_csv_from_csv1(self, float_frame, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv1__") as path:
float_frame["A"][:5] = np.nan
float_frame.to_csv(path)
float_frame.to_csv(path, columns=["A", "B"])
float_frame.to_csv(path, header=False)
float_frame.to_csv(path, index=False)
# test roundtrip
# freq does not roundtrip
datetime_frame.index = datetime_frame.index._with_freq(None)
datetime_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(datetime_frame, recons)
datetime_frame.to_csv(path, index_label="index")
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(datetime_frame.columns) + 1
# no index
datetime_frame.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(datetime_frame.values, recons.values)
# corner case
dm = DataFrame(
{
"s1": Series(range(3), index=np.arange(3)),
"s2": Series(range(2), index=np.arange(2)),
}
)
dm.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(dm, recons)
def test_to_csv_from_csv2(self, float_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv2__") as path:
# duplicate index
df = DataFrame(
np.random.randn(3, 3), index=["a", "a", "b"], columns=["x", "y", "z"]
)
df.to_csv(path)
result = self.read_csv(path)
tm.assert_frame_equal(result, df)
midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
df = DataFrame(np.random.randn(3, 3), index=midx, columns=["x", "y", "z"])
df.to_csv(path)
result = self.read_csv(path, index_col=[0, 1, 2], parse_dates=False)
tm.assert_frame_equal(result, df, check_names=False)
# column aliases
col_aliases = Index(["AA", "X", "Y", "Z"])
float_frame.to_csv(path, header=col_aliases)
rs = self.read_csv(path)
xp = float_frame.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
msg = "Writing 4 cols but got 2 aliases"
with pytest.raises(ValueError, match=msg):
float_frame.to_csv(path, header=["AA", "X"])
def test_to_csv_from_csv3(self):
with tm.ensure_clean("__tmp_to_csv_from_csv3__") as path:
df1 = DataFrame(np.random.randn(3, 1))
df2 = DataFrame(np.random.randn(3, 1))
df1.to_csv(path)
df2.to_csv(path, mode="a", header=False)
xp = pd.concat([df1, df2])
rs = read_csv(path, index_col=0)
rs.columns = [int(label) for label in rs.columns]
xp.columns = [int(label) for label in xp.columns]
tm.assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self):
with tm.ensure_clean("__tmp_to_csv_from_csv4__") as path:
# GH 10833 (TimedeltaIndex formatting)
dt = pd.Timedelta(seconds=1)
df = DataFrame(
{"dt_data": [i * dt for i in range(3)]},
index=Index([i * dt for i in range(3)], name="dt_index"),
)
df.to_csv(path)
result = read_csv(path, index_col="dt_index")
result.index = pd.to_timedelta(result.index)
result["dt_data"] = pd.to_timedelta(result["dt_data"])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_to_csv_from_csv5(self, timezone_frame):
# tz, 8260
with tm.ensure_clean("__tmp_to_csv_from_csv5__") as path:
timezone_frame.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=["A"])
converter = (
lambda c: to_datetime(result[c])
.dt.tz_convert("UTC")
.dt.tz_convert(timezone_frame[c].dt.tz)
)
result["B"] = converter("B")
result["C"] = converter("C")
tm.assert_frame_equal(result, timezone_frame)
def test_to_csv_cols_reordering(self):
# GH3454
chunksize = 5
N = int(chunksize * 2.5)
df = tm.makeCustomDataframe(N, 3)
cs = df.columns
cols = [cs[2], cs[0]]
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
tm.assert_frame_equal(df[cols], rs_c, check_names=False)
def test_to_csv_new_dupe_cols(self):
def _check_df(df, cols=None):
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if df.columns.is_unique:
rs_c.columns = cols
else:
indexer, missing = df.columns.get_indexer_non_unique(cols)
rs_c.columns = df.columns.take(indexer)
for c in cols:
obj_df = df[c]
obj_rs = rs_c[c]
if isinstance(obj_df, Series):
tm.assert_series_equal(obj_df, obj_rs)
else:
tm.assert_frame_equal(obj_df, obj_rs, check_names=False)
# wrote in the same order
else:
rs_c.columns = df.columns
tm.assert_frame_equal(df, rs_c, check_names=False)
chunksize = 5
N = int(chunksize * 2.5)
# dupe cols
df = tm.makeCustomDataframe(N, 3)
df.columns = ["a", "a", "b"]
_check_df(df, None)
# dupe cols with selection
cols = ["b", "a"]
_check_df(df, cols)
@pytest.mark.slow
def test_to_csv_dtnat(self):
# GH3437
def make_dtnat_arr(n, nnat=None):
if nnat is None:
nnat = int(n * 0.1) # 10%
s = list(date_range("2000", freq="5min", periods=n))
if nnat:
for i in np.random.randint(0, len(s), nnat):
s[i] = NaT
i = np.random.randint(100)
s[-i] = NaT
s[i] = NaT
return s
chunksize = 1000
# N=35000
s1 = make_dtnat_arr(chunksize + 5)
s2 = make_dtnat_arr(chunksize + 5, 0)
# s3 = make_dtnat_arr(chunksize + 5, 0)
with tm.ensure_clean("1.csv") as pth:
df = DataFrame({"a": s1, "b": s2})
df.to_csv(pth, chunksize=chunksize)
recons = self.read_csv(pth).apply(to_datetime)
tm.assert_frame_equal(df, recons, check_names=False)
@pytest.mark.slow
def test_to_csv_moar(self):
def _do_test(
df, r_dtype=None, c_dtype=None, rnlvl=None, cnlvl=None, dupe_col=False
):
kwargs = {"parse_dates": False}
if cnlvl:
if rnlvl is not None:
kwargs["index_col"] = list(range(rnlvl))
kwargs["header"] = list(range(cnlvl))
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
else:
kwargs["header"] = 0
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
def _to_uni(x):
if not isinstance(x, str):
return x.decode("utf8")
return x
if dupe_col:
# read_csv disambiguates the columns by
# labeling them dupe.1, dupe.2, etc.; monkey-patch the columns
recons.columns = df.columns
if rnlvl and not cnlvl:
delta_lvl = [recons.iloc[:, i].values for i in range(rnlvl - 1)]
ix = MultiIndex.from_arrays([list(recons.index)] + delta_lvl)
recons.index = ix
recons = recons.iloc[:, rnlvl - 1 :]
type_map = {"i": "i", "f": "f", "s": "O", "u": "O", "dt": "O", "p": "O"}
if r_dtype:
if r_dtype == "u": # unicode
r_dtype = "O"
recons.index = np.array(
[_to_uni(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[_to_uni(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "dt": # unicode
r_dtype = "O"
recons.index = np.array(
[Timestamp(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[Timestamp(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "p":
r_dtype = "O"
idx_list = to_datetime(recons.index)
#!/usr/bin/env python3
import pandas as pd
import numpy as np
import logging
def find_all_columns(csv_file, columns_to_exclude, range_fraction=0.1, separator=','):
"""
Sometimes, csv files have way too many columns to make you want to list them all. This method will create
a list of column objects for you, excluding whatever columns are in the columns_to_exclude list.
If columns are numeric, the acceptable range is set to 10 percent (range_fraction, modify if you want) of the
average of the field. If you need more fine-grained control over this, create the Column objects manually instead.
:param csv_file: Full path to csv file.
:param columns_to_exclude: List of column headers you DO NOT want Column objects created for.
:param range_fraction: How much numeric columns can vary by, as a fraction of the mean of the column
:param separator: Delimiter used by pandas when reading the report. Allows for parsing of .tsv ('\t' delimiter) as
well as .csv (',' delimiter) files. Default is ','
:return: List of column objects to be used by a Validator
"""
column_list = list()
df = pd.read_csv(csv_file, sep=separator)
#attendance_file is the source file and list_file is the destination file
__author__ = "<NAME>"
__version__ = '0.0.0'
from os import listdir
from os.path import isfile, join
import tkinter as tk
import pandas as pd
import re
from tkinter import *
from tkinter import filedialog
from tkinter.filedialog import askopenfile
threshold = 20 #Minimum time of attendance to be marked present
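# Assumed input format (inferred from the regexes below, not from a documented spec): each row of the
# attendance CSV dump contains a participant name followed by one "(<minutes>min)" token per attendance
# interval, e.g. "John Doe ... (12.5min) (7.0min)", and the lecture date appears as YYYY-MM-DD in the
# first matched row. The threshold above is therefore expressed in minutes.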
root=Tk()
def attendance_marker(attendance_file_path, list_file_path):#Core Function
pd.set_option('display.max_colwidth', 1000)
file = pd.read_csv(attendance_file_path)
file.drop(file.index[0])
names = re.findall('([A-Za-z]+.*)', file.to_string())
date = re.findall("[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]", names[0])[0]#getting date on which the lecture was delivered
names.remove(names[0])
durations = [] #stores the duration of each attendance interval (read README file to understand)
for item in names:
x = re.findall(r'\((\S+)min\)', item)
if(len(x)>0):
durations.append(x)
duration = []#stores the net duration of attendance of each student
for i in range(len(durations)):
duration.append(0)
for j in range(len(durations[i])):
duration[i] = duration[i]+int(float(durations[i][j]))
for i in range(len(names)):
index = names[i].find('\\')
names[i] = names[i][0:index]
names.remove(names[0])
#Feature 4 implemented (see README file)
for i in range(len(names)):
for j in range(len(names[i])):
if ((names[i][j] == ' ') and (names[i][0:j] == names[i][j+1:len(names[i])])):# detect names duplicated as "X X"
names[i] = names[i][0:j]
#Changing date to DD-MM-YYYY format
elements = re.findall("([0-9]+)", date)
date = elements[2]+'-'+elements[1]+'-'+elements[0]
slist = pd.read_excel(list_file_path)
nameList = pd.DataFrame(slist, columns= ['Name'])
attendance_list = []
for row_index, row in nameList.iterrows():
attendance_list.append(re.findall(r'Name (\S+.*)', row.to_string())[0])
numbers_list = []#0 or 1 to be marked
l = len(duration)
i = 0
#feature 3 implemented
while(i<l):
if(duration[i]<threshold):
names.remove(names[i])
duration.remove(duration[i])
l = l-1
else:
i = i+1
for i in range(len(attendance_list)):
status = 0
for j in range(len(names)):
if(attendance_list[i].casefold() == names[j].casefold()):
status = 1
break
numbers_list.append(status)
slist[date] = numbers_list
writer = pd.ExcelWriter(list_file_path)
slist.to_excel(writer, index = False) #Writing to destination file
# save the excel
writer.save()
writer.close()
def file_submit():#Implements feature 1(see README file)
pd.set_option('display.max_colwidth', 1000)
file = askopenfile(mode ='r', filetypes =[('Attendance File(csv)', '*.csv')])#Getting source file
attendance_file_path = re.findall(r"'(.+\.csv)'", str(file))[0]#Getting source file path
canvas.pack()
file1 = askopenfile(mode ='r', filetypes =[('Class list File(xlsx)', '*.xlsx')])#Getting destination file
list_file_path = re.findall(r"'(.+\.xlsx)'", str(file1))[0]#Getting destination file path
attendance_marker(attendance_file_path, list_file_path)
print("Done")
canvas.create_text(310, 280, font = ("Purisa", 13), text = "DONE", fill = 'black')
canvas.create_text(310, 310, font = ("Purisa", 13), text = "You may quit now.", fill = 'black')
canvas.create_text(310, 330, font = ("Purisa", 13), text = "or", fill = 'black')
canvas.create_text(310, 350, font = ("Purisa", 13), text = "Continue by clicking one of the buttons again.", fill = 'black')
def folder_submit():#Implements feature 2(see README file)
mypath = filedialog.askdirectory()#Getting destination folder
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
files = []# stores path of each source fle in the folder
for item in onlyfiles:
if(item[-3:] == 'csv'):# only csv files in the selected folder will be considered
files.append(mypath+'\\'+item)
file1 = askopenfile(mode ='r', filetypes =[('Class list File(xlsx)', '*.xlsx')])#Getting destination file
list_file_path = re.findall(r"'(.+\.xlsx)'", str(file1))[0]#Getting destination file (Excel files only) path
pd.set_option('display.max_colwidth', 1000)
"""Implementation of prototype set models with sklearn compatible interface.
Copyright by <NAME>
Released under the MIT license - see LICENSE file for details
This submodule creates a logger named like itself that logs to a NullHandler and tracks progress on model fitting at log
level INFO. The invoking application needs to manage log output.
"""
from abc import ABCMeta, abstractmethod
import logging
import numpy as np
import pandas as pd
from scipy.optimize import fmin_l_bfgs_b
from scipy.stats import rankdata
from sklearn.base import BaseEstimator
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import check_X_y
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import check_array, check_is_fitted, check_random_state
from statsmodels.distributions.empirical_distribution import ECDF
from proset.objective import ClassifierObjective
from proset.set_manager import ClassifierSetManager
from proset.shared import find_changes, check_feature_names, check_scale_offset, LOG_OFFSET
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
LOG_CAPTION = " ".join(["{:>10s}"] * 6 + ["{:s}"]).format(
"Iterations", "Calls", "Objective", "Gradient", "Features", "Prototypes", "Status"
)
LOG_MESSAGE = " ".join(["{:10d}", "{:10d}", "{:10.1e}", "{:10.1e}", "{:10d}", "{:10d}", "{:s}"])
LIMITED_M = 10 # parameters controlling L-BFGS-B fit
LIMITED_FACTR = 1e7
LIMITED_PGTOL = 1e-5
LIMITED_MAXFUN = 15000
LIMITED_MAXITER = 15000
LIMITED_MAXLS = 20
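# Minimal usage sketch (hypothetical; assumes a concrete subclass, e.g. a classifier built on
# ClassifierSetManager and ClassifierObjective, is defined further below in this module):
#
# model = ClassifierModel(n_iter=2, lambda_v=1e-3, lambda_w=1e-8, random_state=12345)
# model.fit(X_train, y_train) # fits n_iter batches of prototypes via L-BFGS-B
# labels = model.predict(X_test) # pass compute_familiarity=True to also get familiarity
# report = model.export(feature_names=feature_names) # prototype/feature report as a pandas data frame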
# noinspection PyPep8Naming, PyAttributeOutsideInit
class Model(BaseEstimator, metaclass=ABCMeta):
"""Base class for prototype set models.
"""
def __init__(
self,
n_iter=1,
lambda_v=1e-3,
lambda_w=1e-8,
alpha_v=0.95,
alpha_w=0.95,
num_candidates=1000,
max_fraction=0.5,
random_state=None
):
"""Initialize prototype set model with hyperparameters.
:param n_iter: non-negative integer; number of batches of prototypes to fit
:param lambda_v: non-negative float; penalty weight for the feature weights
:param lambda_w: non-negative float; penalty weight for the prototype weights
:param alpha_v: float in [0.0, 1.0]; fraction of lambda_v assigned as l2 penalty weight to feature weights; the
remainder is assigned as l1 penalty weight
:param alpha_w: float in [0.0, 1.0]; fraction of lambda_w assigned as l2 penalty weight to prototype weights;
the remainder is assigned as l1 penalty weight
:param num_candidates: positive integer; number of candidates for prototypes to try for each batch
:param max_fraction: float in (0.0, 1.0); maximum fraction of candidates to draw from one group of candidates;
candidates are grouped by class and whether the current model classifies them correctly or not
:param random_state: instance of np.random.RandomState, integer, or None; if a random state is passed, that
state will be used for randomization; if an integer or None is passed, a new random state is generated using
the argument as seed for every call to fit()
"""
self.n_iter = n_iter
self.lambda_v = lambda_v
self.lambda_w = lambda_w
self.alpha_v = alpha_v
self.alpha_w = alpha_w
self.num_candidates = num_candidates
self.max_fraction = max_fraction
self.random_state = random_state
def fit(self, X, y, sample_weight=None, warm_start=False):
"""Fit proset model to data.
:param X: 2D numpy float array; feature matrix; sparse matrices or infinite/missing values not supported
:param y: list-like object; target for supervised learning
:param sample_weight: 1D numpy array of positive floats or None; sample weights used for likelihood calculation;
pass None to use unit weights
:param warm_start: boolean; whether to create a new model or to add batches to an existing model
:return: no return value; model updated in place
"""
self._check_hyperparameters()
X, y, sample_weight = self._validate_arrays(X=X, y=y, sample_weight=sample_weight, reset=not warm_start)
logger.info("Fit proset model with {} batches and penalties lambda_v = {:0.2e}, lambda_w = {:0.2e}".format(
self.n_iter, self.lambda_v, self.lambda_w
))
MySetManager, MyObjective = self._get_compute_classes() # pylint: disable=invalid-name
if not warm_start or not hasattr(self, "set_manager_"):
self.set_manager_ = MySetManager(target=y) # pylint: disable=attribute-defined-outside-init
for i in range(self.n_iter):
objective = MyObjective(
features=X,
target=y,
weights=sample_weight,
num_candidates=self.num_candidates,
max_fraction=self.max_fraction,
set_manager=self.set_manager_,
lambda_v=self.lambda_v,
lambda_w=self.lambda_w,
alpha_v=self.alpha_v,
alpha_w=self.alpha_w,
random_state=check_random_state(self.random_state)
)
starting_point, bounds = objective.get_starting_point_and_bounds()
solution = fmin_l_bfgs_b(
func=objective.evaluate,
x0=starting_point,
bounds=bounds,
m=LIMITED_M,
factr=LIMITED_FACTR,
pgtol=LIMITED_PGTOL,
maxfun=LIMITED_MAXFUN,
maxiter=LIMITED_MAXITER,
maxls=LIMITED_MAXLS
)
batch_info = objective.get_batch_info(solution[0]) # solution[0] is the parameter vector
self.set_manager_.add_batch(batch_info)
if logger.isEnabledFor(logging.INFO): # pragma: no cover
logger.info("Batch {} fit results".format(i + 1))
logger.info(LOG_CAPTION)
logger.info(LOG_MESSAGE.format(
solution[2]["nit"],
solution[2]["funcalls"],
solution[1],
np.max(np.abs(solution[2]["grad"])),
len(np.nonzero(batch_info["feature_weights"])[0]),
len(np.nonzero(batch_info["prototype_weights"])[0]),
self._parse_solver_status(solution[2])
))
logger.info("Model fit complete")
return self
def _check_hyperparameters(self):
"""Check that model hyperparameters are valid.
:return: no return value; raises a ValueError if an issue is found
"""
if not np.issubdtype(type(self.n_iter), np.integer):
raise TypeError("Parameter n_iter must be integer.")
if self.n_iter < 0:
raise ValueError("Parameter n_iter must not be negative.")
# validation of other parameters is left to the classes or functions relying on them
# noinspection PyMethodMayBeStatic, PyUnresolvedReferences
def _validate_arrays(self, X, y, sample_weight, reset):
"""Check or transform input target, features, and sample weights as appropriate for the model.
:param X: see docstring of fit() for details
:param y: see docstring of fit() for details
:param sample_weight: see docstring of fit() for details
:param reset: boolean; whether to prepare the model for a new fit or enable warm start
:return: transformed versions of X and y; may also update the state of the model instance
"""
X, y = check_X_y(X=X, y=y)
if reset or not hasattr(self, "n_features_in_"):
self.n_features_in_ = X.shape[1] # pylint: disable=attribute-defined-outside-init
# the n_features_in_ attribute for tabular input is an sklearn convention
elif self.n_features_in_ != X.shape[1]:
raise ValueError("Parameter X must have {} columns.".format(self.n_features_in_))
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
if sample_weight.shape[0] != X.shape[0]:
raise ValueError("Parameter sample_weight must have one element per row of X if not None.")
return X, self._validate_y(y, reset), sample_weight
@abstractmethod
def _validate_y(self, y, reset): # pragma: no cover
"""Perform checks on estimator target that depend on estimator type.
:param y: 1D numpy array; target for supervised learning
:param reset: boolean; whether to prepare the model for a new fit or enable warm start
:return: y after applying appropriate checks and transforms
"""
raise NotImplementedError("Abstract method Model._validate_y() has no default implementation.")
@staticmethod
@abstractmethod
def _get_compute_classes(): # pragma: no cover
"""Provide classes implementing the set manager and objective function for the model.
:return: subclasses of proset.set_manager.SetManager and proset.objective.Objective
"""
raise NotImplementedError("Abstract method Model._get_compute_classes() has no default implementation.")
@staticmethod
def _parse_solver_status(solver_status):
"""Translate L-BFGS-B solver status into human-readable format.
:param solver_status: dict; third output argument of scipy.fmin_l_bfgs_b()
:return: string; solver exit status
"""
if solver_status["warnflag"] == 0:
return "converged"
if solver_status["warnflag"] == 1:
return "reached limit on iterations or function calls"
return "not converged ({})".format(solver_status["task"])
def predict(self, X, n_iter=None, compute_familiarity=False):
"""Predict class labels for a feature matrix.
:param X: 2D numpy array; feature matrix; sparse matrices or infinite/missing values not supported
:param n_iter: non-negative integer, 1D numpy array of non-negative and strictly increasing integers, or None;
number of batches to use for evaluation; pass None for all batches; pass an array to evaluate for multiple
values at once
:param compute_familiarity: boolean; whether to compute the familiarity for each sample
:return: 1D numpy array or list of 1D numpy arrays; if n_iter is integer or None, a single set of predictions is
returned as an array; if n_iter is an array, a list of predictions is returned with one element for each
element of the array; if compute_familiarity is True, also returns a 1D numpy float array or list of float
arrays containing the familiarity of each sample
"""
check_is_fitted(self, attributes="set_manager_")
return self._compute_prediction(X=check_array(X), n_iter=n_iter, compute_familiarity=compute_familiarity)
@abstractmethod
def _compute_prediction(self, X, n_iter, compute_familiarity): # pragma: no cover
"""Compute prediction.
:param X: see docstring of predict() for details
:param n_iter: see docstring of predict() for details
:param compute_familiarity: see docstring of predict() for details
:return: see docstring of predict() for details
"""
raise NotImplementedError("Abstract method Model._get_prediction() has no default implementation.")
def score(self, X, y, sample_weight=None, n_iter=None):
"""Use trained model to score sample data.
:param X: 2D numpy array; feature matrix; sparse matrices or infinite/missing values not supported
:param y: list-like object; target for supervised learning
:param sample_weight: 1D numpy array of positive floats or None; sample weights used for likelihood calculation;
pass None to use unit weights
:param n_iter: non-negative integer, 1D numpy array of non-negative and strictly increasing integers, or None;
number of batches to use for evaluation; pass None for all batches; pass an array to evaluate for multiple
values at once
:return: float or 1D numpy array of floats; if n_iter is integer or None, a single score is returned as a float
value; if n_iter is an array, an array of scores of the same length is returned
"""
check_is_fitted(self, attributes="set_manager_")
X, y, sample_weight = self._validate_arrays(X=X, y=y, sample_weight=sample_weight, reset=False)
return self._compute_score(X=X, y=y, sample_weight=sample_weight, n_iter=n_iter)
@abstractmethod
def _compute_score(self, X, y, sample_weight, n_iter): # pragma: no cover
"""Compute score.
:param X: see docstring of score() for details
:param y: numpy array; target for supervised learning
:param sample_weight: see docstring of score() for details
:param n_iter: see docstring of score() for details
:return: as return value of score()
"""
raise NotImplementedError("Abstract method Model._compute_score() has no default implementation.")
def export(
self,
n_iter=None,
train_names=None,
include_features=True,
feature_names=None,
scale=None,
offset=None
):
"""Export information on prototypes and parameters from trained model.
:param n_iter: non-negative integer, or None; number of batches to use for evaluation; pass None for all
batches
:param train_names: list of strings or None; names for the original training samples in order; these are
associated with the prototypes in the report; pass None to use default names 'sample 0', 'sample 1', etc.
:param include_features: boolean; whether to include information on relevant features
:param feature_names: list of strings or None; if not None, must have one element per column of features;
feature names to be used as column headers; pass None to use default names X0, X1, etc.; only used if
include_features is True
:param scale: 1D numpy array of positive floats or None; if not None, must have one element per column of
features; use this to scale features back to their original values for the report; pass None for no scaling;
only used if include_features is True
:param offset: 1D numpy array of floats or None; if not None, must have one element per column of features; use
this to shift features back to their original values for the report; pass None for no offset; only used if
include_features is True
:return: pandas data frame with the following columns; columns containing the feature name are repeated once for
each active feature; active features are ordered by decreasing weight over batches as per
set_manager.SetManager.get_feature_weights():
- batch: non-negative float; integer batch index for prototypes, np.NaN for properties of the baseline
distribution
- sample: non-negative float; integer sample index for prototypes, np.NaN for properties of the baseline
distribution
- sample name: string; sample name
- target: varies; target for supervised learning
- prototype weight: positive float; prototype weight
- <feature> weight: non-negative float; feature weight for the associated batch, np.NaN means the feature
plays no role for the batch; only included if include_features is True
- <feature> value: float; feature value as used by the model; set to np.NaN if the feature weight is np.NaN;
only included if include_features is True
- <feature> original: float; original feature value; set to np.NaN if the feature weight is np.NaN; this
column is not generated if both scale and offset are None; only included if include_features is True
"""
check_is_fitted(self, attributes="set_manager_")
feature_columns, include_original, scale, offset = self._check_report_input(
feature_names=feature_names,
num_features=self.n_features_in_,
scale=scale,
offset=offset,
sample_name=None
)[:4]
batches = self.set_manager_.get_batches(features=None, num_batches=n_iter)
report = self._make_prototype_report(batches=batches, train_names=train_names, compute_impact=False)
if include_features:
report = pd.concat([report, self._make_feature_report(
batches=batches,
feature_columns=feature_columns,
include_original=include_original,
scale=scale,
offset=offset,
active_features=self.set_manager_.get_feature_weights(num_batches=n_iter)["feature_index"],
include_similarities=False
)], axis=1)
report = report.sort_values(["batch", "prototype weight"], ascending=[True, False])
report = pd.concat([self._make_baseline_for_export(), report])
report.reset_index(inplace=True, drop=True)
return report
@staticmethod
def _check_report_input(feature_names, num_features, scale, offset, sample_name):
"""Check input for export() and explain() for consistency and apply defaults.
:param feature_names: see docstring of export() for details
:param num_features: positive integer; number of features
:param scale: see docstring of export() for details
:param offset: see docstring of export() for details
:param sample_name: string or None; name used for reference sample
:return: five return arguments:
- list of lists of strings; each list contains column names associated with one feature in the report
- boolean; whether original values need to be included in the report
- 1D numpy float array; scale as input or vector of ones if input is None
- 1D numpy float array; offset as input or vector of zeros if input is None
- string; sample name as input or default
raise an error if a check fails
"""
feature_names = check_feature_names(
num_features=num_features,
feature_names=feature_names,
active_features=None
)
feature_columns = [[
"{} weight".format(feature_name),
"{} value".format(feature_name),
"{} original".format(feature_name),
"{} similarity".format(feature_name)
] for feature_name in feature_names]
include_original = scale is not None or offset is not None
scale, offset = check_scale_offset(num_features=num_features, scale=scale, offset=offset)
if sample_name is None:
sample_name = "new sample"
return feature_columns, include_original, scale, offset, sample_name
@classmethod
def _make_prototype_report(cls, batches, train_names, compute_impact):
"""Format prototype information for report.
:param batches: list as generated by set_manager.SetManager.get_batches()
:param train_names: see docstring of export() for details
:param compute_impact: boolean; whether to compute the similarity and impact for each prototype relative to a
reference sample; if True, the information for each non-empty batch needs to contain the key 'similarities'
:return: pandas data frame with the following columns:
- batch: positive integer; batch index
- sample: non-negative integer; sample index for prototypes
- sample name: string; sample name
- target: varies; target for supervised learning
- prototype weight: positive float; prototype weight
- similarity: float in (0.0, 1.0]; similarity between prototype and reference sample; only included if
compute_impact is True
- impact: positive float; impact of prototype on reference sample; only included if compute_impact is True
"""
parts = [
cls._format_batch(batch=batch, batch_index=i, train_names=train_names)
for i, batch in enumerate(batches) if batch is not None
]
if len(parts) > 0:
report = pd.concat(parts, axis=0)
report.reset_index(inplace=True, drop=True)
return report
columns = ["batch", "sample", "sample name", "target", "prototype weight"]
if compute_impact:
columns.extend(["similarity", "impact"])
return pd.DataFrame(columns=columns)
@staticmethod
def _format_batch(batch, batch_index, train_names):
"""Format information for a single batch of prototypes to include in the report.
:param batch: one element from the output list generated by set_manager.SetManager.get_batches(); must not be
None
:param batch_index: non-negative integer; batch index
:param train_names: see docstring of export() for details
:return: as return value of _make_prototype_report(); the function determines whether impact needs to be
computed by checking whether the batch definitions contain the key "similarities"
"""
formatted = {
"batch": batch_index + 1,
"sample": batch["sample_index"],
"sample name": [
train_names[j] if train_names is not None else "sample {}".format(j) for j in batch["sample_index"]
],
"target": batch["target"],
"prototype weight": batch["prototype_weights"]
}
columns = ["batch", "sample", "sample name", "target", "prototype weight"]
if "similarities" in batch.keys():
formatted["similarity"] = np.exp(np.sum(np.log(batch["similarities"] + LOG_OFFSET), axis=1))
# use sum of logarithms instead of product for numerical stability
formatted["impact"] = formatted["similarity"] * formatted["prototype weight"]
columns.extend(["similarity", "impact"])
return pd.DataFrame(formatted, columns=columns)
@classmethod
def _make_feature_report(
cls,
batches,
feature_columns,
include_original,
scale,
offset,
active_features,
include_similarities
):
"""Format feature information for report.
:param batches: list as generated by set_manager.SetManager.get_batches()
:param feature_columns: as first return value of _check_report_input()
:param include_original: boolean; whether to include original feature values in the report
:param scale: see docstring of export() for details; None is not allowed
:param offset: see docstring of export() for details; None is not allowed
:param active_features: 1D numpy array of non-negative integers; indices of active features across all batches
:param include_similarities: boolean; whether to include per-feature similarities in the report; if True, the
information for each non-empty batch needs to contain the key 'similarities'
:return: pandas data frame with the following columns:
- <feature> weight: non-negative float; feature weight for the associated batch, np.NaN means the feature
plays no role for the batch
- <feature> value: float; feature value as used by the model; set to np.NaN if the feature weight is np.NaN
- <feature> original: float; original feature value; set to np.NaN if the feature weight is np.NaN; this
column is not generated if both scale and offset are None
- <feature> similarity: float in (0.0, 1.0]; per-feature similarity between the prototype and reference
sample; this is only included if include_similarities is True
"""
if active_features.shape[0] == 0:
return pd.DataFrame()
"""
Additional tests for PandasArray that aren't covered by
the interface tests.
"""
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.arrays import PandasArray
from pandas.core.arrays.numpy_ import PandasDtype
@pytest.fixture(
params=[
np.array(["a", "b"], dtype=object),
np.array([0, 1], dtype=float),
np.array([0, 1], dtype=int),
np.array([0, 1 + 2j], dtype=complex),
np.array([True, False], dtype=bool),
np.array([0, 1], dtype="datetime64[ns]"),
np.array([0, 1], dtype="timedelta64[ns]"),
]
)
def any_numpy_array(request):
"""
Parametrized fixture for NumPy arrays with different dtypes.
This excludes string and bytes.
"""
return request.param
# ----------------------------------------------------------------------------
# PandasDtype
@pytest.mark.parametrize(
"dtype, expected",
[
("bool", True),
("int", True),
("uint", True),
("float", True),
("complex", True),
("str", False),
("bytes", False),
("datetime64[ns]", False),
("object", False),
("void", False),
],
)
def test_is_numeric(dtype, expected):
dtype = PandasDtype(dtype)
assert dtype._is_numeric is expected
@pytest.mark.parametrize(
"dtype, expected",
[
("bool", True),
("int", False),
("uint", False),
("float", False),
("complex", False),
("str", False),
("bytes", False),
("datetime64[ns]", False),
("object", False),
("void", False),
],
)
def test_is_boolean(dtype, expected):
dtype = PandasDtype(dtype)
assert dtype._is_boolean is expected
def test_repr():
dtype = PandasDtype(np.dtype("int64"))
assert repr(dtype) == "PandasDtype('int64')"
def test_constructor_from_string():
result = PandasDtype.construct_from_string("int64")
expected = PandasDtype(np.dtype("int64"))
assert result == expected
# ----------------------------------------------------------------------------
# Construction
def test_constructor_no_coercion():
with pytest.raises(ValueError, match="NumPy array"):
PandasArray([1, 2, 3])
def test_series_constructor_with_copy():
ndarray = np.array([1, 2, 3])
ser = pd.Series(PandasArray(ndarray), copy=True)
assert ser.values is not ndarray
def test_series_constructor_with_astype():
ndarray = np.array([1, 2, 3])
result = pd.Series(PandasArray(ndarray), dtype="float64")
expected = pd.Series([1.0, 2.0, 3.0], dtype="float64")
tm.assert_series_equal(result, expected)
def test_from_sequence_dtype():
arr = np.array([1, 2, 3], dtype="int64")
result = PandasArray._from_sequence(arr, dtype="uint64")
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import pickle
import statsmodels.formula.api as smf
import statsmodels.api as sm
from statsmodels.tsa.seasonal import seasonal_decompose
import seaborn as sns
import pandas_profiling
import datetime
import sqlite3
import calendar
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('TkAgg')
def submission_format(row):
return row['air_store_id']+"_"+str(row['visit_year'])+"-"+str(row['visit_month']).zfill(2)+"-"+str(row['visit_day']).zfill(2)
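# For example (store id made up for illustration), a row with air_store_id 'air_00a91d42b08b08d9',
# visit_year 2017, visit_month 4 and visit_day 23 yields 'air_00a91d42b08b08d9_2017-04-23', matching
# the id column format of sample_submission.csv.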
def prediction_creation(data_train, data_test, models_dict, half_models_dict,nodata_model):
# Generation of predictions and submission file
#
# With all the models created, we can finally predict the visitors for the test dataset.
# We'll start by filtering the test dataset by the first predicted date.
data_test.set_index(pd.to_datetime(data_test.visit_year*10000+data_test.visit_month*100+data_test.visit_day,format='%Y%m%d'), inplace=True)
data_test = data_test[data_test.index > '2017-04-22']
# We need to fill in the missing forecast days, since only days that actually had visitors appear in our datasets.
sample_submission = pd.read_csv('../data/raw/sample_submission.csv')
restaurants = sample_submission.id.str[:20].unique()
start_date = datetime.datetime.strptime('2017-04-23', "%Y-%m-%d")
end_date = datetime.datetime.strptime('2017-05-31', "%Y-%m-%d")
#We'll use a new dataframe to store the prediction data and the new dates
predict_df = pd.DataFrame(columns=data_test.columns)
for restaurant in restaurants:
while start_date <= end_date:
if len(data_test[(data_test['air_store_id']==restaurant) & (data_test['visit_month']==start_date.month) & (data_test['visit_day']==start_date.day)]):
predict_df = predict_df.append(data_test[(data_test['air_store_id']==restaurant) & (data_test['visit_month']==start_date.month) & (data_test['visit_day']==start_date.day)], ignore_index=True)
else:
position = len(predict_df)
predict_df.loc[position,"air_store_id"] = restaurant
predict_df.loc[position,"visit_year"] = start_date.year
predict_df.loc[position,"visit_month"] = start_date.month
predict_df.loc[position,"visit_day"] = start_date.day
predict_df.loc[position,"dow"] = calendar.day_name[start_date.weekday()]
start_date += datetime.timedelta(days=1)
start_date = datetime.datetime.strptime('2017-04-23', "%Y-%m-%d")
# We now have a complete test dataset, for all ids and dates to be forecasted.
# Lets format now the dataframe in order to be able to use it in the previously obtained models.
date_info = pd.read_csv('../data/raw/date_info.csv',parse_dates=['calendar_date'])
date_info['calendar_year'] = date_info['calendar_date'].dt.year
date_info['calendar_month'] = date_info['calendar_date'].dt.month
date_info['calendar_day'] = date_info['calendar_date'].dt.day
date_info.drop(['calendar_date'], axis=1, inplace=True)
predict_df = pd.merge(predict_df, date_info, left_on=['visit_year','visit_month','visit_day'], right_on=['calendar_year','calendar_month','calendar_day'], how='left')
predict_df = pd.get_dummies(predict_df, columns=['dow'])
predict_df = pd.merge(predict_df, data_train[['air_store_id','visitors_rest_mean']].drop_duplicates(), on='air_store_id', how='left')
predict_df.drop(['holiday_flg_x','day_of_week','calendar_year','calendar_month','calendar_day','latitude', 'longitude','air_area_name','genre'], axis=1, inplace=True)
predict_df=predict_df.rename(columns = {'holiday_flg_y':'holiday_flg'})
predict_df.sort_values(by=['reserve_visitors','visitors_rest_mean'], ascending=[True,True], inplace=True)
predict_df.reserve_visitors = pd.to_numeric(predict_df.reserve_visitors)
predict_df.visit_year = pd.to_numeric(predict_df.visit_year)
predict_df.visit_month = pd.to_numeric(predict_df.visit_month)
# -*- coding: utf-8 -*-
"""Device curtailment plots.
This module creates plots related to the curtailment of generators.
@author: <NAME>
"""
import os
import logging
import pandas as pd
from collections import OrderedDict
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.ticker as mtick
import marmot.config.mconfig as mconfig
import marmot.plottingmodules.plotutils.plot_library as plotlib
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, DataSavedInModule,
UnderDevelopment, MissingZoneData)
class MPlot(PlotDataHelper):
"""curtailment MPlot class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
The curtailment.py module contains methods that are
related to the curtailment of generators .
MPlot inherits from the PlotDataHelper class to assist in creating figures.
"""
def __init__(self, argument_dict: dict):
"""
Args:
argument_dict (dict): Dictionary containing all
arguments passed from MarmotPlot.
"""
# iterate over items in argument_dict and set as properties of class
# see key_list in Marmot_plot_main for list of properties
for prop in argument_dict:
self.__setattr__(prop, argument_dict[prop])
# Instantiation of MPlotHelperFunctions
super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
self.xlabels, self.gen_names_dict, Region_Mapping=self.Region_Mapping)
self.logger = logging.getLogger('marmot_plot.'+__name__)
self.x = mconfig.parser("figure_size","xdimension")
self.y = mconfig.parser("figure_size","ydimension")
self.y_axes_decimalpt = mconfig.parser("axes_options","y_axes_decimalpt")
self.curtailment_prop = mconfig.parser("plot_data","curtailment_property")
def curt_duration_curve(self, prop: str = None,
start_date_range: str = None, end_date_range: str = None, **_):
"""Curtailment duration curve (line plot)
Displays curtailment sorted from highest occurrence to lowest
over given time period.
Args:
prop (str, optional): Controls the type of renewable energy (RE) to include in the plot.
Controlled through the plot_select.csv.
Defaults to None.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True,f"generator_{self.curtailment_prop}",self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
self.logger.info(f"{self.AGG_BY} = {zone_input}")
RE_Curtailment_DC = pd.DataFrame()
PV_Curtailment_DC = pd.DataFrame()
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
re_curt = self[f"generator_{self.curtailment_prop}"].get(scenario)
# Timeseries [MW] RE curtailment [MWh]
try: #Check for regions missing all generation.
re_curt = re_curt.xs(zone_input,level = self.AGG_BY)
except KeyError:
self.logger.info(f'No curtailment in {zone_input}')
continue
re_curt = self.df_process_gen_inputs(re_curt)
# If using Marmot's curtailment property
if self.curtailment_prop == 'Curtailment':
re_curt = self.assign_curtailment_techs(re_curt)
# Timeseries [MW] PV curtailment [MWh]
pv_curt = re_curt[re_curt.columns.intersection(self.pv_gen_cat)]
re_curt = re_curt.sum(axis=1)
pv_curt = pv_curt.sum(axis=1)
re_curt = re_curt.squeeze() #Convert to Series
pv_curt = pv_curt.squeeze() #Convert to Series
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
re_curt = re_curt[start_date_range : end_date_range]
pv_curt = pv_curt[start_date_range : end_date_range]
if re_curt.empty is True and prop == "PV+Wind":
self.logger.warning('No data in selected Date Range')
continue
if pv_curt.empty is True and prop == "PV":
self.logger.warning('No data in selected Date Range')
continue
# Sort from largest to smallest
re_cdc = re_curt.sort_values(ascending=False).reset_index(drop=True)
pv_cdc = pv_curt.sort_values(ascending=False).reset_index(drop=True)
re_cdc.rename(scenario, inplace=True)
pv_cdc.rename(scenario, inplace=True)
RE_Curtailment_DC = pd.concat([RE_Curtailment_DC, re_cdc], axis=1, sort=False)
PV_Curtailment_DC = pd.concat([PV_Curtailment_DC, pv_cdc], axis=1, sort=False)
# Remove columns that have values less than 1
RE_Curtailment_DC = RE_Curtailment_DC.loc[:, (RE_Curtailment_DC >= 1).any(axis=0)]
PV_Curtailment_DC = PV_Curtailment_DC.loc[:, (PV_Curtailment_DC >= 1).any(axis=0)]
# Replace _ with white space
RE_Curtailment_DC.columns = RE_Curtailment_DC.columns.str.replace('_',' ')
PV_Curtailment_DC.columns = PV_Curtailment_DC.columns.str.replace('_',' ')
# Create Dictionary from scenario names and color list
colour_dict = dict(zip(RE_Curtailment_DC.columns, self.color_list))
fig2, ax = plt.subplots(figsize=(self.x,self.y))
if prop == "PV":
if PV_Curtailment_DC.empty:
out = MissingZoneData()
outputs[zone_input] = out
continue
# unit conversion return divisor and energy units
unitconversion = PlotDataHelper.capacity_energy_unitconversion(PV_Curtailment_DC.values.max())
PV_Curtailment_DC = PV_Curtailment_DC/unitconversion['divisor']
Data_Table_Out = PV_Curtailment_DC
Data_Table_Out = Data_Table_Out.add_suffix(f" ({unitconversion['units']})")
x_axis_lim = 1.25 * len(PV_Curtailment_DC)
for column in PV_Curtailment_DC:
ax.plot(PV_Curtailment_DC[column], linewidth=3, color=colour_dict[column],
label=column)
ax.legend(loc='lower left',bbox_to_anchor=(1,0),
facecolor='inherit', frameon=True)
ax.set_ylabel(f"PV Curtailment ({unitconversion['units']})", color='black', rotation='vertical')
if prop == "PV+Wind":
if RE_Curtailment_DC.empty:
out = MissingZoneData()
outputs[zone_input] = out
continue
# unit conversion return divisor and energy units
unitconversion = PlotDataHelper.capacity_energy_unitconversion(RE_Curtailment_DC.values.max())
RE_Curtailment_DC = RE_Curtailment_DC/unitconversion['divisor']
Data_Table_Out = RE_Curtailment_DC
Data_Table_Out = Data_Table_Out.add_suffix(f" ({unitconversion['units']})")
x_axis_lim = 1.25 * len(RE_Curtailment_DC)
for column in RE_Curtailment_DC:
ax.plot(RE_Curtailment_DC[column], linewidth=3, color=colour_dict[column],
label=column)
ax.legend(loc='lower left',bbox_to_anchor=(1,0),
facecolor='inherit', frameon=True)
ax.set_ylabel(f"PV + Wind Curtailment ({unitconversion['units']})", color='black', rotation='vertical')
ax.set_xlabel('Hours', color='black', rotation='horizontal')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(axis='y', which='major', length=5, width=1)
ax.tick_params(axis='x', which='major', length=5, width=1)
ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(lambda x, p: format(x, f',.{self.y_axes_decimalpt}f')))
ax.margins(x=0.01)
#ax.set_xlim(0, 9490)
ax.set_xlim(0,x_axis_lim)
ax.set_ylim(bottom=0)
if mconfig.parser("plot_title_as_region"):
ax.set_title(zone_input)
outputs[zone_input] = {'fig': fig2, 'data_table': Data_Table_Out}
return outputs
def curt_pen(self, prop: str = None,
start_date_range: str = None, end_date_range: str = None, **_):
"""Plot of curtailment vs penetration.
Each scenario is represented by a different symbol on an x-y axis.
Args:
prop (str, optional): Controls type of re to include in plot.
Controlled through the plot_select.csv.
Defaults to None.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, "generator_Generation", self.Scenarios),
(True, "generator_Available_Capacity", self.Scenarios),
(True, f"generator_{self.curtailment_prop}", self.Scenarios),
(True, "generator_Total_Generation_Cost", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
Penetration_Curtailment_out = pd.DataFrame()
self.logger.info(f"{self.AGG_BY } = {zone_input}")
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
gen = self["generator_Generation"].get(scenario)
try: #Check for regions missing all generation.
gen = gen.xs(zone_input,level = self.AGG_BY)
except KeyError:
self.logger.info(f'No generation in {zone_input}')
continue
avail_gen = self["generator_Available_Capacity"].get(scenario)
avail_gen = avail_gen.xs(zone_input,level=self.AGG_BY)
re_curt = self[f"generator_{self.curtailment_prop}"].get(scenario)
try:
re_curt = re_curt.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.info(f'No curtailment in {zone_input}')
continue
re_curt = self.df_process_gen_inputs(re_curt)
# If using Marmot's curtailment property
if self.curtailment_prop == 'Curtailment':
re_curt = self.assign_curtailment_techs(re_curt)
# Finds the number of unique hours in the year
no_hours_year = len(gen.index.unique(level="timestamp"))
# Total generation across all technologies [MWh]
total_gen = float(gen.sum())
# Timeseries [MW] and Total VRE generation [MWh]
vre_gen = (gen.loc[(slice(None), self.vre_gen_cat),:])
total_vre_gen = float(vre_gen.sum())
# Timeseries [MW] and Total RE generation [MWh]
re_gen = (gen.loc[(slice(None), self.re_gen_cat),:])
total_re_gen = float(re_gen.sum())
# Timeseries [MW] and Total PV generation [MWh]
pv_gen = (gen.loc[(slice(None), self.pv_gen_cat),:])
total_pv_gen = float(pv_gen.sum())
# % Penetration of generation classes across the year
VRE_Penetration = (total_vre_gen/total_gen)*100
RE_Penetration = (total_re_gen/total_gen)*100
PV_Penetration = (total_pv_gen/total_gen)*100
# Timeseries [MW] and Total RE available [MWh]
re_avail = (avail_gen.loc[(slice(None), self.re_gen_cat),:])
total_re_avail = float(re_avail.sum())
# Timeseries [MW] and Total PV available [MWh]
pv_avail = (avail_gen.loc[(slice(None), self.pv_gen_cat),:])
total_pv_avail = float(pv_avail.sum())
# Total RE curtailment [MWh]
total_re_curt = float(re_curt.sum().sum())
# Timeseries [MW] and Total PV curtailment [MWh]
pv_curt = re_curt[re_curt.columns.intersection(self.pv_gen_cat)]
total_pv_curt = float(pv_curt.sum().sum())
# % of hours with curtailment
Prct_hr_RE_curt = (len((re_curt.sum(axis=1)).loc[(re_curt.sum(axis=1))>0])/no_hours_year)*100
Prct_hr_PV_curt = (len((pv_curt.sum(axis=1)).loc[(pv_curt.sum(axis=1))>0])/no_hours_year)*100
# Max instantaneous curtailment
if re_curt.empty:
continue
else:
Max_RE_Curt = max(re_curt.sum(axis=1))
if pv_curt.empty:
continue
else:
Max_PV_Curt = max(pv_curt.sum(axis=1))
# % RE and PV Curtailment Capacity Factor
if total_pv_curt > 0:
RE_Curt_Cap_factor = (total_re_curt/Max_RE_Curt)/no_hours_year
PV_Curt_Cap_factor = (total_pv_curt/Max_PV_Curt)/no_hours_year
else:
RE_Curt_Cap_factor = 0
PV_Curt_Cap_factor = 0
# % Curtailment across the year
if total_re_avail == 0:
continue
else:
Prct_RE_curt = (total_re_curt/total_re_avail)*100
if total_pv_avail == 0:
continue
else:
Prct_PV_curt = (total_pv_curt/total_pv_avail)*100
# Total generation cost
Total_Gen_Cost = self["generator_Total_Generation_Cost"].get(scenario)
Total_Gen_Cost = Total_Gen_Cost.xs(zone_input,level=self.AGG_BY)
Total_Gen_Cost = float(Total_Gen_Cost.sum())
vg_out = pd.Series([PV_Penetration ,RE_Penetration, VRE_Penetration, Max_PV_Curt,
Max_RE_Curt, Prct_PV_curt, Prct_RE_curt, Prct_hr_PV_curt,
Prct_hr_RE_curt, PV_Curt_Cap_factor, RE_Curt_Cap_factor, Total_Gen_Cost],
index=["% PV Penetration", "% RE Penetration", "% VRE Penetration",
"Max PV Curtailment [MW]", "Max RE Curtailment [MW]",
"% PV Curtailment", '% RE Curtailment',"% PV hrs Curtailed",
"% RE hrs Curtailed", "PV Curtailment Capacity Factor",
"RE Curtailment Capacity Factor", "Gen Cost"])
vg_out = vg_out.rename(scenario)
Penetration_Curtailment_out = pd.concat([Penetration_Curtailment_out, vg_out], axis=1, sort=False)
Penetration_Curtailment_out = Penetration_Curtailment_out.T
# Data table of values to return to main program
Data_Table_Out = Penetration_Curtailment_out
VG_index = pd.Series(Penetration_Curtailment_out.index)
# VG_index = VG_index.str.split(n=1, pat="_", expand=True)
# VG_index.rename(columns = {0:"Scenario"}, inplace=True)
VG_index.rename("Scenario", inplace=True)
# VG_index = VG_index["Scenario"]
Penetration_Curtailment_out.loc[:, "Scenario"] = VG_index[:,].values
marker_dict = dict(zip(VG_index.unique(), self.marker_style))
colour_dict = dict(zip(VG_index.unique(), self.color_list))
Penetration_Curtailment_out["colour"] = [colour_dict.get(x, '#333333') for x in Penetration_Curtailment_out.Scenario]
Penetration_Curtailment_out["marker"] = [marker_dict.get(x, '.') for x in Penetration_Curtailment_out.Scenario]
if Penetration_Curtailment_out.empty:
self.logger.warning(f'No Generation in {zone_input}')
out = MissingZoneData()
outputs[zone_input] = out
continue
fig1, ax = plt.subplots(figsize=(self.x,self.y))
for index, row in Penetration_Curtailment_out.iterrows():
if prop == "PV":
ax.scatter(row["% PV Penetration"], row["% PV Curtailment"],
marker=row["marker"], c=row["colour"], s=100, label = row["Scenario"])
ax.set_ylabel('% PV Curtailment', color='black', rotation='vertical')
ax.set_xlabel('% PV Penetration', color='black', rotation='horizontal')
elif prop == "PV+Wind":
ax.scatter(row["% RE Penetration"], row["% RE Curtailment"],
marker=row["marker"], c=row["colour"], s=40, label = row["Scenario"])
ax.set_ylabel('% PV + Wind Curtailment', color='black', rotation='vertical')
ax.set_xlabel('% PV + Wind Penetration', color='black', rotation='horizontal')
ax.set_ylim(bottom=0)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(axis='y', which='major', length=5, width=1)
ax.tick_params(axis='x', which='major', length=5, width=1)
ax.margins(x=0.01)
if mconfig.parser("plot_title_as_region"):
ax.set_title(zone_input)
handles, labels = plt.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys(), loc = 'lower right')
outputs[zone_input] = {'fig': fig1, 'data_table': Data_Table_Out}
return outputs
def curt_total(self, start_date_range: str = None, end_date_range: str = None, **_):
"""Creates stacked barplots of total curtailment by technology.
A separate bar is created for each scenario.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table.
"""
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, f"generator_{self.curtailment_prop}", self.Scenarios),
(True, "generator_Available_Capacity", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
self.logger.info(f"{self.AGG_BY} = {zone_input}")
Total_Curtailment_out = pd.DataFrame()
Total_Available_gen = pd.DataFrame()
vre_curt_chunks = []
avail_gen_chunks = []
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
vre_collection = {}
avail_vre_collection = {}
vre_curt = self[f"generator_{self.curtailment_prop}"].get(scenario)
try:
vre_curt = vre_curt.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.info(f'No curtailment in {zone_input}')
continue
vre_curt = self.df_process_gen_inputs(vre_curt)
# If using Marmot's curtailment property
if self.curtailment_prop == 'Curtailment':
vre_curt = self.assign_curtailment_techs(vre_curt)
avail_gen = self["generator_Available_Capacity"].get(scenario)
try: #Check for regions missing all generation.
avail_gen = avail_gen.xs(zone_input,level = self.AGG_BY)
except KeyError:
self.logger.info(f'No available generation in {zone_input}')
continue
avail_gen = self.df_process_gen_inputs(avail_gen)
# If using Marmot's curtailment property
if self.curtailment_prop == 'Curtailment':
avail_gen = self.assign_curtailment_techs(avail_gen)
all_empty = True
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
for vre_type in self.vre_gen_cat:
try:
vre_curt_type = vre_curt[vre_type]
# vre_curt_type = vre_curt.xs(vre_type,level='tech')
except KeyError:
self.logger.info(f'No {vre_type} in {zone_input}')
continue
avail_gen_type = avail_gen[vre_type]
# Code to index data by date range, if a date range is listed in marmot_plot_select.csv
if pd.notna(start_date_range):
avail_gen_type = avail_gen_type.groupby(['timestamp']).sum()
vre_curt_type = vre_curt_type.groupby(['timestamp']).sum()
vre_curt_type = vre_curt_type[start_date_range : end_date_range]
avail_gen_type = avail_gen_type[start_date_range : end_date_range]
if vre_curt_type.empty is False and avail_gen_type.empty is False:
all_empty = False
vre_collection[vre_type] = float(vre_curt_type.sum())
avail_vre_collection[vre_type] = float(avail_gen_type.sum())
if all_empty:
self.logger.warning('No data in selected Date Range')
continue
vre_table = pd.DataFrame(vre_collection,index=[scenario])
avail_gen_table = pd.DataFrame(avail_vre_collection,index=[scenario])
vre_curt_chunks.append(vre_table)
avail_gen_chunks.append(avail_gen_table)
if not vre_curt_chunks:
outputs[zone_input] = MissingZoneData()
continue
Total_Curtailment_out = pd.concat(vre_curt_chunks, axis=0, sort=False)
Total_Available_gen = | pd.concat(avail_gen_chunks, axis=0, sort=False) | pandas.concat |
"""Methods to find information in the different pipelines of Clinica."""
import os
from glob import glob
from os import path
import pandas as pd
def pet_volume_pipeline(
caps_dir,
df,
group_selection=None,
volume_atlas_selection=None,
pvc_restriction=None,
tracers_selection=None,
**kwargs,
):
"""Merge the data of the PET-Volume pipeline to the merged file containing the BIDS information.
Args:
caps_dir: the path to the CAPS directory
df: the DataFrame containing the BIDS information
group_selection: allows choosing the DARTEL groups to merge. If None, all groups are selected.
volume_atlas_selection: allows choosing the atlases to merge (default = 'all')
pvc_restriction: restricts the inclusion (or not) of files with the label 'pvc-rbv'
1 --> only the atlases containing the label will be used
0 --> the atlases containing the label won't be used
None --> all the atlases will be used
tracers_selection: allows choosing the PET tracers to merge (default = 'all')
Returns:
final_df: a DataFrame containing the information of the bids and the pipeline
"""
pet_path = path.join("pet", "preprocessing")
return volume_pipeline(
caps_dir,
df,
pet_path,
group_selection=group_selection,
atlas_selection=volume_atlas_selection,
pvc_restriction=pvc_restriction,
pipeline_name="pet-volume",
tracers_selection=tracers_selection,
)
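# A minimal usage sketch for pet_volume_pipeline (hedged: the group label and
# atlas name below are illustrative assumptions, not values taken from Clinica
# itself; only the call pattern is meant to be informative).
def _example_pet_volume_merge(caps_dir, bids_tsv):
    """Illustrative only: merge PET-Volume outputs into a BIDS dataframe."""
    bids_df = pd.read_csv(bids_tsv, sep="\t")
    return pet_volume_pipeline(
        caps_dir,
        bids_df,
        group_selection=["group-ADCN"],   # assumed DARTEL group label
        volume_atlas_selection=["AAL2"],  # assumed atlas name
        pvc_restriction=1,                # keep only 'pvc-rbv' atlases
    )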
def t1_freesurfer_pipeline(caps_dir, df, freesurfer_atlas_selection=None, **kwargs):
"""Merge the data of the PET-Volume pipeline to the merged file containing the BIDS information.
Args:
caps_dir: the path to the CAPS directory
df: the DataFrame containing the BIDS information
freesurfer_atlas_selection: allows choosing the atlases to merge (default = 'all')
Returns:
final_df: a DataFrame containing the information of the bids and the pipeline
"""
from clinica.iotools.converters.adni_to_bids.adni_utils import (
replace_sequence_chars,
)
from clinica.utils.stream import cprint
# Ensures that df is correctly indexed
if "participant_id" in df.columns.values:
df.set_index(["participant_id", "session_id"], inplace=True, drop=True)
subjects_dir = path.join(caps_dir, "subjects")
pipeline_df = pd.DataFrame()
for participant_id, session_id in df.index.values:
ses_path = path.join(subjects_dir, participant_id, session_id)
mod_path = path.join(
ses_path, "t1", "freesurfer_cross_sectional", "regional_measures"
)
ses_df = pd.DataFrame(
[[participant_id, session_id]], columns=["participant_id", "session_id"]
)
ses_df.set_index(["participant_id", "session_id"], inplace=True, drop=True)
if os.path.exists(mod_path):
# Looking for atlases
atlas_paths = glob(
path.join(mod_path, f"{participant_id}_{session_id}_*thickness.tsv")
)
for atlas_path in atlas_paths:
atlas_name = atlas_path.split("_parcellation-")[1].split("_")[0]
if path.exists(atlas_path) and (
not freesurfer_atlas_selection
or (
freesurfer_atlas_selection
and atlas_name in freesurfer_atlas_selection
)
):
atlas_df = | pd.read_csv(atlas_path, sep="\t") | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# The decision of which model to use, or which hyperparameters are most suitable, is often based on some cross-validation technique producing an estimate of the out-of-sample prediction error $\bar{Err}$.
#
# An alternative technique to produce an estimate of the out-of-sample error is the bootstrap, specifically the .632 estimator and the .632+ estimator mentioned in Elements of Statistical Learning. Surprisingly though, I could not find an implementation in sklearn.
#
# Both techniques first compute an upwardly biased estimate of the prediction error $\hat{Err}$ and then reduce that bias differently. <br />
# $\hat{Err}$ is obtained through
#
# $$\hat{Err} = \frac {1}{N} \displaystyle\sum_{i=1}^{N} \frac {1}{|C^{-i}|} \displaystyle\sum_{b \in {C^{-i}}} L(y_{i}, \hat{f}^{*b}(x_{i})).$$
#
# Where
# * $N$ denotes the sample size.
# * $b$ denotes a specific bootstrap sample, whereas $B$ denotes the set of bootstrap samples.
# * $C^{-i}$ denotes the number of bootstrap samples $b$ where observation $i$ is not contained in.
# * $\hat{f}^{*b}(x_{i})$ denotes the estimated value of target $y_{i}$ by model $\hat{f}$ based on bootstrap sample $b$ and data $x_{i}$.
# * $L(y_{i}, \hat{f}^{*b}(x_{i}))$ denotes the loss-function between real value $y_{i}$ and estimated value $\hat{f}^{*b}(x_{i})$.
#
# The pseudo-algorithm looks like this:
# 1. Create $B$ bootstrap samples $b$ with the same size $N$ as the original data <br />
# 2. For $i = 1, ..., N$ <br />
# I) For $b = 1, ..., B$ <br />
# Ia) If $i$ not in $b$ <br />
# Iai) Estimate $\hat{f}^{*b}(x_{i})$ <br />
# Iaii) Compute $L(y_{i}, \hat{f}^{*b}(x_{i}))$ <br />
# Ib) else next $b$ <br />
# II) Compute $\frac {1}{|C^{-i}|} \displaystyle\sum_{b \in {C^{-i}}} L(y_{i}, \hat{f}^{*b}(x_{i}))$ <br />
# 3. Compute $\frac {1}{N} \displaystyle\sum_{i=1}^{N} \frac {1}{|C^{-i}|} \displaystyle\sum_{b \in {C^{-i}}} L(y_{i}, \hat{f}^{*b}(x_{i}))$
#
# The .632 estimator then calculates
# $$\bar{Err} = 0.632*\hat{Err} + 0.368*inSampleError$$,
# whereas the .632+ estimator demands a slightly more complex procedure to estimate $\bar{Err}$.
# However, due to its simplicity only the .632 estimator is presented in this kernel.
#
# This is computationally intensive, but when forced to work with a small data set where cross-validation is unreasonable, estimating the test error through the bootstrap is a viable option.
#
# After some brief data exploration and manipulation the above algorithm is implemented. Afterwards, the 5-fold cross-validation estimate of the test error is also computed and both are compared to the true test error.
#
# In this kernel $\hat{f}$ is always represented by the linear regression and $L(y, \hat{f}(x))$ is represented by the MSE.
# A reduced data set is used because the implementation in python is not very fast.
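# As a minimal sketch, the final combination step of the .632 estimator is just
# the weighted sum stated above; the kernel below computes the two ingredients
# (the leave-one-out bootstrap error and the in-sample error) separately.
def err_632(err_boot, err_in_sample):
    """Weighted .632 bootstrap estimate of the out-of-sample error."""
    return 0.632 * err_boot + 0.368 * err_in_sample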
# In[ ]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import jarque_bera
data = pd.read_csv('../input/kc_house_data.csv')
data = data.iloc[0:1000,:]
data.drop_duplicates('id', inplace=True)
print(('Take a look at the data: \n', data.head(), '\n'))
print(('Examine data types of each predictor: \n', data.info(), '\n'))
print(('Check out summary statistics: \n', data.describe(), '\n'))
print(('Missing values?', data.isnull().any().any(), '\n'))
print(('Columns names:', data.columns.values.tolist()))
# In[ ]:
data = data.drop('zipcode', axis=1)
data = data.drop('date', axis=1)
nums = ['id', 'price', 'bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'sqft_above', 'sqft_basement',
'yr_built', 'sqft_living15', 'sqft_lot15']
numsData = data[nums]
numsData.hist(bins=50, figsize=(20,15))
plt.show()
# price, sqft_above, sqft_living, sqft_living15, sqft_lot, sqft_lot15 seem to be right-skewed and are transformed.
# In this case the inverse hyperbolic sine (arcsinh) transform is used, because, unlike log, it can handle zeros.
# Normally, one would re-transform the produced predictions of the target and the target itself before the loss-function is applied, however, in this case the scale of the target is not of interest.
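# A quick check of that claim (a sketch): arcsinh is defined at zero, whereas
# log diverges to -inf there.
with np.errstate(divide='ignore'):
    print((np.arcsinh(0.0), np.log(0.0)))  # (0.0, -inf)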
# In[ ]:
def arcsinh(data, colList):
for item in colList:
data.loc[:,item] = np.arcsinh(data.loc[:,item].values)
return data
jbCols = ['price', 'sqft_above', 'sqft_living', 'sqft_living15', 'sqft_lot', 'sqft_lot15']
numsData = arcsinh(numsData, jbCols)
numsData.hist(bins=50, figsize=(20,15))
data.loc[:,nums] = numsData
# Splitting data set and obtaining the $inSampleError$.
# In[ ]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
data.drop('price', axis=1), data['price'], test_size=0.25, random_state=42)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
lr = LinearRegression()
lr.fit(X_train, y_train)
inSamplePreds = lr.predict(X_train)
inSampleErr = mean_squared_error(inSamplePreds, y_train)
print(('In-sample-error:', inSampleErr))
# Now, the Leave-One-Out Bootstrap function is implemented.
# It needs 4 arguments to be passed in.
# 1. The data as a numpy array WITH an id-column, which uniquely identifies each observation, as the first column and
# NO target column.
# 2. The target column as a numpy array.
# 3. The number of bootstrap samples to be created, and
# 4. keyworded arguments of the model to be used.
#
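# A call might therefore look like the following (sketch; `LOOB` is defined
# below, and 50 bootstrap samples is an arbitrary illustrative choice):
#   bootErr = LOOB(X_train.values, y_train.values, 50, **kwargs)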
# While coding this function, it came to my mind that it is better to create $B$ bootstrapped id-columns instead of $B$ complete data sets that all have to be stored in memory the whole time the function is running.
# This way, only the id-columns are stored all the time and each corresponding bootstrap data set is created through a JOIN-command as needed and then deleted when not in use anymore.
# However, because I could not get the numpy-JOIN to work as I wanted it to, the function unfortunately switches to pandas to execute the join command and then switches back to numpy.
# These cumbersome operations definitely do not improve the function's execution speed.
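# The idea described above in isolation (a sketch; `data` still carries the
# unique 'id' column at this point, so a left join reproduces a full bootstrap
# sample from nothing but a resampled id vector):
ids_b = np.random.choice(data['id'].values, size=len(data), replace=True)
boot_b = pd.DataFrame({'id': ids_b}).merge(data, on='id', how='left')
print(boot_b.shape)  # same number of rows as `data`, drawn with replacement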
# In[ ]:
kwargs = {'fit_intercept': True, 'normalize': False, 'copy_X': True, 'n_jobs': 1}
# or kwargs = {}
def LOOB(data, targetCol, B_samples, **kwargs):
avgLossVec = np.zeros((data.shape[0], 1))
bootMat = np.zeros((data.shape[0], B_samples))
idCol = np.zeros((data.shape[0], 1))
idCol = data[:, 0]
targetCol = np.stack((idCol, targetCol))
targetCol = targetCol.T
for column in range(bootMat.shape[1]):
bootMat[:,column] = np.random.choice(idCol, idCol.shape[0],replace=True)
for i in np.nditer(idCol):
bootLossVec = np.zeros((1, 1))
target = targetCol[targetCol[:,0]==i,1]
targetData = data[data[:,0]==i, 1:]
for column in range(bootMat.shape[1]):
if i not in bootMat[:,column]:
tempVec = | pd.DataFrame(bootMat[:,column]) | pandas.DataFrame |
import datetime
from time import sleep
import pandas as pd
from loguru import logger
import ofanalysis.const as const
import ofanalysis.utility as ut
import tushare as ts
class TSDataUpdate:
def __init__(self, ts_pro_token:str):
self.__pro = ts.pro_api(ts_pro_token)
self.__today = datetime.date.today()
def retrieve_all(self):
self.retrieve_stock_basic()
self.retrieve_stock_daily_basic()
self.retrieve_stock_daily()
self.retrieve_fund_basic()
self.retrieve_fund_nav()
self.retrieve_fund_share()
self.retrieve_fund_manager()
self.retrieve_fund_portfolio()
def retrieve_stock_basic(self):
logger.info('Full refresh of stock basic info: stock_basic')
# read data page by page
df_stock_basic = pd.DataFrame()
i = 0
while True: # read data page by page
df_batch_result = self.__pro.stock_basic(**{
"ts_code": "",
"name": "",
"exchange": "",
"market": "",
"is_hs": "",
"list_status": "",
"limit": const.EACH_TIME_ITEM,
"offset": i
}, fields=[
"ts_code",
"symbol",
"name",
"area",
"industry",
"market",
"list_date",
"is_hs",
"delist_date",
"list_status",
"curr_type",
"exchange",
"cnspell",
"enname",
"fullname"
])
if len(df_batch_result) == 0:
break
df_stock_basic = pd.concat([df_stock_basic, df_batch_result], ignore_index=True)
i += const.EACH_TIME_ITEM
ut.db_del_dict_from_mongodb( # full (non-incremental) update: clear existing data first
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_BASIC,
query_dict={}
)
ut.db_save_dict_to_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_BASIC,
target_dict=df_stock_basic.to_dict(orient='records')
)
def retrieve_stock_daily_basic(self):
check_field = 'trade_date' # field used for incremental updates
logger.info('Updating daily stock indicators: stock_daily_basic')
existed_records = ut.db_get_distinct_from_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_DAILY_BASIC,
field=check_field
)
if len(existed_records) == 0: # empty collection
trade_cal_start_date = '20000101'
else:
existed_records.sort(reverse=True) # descending order
trade_cal_start_date = pd.to_datetime(existed_records[-1]) + datetime.timedelta(days=1)
trade_cal_start_date = trade_cal_start_date.strftime('%Y%m%d')
trade_cal_list = ut.get_trade_cal_from_ts(ts_pro_token=self.__pro, start_date=trade_cal_start_date)
for date in [x for x in trade_cal_list if x not in existed_records]:
logger.info('Updating stock_daily_basic data for %s' % date)
df_daily = pd.DataFrame()
i = 0
while True: # read data page by page
for _ in range(const.RETRY_TIMES): # retry mechanism
try:
df_batch_daily = self.__pro.daily_basic(**{
"ts_code": "",
"trade_date": date,
"start_date": "",
"end_date": "",
"limit": const.EACH_TIME_ITEM,
"offset": i
}, fields=[
"ts_code",
"trade_date",
"close",
"turnover_rate",
"turnover_rate_f",
"volume_ratio",
"pe",
"pe_ttm",
"pb",
"ps",
"ps_ttm",
"dv_ratio",
"dv_ttm",
"total_share",
"float_share",
"free_share",
"total_mv",
"circ_mv"
])
except:
sleep(1)
else:
break
if len(df_batch_daily) == 0:
break
df_daily = pd.concat([df_daily, df_batch_daily], ignore_index=True)
i += const.EACH_TIME_ITEM
if len(df_daily) == 0:
logger.info('Date %s: stock_daily_basic returned no data' % date)
continue
ut.db_save_dict_to_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_DAILY_BASIC,
target_dict=df_daily.to_dict(orient='records')
)
def retrieve_stock_daily(self):
check_field = 'trade_date' # field used for incremental updates
logger.info('Updating daily stock bars: stock_daily')
existed_records = ut.db_get_distinct_from_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_DAILY,
field=check_field
)
if len(existed_records) == 0: # empty collection
trade_cal_start_date = '20000101'
else:
existed_records.sort(reverse=True) # descending order
trade_cal_start_date = pd.to_datetime(existed_records[-1]) + datetime.timedelta(days=1)
trade_cal_start_date = trade_cal_start_date.strftime('%Y%m%d')
trade_cal_list = ut.get_trade_cal_from_ts(ts_pro_token=self.__pro, start_date=trade_cal_start_date)
for date in [x for x in trade_cal_list if x not in existed_records]:
logger.info('Updating stock_daily data for %s' % date)
df_daily = pd.DataFrame()
i = 0
while True: # read data page by page
for _ in range(const.RETRY_TIMES): # retry mechanism
try:
df_batch_daily = self.__pro.daily(**{
"ts_code": "",
"trade_date": date,
"start_date": "",
"end_date": "",
"offset": i,
"limit": const.EACH_TIME_ITEM
}, fields=[
"ts_code",
"trade_date",
"open",
"high",
"low",
"close",
"pre_close",
"change",
"pct_chg",
"vol",
"amount"
])
except:
sleep(1)
else:
break
if len(df_batch_daily) == 0:
break
df_daily = pd.concat([df_daily, df_batch_daily], ignore_index=True)
i += const.EACH_TIME_ITEM
if len(df_daily) == 0:
logger.info('Date %s: stock_daily returned no data' % date)
continue
ut.db_save_dict_to_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_STOCK_DAILY,
target_dict=df_daily.to_dict(orient='records')
)
def retrieve_fund_basic(self):
logger.info('Full refresh of fund basic info: fund_basic')
df_all_fund = pd.DataFrame()
i = 0
while True: # read data page by page
df_batch_result = self.__pro.fund_basic(**{
"ts_code": "",
"market": "",
"update_flag": "",
"offset": i,
"limit": const.EACH_TIME_ITEM,
"status": ""
}, fields=[
"ts_code",
"name",
"management",
"custodian",
"fund_type",
"found_date",
"due_date",
"list_date",
"issue_date",
"delist_date",
"issue_amount",
"m_fee",
"c_fee",
"duration_year",
"p_value",
"min_amount",
"exp_return",
"benchmark",
"status",
"invest_type",
"type",
"trustee",
"purc_startdate",
"redm_startdate",
"market"
])
if len(df_batch_result) == 0:
break
df_all_fund = pd.concat([df_all_fund, df_batch_result], ignore_index=True)
i += const.EACH_TIME_ITEM
sleep(8)
ut.db_del_dict_from_mongodb( # full (non-incremental) update: clear existing data first
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_FUND_BASIC,
query_dict={}
)
ut.db_save_dict_to_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_FUND_BASIC,
target_dict=df_all_fund.to_dict(orient='records')
)
def retrieve_fund_nav(self):
check_field = 'nav_date' # field used for incremental updates
logger.info('Updating fund NAV data: fund_nav')
existed_records = ut.db_get_distinct_from_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_FUND_NAV,
field=check_field
)
if len(existed_records) == 0: # empty collection
trade_cal_start_date = '20000101'
else:
existed_records.sort(reverse=True) # descending order
trade_cal_start_date = pd.to_datetime(existed_records[-1]) + datetime.timedelta(days=1)
trade_cal_start_date = trade_cal_start_date.strftime('%Y%m%d')
trade_cal_list = ut.get_trade_cal_from_ts(ts_pro_token=self.__pro, start_date=trade_cal_start_date)
for date in [x for x in trade_cal_list if x not in existed_records]:
logger.info('Updating fund_nav data for %s' % date)
df_daily = pd.DataFrame()
i = 0
while True: # read data page by page
for _ in range(const.RETRY_TIMES): # retry mechanism
try:
df_batch_daily = self.__pro.fund_nav(**{
"ts_code": "",
"nav_date": date,
"offset": i,
"limit": const.EACH_TIME_ITEM,
"market": "",
"start_date": "",
"end_date": ""
}, fields=[
"ts_code",
"ann_date",
"nav_date",
"unit_nav",
"accum_nav",
"accum_div",
"net_asset",
"total_netasset",
"adj_nav",
"update_flag"
])
except:
sleep(1)
else:
break
if len(df_batch_daily) == 0:
break
df_daily = pd.concat([df_daily, df_batch_daily], ignore_index=True)
i += const.EACH_TIME_ITEM
if len(df_daily) == 0:
logger.info('Date %s: fund_nav returned no data' % date)
continue
ut.db_save_dict_to_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_FUND_NAV,
target_dict=df_daily.to_dict(orient='records')
)
def retrieve_fund_share(self):
check_field = 'trade_date' # field used for incremental updates
logger.info('Updating fund share data: fund_share')
existed_records = ut.db_get_distinct_from_mongodb(
mongo_db_name=const.MONGODB_DB_TUSHARE,
col_name=const.MONGODB_COL_TS_FUND_SHARE,
field=check_field
)
if len(existed_records) == 0: # empty collection
trade_cal_start_date = '20000101'
else:
existed_records.sort(reverse=True) # descending order
trade_cal_start_date = | pd.to_datetime(existed_records[-1]) | pandas.to_datetime |
from __future__ import division
import copy
import bt
from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy
import pandas as pd
import numpy as np
from nose.tools import assert_almost_equal as aae
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
def test_node_tree():
c1 = Node('c1')
c2 = Node('c2')
p = Node('p', children=[c1, c2])
c1 = p['c1']
c2 = p['c2']
assert len(p.children) == 2
assert 'c1' in p.children
assert 'c2' in p.children
assert p == c1.parent
assert p == c2.parent
m = Node('m', children=[p])
p = m['p']
c1 = p['c1']
c2 = p['c2']
assert len(m.children) == 1
assert 'p' in m.children
assert p.parent == m
assert len(p.children) == 2
assert 'c1' in p.children
assert 'c2' in p.children
assert p == c1.parent
assert p == c2.parent
def test_strategybase_tree():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
assert len(s.children) == 2
assert 's1' in s.children
assert 's2' in s.children
assert s == s1.parent
assert s == s2.parent
def test_node_members():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
actual = s.members
assert len(actual) == 3
assert s1 in actual
assert s2 in actual
assert s in actual
actual = s1.members
assert len(actual) == 1
assert s1 in actual
actual = s2.members
assert len(actual) == 1
assert s2 in actual
def test_node_full_name():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
# we cannot access s1 and s2 directly since they are copied
# we must therefore access through s
assert s.full_name == 'p'
assert s['s1'].full_name == 'p>s1'
assert s['s2'].full_name == 'p>s2'
def test_security_setup_prices():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
# now with setup
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
def test_strategybase_tree_setup():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
assert len(s.data) == 3
assert len(c1.data) == 3
assert len(c2.data) == 3
assert len(s._prices) == 3
assert len(c1._prices) == 3
assert len(c2._prices) == 3
assert len(s._values) == 3
assert len(c1._values) == 3
assert len(c2._values) == 3
def test_strategybase_tree_adjust():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
assert s.capital == 1000
assert s.value == 1000
assert c1.value == 0
assert c2.value == 0
assert c1.weight == 0
assert c2.weight == 0
def test_strategybase_tree_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
assert c1.price == 100
assert c2.price == 100
i = 1
s.update(dts[i], data.ix[dts[i]])
assert c1.price == 105
assert c2.price == 95
i = 2
s.update(dts[i], data.ix[dts[i]])
assert c1.price == 100
assert c2.price == 100
def test_update_fails_if_price_is_nan_and_position_open():
c1 = SecurityBase('c1')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1'], data=100)
data['c1'][dts[1]] = np.nan
c1.setup(data)
i = 0
# mock in position
c1._position = 100
c1.update(dts[i], data.ix[dts[i]])
# test normal case - position & non-nan price
assert c1._value == 100 * 100
i = 1
# this should fail, because we have non-zero position, and price is nan, so
# bt has no way of updating the _value
try:
c1.update(dts[i], data.ix[dts[i]])
assert False
except Exception as e:
assert str(e).startswith('Position is open')
# on the other hand, if position was 0, this should be fine, and update
# value to 0
c1._position = 0
c1.update(dts[i], data.ix[dts[i]])
assert c1._value == 0
def test_strategybase_tree_allocate():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_child_from_strategy():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate to c1
s.allocate(500, 'c1')
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_level2():
c1 = SecurityBase('c1')
c12 = copy.deepcopy(c1)
c2 = SecurityBase('c2')
c22 = copy.deepcopy(c2)
s1 = StrategyBase('s1', [c1, c2])
s2 = StrategyBase('s2', [c12, c22])
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
c1 = s1['c1']
c2 = s1['c2']
c12 = s2['c1']
c22 = s2['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
m.setup(data)
i = 0
m.update(dts[i], data.ix[dts[i]])
m.adjust(1000)
# since children have w == 0 this should stay in s
m.allocate(1000)
assert m.value == 1000
assert m.capital == 1000
assert s1.value == 0
assert s2.value == 0
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
s1.allocate(500)
assert s1.value == 500
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
# now allocate directly to child of child
c1.allocate(200)
assert s1.value == 500
assert s1.capital == 500 - 200
assert c1.value == 200
assert c1.weight == 200.0 / 500
assert c1.position == 2
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
assert c12.value == 0
def test_strategybase_tree_allocate_long_short():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
s.adjust(1000)
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.weight == 500.0 / 1000
assert s.capital == 1000 - 500
assert s.value == 1000
c1.allocate(-200)
assert c1.position == 3
assert c1.value == 300
assert c1.weight == 300.0 / 1000
assert s.capital == 1000 - 500 + 200
assert s.value == 1000
c1.allocate(-400)
assert c1.position == -1
assert c1.value == -100
assert c1.weight == -100.0 / 1000
assert s.capital == 1000 - 500 + 200 + 400
assert s.value == 1000
# close up
c1.allocate(-c1.value)
assert c1.position == 0
assert c1.value == 0
assert c1.weight == 0
assert s.capital == 1000 - 500 + 200 + 400 - 100
assert s.value == 1000
def test_strategybase_tree_allocate_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.ix[dts[i]])
assert s.price == 100
s.adjust(1000)
assert s.price == 100
assert s.value == 1000
assert s._value == 1000
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.weight == 500.0 / 1000
assert s.capital == 1000 - 500
assert s.value == 1000
assert s.price == 100
i = 1
s.update(dts[i], data.ix[dts[i]])
assert c1.position == 5
assert c1.value == 525
assert c1.weight == 525.0 / 1025
assert s.capital == 1000 - 500
assert s.value == 1025
assert np.allclose(s.price, 102.5)
def test_strategybase_universe():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
assert len(s.universe) == 1
assert 'c1' in s.universe
assert 'c2' in s.universe
assert s.universe['c1'][dts[i]] == 105
assert s.universe['c2'][dts[i]] == 95
# should not have children unless allocated
assert len(s.children) == 0
def test_strategybase_allocate():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
assert c1.position == 1
assert c1.value == 100
assert s.value == 1000
def test_strategybase_close():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
assert c1.position == 1
assert c1.value == 100
assert s.value == 1000
s.close('c1')
assert c1.position == 0
assert c1.value == 0
assert s.value == 1000
def test_strategybase_flatten():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
s.allocate(100, 'c2')
c2 = s['c2']
assert c1.position == 1
assert c1.value == 100
assert c2.position == 1
assert c2.value == 100
assert s.value == 1000
s.flatten()
assert c1.position == 0
assert c1.value == 0
assert s.value == 1000
def test_strategybase_multiple_calls():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.ix[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 0
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 1
assert s.value == 1000
assert s.capital == 50
c2 = s['c2']
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update out t0
s.update(dts[i])
assert c2 == s['c2']
assert len(s.children) == 1
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1050
assert s.capital == 50
assert len(s.children) == 1
assert 'c2' in s.children
assert c2 == s['c2']
assert c2.value == 1000
assert c2.weight == 1000.0 / 1050.0
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
c1 = s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update out t1
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
assert c1 == s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 5
assert c1.value == 1100
assert c1.weight == 1100.0 / 1105
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
# must make sure we can still have a fresh prices
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
def test_strategybase_multiple_calls_preset_secs():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('s', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.ix[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 2
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update out t0
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1050
assert s.capital == 50
assert len(s.children) == 2
assert c2.value == 1000
assert c2.weight == 1000.0 / 1050.
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
# update out t1
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 5
assert c1.value == 1100
assert c1.weight == 1100.0 / 1105
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
# must make sure we can still have a fresh prices
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
def test_strategybase_multiple_calls_no_post_update():
s = StrategyBase('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.ix[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 0
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 1
assert s.value == 999
assert s.capital == 49
c2 = s['c2']
assert c2.value == 950
assert c2.weight == 950.0 / 999
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1049
assert s.capital == 49
assert len(s.children) == 1
assert 'c2' in s.children
assert c2 == s['c2']
assert c2.value == 1000
assert c2.weight == 1000.0 / 1049.0
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert len(s.children) == 2
assert s.value == 1047
assert s.capital == 2
c1 = s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1047
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1102
assert s.capital == 2
assert c1.value == 1100
assert c1.weight == 1100.0 / 1102
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1100
assert s.capital == 55
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1100
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1100
assert s.capital == 55
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1100
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1098
assert s.capital == 53
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1098
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1098
assert s.capital == 53
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
# must make sure we can still have a fresh prices
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1098
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1096
assert s.capital == 51
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1096
assert c2.price == 95
def test_strategybase_prices():
dts = pd.date_range('2010-01-01', periods=21)
rawd = [13.555, 13.75, 14.16, 13.915, 13.655,
13.765, 14.02, 13.465, 13.32, 14.65,
14.59, 14.175, 13.865, 13.865, 13.89,
13.85, 13.565, 13.47, 13.225, 13.385,
12.89]
data = pd.DataFrame(index=dts, data=rawd, columns=['a'])
s = StrategyBase('s')
s.set_commissions(lambda q, p: 1)
s.setup(data)
# buy 100 shares on day 1 - hold until end
# just enough to buy 100 shares + 1$ commission
s.adjust(1356.50)
s.update(dts[0])
# allocate all capital to child a
# a should be dynamically created and should have
# 100 shares allocated. s.capital should be 0
s.allocate(s.value, 'a')
assert s.capital == 0
assert s.value == 1355.50
assert len(s.children) == 1
aae(s.price, 99.92628, 5)
a = s['a']
assert a.position == 100
assert a.value == 1355.50
assert a.weight == 1
assert a.price == 13.555
assert len(a.prices) == 1
# update through all dates and make sure price is ok
s.update(dts[1])
aae(s.price, 101.3638, 4)
s.update(dts[2])
aae(s.price, 104.3863, 4)
s.update(dts[3])
aae(s.price, 102.5802, 4)
# finish updates and make sure ok at end
for i in range(4, 21):
s.update(dts[i])
assert len(s.prices) == 21
aae(s.prices[-1], 95.02396, 5)
aae(s.prices[-2], 98.67306, 5)
def test_fail_if_root_value_negative():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = | pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100) | pandas.DataFrame |
import unittest
import pandas as pd
from mmvec.heatmap import (
_parse_taxonomy_strings, _parse_heatmap_metadata_annotations,
_process_microbe_metadata, _process_metabolite_metadata,
_normalize_table)
import pandas.util.testing as pdt
class TestParseTaxonomyStrings(unittest.TestCase):
def setUp(self):
self.taxa = pd.Series([
'k__Bacteria; p__Proteobacteria; c__Deltaproteobacteria; '
'o__Desulfobacterales; f__Desulfobulbaceae; g__; s__',
'k__Bacteria; p__Cyanobacteria; c__Chloroplast; o__Streptophyta',
'k__Bacteria; p__Proteobacteria; c__Alphaproteobacteria; '
'o__Rickettsiales; f__mitochondria; g__Lardizabala; s__biternata',
'k__Archaea; p__Euryarchaeota; c__Methanomicrobia; '
'o__Methanosarcinales; f__Methanosarcinaceae; g__Methanosarcina',
'k__Bacteria; p__Proteobacteria; c__Alphaproteobacteria; '
'o__Rickettsiales; f__mitochondria; g__Pavlova; s__lutheri',
'k__Archaea; p__[Parvarchaeota]; c__[Parvarchaea]; o__WCHD3-30',
'k__Bacteria; p__Proteobacteria; c__Alphaproteobacteria; '
'o__Sphingomonadales; f__Sphingomonadaceae'],
index= | pd.Index([c for c in 'ABCDEFG'], name='feature-id') | pandas.Index |
#src module
from src import utilities as u
from src.enums import *
from src.utilities import Preprocessing
#sklearn
from sklearn.model_selection import ShuffleSplit
from sklearn.utils import shuffle
from sklearn.utils import resample
#other
import pandas as pd
class MakeDataset:
'''Prepares data.'''
def read_csv(self, full_path, delimiter='\t'):
'''Reads raw data from the full path.'''
return u.read_csv(full_path, delimiter=delimiter)
def preprocess(self, text):
try:
upre=Preprocessing()
text=upre.remove_emojis(text)
text=upre.remove_hashtag(text)
text=upre.remove_mention(text)
text=upre.remove_rt(text)
text=upre.remove_urls(text)
text=upre.remove_non_alnum(text)
text=upre.remove_space(text)
text=upre.lower_text(text)
text=upre.strip_text(text)
text=upre.compress_words(text)
return text
except Exception as e:
print('text> {}'.format(text))
raise Exception(e)
def preprocess_data(self, data):
data=data[~data.sexist.isnull()]
data=data[data.text.notna()]
data=data[data.text != '']
data['preprocessed']=data.text.apply(lambda x: self.preprocess(x))
data=data[data.preprocessed != '']
data['sexist'] = data.copy()['sexist'].astype(int)
return data
def read_data(self, full_path):
data = self.read_csv(full_path)
return self.preprocess_data(data)
def get_n_splits(self, X, n_splits, test_size, random_state):
'''Split data into training and test set.
Returns:
split_dict (list) = list of dictionaries, each holding one train/test split
'''
split_dict = []
ss = ShuffleSplit(n_splits=n_splits, test_size=test_size, random_state=random_state)
for train_index, test_index in ss.split(X):
split = {'X_train': X.iloc[train_index], 'X_test': X.iloc[test_index] }
split_dict.append(split)
return split_dict
def get_splits(self, X, n_splits, test_size, random_state):
split_dict_s=self.get_n_splits(X[X.sexist == 1], n_splits, test_size, random_state)
split_dict_ns=self.get_n_splits(X[X.sexist == 0], n_splits, test_size, random_state)
X_train=pd.concat([split_dict_s[0]['X_train'], split_dict_ns[0]['X_train']])
X_test=pd.concat([split_dict_s[0]['X_test'], split_dict_ns[0]['X_test']])
return X_train, X_test
def get_original_data(self, data):
'''Gets original data.'''
return data[data['of_id'].isnull()]
def get_adversarial_examples(self, data):
'''Gets adversarial examples.'''
return data[data['of_id'].notna()]
def get_modified_data(self, original_data, adversarial_examples, sample_proportion):
'''Modifies given data domain by injecting adversarial examples (while maintaining equal size)
Step 1.Sample 'sample_proportion' of the sexist examples from original data domain.
Step 2.Retrieve the modified version of sexist examples on step 1 from adversarial examples
Step 3.Discard a corresponding number of non-sexist examples from the original data set. (to maintain equal size)
Step 4.Inject retrieved adversarial examples on step 2
Step 5.Shuffle
Example:
>>> training_sample_proportion=0.5, test_sample_proportion=1
>>> get_modified_data(original_data, adversarial_examples, training_sample_proportion)
'''
original_data = shuffle(original_data, random_state=0)
### To check the size end of the method
begin_len_sexist = len(original_data[original_data['sexist'] == 1])
begin_len_nonsexist = len(original_data[original_data['sexist'] == 0])
###
#Step 1.Sample 'sample_proportion' of the sexist examples
sexist = original_data[original_data['sexist'] == 1]
sample_count = int(len(sexist) * sample_proportion)
sexist = sexist.head(sample_count)
#Step 2.Retrieve the modified version of sexist examples on step 1
adversarial_examples = adversarial_examples[adversarial_examples['of_id'].isin(sexist['_id'])]
#print(len(adversarial_examples))
if len(adversarial_examples) > sample_count:
#There might be more than one modified example for a sexist tweet. Select the first one.
adversarial_examples = adversarial_examples.drop_duplicates(subset ='of_id', keep = 'first')
#print(len(adversarial_examples))
#Step 3.Discard a corresponding number of non-sexist examples from the original data set. (to maintain equal size)
non_sexist = original_data[original_data['sexist'] == 0].head(len(adversarial_examples))
original_data = original_data[~original_data['_id'].isin(non_sexist['_id'])]
#Step 4.Inject retrieved adversarial examples on step 2
#NOTE: When sexist examples > nonsexist examples, adversarial_examples might be more than non_sexist example count.
#To maintain equal size in that case, concat original_data with: adversarial_examples.head(len(non_sexist)
original_data = pd.concat([original_data, adversarial_examples.head(len(non_sexist))], axis=0)
######### To compare the data size before and after injection ##############
end_len_sexist = len(original_data[original_data['sexist'] == 1])
end_len_nonsexist = len(original_data[original_data['sexist'] == 0])
if begin_len_sexist != end_len_sexist or begin_len_nonsexist != end_len_nonsexist:
print('equal size did not maintain: sexist begin {} sexist end {} nonsexist begin {} nonsexist end {}'.format(begin_len_sexist, end_len_sexist, begin_len_nonsexist, end_len_nonsexist))
##########################################################################
#Step 5.Shuffle
original_data=shuffle(original_data, random_state=0)
return original_data
def downsample(self, df, random_state):
'''Balances dataset by downsampling the majority class.
Uses sklearn resample method to downsample.
'''
nonsexist_count=len(df[df.sexist==0])
sexist_count=len(df[df.sexist==1])
# Separate majority and minority classes
df_minority, df_majority=None, None
n_samples= 0
if sexist_count < nonsexist_count:
df_minority = df[df.sexist==1]
df_majority = df[df.sexist==0]
n_samples=sexist_count
else:
df_minority = df[df.sexist==0]
df_majority = df[df.sexist==1]
n_samples=nonsexist_count
# Downsample majority class
df_majority_downsampled = resample(df_majority,
replace=False, # sample without replacement
n_samples=n_samples, # to match minority class
random_state=random_state) # reproducible results
# Combine minority class with downsampled majority class
df_downsampled = pd.concat([df_majority_downsampled, df_minority])
# The script generates the Dataframes with the VC, PBLH, u_mean.
import BoundaryLayerToolbox as blt
import pandas as pd
import numpy as np
import h5py
import os
import shutil
path2wrf = '/Volumes/BUFFALO_SOLDIER/datos_VC/'
path2DataFrames = "../datos/dataframes_VC/"
path2pollutants = "../datos/contaminantes/2015/"
months = {'jan': '01',
'feb': '02',
'mar': '03',
'apr': '04',
'may': '05',
'jun': '06',
'jul': '07',
'aug': '08',
'sep': '09',
'oct': '10',
'nov': '11',
'dic': '12'}
def E1or30(month):
if month in ['jan', 'mar', 'may', 'jul', 'aug', 'oct', 'dic']:
return '31'
elif month in ['apr', 'jun', 'sep', 'nov']:
return '30'
elif month == 'feb':
return '28'
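# Note (added for clarity): February is hard-coded to 28 days, which is fine here
# because the script only processes 2015, a non-leap year.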
location = ['MER', 'PED', 'SAG', 'TLA', 'UIZ', 'SFE']
stations = pd.read_csv('../datos/Stations_Info.csv', index_col=0)
xlat = np.loadtxt('../datos/xlat_d02_interpolado.txt')
xlong = np.loadtxt('../datos/xlong_d02_interpolado.txt')
h_i = "00:00:00"
h_f = "23:50:00"
h_f_wrf = "23:00:00"
print('***START***')
for st in location:
print('-- ' + st + ' --')
xlat_st, xlong_st = stations.loc[st][['Latitud', 'Longitud']]
# contaminantes
o3_path = path2pollutants + st + "/" + st + "_O3_2015.csv"
o3_2015 = pd.read_csv(o3_path,
names=['date', 'station', 'pollutant', 'O3',
'units'])
o3_2015.index = pd.date_range('2015-01-01 01:00', '2016-01-01 00:00',
freq='1H')
o3_2015 = o3_2015.reindex(pd.date_range('2015-01-01 00:00',
'2015-12-31 23:00', freq='1H'))
o3_2015 = o3_2015.drop(['date', 'station', 'pollutant', 'units'], axis=1)
pm25_path = path2pollutants + st + "/" + st + "_PM2.5_2015.csv"
pm25_2015 = pd.read_csv(pm25_path, names=['date', 'station',
'pollutant', 'PM2.5', 'units'])
pm25_2015.index = pd.date_range('2015-01-01 01:00',
'2016-01-01 00:00', freq='1H')
pm25_2015 = pm25_2015.reindex(pd.date_range('2015-01-01 00:00',
'2015-12-31 23:00', freq='1H'))
pm25_2015 = pm25_2015.drop(['date', 'station', 'pollutant', 'units'],
axis=1)
pm10_path = path2pollutants + st + "/" + st + "_PM10_2015.csv"
pm10_2015 = pd.read_csv(pm10_path, names=['date', 'station', 'pollutant',
'PM10', 'units'])
pm10_2015.index = pd.date_range('2015-01-01 01:00', '2016-01-01 00:00',
freq='1H')
pm10_2015 = pm10_2015.reindex(pd.date_range('2015-01-01 00:00',
'2015-12-31 23:00', freq='1H'))
pm10_2015 = pm10_2015.drop(['date', 'station', 'pollutant', 'units'],
axis=1)
Pollutants = pd.concat([o3_2015, pm25_2015, pm10_2015], axis=1)
VC_year = pd.DataFrame()
dir = path2DataFrames + st
if os.path.exists(dir):
shutil.rmtree(dir)
os.makedirs(dir)
for mm in months.keys():
date_i = '2015-' + months[mm] + '-01' + ' ' + h_i
date_f = '2015-' + months[mm] + '-' + E1or30(mm) + ' ' + h_f
date_f_wrf = '2015-' + months[mm] + '-' + E1or30(mm) + ' ' + h_f_wrf
xx, yy = blt.near_coord_loc(xlong, xlat, xlong_st, xlat_st)
file_24 = h5py.File(path2wrf + months[mm] + '/' + mm + '_24.h5', 'r')
vc_24 = np.array(file_24.get('vc_24h'))
pblh_24 = np.array(file_24.get('pblh_24h'))
u_mean_24 = np.array(file_24.get('u_mean_24h'))
if mm == 'nov':
BEG = pd.date_range('2015-11-01 00:00:00', '2015-11-05 23:00:00',
freq='1H')
ENDD = pd.date_range('2015-11-07 00:00:00', '2015-11-18 23:00:00',
freq='1H')
BEG = BEG.union(ENDD)
ENDD = pd.date_range('2015-11-20 00:00:00', '2015-11-20 23:00:00',
freq='1H')
BEG = BEG.union(ENDD)
ENDD = pd.date_range('2015-11-22 00:00:00', '2015-11-30 23:00:00',
freq='1H')
month_t_range = BEG.union(ENDD)
elif mm == 'jun':
BEG = pd.date_range('2015-06-01 00:00:00', '2015-06-11 23:00:00',
freq='1H')
ENDD = pd.date_range('2015-06-13 00:00:00', '2015-06-30 23:00:00',
freq='1H')
month_t_range = BEG.union(ENDD)
elif mm == 'oct':
BEG = pd.date_range('2015-10-01 00:00:00', '2015-10-03 23:00:00',
freq='1H')
ENDD = pd.date_range('2015-10-05 00:00:00', '2015-10-31 23:00:00',
freq='1H')
month_t_range = BEG.union(ENDD)
elif mm == 'jan':
month_t_range = pd.date_range('2015-01-15 00:00:00',
'2015-01-31 23:00:00', freq='1H')
elif mm == 'apr':
month_t_range = pd.date_range('2015-04-02 00:00:00',
'2015-04-30 23:00:00', freq='1H')
else:
month_t_range = pd.date_range(date_i, date_f_wrf, freq='1H')
wrf_df = blt.wrf2dataframe(vc_24, pblh_24, u_mean_24, month_t_range,
xx, yy)
wrf_df = wrf_df.asfreq('1H')
VC_year = pd.concat([VC_year, wrf_df], axis=0)
#!/usr/bin/env python
# coding: utf-8
# # COVID-19 - Global Cases - EDA and Forecasting
# This is the data repository for the 2019 Novel Coronavirus Visual Dashboard operated by the Johns Hopkins University Center for Systems Science and Engineering (JHU CSSE). Also, Supported by ESRI Living Atlas Team and the Johns Hopkins University Applied Physics Lab (JHU APL).
#
# Data is sourced from https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data
#
#
# * Visual Dashboard (desktop):
# https://www.arcgis.com/apps/opsdashboard/index.html#/bda7594740fd40299423467b48e9ecf6
#
# * Visual Dashboard (mobile):
# http://www.arcgis.com/apps/opsdashboard/index.html#/85320e2ea5424dfaaa75ae62e5c06e61
#
# * Lancet Article:
# An interactive web-based dashboard to track COVID-19 in real time
#
# * Provided by Johns Hopkins University Center for Systems Science and Engineering (JHU CSSE):
# https://systems.jhu.edu/
#
# * Data Sources:
#
# - World Health Organization (WHO): https://www.who.int/
# - DXY.cn. Pneumonia. 2020. http://3g.dxy.cn/newh5/view/pneumonia.
# - BNO News: https://bnonews.com/index.php/2020/02/the-latest-coronavirus-cases/
# - National Health Commission of the People’s Republic of China (NHC):
# http://www.nhc.gov.cn/xcs/yqtb/list_gzbd.shtml
# - China CDC (CCDC): http://weekly.chinacdc.cn/news/TrackingtheEpidemic.htm
# - Hong Kong Department of Health: https://www.chp.gov.hk/en/features/102465.html
# - Macau Government: https://www.ssm.gov.mo/portal/
# - Taiwan CDC: https://sites.google.com/cdc.gov.tw/2019ncov/taiwan?authuser=0
# - US CDC: https://www.cdc.gov/coronavirus/2019-ncov/index.html
# - Government of Canada: https://www.canada.ca/en/public-health/services/diseases/coronavirus.html
# - Australia Government Department of Health: https://www.health.gov.au/news/coronavirus-update-at-a-glance
# - European Centre for Disease Prevention and Control (ECDC): https://www.ecdc.europa.eu/en/geographical-distribution-2019-ncov-cases
# - Ministry of Health Singapore (MOH): https://www.moh.gov.sg/covid-19
# - Italy Ministry of Health: http://www.salute.gov.it/nuovocoronavirus
#
# - Additional Information about the Visual Dashboard:
# https://systems.jhu.edu/research/public-health/ncov/
#
# Contact Us:
#
# Email: <EMAIL>
#
# Terms of Use:
#
# This GitHub repo and its contents herein, including all data, mapping, and analysis, copyright 2020 Johns Hopkins University, all rights reserved, is provided to the public strictly for educational and academic research purposes. The Website relies upon publicly available data from multiple sources, that do not always agree. The Johns Hopkins University hereby disclaims any and all representations and warranties with respect to the Website, including accuracy, fitness for use, and merchantability. Reliance on the Website for medical guidance or use of the Website in commerce is strictly prohibited.
# __For better viewing experience, I recommend to enable NBextensions as guided @__
#
# https://github.com/lsunku/DataScience/tree/master/JupyterNotebook
# # Steps invoved in this notebook
# 1. Import Python Libraries for data analysis and ML
# 2. Local user defined functions
# 3. Sourcing the Data
# 4. Inspect and Clean the Data
# 5. Exploratory Data Analysis
# 6. Preparing the data for modelling (train-test split, rescaling etc; a short illustrative sketch follows the imports below)
# 7. Model evaluation for Advanced Regression Criteria
# 8. Linear Regression Model for World Wide Case Predictions
# 9. Linear Regression Model for Italy Predictions
# 10. Linear Regression Model for US Predictions
# 11. Linear Regression Model for Spain Predictions
# 12. Linear Regression Model for Germany Predictions
# 13. Linear Regression Model for India Predictions
# __Notes:__ Currently, I have used only time_series_covid19_confirmed_global for the following analysis. When I get time, I shall enhance the same with additional files time_series_covid19_deaths_global, time_series_covid19_recovered_global and integrate with daily reports.
# # __Import Python Functions__
# In[284]:
# Local classes and Local flags
# Local Classes
class color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
# Debug flag for investigative purpose
DEBUG = 0
# Default random_state
rndm_stat = 42
# In[285]:
# Python libraries for Data processing and analysis
import time as time
strt = time.time()
import pandas as pd
pd.set_option('display.max_columns', 200)
pd.set_option('display.max_rows', 100)
pd.options.mode.use_inf_as_na = True
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
import glob
from matplotlib.pyplot import figure
import warnings
import math
import itertools
warnings.filterwarnings('ignore')
sns.set_style("whitegrid")
from math import sqrt
import re
from prettytable import PrettyTable
# ML Libraries
import statsmodels
import statsmodels.api as sm
import sklearn as sk
from sklearn.model_selection import train_test_split,GridSearchCV, KFold,RandomizedSearchCV,StratifiedKFold
from sklearn.metrics import r2_score,mean_squared_error,mean_absolute_error
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder, StandardScaler,OrdinalEncoder,LabelEncoder,Normalizer,RobustScaler,PowerTransformer,PolynomialFeatures
from statsmodels.stats.outliers_influence import variance_inflation_factor
import xgboost
from sklearn.ensemble import GradientBoostingRegressor,RandomForestRegressor
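# In[ ]:
# Illustrative sketch only (not part of the original analysis): the basic
# train/test split and rescaling pattern referred to in workflow step 6 above.
# `toy` is a stand-in DataFrame, not the COVID data loaded later.
toy = pd.DataFrame({'day_num': np.arange(50), 'cases': np.arange(50) ** 2})
toy_train, toy_test = train_test_split(toy, train_size=0.7, random_state=rndm_stat)
scaler = MinMaxScaler()
train_scaled = scaler.fit_transform(toy_train[['day_num']])  # fit on training data only
test_scaled = scaler.transform(toy_test[['day_num']])        # reuse the fitted scaler on the test split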
# # __Local User Defined Functions__
# ## Local functions for data overview and data cleaning
# In[286]:
# local functions
# Function to read a file & Store it in Pandas
# read_file takes either csv or excel file as input and reuturns a pandas DF and
# also prints head, tail, description, info and shape of the DF
def read_file(l_fname,l_path,head=0):
i = l_fname.split(".")
f_path = l_path+'/'+l_fname
print(f_path,i[0],i[1])
if (i[1] == "xlsx"):
l_df = pd.read_excel(f_path,header=head,encoding = "ISO-8859-1",infer_datetime_format=True)
'''
CIS 419/519 project: Using decision tree ensembles to infer the pathological
cause of age-related neurodegenerative changes based on clinical assessment
nadfahors: <NAME>, <NAME>, & <NAME>
This file contains code for preparing NACC data for analysis, including:
* synthesis of pathology data to create pathology class outcomes
* dropping uninformative variables from predictor set
* identifying and merging/resolving redundant clusters of variables
* identifying missing data codes and replacing with NaNs as appropriate
* creating change variables from longitudinal data
* imputation of missing data
* categorizing retained variables as interval/ratio, ordinal, or nominal
* creation of dummy variables for nominal variables
* standardizing interval/ratio and ordinal variables
* creating date variables, then converting these to useful ages or intervals
* quadratic expansion for interval/ratio variables?
'''
# Module imports
import pandas as pd
import numpy as np
import datetime
# Read in full dataset. Warning: this is about 340 MB.
fulldf = pd.read_csv('investigator_nacc48.csv')
# List of Uniform Data Set (UDS) values that will serve as potential
# predictors. Those with a "False" next to them will be excluded after data
# preparation; those with a True will be kept.
xvar = pd.read_csv('xvar.csv')
# Variables from the NACC neuropathology table that will be used to group
# individuals by pathology class:
# 1) Alzheimer's disease (AD);
# 2) frontotemporal lobar degeneration due to tauopathy (FTLD-tau)
# 3) frontotemporal lobar degeneration due to TDP-43 (FTLD-TDP)
# 4) Lewy body disease due to alpha synuclein (including Lewy body dementia and Parkinson's disease)
# 5) vascular disease
# Path classes: AD (ABC criteria); FTLD-tau; FTLD-TDP, including ALS; Lewy body disease (are PD patients captured here?); vascular
npvar = pd.DataFrame(np.array(["NPPMIH",0, # Postmortem interval--keep in as a potential confound variable?
"NPFIX",0,
"NPFIXX",0,
"NPWBRWT",0,
"NPWBRF",0,
"NACCBRNN",0,
"NPGRCCA",0,
"NPGRLA",0,
"NPGRHA",0,
"NPGRSNH",0,
"NPGRLCH",0,
"NACCAVAS",0,
"NPTAN",False,
"NPTANX",False,
"NPABAN",False,
"NPABANX",False,
"NPASAN",False,
"NPASANX",False,
"NPTDPAN",False,
"NPTDPANX",False,
"NPHISMB",False,
"NPHISG",False,
"NPHISSS",False,
"NPHIST",False,
"NPHISO",False,
"NPHISOX",False,
"NPTHAL",False,# Use for ABC scoring to create ordinal measure of AD change
"NACCBRAA",False,# Use for ABC scoring to create ordinal measure of AD change
"NACCNEUR",False,# Use for ABC scoring to create ordinal measure of AD change
"NPADNC",False,# Use for ABC scoring to create ordinal measure of AD change
"NACCDIFF",False,
"NACCVASC",False,# Vasc presence/absence
"NACCAMY",False,
"NPLINF",False,
"NPLAC",False,
"NPINF",False,# Derived variable summarizing several assessments of infarcts and lacunes
"NPINF1A",False,
"NPINF1B",False,
"NPINF1D",False,
"NPINF1F",False,
"NPINF2A",False,
"NPINF2B",False,
"NPINF2D",False,
"NPINF2F",False,
"NPINF3A",False,
"NPINF3B",False,
"NPINF3D",False,
"NPINF3F",False,
"NPINF4A",False,
"NPINF4B",False,
"NPINF4D",False,
"NPINF4F",False,
"NACCINF",False,
"NPHEM",False,
"NPHEMO",False,
"NPHEMO1",False,
"NPHEMO2",False,
"NPHEMO3",False,
"NPMICRO",False,
"NPOLD",False,
"NPOLD1",False,
"NPOLD2",False,
"NPOLD3",False,
"NPOLD4",False,
"NACCMICR",False,# Derived variable for microinfarcts
"NPOLDD",False,
"NPOLDD1",False,
"NPOLDD2",False,
"NPOLDD3",False,
"NPOLDD4",False,
"NACCHEM",False,# Derived variables for microbleeds and hemorrhages
"NACCARTE",False,
"NPWMR",False,
"NPPATH",False,# Other ischemic/vascular pathology
"NACCNEC",False,
"NPPATH2",False,
"NPPATH3",False,
"NPPATH4",False,
"NPPATH5",False,
"NPPATH6",False,
"NPPATH7",False,
"NPPATH8",False,
"NPPATH9",False,
"NPPATH10",False,
"NPPATH11",False,
"NPPATHO",False,
"NPPATHOX",False,
"NPART",False,
"NPOANG",False,
"NACCLEWY",False,# Note that limbic/transitional and amygdala-predominant are not differentiated
"NPLBOD",False,# But here they are differentiated!
"NPNLOSS",False,
"NPHIPSCL",False,
"NPSCL",False,
"NPFTDTAU",False,# FTLD-tau
"NACCPICK",False,# FTLD-tau
"NPFTDT2",False,# FTLD-tau
"NACCCBD",False,# FTLD-tau
"NACCPROG",False,# FTLD-tau
"NPFTDT5",False,# FTLD-tau
"NPFTDT6",False,# FTLD-tau
"NPFTDT7",False,# FTLD-tau
"NPFTDT8",False,# This is FTLD-tau but associated with ALS/parkinsonism--wut?
"NPFTDT9",False,# tangle-dominant disease--is this PART? Maybe exclude cases who have this as only path type.
"NPFTDT10",False,# FTLD-tau: other 3R+4R tauopathy. What is this if not AD? Maybe exclude. How many cases?
"NPFRONT",False,# FTLD-tau
"NPTAU",False,# FTLD-tau
"NPFTD",False,# FTLD-TDP
"NPFTDTDP",False,# FTLD-TDP
"NPALSMND",False,# FTLD-TDP (but exclude FUS and SOD1)
"NPOFTD",False,
"NPOFTD1",False,
"NPOFTD2",False,
"NPOFTD3",False,
"NPOFTD4",False,
"NPOFTD5",False,
"NPFTDNO",False,
"NPFTDSPC",False,
"NPTDPA",False,# In second pass, use anatomical distribution to stage
"NPTDPB",False,# In second pass, use anatomical distribution to stage
"NPTDPC",False,# In second pass, use anatomical distribution to stage
"NPTDPD",False,# In second pass, use anatomical distribution to stage
"NPTDPE",False,# In second pass, use anatomical distribution to stage
"NPPDXA",False,# Exclude?
"NPPDXB",False,# Exclude
"NACCPRIO",False,# Exclude
"NPPDXD",False,# Exclude
"NPPDXE",False,
"NPPDXF",False,
"NPPDXG",False,
"NPPDXH",False,
"NPPDXI",False,
"NPPDXJ",False,
"NPPDXK",False,
"NPPDXL",False,
"NPPDXM",False,
"NPPDXN",False,
"NACCDOWN",False,
"NACCOTHP",False,# Survey for exclusion criteria
"NACCWRI1",False,# Survey for exclusion criteria
"NACCWRI2",False,# Survey for exclusion criteria
"NACCWRI3",False,# Survey for exclusion criteria
"NACCBNKF",False,
"NPBNKB",False,
"NACCFORM",False,
"NACCPARA",False,
"NACCCSFP",False,
"NPBNKF",False,
"NPFAUT",False,
"NPFAUT1",False,
"NPFAUT2",False,
"NPFAUT3",False,
"NPFAUT4",False,
"NACCINT",False,
"NPNIT",False,
"NPCERAD",False,# What sort of variable?
"NPADRDA",False,
"NPOCRIT",False,
"NPVOTH",False,
"NPLEWYCS",False,
"NPGENE",True,# Family history--include in predictors?
"NPFHSPEC",False,# Code as dummy variables if useful.
"NPCHROM",False,# Exclusion factor? Genetic/chromosomal abnormalities
"NPPNORM",False,# Check all the following variables for redundancy with the ones above.
"NPCNORM",False,
"NPPADP",False,
"NPCADP",False,
"NPPAD",False,
"NPCAD",False,
"NPPLEWY",False,
"NPCLEWY",False,
"NPPVASC",False,
"NPCVASC",False,
"NPPFTLD",False,
"NPCFTLD",False,
"NPPHIPP",False,
"NPCHIPP",False,
"NPPPRION",False,
"NPCPRION",False,
"NPPOTH1",False,
"NPCOTH1",False,
"NPOTH1X",False,
"NPPOTH2",False,
"NPCOTH2",False,
"NPOTH2X",False,
"NPPOTH3",False,
"NPCOTH3",False,
"NPOTH3X",0]).reshape((-1,2)))
npvar.columns = ['Variable','Keep']
## Case selection process.
# Include only those with autopsy data.
aut = fulldf[fulldf.NACCAUTP == 1]
del fulldf
def table(a,b):
print(pd.crosstab(aut[a],aut[b],dropna=False,margins=True))
# Exclude for Down's, Huntington's, and other conditions.
aut = aut.loc[aut.DOWNS != 1]
aut = aut.loc[aut.HUNT != 1]
aut = aut.loc[aut.PRION != 1]
aut = aut.loc[~aut.MSAIF.isin([1,2,3])]
aut = aut.loc[~aut.NEOPIF.isin([1,2,3])]
aut = aut.loc[~aut.SCHIZOIF.isin([1,2,3])]
aut.index = list(range(aut.shape[0]))
# How many unique IDs?
# For now, keep in follow-up visits to increase our training data.
uids = aut.NACCID[~aut.NACCID.duplicated()]
#aut = aut[~aut.NACCID.duplicated()]
## Coding of pathology class outcomes.
# Create binary variables for the presence of each pathology class of interest.
# Code Alzheimer's disease pathology based on NPADNC, which implements
# ABC scoring based on Montine et al. (2012).
aut = aut.assign(ADPath = 0)
aut.loc[aut.NPADNC.isin((2,3)),'ADPath'] = 1
aut.loc[aut.NPPAD == 1,'ADPath'] = 1
# The following two commands make the ADPath variable false if the AD path
# diagnosis is as contributing, not as primary.
aut.loc[aut.NPPAD == 2,'ADPath'] = 0
aut.loc[aut.NPCAD == 1,'ADPath'] = 0
aut.loc[aut.NPPVASC == 1,'ADPath'] = 0
aut.loc[aut.NPPLEWY == 1,'ADPath'] = 0
aut.loc[aut.NPPFTLD == 1,'ADPath'] = 0
# Several variables pertain to FTLD tauopathies.
aut = aut.assign(TauPath = [0 for i in range(aut.shape[0])])
aut.loc[aut.NPFTDTAU == 1,'TauPath'] = 1
aut.loc[aut.NACCPICK == 1,'TauPath'] = 1
aut.loc[aut.NACCCBD == 1,'TauPath'] = 1
aut.loc[aut.NACCPROG == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT2 == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT5 == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT6 == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT7 == 1,'TauPath'] = 1
aut.loc[aut.NPFTDT9 == 1,'TauPath'] = 1
aut.loc[aut.NPFRONT == 1,'TauPath'] = 1
aut.loc[aut.NPTAU == 1,'TauPath'] = 1
aut.loc[aut.ADPath == 1, 'TauPath'] = 0
aut.loc[aut.NPCFTLD == 1, 'TauPath'] = 0
# Code Lewy body disease based on NPLBOD variable. Do not include amygdala-
# predominant, brainstem-predominant, or olfactory-only cases.
# See Toledo et al. (2016, Acta Neuropathol) and Irwin et al. (2018, Nat Rev
# Neuro).
aut = aut.assign(LBPath = [0 for i in range(aut.shape[0])])
aut.loc[aut.NPLBOD.isin((2,3)),'LBPath'] = 1
aut.loc[aut.NPPLEWY == 1,'LBPath'] = 1
aut.loc[aut.NPPLEWY == 2,'LBPath'] = 0
aut.loc[aut.NPCLEWY == 1,'LBPath'] = 0
aut.loc[(aut.ADPath == 1) & (aut.NPPLEWY != 1), 'LBPath'] = 0
aut.loc[(aut.TauPath == 1) & (aut.NPPLEWY != 1),'LBPath'] = 0
# Code TDP-43 pathology based on NPFTDTDP and NPALSMND, excluding FUS and SOD1
# cases.
aut = aut.assign(TDPPath = [0 for i in range(aut.shape[0])])
aut.loc[aut.NPFTD == 1,'TDPPath'] = 1
aut.loc[aut.NPFTDTDP == 1,'TDPPath'] = 1
aut.loc[aut.NPALSMND == 1,'TDPPath'] = 1
aut.loc[aut.ADPath == 1, 'TDPPath'] = 0
aut.loc[aut.LBPath == 1, 'TDPPath'] = 0
aut.loc[aut.TauPath == 1, 'TDPPath'] = 0
# Code vascular disease based on relevant derived variables:
aut = aut.assign(VPath = [0 for i in range(aut.shape[0])])
aut.loc[aut.NPINF == 1,'VPath'] = 1
aut.loc[aut.NACCMICR == 1,'VPath'] = 1
aut.loc[aut.NACCHEM == 1,'VPath'] = 1
aut.loc[aut.NPPATH == 1,'VPath'] = 1
aut.loc[aut.NPPVASC == 1,'VPath'] = 1
aut.loc[aut.NPPVASC == 2,'VPath'] = 0
aut.loc[aut.NPCVASC == 1,'VPath'] = 0
aut.loc[(aut.ADPath == 1) & (aut.NPPVASC != 1), 'VPath'] = 0
aut.loc[(aut.LBPath == 1) & (aut.NPPVASC != 1), 'VPath'] = 0
aut.loc[(aut.NPPFTLD == 1) & (aut.NPPVASC != 1),'VPath'] = 0
aut.loc[(aut.TDPPath == 1) & (aut.NPPVASC != 1), 'VPath'] = 0
aut.loc[(aut.TauPath == 1) & (aut.NPPVASC != 1), 'VPath'] = 0
aut = aut.assign(Class = aut.ADPath)
aut.loc[aut.TauPath == 1,'Class'] = 2
aut.loc[aut.TDPPath == 1,'Class'] = 3
aut.loc[aut.LBPath == 1,'Class'] = 4
aut.loc[aut.VPath == 1,'Class'] = 5
aut = aut.loc[aut.Class != 0]
aut.index = list(range(aut.shape[0]))
## Predictor variable preparation: one-hot-encoding, date/age/interval operations,
# consolidating redundant variables, consolidating free-text variables.
aut = aut.assign(DOB = aut.BIRTHYR)
aut = aut.assign(DOD = aut.NACCYOD)
aut = aut.assign(VISITDATE = aut.VISITYR)
for i in range(aut.shape[0]):
aut.loc[i,'DOB'] = datetime.datetime.strptime('-'.join([str(aut.BIRTHYR.loc[i]),str(aut.BIRTHMO.loc[i]),'01']),'%Y-%m-%d')
aut.loc[i,'DOD'] = datetime.datetime.strptime('-'.join([str(aut.NACCYOD.loc[i]),str(aut.NACCMOD.loc[i]),'01']),'%Y-%m-%d')
aut.loc[i,'VISITDATE'] = datetime.datetime.strptime('-'.join([str(aut.VISITYR.loc[i]),str(aut.VISITMO.loc[i]),str(aut.VISITDAY.loc[i])]),'%Y-%m-%d')
# Some time/interval variables
aut = aut.assign(SinceQUITSMOK = aut.NACCAGE - aut.QUITSMOK) # Years since quitting smoking
aut = aut.assign(AgeStroke = aut.NACCSTYR - aut.BIRTHYR)
aut = aut.assign(AgeTIA = aut.NACCTIYR - aut.BIRTHYR)
aut = aut.assign(AgePD = aut.PDYR - aut.BIRTHYR)
aut = aut.assign(AgePDOTHR = aut.PDOTHRYR - aut.BIRTHYR)
aut = aut.assign(AgeTBI = aut.TBIYEAR - aut.BIRTHYR)
aut = aut.assign(Duration = aut.NACCAGE - aut.DECAGE)
# Hispanic origin
aut.HISPORX = aut.HISPORX.str.lower()
aut.loc[aut.HISPORX == 'spanish','HISPORX'] = 'spain'
# Race. RACESECX and RACETERX have too few values to be useful.
aut.RACEX = aut.RACEX.str.lower().str.replace(' ','').str.replace('-','')
aut.loc[aut.RACEX.isin(['hispanic','puerto rican']),'RACEX'] = 'latino'
aut.loc[aut.RACEX.isin(['guam - chamorro']),'RACEX'] = 'chamorro'
aut.loc[aut.RACEX.isin(['multi racial']),'RACEX'] = 'multiracial'
# Other language. But actually, let's just drop this and code as English/non-English.
#aut.PRIMLANX = aut.PRIMLANX.str.lower().str.replace(' ','').str.replace('-','')
# Drug list. First get a list of all the unique drug names, then code as dummy variables.
# Update as of 04/01/2020: drugs alone are going to be a huge amount of work.
# For now, just rely on the NACC derived variables for diabetes meds, cardiac drugs, etc.
drugcols = ['DRUG' + str(i) for i in range(1,41)]
drugs = aut[drugcols].stack()
# Several varieties of insulin--important to distinguish?
# drop "*not-codable"
# drop "diphtheria/hepb/pertussis,acel/polio/tetanus"
drugs = drugs.unique()
drugs = [eachdrug.lower() for eachdrug in drugs.tolist()]
drugs = pd.Series(drugs)
drug_corrections = [("multivitamin with minerals","multivitamin"),
("multivitamin, prenatal","multivitamin"),
("omega 3-6-9","omega369"),
("omega-3","omega3"),
("vitamin-d","vitamin d"),
("acetyl-l-carnitine","acetyl l carnitine"),
("levodopa","levadopa"),
("pro-stat","prostat"),
("alpha-d-galactosidase","alpha d galactosidase"),
("indium pentetate in-111","indium pentetate in111"),
("fludeoxyglucose f-18","fludeoxyglucose f18"),
("calcium with vitamins d and k", "calcium-vitamin d-vitamin k"),
("aloe vera topical", "aloe vera"),
("ammonium lactate topical", "ammonium lactate")]
for i in range(len(drug_corrections)):
oldval = drug_corrections[i][0]
newval = drug_corrections[i][1]
drugs = drugs.str.replace(pat = oldval, repl = newval)
drugs = drugs.loc[drugs != "*not codable*"]
drugs = drugs.loc[drugs != "diphtheria/hepb/pertussis,acel/polio/tetanus"]
drugs = np.unique([ss for eachdrug in drugs for ss in eachdrug.split('-')])
drugs = np.unique([ss for eachdrug in drugs for ss in eachdrug.split('/')])
drugs.sort()
## Combining redundant variables. Often this reflects a change in form or
# variable name between UDS version 2 & 3.
aut.loc[(aut.CVPACE == -4) & (aut.CVPACDEF == 0),'CVPACE'] = 0
aut.loc[(aut.CVPACE == -4) & (aut.CVPACDEF == 1),'CVPACE'] = 1
xvar.loc[xvar.Variable == 'CVPACDEF','Keep'] = False
# Combine TBIBRIEF and TRAUMBRF.
aut.loc[(aut.TBIBRIEF == -4) & (aut.TRAUMBRF.isin([0])),'TBIBRIEF'] = 0
aut.loc[(aut.TBIBRIEF == -4) & (aut.TRAUMBRF.isin([1,2])),'TBIBRIEF'] = 1
xvar.loc[xvar.Variable == 'TRAUMBRF','Keep'] = False
# More data cleaning
aut.ABRUPT = aut.ABRUPT.replace(to_replace = 2, value = 1)
aut.FOCLSYM = aut.FOCLSYM.replace(to_replace = 2, value = 1)
aut.FOCLSIGN = aut.FOCLSIGN.replace(to_replace = 2, value = 1)
# Convert language to a binary variable (English/non-English)
aut = aut.assign(English = 0)
aut.loc[aut.PRIMLANG == 1,'English'] = 1
xvar.loc[xvar.Variable == 'PRIMLANG','Keep'] = False
# Some dummy coding
vv = xvar.Variable.loc[(xvar.Keep) & (xvar.Comments == "Dummy coding for (95,96,97,98)")]
for v in vv:
aut[v + '_couldnt'] = 0
aut.loc[aut[v].isin([95,96,97,98]),v + '_couldnt'] = 1
vv = xvar.Variable.loc[xvar.Comments == "Dummy coding for (995,996,997,998)"]
for v in vv:
aut[v + '_couldnt'] = 0
aut.loc[aut[v].isin([995,996,997,998]),v + '_couldnt'] = 1
# Drop all columns where xvar.Keep == False.
aut2 = aut
xvar.loc[xvar.Variable == 'NACCID','Keep'] = True
xvar.loc[xvar.Variable == 'NACCID','Type'] = "ID"
xvar.loc[xvar.Variable == 'VISITDATE','Keep'] = True
xvar.loc[xvar.Variable == 'VISITDATE','Type'] = "ID"
aut = aut.drop(columns = xvar.Variable[~xvar.Keep])
# Fill with NA values
xvar = xvar.loc[xvar.Keep]
xvar.index = range(xvar.shape[0])
for i in range(xvar.shape[0]):
if not xvar.NaNValues.isna()[i]:
v = xvar.Variable[i]
badval = eval(xvar.NaNValues[i])
#print(v,badval)
if isinstance(badval,int):
badval = [badval]
aut[v].mask(aut[v].isin(badval),inplace = True)
# Get rid of variables with very few meaningful observations.
valcounts = aut.describe().iloc[0]
aut = aut.drop(columns = valcounts.loc[valcounts < 100].index)
#aut = aut[valcounts.loc[valcounts >= 100].index]
# Find correlated variables and drop.
ac = aut.corr()
acs = ac.unstack(level = 0)
acs = acs.loc[abs(acs)>0.8]
acsind = list(acs.index)
diagnames = [ind for ind in acsind if ind[0] == ind[1]]
acs = acs.drop(labels=diagnames)
acs = pd.DataFrame(acs)
acs.columns = ['r']
acs['v1'] = acs.index
acs[['v1','v2']] = pd.DataFrame(acs['v1'].tolist(),index = acs.index)
y = aut.Class
X = aut.drop(columns = npvar.Variable.loc[npvar.Variable.isin(aut.columns)])
X = X.drop(columns = ['Class','ADPath','TauPath','TDPPath','LBPath','VPath'])
xd = X.describe().iloc[0]
# Impute numeric variables with the mean.
from sklearn.impute import SimpleImputer
numvar = X.columns.intersection(xvar.Variable.loc[xvar.Type == "Numeric"])
imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
imp_mean.fit(X[numvar])
Xnumimp = imp_mean.transform(X[numvar])
Xnumimp = pd.DataFrame(Xnumimp)
Xnumimp.columns = X[numvar].columns
# Impute ordinal variables with the median.
ordvar = X.columns.intersection(xvar.Variable.loc[xvar.Type == "Ordinal"])
imp_med = SimpleImputer(missing_values=np.nan, strategy='median')
imp_med.fit(X[ordvar])
Xordimp = imp_med.transform(X[ordvar])
Xordimp = pd.DataFrame(Xordimp)
Xordimp.columns = X[ordvar].columns
# Impute boolean variables with zero.
boolvar = X.columns.intersection(xvar.Variable.loc[xvar.Type == "Boolean"])
boolenc = SimpleImputer(missing_values = np.nan, strategy = 'constant',
fill_value = 0)
boolenc.fit(X[boolvar])
Xbool = boolenc.transform(X[boolvar])
Xbool = pd.DataFrame(Xbool)
Xbool.columns = X[boolvar].columns
# One-hot encoding for nominal (not boolean, ordinal, or numeric) variables.
from sklearn.preprocessing import OneHotEncoder
nomvar = X.columns.intersection(xvar.Variable.loc[xvar.Type == "Nominal"])
enc = OneHotEncoder(handle_unknown='ignore',sparse = False)
Xfull = X[nomvar].fillna(value = 0)
enc.fit(Xfull)
Xohe = enc.transform(Xfull)
Xohe = pd.DataFrame(Xohe)
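# Hedged sketch, not part of the original pipeline: one plausible way to stitch
# the imputed numeric, ordinal and boolean blocks together with the one-hot
# block. All four frames carry the default RangeIndex created above, so a
# column-wise concat keeps rows aligned. The OHE column names below are
# placeholders, not the real dummy labels.
Xohe.columns = ['OHE_' + str(i) for i in range(Xohe.shape[1])]
Xdesign = pd.concat([Xnumimp, Xordimp, Xbool, Xohe], axis=1)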
import datetime
from dateutil.relativedelta import *
from fuzzywuzzy import fuzz
import argparse
import glob
import numpy as np
import pandas as pd
from scipy.stats import ttest_1samp
import sys
import xarray as xr
from paths_bra import *
sys.path.append('./..')
from refuelplot import *
setup()
from utils import *
gen_path = bra_path + '/generation'
# get GWA version
parser = argparse.ArgumentParser(description='Insert optionally GWA')
parser.add_argument('-GWA')
args = parser.parse_args()
if(args.GWA == None):
GWA = "3"
else:
GWA = args.GWA
if GWA == "2":
results_path2 = results_path
results_path = results_path + '/results_GWA2'
# load generation data
print('load generation data')
# load usinas hourly
if gen_path + '/hourly/usinas.pkl' not in glob.glob(gen_path + '/hourly/*.pkl'):
USIh = pd.read_csv(gen_path + '/hourly/Comparativo_Geração_de_Energia_Semana_data_usinas.csv',
sep = ';', index_col = 0, parse_dates = True, dayfirst = True).iloc[1:,[6,8]].sort_index()
# remove missing values
USIh = USIh.loc[USIh.index.notnull()].dropna()
USIh.columns = ['usina','prod_GWh']
# in RIO DO FOGO there is one duplicate hour after one missing hour -> change timestamps of those hours
idxUSIh = USIh.index.values
midxUSIh = USIh.reset_index().set_index(['usina','Data Escala de Tempo 1 GE Comp 3']).index
idxUSIh[midxUSIh.duplicated(keep='last')] = idxUSIh[midxUSIh.duplicated(keep='first')] - np.timedelta64(1,'h')
USIh.index = pd.DatetimeIndex(idxUSIh)
USIhs = USIh.reset_index().set_index(['usina','index']).unstack(level=0).prod_GWh
USIhs.to_csv(gen_path + '/hourly/usinas.csv')
USIhs.to_pickle(gen_path + '/hourly/usinas.pkl')
wpUSIhs = pd.read_pickle(gen_path + '/hourly/usinas.pkl')
# load and match aneel and ons windparks
def get_cap_df(cap,comdate):
com = pd.DataFrame({'capacity': cap}).groupby(comdate).sum()
cap_cum = com.capacity.cumsum()
# if only years given for commissioning dates -> gradual capacity increase over year, full capacity at end of year
if type(cap_cum.index.values[0]) == np.int64:
cap_cum.index = [np.datetime64(str(int(year))+"-12-31 23:00:00") for year in cap_cum.index.values]
# create yearly dates at yearends
drcc = pd.date_range(np.datetime64('2005-12-31 23:00:00'),
np.datetime64('2019-12-31 23:00:00'),freq= 'y')
cap_cum = pd.Series(drcc.map(cap_cum),index = drcc)
# if first year emtpy: either year before or 0 if nothing before
if(sum(com.index<2000) > 0):
cap_cum[0] = com.cumsum()[com.index<2000].max()
else:
cap_cum[0] = 0
# if missing years -> put capacity of year before
cap_cum = cap_cum.ffill()
dr = pd.date_range('1/1/2006','31/12/2019 23:00:00',freq = 'h')
cap_ts = pd.Series(dr.map(cap_cum),index = dr)
cap_ts[0] = cap_cum[cap_cum.index<=pd.Timestamp('2006-01-01')].max()
if type(comdate[0]) == np.int64:
return(cap_ts.interpolate(method='linear'))
else:
return(cap_ts.fillna(method='ffill'))
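# Hedged usage note (illustrative only; the ANEEL column names below are
# assumptions, not taken from turbine_data.csv):
# cap_ts = get_cap_df(ANL['capacity'].values, ANL['commissioning_year'].values)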
def matchWords(word, statements):
# function to match a word to different statements
# output: ratio of matching (0-100) for all provided statements
results = []
for s in statements:
r = fuzz.ratio(word, s)
results.append(r)
return results
def match_string(string, array):
# function for matching casefolded strings
Slc = string.strip().casefold()
Alc = [arr.casefold() for arr in array.str.strip().unique()]
scores = matchWords(Slc, Alc)
mscore = max(scores)
strarr = array.unique()[np.where(np.array(scores)==mscore)][0]
return(string,strarr,mscore)
def match_anl(string):
# function to match ONS to ANL windparks
return(match_string(string,ANL2.name))
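# Hedged usage example (the park names below are made up): match_string returns
# the query, its best fuzzy match in the array, and the score on a 0-100 scale, e.g.
# match_string('SAO BENTO NORTE', pd.Series(['SAO BENTO DO NORTE', 'LAGOA DO BARRO']))
# -> ('SAO BENTO NORTE', 'SAO BENTO DO NORTE', <score close to 100>)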
print('match wind parks')
# load ANEEL and ONS windparks
ONS = pd.read_csv(bra_path + '/ONS_windparks.csv', index_col = 0)
# remove those with CONJUNTO EOLICO - they're there twice and capacities don't match with ANEEL data
ONS = ONS[~ONS.usina.str.contains('CONJUNTO EOLICO')]
# remove some other duplicate windparks
ONS = ONS[[d not in [' CANOA QUEBRADA (E-RV-ACEP)',' PV DO NORDESTE',' SM (SANTA MARIA)',' SÃO BENTO NORTE II'] for d in ONS.usina]]
ANL = pd.read_csv(bra_path + '/turbine_data.csv', index_col = 0)
# characters and strings to replace for better matching
letters = {'õ':'o',
'ó':'o',
'ã':'a',
'á':'a',
'â':'a',
'é':'e',
'Ã':'A',
'Á':'A',
'Â':'A',
'Ó':'O',
'É':'E',
'ú':'u',
'ô':'o',
'Ô':'O',
'ú':'u',
'Ú':'U',
'ç':'c',
'Ç':'C',
'í':'i',
'Í':'I',
'Ê':'E'}
remove = {' 2LER':'',
' 2LFA':'',
' LFA':'',
'EOL ':'',
' 3LER':'',
'Usina Eolica ':'',
'Eólica ':'',
' ENERGIAS RENOVAVEIS':'',
# ' CONJUNTO EOLICO':'',
'\(E-BV-ACEP\)':'',
'\(E-RV-ACEP\)':'',
'\(BELA BISTA\)':'',
'\(ENERGEN\)':'',
'\(Antiga Ventos Maranhenses 05\)':'',
'PARQUE EOLICO ':'',
' - N HORIZ':'',
'ENERGETICA S/A':'',
'\(ILHEUS\)':'',
' EOLOS':'',
'S\.A\.':''}
replace = {'LAG DO':'LAGOA DO',
'VENTOS S VICENTE':'VENTOS DE SAO VICENTE',
'SERRA BABILONIA':'SERRA DA BABILONIA',
'CORREDOR SENANDES':'CORREDOR DO SENANDES',
'SAO BENTO NORTE':'SAO BENTO DO NORTE',
'GAMELEIRAS':'GAMELERIAS',
'Lagoinha':'Lagoinh',
'PAPAGAIOS':'PAPAGAIO',
'VENTOS DE SAO ABRAAO':'VENTOS DO SANTO ABRAAO',
'VENTOS DO SAO MARIO':'VENTOS DE SAO MARIO',
'DAGUA':'D AGUA',
'B VEN':'BONS VENTOS',
'NOVA BURITI':'BURITI',
'NOVA CAJUCOCO':'CAJUCOCO',
'PALMAS':'DE PALMAS',
'DE PALMARES':'PALMARES',
'PV DO NORDESTE':'VENTOS DO NORDESTE',
'Aura Lagoa do Barro':'Lagoa do Barro',
'AURA LAGOA DO BARRO':'LAGOA DO BARRO',
'LAGOA BARRO':'LAGOA DO BARRO',
'GRAVATA':'GRAVATA FRUITRADE',
'FAZENDA DO ROSARIO':'FAZENDA ROSARIO',
'Parque Eolico do Horizonte':'Ventos de Horizonte',
'S BENTO':'SAO BENTO',
'SANTO ANTONIO (BTG PACTUAL)':'SANTO ANTONIO DE PADUA',
'SM \(SANTA MARIA\)':'SANTA MARIA',
'SAO JORGE CE':'SAO JORGE',
'VENT DA ST ESPERANCA':'VENTOS DA SANTA ESPERANCA',
'VENTOS DA STA DULCE':'VENTOS DA SANTA DULCE',
'ESPERANCA NORDESTE':'ESPERANCA DO NORDESTE',
'Eolica Delta':'Delta',
'Eolica Serra das Vacas':'Serra das Vacas',
'Ventos de Santo Augusto':'Santo Augusto',
'Ventos do Sao Gabriel':'Sao Gabriel',
'GE <NAME>':'<NAME>'}
numbers = {'10':'X',
'11':'XI',
'12':'XII',
'13':'XIII',
'14':'XIV',
'15':'XV',
'17':'XVII',
'19':'XIX',
'21':'XXI',
'23':'XXIII',
'24':'XXIV',
'25':'XXV',
'26':'XXVI',
'27':'XXVII',
'28':'XXVIII',
'29':'XXIX',
'31':'XXXI',
'34':'XXXIV',
'35':'XXXV',
'36':'XXXVI',
'01':'I',
'02':'II',
'03':'III',
'04':'IV',
'05':'V',
'06':'VI',
'07':'VII',
'08':'VIII',
'09':'IX',
'1':'I',
'2':'II',
'3':'III',
'4':'IV',
'5':'V',
'6':'VI',
'7':'VII',
'8':'VIII',
'9':'IX'}
# replace characters
ONS2 = ONS.copy(deep=True)
ANL2 = ANL.copy(deep=True)
for i in letters:
ONS2.usina = ONS2.usina.str.replace(i,letters.get(i))
ANL2.name = ANL2.name.str.replace(i,letters.get(i))
for i in replace:
ONS2.usina = ONS2.usina.str.replace(i,replace.get(i))
ANL2.name = ANL2.name.str.replace(i,replace.get(i))
for i in remove:
ONS2.usina = ONS2.usina.str.replace(i,remove.get(i))
for i in numbers:
ONS2.usina = ONS2.usina.str.replace(i,numbers.get(i))
ANL2.name = ANL2.name.str.replace(i,numbers.get(i))
# match windparks
matches = ONS2.usina.apply(match_anl).apply(pd.Series)
matches.columns = ['ONS_name','ANL_name','score']
ONSd = pd.Series(ONS.usina.values,index=ONS2.usina.values)
import pandas as pd
import numpy as np
def read_data(grb):
outpath='./data/'
db=pd.read_csv(outpath+grb+'.txt',skiprows=[0,1],header=None,sep='\s+')
# find index where each block starts
idx = db[db[0]=='NO'].index.values.astype(int)
idx_name = db[db[0]=='!'].index
# NOTE: 140903 has inverted blocks
if grb == '140903A':
db01gamma=db[0:idx[0]]
else:
db01=db[0:idx[0]]
if len(idx) > 1:
if grb == '140903A':
db01=db[idx[0]+2:idx[1]]
else:
db01gamma=db[idx[0]+2:idx[1]]
else:
if grb == '140903A':
db01=db[idx[0]+2:]
else:
db01gamma=db[idx[0]+2:]
db0=db01
db0gamma=db01gamma
if len(idx_name) > 1:
if 'wt' in db[1].iloc[idx_name[1]]:
# if second block is wt then first block is slew
db02=db[idx[1]+2:idx[2]]
db02gamma=db[idx[2]+2:idx[3]]
db03=db[idx[3]+2:idx[4]]
db03gamma=db[idx[4]+2:]
db0=pd.concat([db01,db02,db03])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os.path
import configparser
import pandas as pd
from tqdm import tqdm
from tsfresh.utilities.dataframe_functions import impute
from tsfresh.feature_extraction import feature_calculators
def main():
if len(sys.argv) < 2:
print('Usage: ./extract_best_features.py datafile.csv')
exit(1)
load_params()
if not os.path.isfile('timeseries.csv') or not os.path.isfile('labels.csv'):
filename = sys.argv[1]
raw_price_data = pd.read_csv(filename, index_col=None, header=0, thousands=',')
import random
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
NaT,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestDataFrameSortValues:
def test_sort_values(self):
frame = DataFrame(
[[1, 1, 2], [3, 1, 0], [4, 5, 6]], index=[1, 2, 3], columns=list("ABC")
)
# by column (axis=0)
sorted_df = frame.sort_values(by="A")
indexer = frame["A"].argsort().values
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
indexer = indexer[::-1]
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=["A"], ascending=[False])
tm.assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=["B", "C"])
expected = frame.loc[[2, 1, 3]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=["B", "C"], ascending=False)
tm.assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=["B", "A"], ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=2, inplace=True)
# by row (axis=1): GH#10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis="columns")
expected = frame.reindex(columns=["B", "A", "C"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
msg = r"Length of ascending \(5\) != length of by \(2\)"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=0, ascending=[True] * 5)
def test_sort_values_by_empty_list(self):
# https://github.com/pandas-dev/pandas/issues/40258
expected = DataFrame({"a": [1, 4, 2, 5, 3, 6]})
result = expected.sort_values(by=[])
tm.assert_frame_equal(result, expected)
assert result is not expected
def test_sort_values_inplace(self):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", inplace=True)
assert return_value is None
expected = frame.sort_values(by="A")
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by=1, axis=1, inplace=True)
assert return_value is None
expected = frame.sort_values(by=1, axis=1)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", ascending=False, inplace=True)
assert return_value is None
expected = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by=["A", "B"], ascending=False, inplace=True
)
assert return_value is None
expected = frame.sort_values(by=["A", "B"], ascending=False)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_multicolumn(self):
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
random.shuffle(A)
random.shuffle(B)
frame = DataFrame({"A": A, "B": B, "C": np.random.randn(100)})
result = frame.sort_values(by=["A", "B"])
indexer = np.lexsort((frame["B"], frame["A"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["A", "B"], ascending=False)
indexer = np.lexsort(
(frame["B"].rank(ascending=False), frame["A"].rank(ascending=False))
)
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["B", "A"])
indexer = np.lexsort((frame["A"], frame["B"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
def test_sort_values_multicolumn_uint64(self):
# GH#9918
# uint64 multicolumn sort
df = DataFrame(
{
"a": | pd.Series([18446637057563306014, 1162265347240853609]) | pandas.Series |
from app import app
from bokeh.io import show
from bokeh.embed import components
from bokeh.plotting import figure
from bokeh.resources import INLINE
from bokeh.models import NumeralTickFormatter
from bokeh.models import ColumnDataSource, HoverTool
from bokeh.palettes import Category20c
from bokeh.transform import cumsum
from bokeh.layouts import gridplot
from flask import render_template, flash, redirect, url_for, request, jsonify
from flask_login import current_user, login_user, logout_user, login_required
from datetime import datetime
from math import pi
from app.db import get_db, query
from app.plot import formatter, hbar, multiline
import pandas as pd
import numpy as np
import math
@app.route('/employees', methods=['GET', 'POST'])
@login_required
def employees():
"""
Render employee page
"""
date_start = request.form.get('date_start', '2018-01-01')
date_end = request.form.get('date_end', '2018-01-31')
time_frame = request.form.get('time_frame')
if request.form.get('time_frame') is None:
time_frame = 'date'
else:
time_frame = request.form.get('time_frame')
# average order_numbers
avg = get_avg_selling_per(date_start, date_end)
avg_order = formatter(avg[0][0])
avg_revenue = formatter(avg[1][0], 'dollar')
# most revenue
revenue_total = get_employee_revenue_total(date_start, date_end)
# sql result is reversed due to the hbar layout
most_revenue_name = revenue_total.loc[9, 'employee']
# Revenue by employee
js_revenue_total, div_revenue_total = hbar(revenue_total, 'revenue', 'employee')
# most orders
orders_total = get_employee_orders_total(date_start, date_end)
# sql result is reversed due to the hbar layout
most_orders_name = orders_total.loc[9, 'employee']
# Order numbers by employee
js_orders_total, div_orders_total = hbar(orders_total, 'order_number', 'employee')
time_dict = {'date': 'date', 'ww': 'week', 'mon': 'month', 'q': 'quarter'}
# Top 5 revenue employee trend
rev_top10 = revenue_total.loc[::-1, 'employee'].tolist()
# sql result is reversed thus first reverse to correct sequence
rev_top5 = rev_top10[: 5]
rev_trend_data = get_employee_trend(date_start, date_end, time_frame, rev_top5, 'revenue')
rev_trend_js, rev_trend_div = multiline(rev_trend_data, time_dict[time_frame], 'revenue', 'dollar',
rev_top5[0], rev_top5[1], rev_top5[2], rev_top5[3], rev_top5[4])
# top 5 order number employee trend
num_top10 = orders_total.loc[::-1 , 'employee'].tolist()
num_top5 = num_top10[: 5]
num_trend_data = get_employee_trend(date_start, date_end, time_frame, num_top5, 'order_number')
num_trend_js, num_trend_div = multiline(num_trend_data, time_dict[time_frame], 'order_number', 'number',
num_top5[0], num_top5[1], num_top5[2], num_top5[3], num_top5[4])
# gender relation distribution in order
g = get_ec_gender(date_start, date_end)
gender = pd.Series(g).reset_index(name='orders').rename(columns={'index':'gender'})
gender['angle'] = gender['orders']/gender['orders'].sum() * 2*pi
gender['color'] = Category20c[len(g)]
gender_hover = HoverTool(tooltips=[('Gender', '@gender'), ('Order number', '@orders{0.00 a}')])
gender_fig = figure(sizing_mode='scale_width', height=400, toolbar_location=None,
tools=[gender_hover], x_range=(-0.5, 1.0))
gender_fig.wedge(x=0, y=1, radius=0.4,
start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'),
line_color="white", fill_color='color', legend='gender', source=gender,
hover_color='red', hover_fill_alpha=0.8)
gender_fig.axis.axis_label=None
gender_fig.axis.visible=False
gender_fig.grid.grid_line_color = None
js_gender, div_gender = components(gender_fig)
# state relation distribution in order
s = get_ec_state(date_start, date_end)
state = pd.Series(s).reset_index(name='orders').rename(columns={'index':'state'})
state['angle'] = state['orders']/state['orders'].sum() * 2*pi
state['color'] = ["#3182bd", "#9ecae1"]
state_hover = HoverTool(tooltips=[('State', '@state'), ('Order number', '@orders{0.00 a}')])
state_fig = figure(sizing_mode='scale_width', height=400, toolbar_location=None,
tools=[state_hover], x_range=(-0.5, 1.0))
state_fig.wedge(x=0, y=1, radius=0.4,
start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'),
line_color="white", fill_color='color', legend='state', source=state,
hover_color='red', hover_fill_alpha=0.8)
state_fig.axis.axis_label=None
state_fig.axis.visible=False
state_fig.grid.grid_line_color = None
js_state, div_state = components(state_fig)
js_resources = INLINE.render_js()
css_resources = INLINE.render_css()
html = render_template(
'employees.html',
js_resources=js_resources,
css_resources=css_resources,
rev_trend_js=rev_trend_js,
rev_trend_div=rev_trend_div,
num_trend_js=num_trend_js,
num_trend_div=num_trend_div,
div_revenue_total=div_revenue_total,
js_revenue_total=js_revenue_total,
div_orders_total=div_orders_total,
js_orders_total=js_orders_total,
js_gender=js_gender,
div_gender=div_gender,
js_state=js_state,
div_state=div_state,
most_orders_name=most_orders_name,
most_revenue_name=most_revenue_name,
avg_order=avg_order,
avg_revenue=avg_revenue,
date_start=date_start,
date_end=date_end,
)
return html
def get_employee_revenue_total(date_start, date_end):
"""
return employee name and revenue for top 10
"""
sql = f"""
select *
from(select employee.name, sum(sales.total) as revenue
from sales, employee
where salesdate between to_date('{date_start}', 'YYYY-MM-DD') and to_date('{date_end}', 'YYYY-MM-DD')
and sales.employeeID = employee.employeeID
group by employee.name
order by sum(sales.total) desc)
where rownum < 11
order by revenue asc
"""
# need to make output to be ascending order to further plot be descending order
rows = query(sql)
df = pd.DataFrame(rows, columns=['employee', 'revenue'])
return df
def get_employee_orders_total(date_start, date_end):
"""
return employee name and order number for top 10
"""
sql = f"""
select *
from (select employee.name as name, count(sales.salesID) as order_number
from sales, employee
where salesdate between to_date('{date_start}', 'YYYY-MM-DD') and to_date('{date_end}', 'YYYY-MM-DD')
and sales.employeeID = employee.employeeID
group by employee.name
order by count(sales.salesID) desc)
where rownum < 11
order by order_number asc
"""
rows = query(sql)
df = pd.DataFrame(rows, columns=['employee', 'order_number'])
# print(df.head())
return df
def get_avg_selling_per(date_start, date_end):
"""
Return the average order number and average revenue per employee within the time range.
"""
sql = f"""
select sum(order_num) / count(name), sum(revenue) / count(name)
from (select count(sales.salesID) as order_num, sum(sales.total) as revenue, employee.name as name
from sales, employee
where salesdate between to_date('{date_start}', 'YYYY-MM-DD') and to_date('{date_end}', 'YYYY-MM-DD')
and sales.employeeID = employee.employeeID
group by employee.name)
"""
rows = query(sql)
df = pd.DataFrame(rows)
return df
def get_employee_trend(date_start, date_end, time_frame, employee, basis='revenue'):
"""
Return the trend of the top 5 employees for the given basis (revenue or order_number).
Returned employee names have spaces replaced with underscores.
"""
# employee = get_employee_top5(date_start, date_end, basis)
basis_dict = {'revenue': 'sum(sales.total)', 'order_number': 'count(sales.salesID)'}
time_dict = {'date': 'date', 'ww': 'week', 'mon': 'month', 'q': 'quarter'}
if time_frame == 'date' or time_frame is None: # None is used for switch page default frame
sql = f'''
select salesdate,
sum(case when employee = '{employee[0]}' then {basis} else 0 end) as name1,
sum(case when employee = '{employee[1]}' then {basis} else 0 end) as name2,
sum(case when employee = '{employee[2]}' then {basis} else 0 end) as name3,
sum(case when employee = '{employee[3]}' then {basis} else 0 end) as name4,
sum(case when employee = '{employee[4]}' then {basis} else 0 end) as name5
from
(select salesdate, employee.name as employee, {basis_dict[basis]} as {basis}
from sales, employee
where salesdate between to_date('{date_start}', 'YYYY-MM-DD') and to_date('{date_end}', 'YYYY-MM-DD')
and sales.employeeID = employee.employeeID
and employee.name in ('{employee[0]}', '{employee[1]}', '{employee[2]}', '{employee[3]}', '{employee[4]}')
group by salesdate, employee.name)
group by salesdate
order by salesdate
'''
# the reason use name1/2/3/4/5 here is because {employee[0]} includes space -> error
rows = query(sql)
# replace the space in name to underscore otherwise will have problem to access dataframe column
df = pd.DataFrame(columns=['date', employee[0].replace(" ", "_"), employee[1].replace(" ", "_"), employee[2].replace(" ", "_"),
employee[3].replace(" ", "_"), employee[4].replace(" ", "_")])
for row in rows:
df.loc[len(df), :] = row
df['date'] = pd.to_datetime(df['date'])
import random
import numpy as np
import pandas as pd
from pandas import DataFrame
from rake_nltk import Rake
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from users.models import Student
from .models import *
def get_recommendations_cf(uid):
subs = get_enrolled_subjects(uid)
if len(subs) == 0:
recommmend_list = random.choices(list(Course.objects.all()), k=min(len(list(Course.objects.all())),10))
else:
l1=[]
cats=[sub.course.category for sub in subs]
for cat in cats:
l1 += list(Course.objects.filter(category = cat))
l1=list(set(l1))
l2=[]
for sub in subs:
l2 +=list(Course.objects.filter(instructor = sub.course.instructor))
#l2 +=list(Course.objects.filter(cost = sub.cost))
l2 +=list(Course.objects.filter(platform = sub.course.platform))
l2 +=list(Course.objects.filter(language = sub.course.language))
l2 +=list(Course.objects.filter(level = sub.course.level))
l2=list(set(l2))
recommmend_list = random.choices(list(set(l1+l2)), k=min(len(list(set(l1+l2))),10))
def get_recommmendations(user):
is_valid, enrolled_subjects = _validate(user)
if is_valid:
recommendations = _from_content_based(enrolled_subjects)
else:
recommendations = _from_random()
return recommendations
def _validate(user):
is_valid = False
enrolled_subjects = []
if user.is_authenticated: # authenticated user
enrolled_subjects = get_enrolled_subjects(user.id).values_list('course', flat=True)
if enrolled_subjects.count() >= 1:
is_valid = True
return is_valid, enrolled_subjects
def _from_random():
subjects_size = Course.objects.count()
random_list = random.sample(range(0, subjects_size), 3)
recommendations = _retrieve_recommendations_and_sort_by(random_list)
return recommendations
def _from_content_based(subject_list):
df = DataFrame(list(Course.objects.values('name', 'category__name')))
df = _data_clean(df, subject_list)
# instantiating and generating the count matrix
count = CountVectorizer()
count_matrix = count.fit_transform(df['key_words'])
# generating the cosine similarity matrix
cosine_sim = cosine_similarity(count_matrix, count_matrix)
rd_list, rating = _recommendations(subject_list, df, cosine_sim)
recommendations = _retrieve_recommendations_and_sort_by(rd_list)
_calculate_ratings(rd_list, df, cosine_sim)
return recommendations
def _data_clean(dataframe, subject_list):
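    # Extract keywords from each course name with RAKE, combine them with the category
    # into a 'key_words' column, and append one extra row holding the concatenated
    # keywords of all enrolled subjects.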
enrolled_key_words = ""
dataframe['name_keywords'] = ""
for index, row in dataframe.iterrows():
name = row['name']
r = Rake()
r.extract_keywords_from_text(name)
keywords_dict = r.get_word_degrees()
name_keywords_str = ' '.join(list(keywords_dict.keys()))
row['name_keywords'] = name_keywords_str
if index+1 in subject_list:
enrolled_key_words += name_keywords_str + " " + row['category__name'] + " "
dataframe['key_words'] = dataframe['name_keywords'] + ' ' + dataframe['category__name'].map(str)
dataframe = dataframe.append({'key_words': enrolled_key_words}, ignore_index=True)
return dataframe
def _recommendations(subject_list, df, cosine_sim):
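    # Rank courses by similarity to the appended "enrolled" row, skip courses already
    # taken, keep the top 10, and estimate an overall rating as the similarity-weighted
    # average of each recommended course's mean rating.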
enrolledIndex = df.shape[0] - 1
indices = pd.Series(df.index)
# initializing the empty list of recommended subjects
recommended_subjects = []
    # getting the index of the appended row that represents the enrolled subjects
idx = indices[indices == enrolledIndex].index[0]
# creating a Series with the similarity scores in descending order
score_series = pd.Series(cosine_sim[idx]).sort_values(ascending=False)
# select top 10 recommended subjects that are not in the enrolled subject list
real_sims = []
sum_of_product = 0
sum_of_sims = 0
for items in score_series.iteritems():
if len(recommended_subjects) > 9:
break
indx = items[0]
real_sim = items[1]
        if indx != enrolledIndex:  # compare by value; 'is not' checks identity and can misbehave for ints
subjectId = indx + 1
if subjectId not in subject_list:
recommended_subjects.append(subjectId)
real_sims.append(real_sim)
subject_name = Course.objects.get(id=subjectId)
rating_list = SubjectRating.objects.filter(subject = subject_name).values_list('rating', flat=True)
if(rating_list.count() > 0):
average_rating = sum(rating_list) / rating_list.count()
else:
average_rating = 0
sum_of_product += real_sim * average_rating
sum_of_sims+=real_sim
if(sum_of_sims > 0):
rating = sum_of_product / sum_of_sims
else:
rating = 0
'''
print(real_sims)
print(rating)
#evaluation()'''
return recommended_subjects, rating
def _retrieve_recommendations_and_sort_by(subject_list):
# return detailed information of recommendation list
recommendations = list(Course.objects.filter(pk__in=subject_list).values())
recommendations.sort(key=lambda t: subject_list.index(t['id']))
return recommendations
def _calculate_ratings(rd_list, df, cosine_sim):
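    # For each recommended course, re-run the recommender seeded with the growing
    # prefix of recommendations and record the predicted rating for it.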
index=0
single_list = []
rating_list = []
for subject in rd_list:
single_list.append(rd_list[index])
tmp_list, rating = _recommendations(single_list, df, cosine_sim)
rating_list.append(rating)
index +=1
    rating_series = pd.Series(rd_list, index=rating_list)
#Copyright (c) 2018 <NAME> - MIT License
import json
import pandas as pd
import os
from operator import itemgetter
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from expworkup.handlers import parser
from expworkup.handlers import calcmmol
from expworkup.handlers import calcmolarity
from expworkup.handlers import inchigen
debug = 0 #args.Debug
finalvol_entries=2 ## Hard coded number of formic acid entries at the end of the run (this needs fixing)
### General Setup Information ###
##GSpread Authorization information
scope= ['https://www.googleapis.com/auth/spreadsheets.readonly']
credentials = ServiceAccountCredentials.from_json_keyfile_name('expworkup/creds/creds.json', scope)
gc = gspread.authorize(credentials)
#Import the most recent chemical data sheet from google drive to process the inchi keys and data about chemicals
#Eventually needs to be linked to database import and broader database information
def ChemicalData():
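    # Pull the chemical inventory worksheet from a hard-coded Google Sheet and return
    # it as a DataFrame indexed by InChI key.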
print('Obtaining chemical information from Google Drive..', end='')
chemsheetid = "1JgRKUH_ie87KAXsC-fRYEw_5SepjOgVt7njjQBETxEg"
ChemicalBook = gc.open_by_key(chemsheetid)
chemicalsheet = ChemicalBook.get_worksheet(0)
chemical_list = chemicalsheet.get_all_values()
chemdf=pd.DataFrame(chemical_list, columns=chemical_list[0])
chemdf=chemdf.iloc[1:]
chemdf=chemdf.reset_index(drop=True)
chemdf=chemdf.set_index(['InChI Key (ID)'])
print('.done')
return(chemdf)
#Will eventually create a dataframe from the robot handling information
def robo_handling():
pass
#The name cleaner is hard coded at the moment for the chemicals we are using. This will need to be generalized somehow...
def nameCleaner(sub_dirty_df):
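    # Split the raw molarity columns (keyed by InChI) into acid and inorganic columns,
    # skip the solvent, and sum everything else into a single organic concentration.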
inorganic_list=[]
organic_df=pd.DataFrame()
cleaned_M=pd.DataFrame()
for header in sub_dirty_df.columns:
#GBl handling -- > Solvent labeled (or other solvent such as DMF)
if 'YEJRWHAVMIAJKC-UHFFFAOYSA-N' in header:# or 'ZMXDDKWLCZADIW-UHFFFAOYSA-N' in header:
print("1")
pass
        #Acid handling --> Acid labeled --> the acid type will need to be declared explicitly in the future
elif "BDAGIHXWWSANSR-UHFFFAOYSA-N" in header:
cleaned_M['_rxn_M_acid']=sub_dirty_df[header]
# molarity_df['_rxn_M_acid'] = mmol_reagent_df[header] / (calculated_volumes_df['_raw_final_volume']/1000)
#PBI2 handling --> inorganic label
elif 'RQQRAHKHDFPBMC-UHFFFAOYSA-L' in header:# or 'ZASWJUOMEGBQCQ-UHFFFAOYSA-L' in header:
cleaned_M['_rxn_M_inorganic']=sub_dirty_df[header]
# molarity_df['_rxn_M_inorganic'] = mmol_reagent_df[header] / (calculated_volumes_df['_raw_final_volume']/1000)
else:
organic_df[header]=sub_dirty_df[header]
cleaned_M['_rxn_M_organic']=organic_df.sum(axis=1)
return(cleaned_M)
#cleans up the name space and the csv output for distribution
def cleaner(dirty_df, raw):
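    # Assemble the output frame from the _out_, cleaned molarity, _rxn_ and _feat_
    # column groups, keeping the _raw_ columns only when raw == 0.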
rxn_M_clean = nameCleaner(dirty_df.filter(like='_raw_M_'))
rxn_df=dirty_df.filter(like='_rxn_')
feat_df=dirty_df.filter(like='_feat_')
out_df=dirty_df.filter(like='_out_')
if raw == 0:
raw_df=dirty_df.filter(like='_raw_')
squeaky_clean_df=pd.concat([out_df,rxn_M_clean,rxn_df,feat_df, raw_df], axis=1)
else:
squeaky_clean_df=pd.concat([out_df,rxn_M_clean,rxn_df,feat_df], axis=1)
return(squeaky_clean_df)
## Unpack logic
#most granular data for each row of the final CSV is the well information.
#Each well will need all associated information of chemicals, run, etc.
#Unpack those values first and then copy the generated array to each of the invidual wells
### developed enough now that it should be broken up into smaller pieces!
def unpackJSON(myjson_fol):
chem_df=(ChemicalData()) #Grabs relevant chemical data frame from google sheets (only once no matter how many runs)
    concat_df_raw = pd.DataFrame()
from datetime import datetime
from unittest.mock import mock_open, patch
import numpy as np
import pandas as pd
import pytest
from pgcom import exc
from .conftest import commuter, delete_table, with_table
def create_test_table(table_name):
return f"""
CREATE TABLE IF NOT EXISTS {table_name} (
var_1 timestamp,
var_2 integer NOT NULL PRIMARY KEY,
var_3 text,
var_4 real,
var_5 integer);
"""
def create_test_table_serial(table_name):
return f"""
CREATE TABLE IF NOT EXISTS {table_name} (
id SERIAL PRIMARY KEY,
var_1 timestamp,
var_2 integer NOT NULL,
var_3 text,
var_4 real);
"""
def create_child_table(child_name, parent_name):
return f"""
CREATE TABLE IF NOT EXISTS {child_name} (
var_1 integer,
var_2 integer,
var_3 integer,
FOREIGN KEY (var_1) REFERENCES {parent_name}(var_2));
"""
def create_category_table(table_name):
return f"""
CREATE TABLE IF NOT EXISTS {table_name} (
category_id SERIAL PRIMARY KEY,
category TEXT);
"""
def create_test_table_with_categories(table_name):
return f"""
CREATE TABLE IF NOT EXISTS {table_name} (
var_1 integer NOT NULL PRIMARY KEY,
var_2 text,
var_3 text,
var_4 text);
"""
def create_composite_category_table(table_name):
return f"""
CREATE TABLE IF NOT EXISTS {table_name} (
category_id SERIAL PRIMARY KEY,
category_1 TEXT,
category_2 TEXT,
category_3 TEXT);
"""
def create_test_data():
return pd.DataFrame(
{
"var_1": pd.date_range(datetime.now(), periods=3),
"var_2": [1, 2, 3],
"var_3": ["x", "xx", "xxx"],
"var_4": [1.1, 2.2, 3.3],
"var_5": [1, 2, 3],
}
)
def test_repr():
assert repr(commuter)[0] == "("
assert repr(commuter)[-1] == ")"
@with_table("test_table", create_test_table)
def test_execute():
assert commuter.is_table_exist("test_table")
with pytest.raises(exc.QueryExecutionError) as e:
commuter.execute("SELECT 1 FROM fake_table")
assert e.type == exc.QueryExecutionError
@with_table("test_table", create_test_table)
def test_execute_script():
assert commuter.is_table_exist("test_table")
with patch("builtins.open", mock_open(read_data="DROP TABLE test_table")):
commuter.execute_script("path/to/open")
assert not commuter.is_table_exist("test_table")
@with_table("test_table", create_test_table)
def test_select_insert():
commuter.insert("test_table", create_test_data())
df = commuter.select("SELECT * FROM test_table")
df["date"] = pd.to_datetime(df["var_1"])
assert df["date"][0].date() == datetime.now().date()
assert len(df) == 3
@with_table("test_table", create_test_table)
def test_multiple_select():
commuter.insert("test_table", create_test_data())
n_conn = commuter.get_connections_count()
for i in range(300):
df = commuter.select("SELECT * FROM test_table")
assert len(df) == 3
assert commuter.get_connections_count() - n_conn < 10
def test_insert():
with pytest.raises(exc.QueryExecutionError) as e:
commuter.insert("fake_table", create_test_data())
assert e.type == exc.QueryExecutionError
@with_table("test_table", create_test_table)
def test_select_one():
cmd = "SELECT MAX(var_2) FROM test_table"
value = commuter.select_one(cmd=cmd, default=0)
assert value == 0
commuter.copy_from("test_table", create_test_data())
value = commuter.select_one("SELECT MAX(var_2) FROM test_table")
assert value == 3
cmd = "SELECT MAX(var_2) FROM test_table WHERE var_2 > 10"
value = commuter.select_one(cmd=cmd, default=-1)
assert value == -1
value = commuter.select_one("DROP TABLE test_table", default=1)
assert value == 1
@with_table("test_table", create_test_table)
def test_table_exist():
assert commuter.is_table_exist("test_table")
delete_table(table_name="test_table")
assert not commuter.is_table_exist("test_table")
@with_table("test_table", create_test_table)
def test_copy_from():
commuter.copy_from("test_table", create_test_data())
df = commuter.select("SELECT * FROM test_table")
df["date"] = pd.to_datetime(df["var_1"])
assert df["date"][0].date() == datetime.now().date()
assert len(df) == 3
with pytest.raises(exc.CopyError) as e:
commuter.copy_from("fake_table", create_test_data())
assert e.type == exc.CopyError
@with_table("model.test_table", create_test_table)
def test_copy_from_schema():
assert commuter.is_table_exist("model.test_table")
df = create_test_data()
df["var_2"] = [1, 2, 3.01]
df["new_var_1"] = 1
df.insert(loc=0, column="new_var_2", value=[3, 2, 1])
assert df.shape == (3, 7)
commuter.copy_from("model.test_table", df, format_data=True)
data = commuter.select("SELECT * FROM model.test_table")
data["date"] = | pd.to_datetime(data["var_1"]) | pandas.to_datetime |
import pandas as pd
from .datastore import merge_postcodes
from .types import ErrorDefinition
from .utils import add_col_to_tables_CONTINUOUSLY_LOOKED_AFTER as add_CLA_column # Check 'Episodes' present before use!
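# Illustrative usage sketch (not part of the original module): each validate_* factory
# below returns an (ErrorDefinition, checker) pair. The checker takes a dict of
# DataFrames keyed by table name plus a 'metadata' entry with %d/%m/%Y collection
# dates, and returns {table_name: [offending row indices]}. The frames and dates in
# this example are hypothetical sample inputs.
#
#   error, check = validate_165()
#   dfs = {
#       'Header': header_df, 'Episodes': episodes_df, 'OC3': oc3_df,
#       'metadata': {'collection_start': '01/04/2019', 'collection_end': '31/03/2020'},
#   }
#   issues = check(dfs)   # e.g. {'Header': [3, 7], 'OC3': [1]}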
def validate_165():
error = ErrorDefinition(
code = '165',
description = 'Data entry for mother status is invalid.',
affected_fields = ['MOTHER', 'SEX', 'ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
valid_values = ['0','1']
# prepare to merge
oc3.reset_index(inplace=True)
header.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['EPS'] = (episodes['DECOM']>=collection_start) & (episodes['DECOM']<=collection_end)
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']).merge(oc3, on='CHILD', how='left')
# Raise error if provided <MOTHER> is not a valid value.
value_validity = merged['MOTHER'].notna() & (~merged['MOTHER'].isin(valid_values))
# If not provided
female = (merged['SEX']=='1')
eps_in_year = (merged['EPS_COUNT']>0)
none_provided = (merged['ACTIV'].isna()& merged['ACCOM'].isna()& merged['IN_TOUCH'].isna())
# If provided <MOTHER> must be a valid value. If not provided <MOTHER> then either <GENDER> is male or no episode record for current year and any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided
mask = value_validity | (merged['MOTHER'].isna() & (female & (eps_in_year | none_provided)))
# That is, if value not provided and child is a female with eps in current year or no values of IN_TOUCH, ACTIV and ACCOM, then raise error.
error_locs_eps = merged.loc[mask, 'index_eps']
error_locs_header = merged.loc[mask, 'index_er']
error_locs_oc3 = merged.loc[mask, 'index']
return {'Header':error_locs_header.dropna().unique().tolist(),
'OC3':error_locs_oc3.dropna().unique().tolist()}
return error, _validate
def validate_1014():
error = ErrorDefinition(
code='1014',
description='UASC information is not required for care leavers',
affected_fields=['ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'UASC' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
uasc = dfs['UASC']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
# prepare to merge
oc3.reset_index(inplace=True)
uasc.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
date_check = (
((episodes['DECOM'] >= collection_start) & (episodes['DECOM'] <= collection_end))
| ((episodes['DEC'] >= collection_start) & (episodes['DEC'] <= collection_end))
| ((episodes['DECOM'] <= collection_start) & episodes['DEC'].isna())
)
episodes['EPS'] = date_check
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
# inner merge to take only episodes of children which are also found on the uasc table
merged = episodes.merge(uasc, on='CHILD', how='inner', suffixes=['_eps', '_sc']).merge(oc3, on='CHILD',
how='left')
            # note: the suffixes are not applied to the second merge here yet.
some_provided = (merged['ACTIV'].notna() | merged['ACCOM'].notna() | merged['IN_TOUCH'].notna())
mask = (merged['EPS_COUNT'] == 0) & some_provided
error_locs_uasc = merged.loc[mask, 'index_sc']
error_locs_oc3 = merged.loc[mask, 'index']
return {'UASC': error_locs_uasc.unique().tolist(), 'OC3': error_locs_oc3.unique().tolist()}
return error, _validate
# !# not sure what this rule is actually supposed to be getting at - description is confusing
def validate_197B():
error = ErrorDefinition(
code='197B',
description="SDQ score or reason for no SDQ should be reported for 4- or 17-year-olds.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
(
(oc2['DOB'] + pd.DateOffset(years=4) == start) # ???
| (oc2['DOB'] + pd.DateOffset(years=17) == start)
)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
& oc2['SDQ_REASON'].isna()
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_157():
error = ErrorDefinition(
code='157',
description="Child is aged 4 years or over at the beginning of the year or 16 years or under at the end of the "
"year and Strengths and Difficulties Questionnaire (SDQ) 1 has been recorded as the reason for no "
"Strengths and Difficulties Questionnaire (SDQ) score.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
oc2['CONTINUOUSLY_LOOKED_AFTER']
& (oc2['DOB'] + pd.DateOffset(years=4) <= start)
& (oc2['DOB'] + pd.DateOffset(years=16) >= endo)
& oc2['SDQ_SCORE'].isna()
& (oc2['SDQ_REASON'] == 'SDQ1')
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_357():
error = ErrorDefinition(
code='357',
description='If this is the first episode ever for this child, reason for new episode must be S. '
'Check whether there is an episode immediately preceding this one, which has been left out. '
'If not the reason for new episode code must be amended to S.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
eps = dfs['Episodes']
eps['DECOM'] = pd.to_datetime(eps['DECOM'], format='%d/%m/%Y', errors='coerce')
eps = eps.loc[eps['DECOM'].notnull()]
first_eps = eps.loc[eps.groupby('CHILD')['DECOM'].idxmin()]
errs = first_eps[first_eps['RNE'] != 'S'].index.to_list()
return {'Episodes': errs}
return error, _validate
def validate_117():
error = ErrorDefinition(
code='117',
description='Date of decision that a child should/should no longer be placed for adoption is beyond the current collection year or after the child ceased to be looked after.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_PLACED', 'DEC', 'REC', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placed_adoption = dfs['PlacedAdoption']
collection_end = dfs['metadata']['collection_end']
# datetime
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# Drop nans and continuing episodes
episodes = episodes.dropna(subset=['DECOM'])
episodes = episodes[episodes['REC'] != 'X1']
episodes = episodes.loc[episodes.groupby('CHILD')['DECOM'].idxmax()]
# prepare to merge
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
p4a_cols = ['DATE_PLACED', 'DATE_PLACED_CEASED']
# latest episodes
merged = episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
mask = (
(merged['DATE_PLACED'] > collection_end)
| (merged['DATE_PLACED'] > merged['DEC'])
| (merged['DATE_PLACED_CEASED'] > collection_end)
| (merged['DATE_PLACED_CEASED'] > merged['DEC'])
)
# If provided <DATE_PLACED> and/or <DATE_PLACED_CEASED> must not be > <COLLECTION_END_DATE> or <DEC> of latest episode where <REC> not = 'X1'
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_118():
error = ErrorDefinition(
code='118',
description='Date of decision that a child should no longer be placed for adoption is before the current collection year or before the date the child started to be looked after.',
affected_fields=['DECOM', 'DECOM', 'LS']
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
code_list = ['V3', 'V4']
# datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
filter_by_ls = episodes[~(episodes['LS'].isin(code_list))]
earliest_episode_idxs = filter_by_ls.groupby('CHILD')['DECOM'].idxmin()
earliest_episodes = episodes[episodes.index.isin(earliest_episode_idxs)]
# prepare to merge
placed_adoption.reset_index(inplace=True)
earliest_episodes.reset_index(inplace=True)
# merge
merged = earliest_episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
# drop rows where DATE_PLACED_CEASED is not provided
merged = merged.dropna(subset=['DATE_PLACED_CEASED'])
# If provided <DATE_PLACED_CEASED> must not be prior to <COLLECTION_START_DATE> or <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
mask = (merged['DATE_PLACED_CEASED'] < merged['DECOM']) | (merged['DATE_PLACED_CEASED'] < collection_start)
# error locations
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_352():
error = ErrorDefinition(
code='352',
description='Child who started to be looked after was aged 18 or over.',
affected_fields=['DECOM', 'RNE'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB18'] = header['DOB'] + pd.DateOffset(years=18)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
care_start = episodes_merged['RNE'].str.upper().astype(str).isin(['S'])
started_over_18 = episodes_merged['DOB18'] <= episodes_merged['DECOM']
error_mask = care_start & started_over_18
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_209():
error = ErrorDefinition(
code='209',
description='Child looked after is of school age and should not have an unknown Unique Pupil Number (UPN) code of UN1.',
affected_fields=['UPN', 'DOB']
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
collection_start = dfs['metadata']['collection_start']
# convert to datetime
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
yr = collection_start.year - 1
reference_date = pd.to_datetime('31/08/' + str(yr), format='%d/%m/%Y', errors='coerce')
# If <DOB> >= 4 years prior to 31/08/YYYY then <UPN> should not be 'UN1' Note: YYYY in this instance refers to the year prior to the collection start (for collection year 2019-2020, it would be looking at the 31/08/2018).
mask = (reference_date >= (header['DOB'] + pd.offsets.DateOffset(years=4))) & (header['UPN'] == 'UN1')
# error locations
error_locs_header = header.index[mask]
return {'Header': error_locs_header.tolist()}
return error, _validate
def validate_198():
error = ErrorDefinition(
code='198',
description="Child has not been looked after continuously for at least 12 months at 31 March but a reason "
"for no Strengths and Difficulties (SDQ) score has been completed. ",
affected_fields=['SDQ_REASON'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_REASON'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_185():
error = ErrorDefinition(
code='185',
description="Child has not been looked after continuously for at least 12 months at " +
"31 March but a Strengths and Difficulties (SDQ) score has been completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_SCORE'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_186():
error = ErrorDefinition(
code='186',
description="Children aged 4 or over at the start of the year and children aged under 17 at the " +
"end of the year and who have been looked after for at least 12 months continuously " +
"should have a Strengths and Difficulties (SDQ) score completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_start_str = dfs['metadata']['collection_start']
collection_end_str = dfs['metadata']['collection_end']
collection_start = pd.to_datetime(collection_start_str, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2 = add_CLA_column(dfs, 'OC2')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
oc2['17th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=17)
error_mask = (
(oc2['4th_bday'] <= collection_start)
& (oc2['17th_bday'] > collection_end)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_187():
error = ErrorDefinition(
code='187',
description="Child cannot be looked after continuously for 12 months at " +
"31 March (OC2) and have any of adoption or care leavers returns completed.",
affected_fields=['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR', # OC3
'IN_TOUCH', 'ACTIV', 'ACCOM'], # AD1
)
def _validate(dfs):
if (
'OC3' not in dfs
or 'AD1' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
ad1, oc3 = add_CLA_column(dfs, ['AD1', 'OC3'])
# OC3
should_be_blank = ['IN_TOUCH', 'ACTIV', 'ACCOM']
oc3_mask = oc3['CONTINUOUSLY_LOOKED_AFTER'] & oc3[should_be_blank].notna().any(axis=1)
oc3_error_locs = oc3[oc3_mask].index.to_list()
# AD1
should_be_blank = ['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR']
ad1_mask = ad1['CONTINUOUSLY_LOOKED_AFTER'] & ad1[should_be_blank].notna().any(axis=1)
ad1_error_locs = ad1[ad1_mask].index.to_list()
return {'AD1': ad1_error_locs,
'OC3': oc3_error_locs}
return error, _validate
def validate_188():
error = ErrorDefinition(
code='188',
description="Child is aged under 4 years at the end of the year, "
"but a Strengths and Difficulties (SDQ) score or a reason "
"for no SDQ score has been completed. ",
affected_fields=['SDQ_SCORE', 'SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_end_str = dfs['metadata']['collection_end']
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
error_mask = (
(oc2['4th_bday'] > collection_end)
& oc2[['SDQ_SCORE', 'SDQ_REASON']].notna().any(axis=1)
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_190():
error = ErrorDefinition(
code='190',
description="Child has not been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been completed.",
affected_fields=['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
, # AD1
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_blank = ['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
mask = ~oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_blank].notna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_191():
error = ErrorDefinition(
code='191',
description="Child has been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been left blank.",
affected_fields=['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'], # OC2
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_present = ['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE']
mask = oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_present].isna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_607():
error = ErrorDefinition(
code='607',
description='Child ceased to be looked after in the year, but mother field has not been completed.',
affected_fields=['DEC', 'REC', 'MOTHER', 'LS', 'SEX']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
code_list = ['V3', 'V4']
# convert to datetiime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# CEASED_TO_BE_LOOKED_AFTER = DEC is not null and REC is filled but not equal to X1
CEASED_TO_BE_LOOKED_AFTER = merged['DEC'].notna() & ((merged['REC'] != 'X1') & merged['REC'].notna())
# and <LS> not = ‘V3’ or ‘V4’
check_LS = ~(merged['LS'].isin(code_list))
# and <DEC> is in <CURRENT_COLLECTION_YEAR
check_DEC = (collection_start <= merged['DEC']) & (merged['DEC'] <= collection_end)
# Where <CEASED_TO_BE_LOOKED_AFTER> = ‘Y’, and <LS> not = ‘V3’ or ‘V4’ and <DEC> is in <CURRENT_COLLECTION_YEAR> and <SEX> = ‘2’ then <MOTHER> should be provided.
mask = CEASED_TO_BE_LOOKED_AFTER & check_LS & check_DEC & (merged['SEX'] == '2') & (merged['MOTHER'].isna())
header_error_locs = merged.loc[mask, 'index_er']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_210():
error = ErrorDefinition(
code='210',
description='Children looked after for more than a week at 31 March should not have an unknown Unique Pupil Number (UPN) code of UN4.',
affected_fields=['UPN', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_end = dfs['metadata']['collection_end']
# convert to datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
yr = collection_end.year
            reference_date = pd.to_datetime('24/03/' + str(yr), format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
# the logical way is to merge left on UPN but that will be a one to many merge and may not go as well as a many to one merge that we've been doing.
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
            # If <UPN> = 'UN4' then no episode <DECOM> must be >= 24/03/YYYY. Note: YYYY refers to the current collection year.
mask = (merged['UPN'] == 'UN4') & (merged['DECOM'] >= reference_date)
# error locations
error_locs_header = merged.loc[mask, 'index_er']
error_locs_eps = merged.loc[mask, 'index_eps']
return {'Episodes': error_locs_eps.tolist(), 'Header': error_locs_header.unique().tolist()}
return error, _validate
def validate_1010():
error = ErrorDefinition(
code='1010',
description='This child has no episodes loaded for current year even though there was an open episode of '
+ 'care at the end of the previous year, and care leaver data has been entered.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
oc3 = dfs['OC3']
# convert DECOM to datetime, drop missing/invalid sort by CHILD then DECOM,
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last = episodes_last.dropna(subset=['DECOM']).sort_values(['CHILD', 'DECOM'], ascending=True)
# Keep only the final episode for each child (ie where the following row has a different CHILD value)
episodes_last = episodes_last[
episodes_last['CHILD'].shift(-1) != episodes_last['CHILD']
]
# Keep only the final episodes that were still open
episodes_last = episodes_last[episodes_last['DEC'].isna()]
# The remaining children ought to have episode data in the current year if they are in OC3
has_current_episodes = oc3['CHILD'].isin(episodes['CHILD'])
has_open_episode_last = oc3['CHILD'].isin(episodes_last['CHILD'])
error_mask = ~has_current_episodes & has_open_episode_last
validation_error_locations = oc3.index[error_mask]
return {'OC3': validation_error_locations.tolist()}
return error, _validate
def validate_525():
error = ErrorDefinition(
code='525',
description='A child for whom the decision to be placed for adoption has been reversed cannot be adopted during the year.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR',
'LS_ADOPTR']
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs or 'AD1' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
ad1 = dfs['AD1']
# prepare to merge
placed_adoption.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = placed_adoption.merge(ad1, on='CHILD', how='left', suffixes=['_placed', '_ad1'])
# If <DATE_PLACED_CEASED> not Null, then <DATE_INT>; <DATE_MATCH>; <FOSTER_CARE>; <NB_ADOPTR>; <SEX_ADOPTR>; and <LS_ADOPTR> should not be provided
mask = merged['DATE_PLACED_CEASED'].notna() & (
merged['DATE_INT'].notna() | merged['DATE_MATCH'].notna() | merged['FOSTER_CARE'].notna() |
merged['NB_ADOPTR'].notna() | merged['SEX_ADOPTR'].notna() | merged['LS_ADOPTR'].notna())
# error locations
pa_error_locs = merged.loc[mask, 'index_placed']
ad_error_locs = merged.loc[mask, 'index_ad1']
# return result
return {'PlacedAdoption': pa_error_locs.tolist(), 'AD1': ad_error_locs.tolist()}
return error, _validate
def validate_335():
error = ErrorDefinition(
code='335',
description='The current foster value (0) suggests that child is not adopted by current foster carer, but last placement is A2, A3, or A5. Or the current foster value (1) suggests that child is adopted by current foster carer, but last placement is A1, A4 or A6.',
affected_fields=['PLACE', 'FOSTER_CARE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'AD1' not in dfs:
return {}
else:
episodes = dfs['Episodes']
ad1 = dfs['AD1']
# prepare to merge
episodes.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = episodes.merge(ad1, on='CHILD', how='left', suffixes=['_eps', '_ad1'])
            # Where <PL> = 'A2', 'A3' or 'A5' and <REC> = 'E1', 'E11' or 'E12', <FOSTER_CARE> should not be '0'; where <PL> = 'A1', 'A4' or 'A6' and <REC> = 'E1', 'E11' or 'E12', <FOSTER_CARE> should not be '1'.
mask = (
merged['REC'].isin(['E1', 'E11', 'E12']) & (
(merged['PLACE'].isin(['A2', 'A3', 'A5']) & (merged['FOSTER_CARE'].astype(str) == '0'))
| (merged['PLACE'].isin(['A1', 'A4', 'A6']) & (merged['FOSTER_CARE'].astype(str) == '1'))
)
)
eps_error_locs = merged.loc[mask, 'index_eps']
ad1_error_locs = merged.loc[mask, 'index_ad1']
# use .unique since join is many to one
return {'Episodes': eps_error_locs.tolist(), 'AD1': ad1_error_locs.unique().tolist()}
return error, _validate
def validate_215():
error = ErrorDefinition(
code='215',
description='Child has care leaver information but one or more data items relating to children looked after for 12 months have been completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM', 'CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK',
'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
)
def _validate(dfs):
if 'OC3' not in dfs or 'OC2' not in dfs:
return {}
else:
oc3 = dfs['OC3']
oc2 = dfs['OC2']
# prepare to merge
oc3.reset_index(inplace=True)
oc2.reset_index(inplace=True)
merged = oc3.merge(oc2, on='CHILD', how='left', suffixes=['_3', '_2'])
# If any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided then <CONVICTED>; <HEALTH_CHECK>; <IMMUNISATIONS>; <TEETH_CHECK>; <HEALTH_ASSESSMENT>; <SUBSTANCE MISUSE>; <INTERVENTION_RECEIVED>; <INTERVENTION_OFFERED>; should not be provided
mask = (merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna()) & (
merged['CONVICTED'].notna() | merged['HEALTH_CHECK'].notna() | merged['IMMUNISATIONS'].notna() |
merged['TEETH_CHECK'].notna() | merged['HEALTH_ASSESSMENT'].notna() | merged[
'SUBSTANCE_MISUSE'].notna() | merged['INTERVENTION_RECEIVED'].notna() | merged[
'INTERVENTION_OFFERED'].notna())
# error locations
oc3_error_locs = merged.loc[mask, 'index_3']
oc2_error_locs = merged.loc[mask, 'index_2']
return {'OC3': oc3_error_locs.tolist(), 'OC2': oc2_error_locs.tolist()}
return error, _validate
def validate_399():
error = ErrorDefinition(
code='399',
description='Mother field, review field or participation field are completed but '
+ 'child is looked after under legal status V3 or V4.',
affected_fields=['MOTHER', 'LS', 'REVIEW', 'REVIEW_CODE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs or 'Reviews' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
reviews = dfs['Reviews']
code_list = ['V3', 'V4']
# prepare to merge
episodes['index_eps'] = episodes.index
header['index_hdr'] = header.index
reviews['index_revs'] = reviews.index
# merge
merged = (episodes.merge(header, on='CHILD', how='left')
.merge(reviews, on='CHILD', how='left'))
# If <LS> = 'V3' or 'V4' then <MOTHER>, <REVIEW> and <REVIEW_CODE> should not be provided
mask = merged['LS'].isin(code_list) & (
merged['MOTHER'].notna() | merged['REVIEW'].notna() | merged['REVIEW_CODE'].notna())
# Error locations
eps_errors = merged.loc[mask, 'index_eps']
header_errors = merged.loc[mask, 'index_hdr'].unique()
revs_errors = merged.loc[mask, 'index_revs'].unique()
return {'Episodes': eps_errors.tolist(),
'Header': header_errors.tolist(),
'Reviews': revs_errors.tolist()}
return error, _validate
def validate_189():
error = ErrorDefinition(
code='189',
description='Child is aged 17 years or over at the beginning of the year, but an Strengths and Difficulties '
+ '(SDQ) score or a reason for no Strengths and Difficulties (SDQ) score has been completed.',
affected_fields=['DOB', 'SDQ_SCORE', 'SDQ_REASON']
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
collection_start = dfs['metadata']['collection_start']
# datetime format allows appropriate comparison between dates
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# If <DOB> >17 years prior to <COLLECTION_START_DATE> then <SDQ_SCORE> and <SDQ_REASON> should not be provided
mask = ((oc2['DOB'] + pd.offsets.DateOffset(years=17)) <= collection_start) & (
oc2['SDQ_REASON'].notna() | oc2['SDQ_SCORE'].notna())
# That is, raise error if collection_start > DOB + 17years
oc_error_locs = oc2.index[mask]
return {'OC2': oc_error_locs.tolist()}
return error, _validate
def validate_226():
error = ErrorDefinition(
code='226',
description='Reason for placement change is not required.',
affected_fields=['REASON_PLACE_CHANGE', 'PLACE']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
code_list = ['T0', 'T1', 'T2', 'T3', 'T4']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# create column to see previous REASON_PLACE_CHANGE
episodes = episodes.sort_values(['CHILD', 'DECOM'])
episodes['PREVIOUS_REASON'] = episodes.groupby('CHILD')['REASON_PLACE_CHANGE'].shift(1)
# If <PL> = 'T0'; 'T1'; 'T2'; 'T3' or 'T4' then <REASON_PLACE_CHANGE> should be null in current episode and current episode - 1
mask = episodes['PLACE'].isin(code_list) & (
episodes['REASON_PLACE_CHANGE'].notna() | episodes['PREVIOUS_REASON'].notna())
# error locations
error_locs = episodes.index[mask]
return {'Episodes': error_locs.tolist()}
return error, _validate
def validate_358():
error = ErrorDefinition(
code='358',
description='Child with this legal status should not be under 10.',
affected_fields=['DECOM', 'DOB', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
code_list = ['J1', 'J2', 'J3']
# convert dates to datetime format
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
            # Where <LS> = 'J1', 'J2' or 'J3' then <DOB> should be <= 10 years prior to <DECOM>
mask = merged['LS'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=10) < merged['DECOM'])
# That is, raise error if DECOM > DOB + 10years
# error locations
header_error_locs = merged.loc[mask, 'index_er']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_407():
error = ErrorDefinition(
code='407',
description='Reason episode ceased is Special Guardianship Order, but child has reached age 18.',
affected_fields=['DEC', 'DOB', 'REC']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
code_list = ['E45', 'E46', 'E47', 'E48']
# convert dates to datetime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# If <REC> = ‘E45’ or ‘E46’ or ‘E47’ or ‘E48’ then <DOB> must be < 18 years prior to <DEC>
mask = merged['REC'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=18) < merged['DEC'])
            # That is, raise error if DEC > DOB + 18 years
# error locations
header_error_locs = merged.loc[mask, 'index_er']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_1007():
error = ErrorDefinition(
code='1007',
description='Care leaver information is not required for 17- or 18-year olds who are still looked after.',
affected_fields=['DEC', 'REC', 'DOB', 'IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_end = dfs['metadata']['collection_end']
# convert dates to datetime format
oc3['DOB'] = pd.to_datetime(oc3['DOB'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
oc3.reset_index(inplace=True)
merged = episodes.merge(oc3, on='CHILD', how='left', suffixes=['_eps', '_oc3'])
# If <DOB> < 19 and >= to 17 years prior to <COLLECTION_END_DATE> and current episode <DEC> and or <REC> not provided then <IN_TOUCH>, <ACTIV> and <ACCOM> should not be provided
check_age = (merged['DOB'] + pd.offsets.DateOffset(years=17) <= collection_end) & (
merged['DOB'] + pd.offsets.DateOffset(years=19) > collection_end)
# That is, check that 17<=age<19
check_dec_rec = merged['REC'].isna() | merged['DEC'].isna()
# if either DEC or REC are absent
mask = check_age & check_dec_rec & (
merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna())
# Then raise an error if either IN_TOUCH, ACTIV, or ACCOM have been provided too
# error locations
oc3_error_locs = merged.loc[mask, 'index_oc3']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'OC3': oc3_error_locs.unique().tolist()}
return error, _validate
def validate_442():
error = ErrorDefinition(
code='442',
description='Unique Pupil Number (UPN) field is not completed.',
affected_fields=['UPN', 'LS']
)
def _validate(dfs):
if ('Episodes' not in dfs) or ('Header' not in dfs):
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
code_list = ['V3', 'V4']
# merge left on episodes to get all children for which episodes have been recorded even if they do not exist on the header.
merged = episodes.merge(header, on=['CHILD'], how='left', suffixes=['_eps', '_er'])
# Where any episode present, with an <LS> not = 'V3' or 'V4' then <UPN> must be provided
mask = (~merged['LS'].isin(code_list)) & merged['UPN'].isna()
episode_error_locs = merged.loc[mask, 'index_eps']
header_error_locs = merged.loc[mask, 'index_er']
return {'Episodes': episode_error_locs.tolist(),
# Select unique values since many episodes are joined to one header
# and multiple errors will be raised for the same index.
'Header': header_error_locs.dropna().unique().tolist()}
return error, _validate
def validate_344():
error = ErrorDefinition(
code='344',
description='The record shows the young person has died or returned home to live with parent(s) or someone with parental responsibility for a continuous period of 6 months or more, but activity and/or accommodation on leaving care have been completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
# If <IN_TOUCH> = 'DIED' or 'RHOM' then <ACTIV> and <ACCOM> should not be provided
mask = ((oc3['IN_TOUCH'] == 'DIED') | (oc3['IN_TOUCH'] == 'RHOM')) & (
oc3['ACTIV'].notna() | oc3['ACCOM'].notna())
error_locations = oc3.index[mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_345():
error = ErrorDefinition(
code='345',
description='The data collection record shows the local authority is in touch with this young person, but activity and/or accommodation data items are zero.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
# If <IN_TOUCH> = 'Yes' then <ACTIV> and <ACCOM> must be provided
mask = (oc3['IN_TOUCH'] == 'YES') & (oc3['ACTIV'].isna() | oc3['ACCOM'].isna())
error_locations = oc3.index[mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_384():
error = ErrorDefinition(
code='384',
description='A child receiving respite care cannot be in a long-term foster placement ',
affected_fields=['PLACE', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# Where <LS> = 'V3' or 'V4' then <PL> must not be 'U1' or 'U4'
mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & (
(episodes['PLACE'] == 'U1') | (episodes['PLACE'] == 'U4'))
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_390():
error = ErrorDefinition(
code='390',
description='Reason episode ceased is adopted but child has not been previously placed for adoption.',
affected_fields=['PLACE', 'REC']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# If <REC> = 'E11' or 'E12' then <PL> must be one of 'A3', 'A4', 'A5' or 'A6'
mask = ((episodes['REC'] == 'E11') | (episodes['REC'] == 'E12')) & ~(
(episodes['PLACE'] == 'A3') | (episodes['PLACE'] == 'A4') | (episodes['PLACE'] == 'A5') | (
episodes['PLACE'] == 'A6'))
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_378():
error = ErrorDefinition(
code='378',
description='A child who is placed with parent(s) cannot be looked after under a single period of accommodation under Section 20 of the Children Act 1989.',
affected_fields=['PLACE', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
            # & binds more tightly than ==, so the parentheses are necessary here
mask = (episodes['PLACE'] == 'P1') & (episodes['LS'] == 'V2')
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_398():
error = ErrorDefinition(
code='398',
description='Distance field completed but child looked after under legal status V3 or V4.',
affected_fields=['LS', 'HOME_POST', 'PL_POST']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & (
episodes['HOME_POST'].notna() | episodes['PL_POST'].notna())
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_451():
error = ErrorDefinition(
code='451',
description='Child is still freed for adoption, but freeing orders could not be applied for since 30 December 2005.',
affected_fields=['DEC', 'REC', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = episodes['DEC'].isna() & episodes['REC'].isna() & (episodes['LS'] == 'D1')
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_519():
error = ErrorDefinition(
code='519',
description='Data entered on the legal status of adopters shows civil partnership couple, but data entered on genders of adopters does not show it as a couple.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR']
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
mask = (ad1['LS_ADOPTR'] == 'L2') & (
(ad1['SEX_ADOPTR'] != 'MM') & (ad1['SEX_ADOPTR'] != 'FF') & (ad1['SEX_ADOPTR'] != 'MF'))
error_locations = ad1.index[mask]
return {'AD1': error_locations.to_list()}
return error, _validate
def validate_520():
error = ErrorDefinition(
code='520',
description='Data entry on the legal status of adopters shows different gender married couple but data entry on genders of adopters shows it as a same gender couple.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR']
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
# check condition
mask = (ad1['LS_ADOPTR'] == 'L11') & (ad1['SEX_ADOPTR'] != 'MF')
error_locations = ad1.index[mask]
return {'AD1': error_locations.to_list()}
return error, _validate
def validate_522():
error = ErrorDefinition(
code='522',
description='Date of decision that the child should be placed for adoption must be on or before the date that a child should no longer be placed for adoption.',
affected_fields=['DATE_PLACED', 'DATE_PLACED_CEASED']
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
# Convert to datetimes
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
# Boolean mask
mask = placed_adoption['DATE_PLACED_CEASED'] > placed_adoption['DATE_PLACED']
error_locations = placed_adoption.index[mask]
return {'PlacedAdoption': error_locations.to_list()}
return error, _validate
def validate_563():
error = ErrorDefinition(
code='563',
description='The child should no longer be placed for adoption but the date of the decision that the child should be placed for adoption is blank',
affected_fields=['DATE_PLACED', 'REASON_PLACED_CEASED', 'DATE_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
mask = placed_adoption['REASON_PLACED_CEASED'].notna() & placed_adoption['DATE_PLACED_CEASED'].notna() & \
placed_adoption['DATE_PLACED'].isna()
error_locations = placed_adoption.index[mask]
return {'PlacedAdoption': error_locations.to_list()}
return error, _validate
def validate_544():
error = ErrorDefinition(
code='544',
description="Any child who has conviction information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.",
affected_fields=['CONVICTED', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
convict = oc2['CONVICTED'].astype(str) == '1'
immunisations = oc2['IMMUNISATIONS'].isna()
teeth_ck = oc2['TEETH_CHECK'].isna()
health_ass = oc2['HEALTH_ASSESSMENT'].isna()
sub_misuse = oc2['SUBSTANCE_MISUSE'].isna()
error_mask = convict & (immunisations | teeth_ck | health_ass | sub_misuse)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_634():
error = ErrorDefinition(
code='634',
description='There are entries for previous permanence options, but child has not started to be looked after from 1 April 2016 onwards.',
affected_fields=['LA_PERM', 'PREV_PERM', 'DATE_PERM', 'DECOM']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PrevPerm' not in dfs:
return {}
else:
episodes = dfs['Episodes']
prevperm = dfs['PrevPerm']
collection_start = dfs['metadata']['collection_start']
# convert date field to appropriate format
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
            # the latest DECOM for each child is the one most likely to satisfy the condition
episodes['LAST_DECOM'] = episodes.groupby('CHILD')['DECOM'].transform('max')
# prepare to merge
episodes.reset_index(inplace=True)
prevperm.reset_index(inplace=True)
merged = prevperm.merge(episodes, on='CHILD', how='left', suffixes=['_prev', '_eps'])
# If <PREV_PERM> or <LA_PERM> or <DATE_PERM> provided, then at least 1 episode must have a <DECOM> later than 01/04/2016
mask = (merged['PREV_PERM'].notna() | merged['DATE_PERM'].notna() | merged['LA_PERM'].notna()) & (
merged['LAST_DECOM'] < collection_start)
eps_error_locs = merged.loc[mask, 'index_eps']
prevperm_error_locs = merged.loc[mask, 'index_prev']
# return {'PrevPerm':prevperm_error_locs}
return {'Episodes': eps_error_locs.unique().tolist(), 'PrevPerm': prevperm_error_locs.unique().tolist()}
return error, _validate
def validate_158():
error = ErrorDefinition(
code='158',
description='If a child has been recorded as receiving an intervention for their substance misuse problem, then the additional item on whether an intervention was offered should be left blank.',
affected_fields=['INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
error_mask = oc2['INTERVENTION_RECEIVED'].astype(str).eq('1') & oc2['INTERVENTION_OFFERED'].notna()
error_locations = oc2.index[error_mask]
return {'OC2': error_locations.tolist()}
return error, _validate
def validate_133():
error = ErrorDefinition(
code='133',
        description="Data entry for accommodation after leaving care is invalid. If reporting on a child's accommodation after leaving care, the data entry must be valid.",
affected_fields=['ACCOM'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
valid_codes = ['B1', 'B2', 'C1', 'C2', 'D1', 'D2', 'E1', 'E2', 'G1', 'G2', 'H1', 'H2', 'K1', 'K2', 'R1',
'R2', 'S2', 'T1', 'T2', 'U1', 'U2', 'V1', 'V2', 'W1', 'W2', 'X2', 'Y1', 'Y2', 'Z1', 'Z2',
'0']
error_mask = ~oc3['ACCOM'].isna() & ~oc3['ACCOM'].isin(valid_codes)
error_locations = oc3.index[error_mask]
return {'OC3': error_locations.tolist()}
return error, _validate
def validate_565():
error = ErrorDefinition(
code='565',
description='The date that the child started to be missing or away from placement without authorisation has been completed but whether the child was missing or away from placement without authorisation has not been completed.',
affected_fields=['MISSING', 'MIS_START']
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
mask = missing['MIS_START'].notna() & missing['MISSING'].isna()
error_locations = missing.index[mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_433():
error = ErrorDefinition(
code='433',
description='The reason for new episode suggests that this is a continuation episode, but the episode does not start on the same day as the last episode finished.',
affected_fields=['RNE', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['original_index'] = episodes.index
episodes.sort_values(['CHILD', 'DECOM', 'DEC'], inplace=True)
episodes[['PREVIOUS_DEC', 'PREVIOUS_CHILD']] = episodes[['DEC', 'CHILD']].shift(1)
rne_is_ongoing = episodes['RNE'].str.upper().astype(str).isin(['P', 'L', 'T', 'U', 'B'])
date_mismatch = episodes['PREVIOUS_DEC'] != episodes['DECOM']
missing_date = episodes['PREVIOUS_DEC'].isna() | episodes['DECOM'].isna()
same_child = episodes['PREVIOUS_CHILD'] == episodes['CHILD']
error_mask = rne_is_ongoing & (date_mismatch | missing_date) & same_child
error_locations = episodes['original_index'].loc[error_mask].sort_values()
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_437():
error = ErrorDefinition(
code='437',
description='Reason episode ceased is child has died or is aged 18 or over but there are further episodes.',
affected_fields=['REC'],
)
# !# potential false negatives, as this only operates on the current year's data
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes.sort_values(['CHILD', 'DECOM'], inplace=True)
episodes[['NEXT_DECOM', 'NEXT_CHILD']] = episodes[['DECOM', 'CHILD']].shift(-1)
# drop rows with missing DECOM as invalid/missing values can lead to errors
episodes = episodes.dropna(subset=['DECOM'])
ceased_e2_e15 = episodes['REC'].str.upper().astype(str).isin(['E2', 'E15'])
has_later_episode = episodes['CHILD'] == episodes['NEXT_CHILD']
error_mask = ceased_e2_e15 & has_later_episode
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_547():
error = ErrorDefinition(
code='547',
description="Any child who has health promotion information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.",
affected_fields=['HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
healthck = oc2['HEALTH_CHECK'].astype(str) == '1'
immunisations = oc2['IMMUNISATIONS'].isna()
teeth_ck = oc2['TEETH_CHECK'].isna()
health_ass = oc2['HEALTH_ASSESSMENT'].isna()
sub_misuse = oc2['SUBSTANCE_MISUSE'].isna()
error_mask = healthck & (immunisations | teeth_ck | health_ass | sub_misuse)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_635():
error = ErrorDefinition(
code='635',
description='There are entries for date of order and local authority code where previous permanence option was arranged but previous permanence code is Z1',
affected_fields=['LA_PERM', 'DATE_PERM', 'PREV_PERM']
)
def _validate(dfs):
if 'PrevPerm' not in dfs:
return {}
else:
prev_perm = dfs['PrevPerm']
            # raise an error if either LA_PERM or DATE_PERM is present, yet PREV_PERM is absent.
mask = ((prev_perm['LA_PERM'].notna() | prev_perm['DATE_PERM'].notna()) & prev_perm['PREV_PERM'].isna())
error_locations = prev_perm.index[mask]
return {'PrevPerm': error_locations.to_list()}
return error, _validate
def validate_550():
error = ErrorDefinition(
code='550',
description='A placement provider code of PR0 can only be associated with placement P1.',
affected_fields=['PLACE', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = (episodes['PLACE'] != 'P1') & episodes['PLACE_PROVIDER'].eq('PR0')
validation_error_locations = episodes.index[mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_217():
error = ErrorDefinition(
code='217',
description='Children who are placed for adoption with current foster carers (placement types A3 or A5) must have a reason for new episode of S, T or U.',
affected_fields=['PLACE', 'DECOM', 'RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
max_decom_allowed = pd.to_datetime('01/04/2015', format='%d/%m/%Y', errors='coerce')
reason_new_ep = ['S', 'T', 'U']
place_codes = ['A3', 'A5']
mask = (episodes['PLACE'].isin(place_codes) & (episodes['DECOM'] >= max_decom_allowed)) & ~episodes[
'RNE'].isin(reason_new_ep)
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_518():
error = ErrorDefinition(
code='518',
description='If reporting legal status of adopters is L4 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L4') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_517():
error = ErrorDefinition(
code='517',
description='If reporting legal status of adopters is L3 then the genders of adopters should be coded as MF. MF = the adopting couple are male and female.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L3') & ~AD1['SEX_ADOPTR'].isin(['MF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_558():
error = ErrorDefinition(
code='558',
        description='If a child has been adopted, then the decision to place them for adoption has not been disrupted and the date of the decision that a child should no longer be placed for adoption should be left blank. If the REC code is either E11 or E12 then the DATE PLACED CEASED date should not be provided.',
affected_fields=['DATE_PLACED_CEASED', 'REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes = episodes.reset_index()
rec_codes = ['E11', 'E12']
placeEpisodes = episodes[episodes['REC'].isin(rec_codes)]
merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED_CEASED'].notna()]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_453():
error = ErrorDefinition(
code='453',
description='Contradiction between placement distance in the last episode of the previous year and in the first episode of the current year.',
affected_fields=['PL_DISTANCE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['PL_DISTANCE'] = pd.to_numeric(episodes['PL_DISTANCE'], errors='coerce')
episodes_last['PL_DISTANCE'] = pd.to_numeric(episodes_last['PL_DISTANCE'], errors='coerce')
# drop rows with missing DECOM before finding idxmin/max, as invalid/missing values can lead to errors
episodes = episodes.dropna(subset=['DECOM'])
episodes_last = episodes_last.dropna(subset=['DECOM'])
episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin()
episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax()
episodes = episodes[episodes.index.isin(episodes_min)]
episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)]
episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'],
suffixes=('', '_last'), indicator=True).set_index('index')
in_both_years = episodes_merged['_merge'] == 'both'
same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last']
last_year_open = episodes_merged['DEC_last'].isna()
different_pl_dist = abs(episodes_merged['PL_DISTANCE'] - episodes_merged['PL_DISTANCE_last']) >= 0.2
error_mask = in_both_years & same_rne & last_year_open & different_pl_dist
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_516():
error = ErrorDefinition(
code='516',
        description='The episode data submitted for this child does not show that he/she was with their former foster carer(s) during the year. If the code in the reason episode ceased is E45 or E46 the child must have a placement code of U1 to U6.',
affected_fields=['REC', 'PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
place_codes = ['U1', 'U2', 'U3', 'U4', 'U5', 'U6']
rec_codes = ['E45', 'E46']
error_mask = episodes['REC'].isin(rec_codes) & ~episodes['PLACE'].isin(place_codes)
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_511():
error = ErrorDefinition(
code='511',
description='If reporting that the number of person(s) adopting the looked after child is two adopters then the code should only be MM, FF or MF. MM = the adopting couple are both males; FF = the adopting couple are both females; MF = The adopting couple are male and female.',
affected_fields=['NB_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
mask = AD1['NB_ADOPTR'].astype(str).eq('2') & AD1['SEX_ADOPTR'].isin(['M1', 'F1'])
validation_error_mask = mask
validation_error_locations = AD1.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_524():
error = ErrorDefinition(
code='524',
description='If reporting legal status of adopters is L12 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L12') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_441():
error = ErrorDefinition(
code='441',
description='Participation method indicates child was 4 years old or over at the time of the review, but the date of birth and review date indicates the child was under 4 years old.',
affected_fields=['DOB', 'REVIEW', 'REVIEW_CODE'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
else:
reviews = dfs['Reviews']
reviews['DOB'] = pd.to_datetime(reviews['DOB'], format='%d/%m/%Y', errors='coerce')
reviews['REVIEW'] = pd.to_datetime(reviews['REVIEW'], format='%d/%m/%Y', errors='coerce')
reviews = reviews.dropna(subset=['REVIEW', 'DOB'])
mask = reviews['REVIEW_CODE'].isin(['PN1', 'PN2', 'PN3', 'PN4', 'PN5', 'PN6', 'PN7']) & (
reviews['REVIEW'] < reviews['DOB'] + pd.offsets.DateOffset(years=4))
validation_error_mask = mask
validation_error_locations = reviews.index[validation_error_mask]
return {'Reviews': validation_error_locations.tolist()}
return error, _validate
def validate_184():
error = ErrorDefinition(
code='184',
description='Date of decision that a child should be placed for adoption is before the child was born.',
        affected_fields=['DATE_PLACED',  # PlacedAdoption
'DOB'], # Header
)
def _validate(dfs):
if 'Header' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
child_record = dfs['Header']
placed_for_adoption = dfs['PlacedAdoption']
all_data = (placed_for_adoption
.reset_index()
.merge(child_record, how='left', on='CHILD', suffixes=[None, '_P4A']))
all_data['DATE_PLACED'] = pd.to_datetime(all_data['DATE_PLACED'], format='%d/%m/%Y', errors='coerce')
all_data['DOB'] = pd.to_datetime(all_data['DOB'], format='%d/%m/%Y', errors='coerce')
mask = (all_data['DATE_PLACED'] >= all_data['DOB']) | all_data['DATE_PLACED'].isna()
validation_error = ~mask
validation_error_locations = all_data[validation_error]['index'].unique()
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_612():
error = ErrorDefinition(
code='612',
description="Date of birth field has been completed but mother field indicates child is not a mother.",
affected_fields=['SEX', 'MOTHER', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
error_mask = (
((header['MOTHER'].astype(str) == '0') | header['MOTHER'].isna())
& (header['SEX'].astype(str) == '2')
& header['MC_DOB'].notna()
)
validation_error_locations = header.index[error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_552():
"""
    This error checks that the first adoption episode is after the last decision!
    If there are multiple of either there may be unexpected results!
"""
error = ErrorDefinition(
code="552",
description="Date of Decision to place a child for adoption should be on or prior to the date that the child was placed for adoption.",
# Field that defines date of decision to place a child for adoption is DATE_PLACED and the start of adoption is defined by DECOM with 'A' placement types.
affected_fields=['DATE_PLACED', 'DECOM'],
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
# get the required datasets
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
# keep index values so that they stay the same when needed later on for error locations
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
adoption_eps = episodes[episodes['PLACE'].isin(['A3', 'A4', 'A5', 'A6'])].copy()
# find most recent adoption decision
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
# remove rows where either of the required values have not been filled.
placed_adoption = placed_adoption[placed_adoption['DATE_PLACED'].notna()]
placed_adoption_inds = placed_adoption.groupby('CHILD')['DATE_PLACED'].idxmax(skipna=True)
last_decision = placed_adoption.loc[placed_adoption_inds]
# first time child started adoption
adoption_eps["DECOM"] = pd.to_datetime(adoption_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
adoption_eps = adoption_eps[adoption_eps['DECOM'].notna()]
adoption_eps_inds = adoption_eps.groupby('CHILD')['DECOM'].idxmin(skipna=True)
# full information of first adoption
first_adoption = adoption_eps.loc[adoption_eps_inds]
# date of decision and date of start of adoption (DECOM) have to be put in one table
merged = first_adoption.merge(last_decision, on=['CHILD'], how='left', suffixes=['_EP', '_PA'])
# check to see if date of decision to place is less than or equal to date placed.
decided_after_placed = merged["DECOM"] < merged["DATE_PLACED"]
# find the corresponding location of error values per file.
episode_error_locs = merged.loc[decided_after_placed, 'index_EP']
placedadoption_error_locs = merged.loc[decided_after_placed, 'index_PA']
return {"PlacedAdoption": placedadoption_error_locs.to_list(), "Episodes": episode_error_locs.to_list()}
return error, _validate
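# --- Illustrative sketch (not part of the original rule set) --------------------
# The selection pattern used in validate_552 (and several rules above): a
# groupby(...).idxmax()/idxmin() gives one row label per child, which .loc then
# uses to keep only the latest decision / earliest episode. Invented data:
def _example_latest_row_per_child():
    df = pd.DataFrame({
        'CHILD': ['A', 'A', 'B'],
        'DATE_PLACED': pd.to_datetime(['01/01/2019', '01/06/2019', '01/03/2019'],
                                      format='%d/%m/%Y'),
    })
    latest_inds = df.groupby('CHILD')['DATE_PLACED'].idxmax()
    return df.loc[latest_inds]  # one row per CHILD with its most recent DATE_PLACED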
def validate_551():
error = ErrorDefinition(
code='551',
description='Child has been placed for adoption but there is no date of the decision that the child should be placed for adoption.',
affected_fields=['DATE_PLACED', 'PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes = episodes.reset_index()
place_codes = ['A3', 'A4', 'A5', 'A6']
placeEpisodes = episodes[episodes['PLACE'].isin(place_codes)]
merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED'].isna()]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_557():
error = ErrorDefinition(
code='557',
description="Child for whom the decision was made that they should be placed for adoption has left care " +
"but was not adopted and information on the decision that they should no longer be placed for " +
"adoption items has not been completed.",
affected_fields=['DATE_PLACED_CEASED', 'REASON_PLACED_CEASED', # PlacedAdoption
'PLACE', 'LS', 'REC'], # Episodes
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'PlacedAdoption' not in dfs:
return {}
else:
eps = dfs['Episodes']
placed = dfs['PlacedAdoption']
eps = eps.reset_index()
placed = placed.reset_index()
child_placed = eps['PLACE'].isin(['A3', 'A4', 'A5', 'A6'])
order_granted = eps['LS'].isin(['D1', 'E1'])
not_adopted = ~eps['REC'].isin(['E11', 'E12']) & eps['REC'].notna()
placed['ceased_incomplete'] = (
placed['DATE_PLACED_CEASED'].isna() | placed['REASON_PLACED_CEASED'].isna()
)
eps = eps[(child_placed | order_granted) & not_adopted]
eps = eps.merge(placed, on='CHILD', how='left', suffixes=['_EP', '_PA'], indicator=True)
eps = eps[(eps['_merge'] == 'left_only') | eps['ceased_incomplete']]
EP_errors = eps['index_EP']
PA_errors = eps['index_PA'].dropna()
return {
'Episodes': EP_errors.to_list(),
'PlacedAdoption': PA_errors.to_list(),
}
return error, _validate
def validate_207():
error = ErrorDefinition(
code='207',
description='Mother status for the current year disagrees with the mother status already recorded for this child.',
affected_fields=['MOTHER'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
mother_is_different = header_merged['MOTHER'].astype(str) != header_merged['MOTHER_last'].astype(str)
mother_was_true = header_merged['MOTHER_last'].astype(str) == '1'
error_mask = in_both_years & mother_is_different & mother_was_true
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
def validate_523():
error = ErrorDefinition(
code='523',
description="Date of decision that the child should be placed for adoption should be the same date as the decision that adoption is in the best interest (date should be placed).",
affected_fields=['DATE_PLACED', 'DATE_INT'],
)
def _validate(dfs):
if ("AD1" not in dfs) or ("PlacedAdoption" not in dfs):
return {}
else:
placed_adoption = dfs["PlacedAdoption"]
ad1 = dfs["AD1"]
# keep initial index values to be reused for locating errors later on.
placed_adoption.reset_index(inplace=True)
ad1.reset_index(inplace=True)
# convert to datetime to enable comparison
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format="%d/%m/%Y",
errors='coerce')
ad1["DATE_INT"] = pd.to_datetime(ad1['DATE_INT'], format='%d/%m/%Y', errors='coerce')
# drop rows where either of the required values have not been filled.
placed_adoption = placed_adoption[placed_adoption["DATE_PLACED"].notna()]
ad1 = ad1[ad1["DATE_INT"].notna()]
# bring corresponding values together from both dataframes
merged_df = placed_adoption.merge(ad1, on=['CHILD'], how='inner', suffixes=["_AD", "_PA"])
# find error values
different_dates = merged_df['DATE_INT'] != merged_df['DATE_PLACED']
# map error locations to corresponding indices
pa_error_locations = merged_df.loc[different_dates, 'index_PA']
ad1_error_locations = merged_df.loc[different_dates, 'index_AD']
return {"PlacedAdoption": pa_error_locations.to_list(), "AD1": ad1_error_locations.to_list()}
return error, _validate
def validate_3001():
error = ErrorDefinition(
code='3001',
description='Where care leavers information is being returned for a young person around their 17th birthday, the accommodation cannot be with their former foster carer(s).',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
oc3 = dfs['OC3']
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
header['DOB17'] = header['DOB'] + pd.DateOffset(years=17)
oc3_merged = oc3.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
accom_foster = oc3_merged['ACCOM'].str.upper().astype(str).isin(['Z1', 'Z2'])
age_17_in_year = (oc3_merged['DOB17'] <= collection_end) & (oc3_merged['DOB17'] >= collection_start)
error_mask = accom_foster & age_17_in_year
error_locations = oc3.index[error_mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_389():
error = ErrorDefinition(
code='389',
description='Reason episode ceased is that child transferred to care of adult social care services, but child is aged under 16.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB16'] = header['DOB'] + pd.DateOffset(years=16)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
ceased_asc = episodes_merged['REC'].str.upper().astype(str).isin(['E7'])
ceased_over_16 = episodes_merged['DOB16'] <= episodes_merged['DEC']
error_mask = ceased_asc & ~ceased_over_16
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_387():
error = ErrorDefinition(
code='387',
description='Reason episode ceased is child moved into independent living arrangement, but the child is aged under 14.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB14'] = header['DOB'] + pd.DateOffset(years=14)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
ceased_indep = episodes_merged['REC'].str.upper().astype(str).isin(['E5', 'E6'])
ceased_over_14 = episodes_merged['DOB14'] <= episodes_merged['DEC']
dec_present = episodes_merged['DEC'].notna()
error_mask = ceased_indep & ~ceased_over_14 & dec_present
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_452():
error = ErrorDefinition(
code='452',
description='Contradiction between local authority of placement code in the last episode of the previous year and in the first episode of the current year.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin()
episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax()
episodes = episodes[episodes.index.isin(episodes_min)]
episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)]
episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'],
suffixes=('', '_last'), indicator=True).set_index('index')
in_both_years = episodes_merged['_merge'] == 'both'
same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last']
last_year_open = episodes_merged['DEC_last'].isna()
different_pl_la = episodes_merged['PL_LA'].astype(str) != episodes_merged['PL_LA_last'].astype(str)
error_mask = in_both_years & same_rne & last_year_open & different_pl_la
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_386():
error = ErrorDefinition(
code='386',
description='Reason episode ceased is adopted but child has reached age 18.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB18'] = header['DOB'] + pd.DateOffset(years=18)
episodes_merged = (
episodes
.reset_index()
.merge(header, how='left', on=['CHILD'], suffixes=('', '_header'), indicator=True)
.set_index('index')
.dropna(subset=['DOB18', 'DEC'])
)
ceased_adopted = episodes_merged['REC'].str.upper().astype(str).isin(['E11', 'E12'])
ceased_under_18 = episodes_merged['DOB18'] > episodes_merged['DEC']
error_mask = ceased_adopted & ~ceased_under_18
error_locations = episodes_merged.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_363():
error = ErrorDefinition(
code='363',
description='Child assessment order (CAO) lasted longer than 7 days allowed in the Children Act 1989.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
collection_end_str = dfs['metadata']['collection_end']
L2_eps = episodes[episodes['LS'] == 'L3'].copy()
L2_eps['original_index'] = L2_eps.index
L2_eps = L2_eps[L2_eps['DECOM'].notna()]
L2_eps.loc[L2_eps['DEC'].isna(), 'DEC'] = collection_end_str
L2_eps['DECOM'] = pd.to_datetime(L2_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
L2_eps = L2_eps.dropna(subset=['DECOM'])
L2_eps['DEC'] = pd.to_datetime(L2_eps['DEC'], format='%d/%m/%Y', errors='coerce')
        L2_eps = L2_eps.sort_values(['CHILD', 'DECOM'])
L2_eps['index'] = pd.RangeIndex(0, len(L2_eps))
L2_eps['index+1'] = L2_eps['index'] + 1
L2_eps = L2_eps.merge(L2_eps, left_on='index', right_on='index+1',
how='left', suffixes=[None, '_prev'])
L2_eps = L2_eps[['original_index', 'DECOM', 'DEC', 'DEC_prev', 'CHILD', 'CHILD_prev', 'LS']]
L2_eps['new_period'] = (
(L2_eps['DECOM'] > L2_eps['DEC_prev'])
| (L2_eps['CHILD'] != L2_eps['CHILD_prev'])
)
L2_eps['duration'] = (L2_eps['DEC'] - L2_eps['DECOM']).dt.days
L2_eps['period_id'] = L2_eps['new_period'].astype(int).cumsum()
        L2_eps['period_duration'] = L2_eps.groupby('period_id')['duration'].transform('sum')
error_mask = L2_eps['period_duration'] > 7
return {'Episodes': L2_eps.loc[error_mask, 'original_index'].to_list()}
return error, _validate
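# --- Illustrative sketch (not part of the original rule set) --------------------
# validate_363/364 total up contiguous placement periods by turning the boolean
# 'new_period' flag into a running id with cumsum(), then summing durations within
# each id. Minimal invented example of that trick:
def _example_period_grouping():
    eps = pd.DataFrame({'new_period': [True, False, True, False, False],
                        'duration': [3, 5, 2, 4, 1]})
    eps['period_id'] = eps['new_period'].astype(int).cumsum()
    eps['period_duration'] = eps.groupby('period_id')['duration'].transform('sum')
    return eps  # two periods with total durations 8 and 7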
def validate_364():
error = ErrorDefinition(
code='364',
        description='Sections 41-46 of Police and Criminal Evidence (PACE; 1984) severely limit ' +
'the time a child can be detained in custody in Local Authority (LA) accommodation.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
collection_end_str = dfs['metadata']['collection_end']
J2_eps = episodes[episodes['LS'] == 'J2'].copy()
J2_eps['original_index'] = J2_eps.index
J2_eps['DECOM'] = pd.to_datetime(J2_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
J2_eps = J2_eps[J2_eps['DECOM'].notna()]
J2_eps.loc[J2_eps['DEC'].isna(), 'DEC'] = collection_end_str
J2_eps['DEC'] = pd.to_datetime(J2_eps['DEC'], format='%d/%m/%Y', errors='coerce')
        J2_eps = J2_eps.sort_values(['CHILD', 'DECOM'])
J2_eps['index'] = pd.RangeIndex(0, len(J2_eps))
J2_eps['index_prev'] = J2_eps['index'] + 1
J2_eps = J2_eps.merge(J2_eps, left_on='index', right_on='index_prev',
how='left', suffixes=[None, '_prev'])
J2_eps = J2_eps[['original_index', 'DECOM', 'DEC', 'DEC_prev', 'CHILD', 'CHILD_prev', 'LS']]
J2_eps['new_period'] = (
(J2_eps['DECOM'] > J2_eps['DEC_prev'])
| (J2_eps['CHILD'] != J2_eps['CHILD_prev'])
)
J2_eps['duration'] = (J2_eps['DEC'] - J2_eps['DECOM']).dt.days
J2_eps['period_id'] = J2_eps['new_period'].astype(int).cumsum()
        J2_eps['period_duration'] = J2_eps.groupby('period_id')['duration'].transform('sum')
error_mask = J2_eps['period_duration'] > 21
return {'Episodes': J2_eps.loc[error_mask, 'original_index'].to_list()}
return error, _validate
def validate_365():
error = ErrorDefinition(
code='365',
        description='Any individual short-term respite placement must not exceed 17 days.',
affected_fields=['LS', 'DECOM', 'DEC'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
episodes = dfs['Episodes']
collection_end_str = dfs['metadata']['collection_end']
episodes.loc[episodes['DEC'].isna(), 'DEC'] = collection_end_str
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = | pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce') | pandas.to_datetime |
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from .base import BaseExtensionTests
class BaseMissingTests(BaseExtensionTests):
def test_isna(self, data_missing):
expected = np.array([True, False])
result = pd.isna(data_missing)
tm.assert_numpy_array_equal(result, expected)
result = pd.Series(data_missing).isna()
expected = pd.Series(expected)
self.assert_series_equal(result, expected)
# GH 21189
result = pd.Series(data_missing).drop([0, 1]).isna()
expected = pd.Series([], dtype=bool)
self.assert_series_equal(result, expected)
def test_dropna_array(self, data_missing):
result = data_missing.dropna()
expected = data_missing[[1]]
self.assert_extension_array_equal(result, expected)
def test_dropna_series(self, data_missing):
ser = pd.Series(data_missing)
result = ser.dropna()
expected = ser.iloc[[1]]
self.assert_series_equal(result, expected)
def test_dropna_frame(self, data_missing):
df = pd.DataFrame({"A": data_missing})
# defaults
result = df.dropna()
expected = df.iloc[[1]]
self.assert_frame_equal(result, expected)
# axis = 1
result = df.dropna(axis='columns')
expected = pd.DataFrame(index=[0, 1])
self.assert_frame_equal(result, expected)
# multiple
df = pd.DataFrame({"A": data_missing,
"B": [1, np.nan]})
result = df.dropna()
expected = df.iloc[:0]
self.assert_frame_equal(result, expected)
def test_fillna_scalar(self, data_missing):
valid = data_missing[1]
result = data_missing.fillna(valid)
expected = data_missing.fillna(valid)
self.assert_extension_array_equal(result, expected)
def test_fillna_limit_pad(self, data_missing):
arr = data_missing.take([1, 0, 0, 0, 1])
result = | pd.Series(arr) | pandas.Series |
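# --- Illustrative sketch (not part of the original test module) -----------------
# BaseMissingTests is consumed by subclassing it for a concrete ExtensionArray and
# providing the `data_missing` fixture: a two-element array whose first value is
# missing, which is the contract test_isna() above relies on. `MyExtensionArray`
# is a stand-in name, not a real class:
#
#   class TestMyArrayMissing(BaseMissingTests):
#       pass
#
#   @pytest.fixture
#   def data_missing():
#       return MyExtensionArray([None, 1.0])  # [missing, valid]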
#!/usr/bin/env python
# coding: utf-8
import os
import pprint
import pandas as pd
from collections import OrderedDict
def get_parameters():
# Read Data
try:
df_363 = pd.read_excel(
io=os.path.join(os.path.dirname(__file__), 'data', 'tab_dd_363.xlsx'),
sheet_name='dd_363',
index_col=0
)
except Exception as e:
#print(e, '\n')
#print('Read table from GitHub')
df_363 = pd.read_excel(
io='https://github.com/gaemapiracicaba/norma_dd_363_11/raw/main/src/normas/data/tab_dd_363.xlsx',
sheet_name='dd_363',
index_col=0
)
# Filter only quality
df_363 = df_363.loc[(df_363['tipo_padrao'] == 'qualidade')]
# Classes
list_classes = list(set(df_363['padrao_qualidade']))
list_classes = [x for x in list_classes if pd.notnull(x)]
list_classes.sort()
return df_363, list_classes
def filter_by_classe(df_363, classe):
# Filter dataframe by Classe
df_363 = df_363.loc[(df_363['padrao_qualidade'] == classe)]
    # Parameters
list_parametros = list(set(df_363['parametro_descricao']))
list_parametros = [x for x in list_parametros if pd.notnull(x)]
list_parametros.sort()
return df_363, list_parametros
# def filter_by_parameters(df_363, parametro):
# # Filter dataframe by Parametro
# df_363 = df_363.loc[(df_363['parametro_descricao'] == parametro)]
#
# # Check and Get Results
# if len(df_363) == 1:
# dict_363 = df_363.to_dict(orient='records')[0]
# dict_363 = OrderedDict(sorted(dict_363.items(), key=lambda x: df_363.columns.get_loc(x[0])))
# return dict_363
# else:
# return 'erro'
def filter_by_parameters(df_363, parametro, condicao=None):
# Filter dataframe by Parametro
df_363 = df_363.loc[(df_363['parametro_descricao'] == parametro)]
    # Condition
array = df_363['condicao'].values
dict_condicao = dict(enumerate(array.flatten(), 1))
# Check and Get Results
if len(df_363) == 1 and len(array) == 1:
dict_363 = df_363.to_dict(orient='records')[0]
dict_363 = OrderedDict(sorted(dict_363.items(), key=lambda x: df_363.columns.get_loc(x[0])))
return dict_363
elif len(df_363) > 1 and len(array) > 1 and condicao is not None:
try:
            # Filter by the chosen condition
#condicao = df_357['condicao'].values[condicao]
df_363 = df_363.loc[(df_363['condicao'] == dict_condicao[int(condicao)])]
dict_363 = df_363.to_dict(orient='records')[0]
dict_363 = OrderedDict(sorted(dict_363.items(), key=lambda x: df_363.columns.get_loc(x[0])))
return dict_363
except Exception as e:
#print(e)
            print('The condition given was "{}".\nThe possible options are:\n'.format(condicao))
print(*('{} - {}'.format(k, v) for k,v in dict_condicao.items()), sep='\n')
else:
        print('Parameter "{}" has more than one record.\nA condition must be specified!\n'.format(parametro))
print(*('{} - {}'.format(k, v) for k,v in dict_condicao.items()), sep='\n')
def set_type_desconformidade(dict_363):
if pd.isnull(dict_363['valor_minimo_permitido']) & pd.notnull(dict_363['valor_maximo_permitido']):
        #print('Parameter only has a "maximum value". If the measured value is above it, the sample is non-conforming!')
tipo_363 = 'acima>desconforme'
elif | pd.notnull(dict_363['valor_minimo_permitido']) | pandas.notnull |
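# --- Illustrative usage sketch (not part of the original module) ----------------
# Typical call sequence for the helpers above; which classe/parametro to pick and
# whether a 'condicao' is needed depends on the contents of tab_dd_363.xlsx.
def _example_lookup():
    df_363, list_classes = get_parameters()
    df_classe, list_parametros = filter_by_classe(df_363, list_classes[0])
    dict_363 = filter_by_parameters(df_classe, list_parametros[0], condicao=1)
    pprint.pprint(dict_363)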
from datetime import datetime, timedelta
import warnings
import operator
from textwrap import dedent
import numpy as np
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
Timedelta)
from pandas._libs.lib import is_datetime_array
from pandas.compat import range, u, set_function_name
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.generic import (
ABCSeries, ABCDataFrame,
ABCMultiIndex,
ABCPeriodIndex, ABCTimedeltaIndex,
ABCDateOffset)
from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_platform_int,
is_integer,
is_float,
is_dtype_equal,
is_dtype_union_equal,
is_object_dtype,
is_categorical,
is_categorical_dtype,
is_interval_dtype,
is_period_dtype,
is_bool,
is_bool_dtype,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
is_integer_dtype, is_float_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_hashable,
needs_i8_conversion,
is_iterator, is_list_like,
is_scalar)
from pandas.core.base import PandasObject, IndexOpsMixin
import pandas.core.common as com
from pandas.core import ops
from pandas.util._decorators import (
Appender, Substitution, cache_readonly, deprecate_kwarg)
from pandas.core.indexes.frozen import FrozenList
import pandas.core.dtypes.concat as _concat
import pandas.core.missing as missing
import pandas.core.algorithms as algos
import pandas.core.sorting as sorting
from pandas.io.formats.printing import (
pprint_thing, default_pprint, format_object_summary, format_object_attrs)
from pandas.core.ops import make_invalid_op
from pandas.core.strings import StringMethods
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='',
target_klass='Index',
unique='Index', duplicated='np.ndarray')
_index_shared_docs = dict()
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
def _make_comparison_op(op, cls):
def cmp_method(self, other):
if isinstance(other, (np.ndarray, Index, ABCSeries)):
if other.ndim > 0 and len(self) != len(other):
raise ValueError('Lengths must match to compare')
# we may need to directly compare underlying
# representations
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
if is_object_dtype(self) and self.nlevels == 1:
# don't pass MultiIndex
with np.errstate(all='ignore'):
result = ops._comp_method_OBJECT_ARRAY(op, self.values, other)
else:
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings(record=True):
with np.errstate(all='ignore'):
result = op(self.values, np.asarray(other))
# technically we could support bool dtyped Index
# for now just return the indexing array directly
if is_bool_dtype(result):
return result
try:
return Index(result)
except TypeError:
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(cmp_method, name, cls)
def _make_arithmetic_op(op, cls):
def index_arithmetic_method(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
elif isinstance(other, ABCTimedeltaIndex):
# Defer to subclass implementation
return NotImplemented
other = self._validate_for_numeric_binop(other, op)
# handle time-based others
if isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):
return self._evaluate_with_timedelta_like(other, op)
elif isinstance(other, (datetime, np.datetime64)):
return self._evaluate_with_datetime_like(other, op)
values = self.values
with np.errstate(all='ignore'):
result = op(values, other)
result = missing.dispatch_missing(op, values, other, result)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
if op is divmod:
result = (Index(result[0], **attrs), Index(result[1], **attrs))
else:
result = Index(result, **attrs)
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(index_arithmetic_method, name, cls)
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
# required for backward compat, because PI can't be instantiated with
# ordinals through __new__ GH #13277
if issubclass(cls, ABCPeriodIndex):
from pandas.core.indexes.period import _new_PeriodIndex
return | _new_PeriodIndex(cls, **d) | pandas.core.indexes.period._new_PeriodIndex |
#
# (c) <NAME>
# 2018, All Rights Reserved
#
from i3d import Inception3D
import cv2
import configs
from os import path
import pandas as pd
import numpy as np
import helper
import communication
import time
import matplotlib.pyplot as plt
def validation_score(model_path, type, save=False, debugging=False):
model = Inception3D(input_shape=(configs.LENGTH, configs.IMG_HEIGHT, configs.IMG_WIDTH, 3),
weights_path=model_path)
# read the steering labels and image path
df_truth = | pd.read_csv('/home/neil/dataset/speedchallenge/data/validation.csv') | pandas.read_csv |
import streamlit as st
import streamlit.components.v1 as components
import gca_requests as gl
import datetime
import urllib.parse
import pandas as pd
import gca_main_functions as mf
def gca_page_2():
st.title("GOOGLE CALENDARS ANALYTICS PAGE 2")
st.write(f'## 2 GOOGLE CALENDAR SPECIFIC ANALYSIS (1 CALENDAR)')
st.sidebar.write(f'#### 2 GOOGLE CALENDAR SPECIFIC ANALYSIS (1 CALENDAR)')
input_calendar_name = st.sidebar.selectbox('2 SELECT CALENDAR:', mf.list_calendar_names)
input_calendar_id = [x["id"] for x in mf.list_calendar if x["summary"] == input_calendar_name][0]
input_dates_analyze = st.sidebar.date_input("2_b SELECT RANGE OF DATES TO ANALYZE", [datetime.date(2019, 1, 1),
datetime.date.today()],
key="spe_di")
input_calendar_specific_type = st.sidebar.selectbox('2_b SELECT EVENTS TYPE:', ["HOURS EVENTS", "DAYS EVENTS"],
key="spe_sb")
# Visualize events of selected calendar
input_calendar_events = gl.retrieve_calendar_events_by_id(input_calendar_id)
if len(input_calendar_events) == 0:
st.write("No values in this calendar.")
else:
#TODO Filter before dataframe
# df_events = filter_by_dates(input_calendar_events, input_dates_analyze, input_calendar_specific_type)
df_events = | pd.DataFrame(input_calendar_events) | pandas.DataFrame |
################################################
import pandas as pd
df = | pd.read_csv(r'C:\Users\mikol\Desktop\Python_Met_II/WorldCupMatches.csv') | pandas.read_csv |
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import make_regression
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import numpy as np, tensorflow as tf
from sklearn.preprocessing import OneHotEncoder
import os
import csv
import gc
from sklearn.metrics import mean_squared_error
import math
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import ConstantKernel, RBF
from sklearn.gaussian_process.kernels import RationalQuadratic
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RepeatedKFold
from sklearn import linear_model
from xgboost.sklearn import XGBRegressor
from sklearn.decomposition import PCA
import copy
import pyflux as pf
import datetime
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
PRICED_BITCOIN_FILE_PATH = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/pricedBitcoin2009-2018.csv"
DAILY_OCCURRENCE_FILE_PATH = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/dailyOccmatrices/"
betti0_input_path = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/betti_0(100).csv"
betti1_input_path = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/betti_1(100).csv"
DAILY_FILTERED_OCCURRENCE_FILE_PATH = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/filteredDailyOccMatrices/"
ROW = -1
COLUMN = -1
TEST_SPLIT = 0.01
ALL_YEAR_INPUT_ALLOWED = False
YEAR = 2017
# Baseline
from sklearn.metrics import mean_squared_error
from sklearn import metrics
import matplotlib.pyplot as plt
def exclude_days(train, test):
row, column = train.shape
train_days = np.asarray(train[:, -1]).reshape(-1, 1)
x_train = train[:, 0:column - 1]
test_days = np.asarray(test[:, -1]).reshape(-1, 1)
x_test = test[:, 0:column - 1]
return x_train, x_test, train_days, test_days
def merge_data(occurrence_data, daily_occurrence_normalized_matrix, aggregation_of_previous_days_allowed):
if(aggregation_of_previous_days_allowed):
if(occurrence_data.size==0):
occurrence_data = daily_occurrence_normalized_matrix
else:
occurrence_data = np.add(occurrence_data, daily_occurrence_normalized_matrix)
else:
if(occurrence_data.size == 0):
occurrence_data = daily_occurrence_normalized_matrix
else:
occurrence_data = np.concatenate((occurrence_data, daily_occurrence_normalized_matrix), axis=0)
#print("merge_data shape: {} occurrence_data: {} ".format(occurrence_data.shape, occurrence_data))
return occurrence_data
def get_normalized_matrix_from_file(day, year, totaltx):
daily_occurrence_matrix_path_name = DAILY_OCCURRENCE_FILE_PATH + "occ" + str(year) + '{:03}'.format(day) + '.csv'
daily_occurence_matrix = pd.read_csv(daily_occurrence_matrix_path_name, sep=",", header=None).values
return np.asarray(daily_occurence_matrix).reshape(1, daily_occurence_matrix.size)/totaltx
def fl_get_normalized_matrix_from_file(day, year, totaltx, n_components):
daily_occurence_matrix = np.asarray([],dtype=np.float32)
for filter_number in range(0, 50, 10):
daily_occurrence_matrix_path_name = DAILY_FILTERED_OCCURRENCE_FILE_PATH + "occ" + str(year) + '{:03}'.format(day) + "_" + str(filter_number) +'.csv'
daily_occurence_matrix_read = pd.read_csv(daily_occurrence_matrix_path_name, sep=",", header=None).values
if(daily_occurence_matrix.size == 0):
daily_occurence_matrix = daily_occurence_matrix_read
else:
daily_occurence_matrix = np.concatenate((daily_occurence_matrix, daily_occurence_matrix_read), axis = 1)
    pca = PCA(n_components=n_components)
pca.fit(daily_occurence_matrix)
daily_occurence_matrix = pca.transform(daily_occurence_matrix)
#print("daily_occurence_matrix: ", daily_occurence_matrix, daily_occurence_matrix.shape)
#return np.asarray(daily_occurence_matrix).reshape(1, daily_occurence_matrix.size)/totaltx
return np.asarray(daily_occurence_matrix).reshape(1, daily_occurence_matrix.size)
def get_daily_occurrence_matrices(priced_bitcoin, current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
#print("priced_bitcoin: ", priced_bitcoin, priced_bitcoin.shape)
#print("current_row: ", current_row, current_row.shape)
previous_price_data = np.array([], dtype=np.float32)
occurrence_data = np.array([], dtype=np.float32)
for index, row in priced_bitcoin.iterrows():
if not ((row.values == current_row.values).all()):
previous_price_data = np.append(previous_price_data, row['price'])
previous_price_data = np.append(previous_price_data, row['totaltx'])
#print("previous_price_data: ", previous_price_data,row['day'], row['year'], row['totaltx'])
#print("occurrence_data: ", occurrence_data)
if(is_price_of_previous_days_allowed):
#print("previous_price_data: ", np.asarray(previous_price_data).reshape(1, -1), np.asarray(previous_price_data).reshape(1, -1).shape)
occurrence_data = np.asarray(previous_price_data).reshape(1, -1)
occurrence_input = np.concatenate((occurrence_data, np.asarray(current_row['price']).reshape(1,1)), axis=1)
#print("current_row: ", current_row, current_row.shape)
#print(" price occurrence_input: ", np.asarray(current_row['price']).reshape(1,1), (np.asarray(current_row['price']).reshape(1,1)).shape)
#print("concatenate with price occurrence_input: ", occurrence_input, occurrence_input.shape)
occurrence_input = np.concatenate((occurrence_input, np.asarray(current_row['day']).reshape(1,1)), axis=1)
#print(" price occurrence_input: ", np.asarray(current_row['day']).reshape(1,1), (np.asarray(current_row['day']).reshape(1,1)).shape)
#print("concatenate with day occurrence_input: ", occurrence_input, occurrence_input.shape)
return occurrence_input
def betti_get_daily_occurrence_matrices(priced_bitcoin, current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
occurrence_data = np.array([], dtype=np.float32)
for index, row in priced_bitcoin.iterrows():
if not ((row.values == current_row.values).all()):
previous_price_data = np.array([], dtype=np.float32)
previous_price_data = np.append(previous_price_data, row['price'])
previous_price_data = np.append(previous_price_data, row['totaltx'])
betti0_50 = read_betti(betti0_input_path, row['day'])
previous_price_data = np.append(previous_price_data, np.asarray(betti0_50).reshape(1,-1))
betti1_50 = read_betti(betti1_input_path, row['day'])
previous_price_data = np.append(previous_price_data, np.asarray(betti1_50).reshape(1,-1))
if occurrence_data.size == 0:
occurrence_data = previous_price_data
else:
occurrence_data = np.row_stack((occurrence_data,previous_price_data))
#print(occurrence_data, occurrence_data.shape)
#print(previous_price_data, previous_price_data.shape)
occurrence_data = np.asarray(occurrence_data).reshape(1, -1)
#betti0_50 = read_betti(betti0_input_path, current_row['day'])
#occurrence_input = np.concatenate((occurrence_data, np.asarray(betti0_50).reshape(1,-1)), axis=1)
#betti1_50 = read_betti(betti1_input_path, current_row['day'])
#occurrence_input = np.concatenate((occurrence_input, np.asarray(betti1_50).reshape(1,-1)), axis=1)
occurrence_input = np.concatenate((occurrence_data, np.asarray(current_row['price']).reshape(1,1)), axis=1)
occurrence_input = np.concatenate((occurrence_input, np.asarray(current_row['day']).reshape(1,1)), axis=1)
return occurrence_input
def betti_der_get_daily_occurrence_matrices(priced_bitcoin, current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
#print("priced_bitcoin: ", priced_bitcoin, priced_bitcoin.shape)
#print("current_row: ", current_row, current_row.shape)
previous_price_data = np.array([], dtype=np.float32)
occurrence_data = np.array([], dtype=np.float32)
for index, row in priced_bitcoin.iterrows():
if not ((row.values == current_row.values).all()):
previous_price_data = np.append(previous_price_data, row['price'])
previous_price_data = np.append(previous_price_data, row['totaltx'])
betti0_50 = read_betti(betti0_input_path, row['day'])
previous_price_data = np.append(previous_price_data, np.asarray(betti0_50).reshape(1,-1))
betti1_50 = read_betti(betti1_input_path, row['day'])
previous_price_data = np.append(previous_price_data, np.asarray(betti1_50).reshape(1,-1))
betti0_50_diff1 = betti0_50.diff(1).dropna()
previous_price_data = np.concatenate((previous_price_data.reshape(1,-1), np.asarray(betti0_50_diff1).reshape(1,-1)), axis=1)
betti1_50_diff1 = betti1_50.diff(1).dropna()
previous_price_data = np.concatenate((previous_price_data, np.asarray(betti1_50_diff1).reshape(1,-1)), axis=1)
if occurrence_data.size == 0:
occurrence_data = previous_price_data
else:
occurrence_data = np.concatenate((occurrence_data, previous_price_data.reshape(1,-1)), axis=1)
#print(occurrence_data, occurrence_data.shape)
#print("previous_price_data: ", previous_price_data,row['day'], row['year'], row['totaltx'])
occurrence_data = np.asarray(occurrence_data).reshape(1, -1)
occurrence_input = np.concatenate((occurrence_data, np.asarray(current_row['price']).reshape(1,1)), axis=1)
occurrence_input = np.concatenate((occurrence_input, np.asarray(current_row['day']).reshape(1,1)), axis=1)
return occurrence_input
def fl_get_daily_occurrence_matrices(priced_bitcoin, current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
previous_price_data = np.array([], dtype=np.float32)
occurrence_data = np.array([], dtype=np.float32)
for index, row in priced_bitcoin.iterrows():
if not ((row.values == current_row.values).all()):
previous_price_data = np.append(previous_price_data, row['price'])
previous_price_data = np.append(previous_price_data, row['totaltx'])
daily_occurrence_normalized_matrix = fl_get_normalized_matrix_from_file(row['day'], row['year'], row['totaltx'], 20)
occurrence_data = merge_data(occurrence_data, daily_occurrence_normalized_matrix, aggregation_of_previous_days_allowed)
#print("occurrence_data: ",occurrence_data, occurrence_data.shape)
if(is_price_of_previous_days_allowed):
occurrence_data = np.concatenate((occurrence_data.reshape(1,-1), np.asarray(previous_price_data).reshape(1,-1)), axis=1)
occurrence_input = np.concatenate((occurrence_data.reshape(1,-1), np.asarray(current_row['price']).reshape(1,1)), axis=1)
occurrence_input = np.concatenate((occurrence_input, np.asarray(current_row['day']).reshape(1,1)), axis=1)
#print("occurrence_input: ",occurrence_input, occurrence_input.shape)
return occurrence_input
def read_betti(file_path, day):
day = day - 1
betti = | pd.read_csv(file_path, index_col=0) | pandas.read_csv |
import gym
from gym import spaces
import torch
import torch.nn as nn
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
from xitorch.interpolate import Interp1D
from tqdm.auto import tqdm, trange
import time
from rcmodel.room import Room
from rcmodel.building import Building
from rcmodel.RCModel import RCModel
from rcmodel.tools import InputScaling
from rcmodel.tools import BuildingTemperatureDataset
class PolicyNetwork(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
n = 10
self.flatten = nn.Flatten()
self.linear_relu_stack = nn.Sequential(
nn.Linear(in_dim, n),
nn.ReLU(),
nn.Linear(n, n),
nn.ReLU(),
nn.Linear(n, out_dim),
)
self.on_policy_reset()
def forward(self, state):
logits = self.linear_relu_stack(state)
return logits
def get_action(self, state):
        dist = torch.distributions.categorical.Categorical(logits=self.forward(state))  # make a probability distribution
        action = dist.sample()  # sample from distribution pi(a|s) (action given state)
        return action, dist.log_prob(action)
def on_policy_reset(self):
# this stores log_probs during an integration step.
self.log_probs = []
# self.rewards = []
class Reinforce:
def __init__(self, env, time_data, temp_data, gamma=0.99, alpha=1e-3):
assert len(time_data) == len(temp_data)
self.env = env
# self.pi = pi
self.time_data = time_data
self.temp_data = temp_data
self.gamma = gamma
self.alpha = alpha
self.optimiser = torch.optim.Adam(self.env.RC.cooling_policy.parameters(), lr=self.alpha)
def update_policy(self, rewards, log_probs):
# Calculate Discounted Reward:
discounted_rewards = torch.zeros(len(rewards))
R = 0
indx = len(rewards) - 1
for r in reversed(rewards):
R = r + self.gamma * R # Discounted Reward is calculated from last reward to first.
discounted_rewards[indx] = R # Fill array back to front to un-reverse the order
indx -= 1
# Normalise rewards
discounted_rewards = (discounted_rewards - discounted_rewards.mean()) / (
discounted_rewards.std() + 1e-9)
# discounted_rewards = torch.tensor(np.array(discounted_rewards.detach().numpy()))
expected_reward = -torch.stack(log_probs) * discounted_rewards # negative for maximising
expected_reward = torch.sum(expected_reward)
# print(f'ER {expected_reward}')
# Update parameters in pi
self.optimiser.zero_grad()
expected_reward.backward()
self.optimiser.step()
# print(list(self.pi.parameters())[0].grad) # check on grads if needed
return expected_reward
def train(self, num_episodes, step_size):
self.env.RC.cooling_policy.train() # Put in training mode
total_ER = []
total_rewards = []
loss_fn = torch.nn.MSELoss(reduction='none') # Squared Error
# with tqdm(total=len(self.env.time_data) * num_episodes, position=0, leave=False) as pbar: # progress bar
for episode in range(num_episodes):
self.env.reset()
episode_rewards = []
episode_ER = []
# Time is increased in steps, with the policy updating after every step.
while self.env.t_index < len(self.env.time_data) - 1:
# takes a step_size forward in time
pred = self.env.step(step_size).squeeze(-1) # state and action produced in step
actual = self.temp_data[self.env.t_index:int(self.env.t_index + step_size), 0:self.env.n_rooms]
# negative so reward can be maximised
reward = -loss_fn(pred[:, 2:], actual)
# Do gradient decent on sample
ER = self.update_policy(reward, self.env.RC.cooling_policy.log_probs)
self.env.RC.cooling_policy.on_policy_reset() # empty buffer
# get last output and use for next initial value
self.env.RC.iv = pred[-1, :].unsqueeze(1).detach() # MUST DETACH GRAD
episode_rewards.append(sum(reward))
episode_ER.append(ER)
self.env.t_index += int(step_size) # increase environment time
# print(f'Episode {episode+1}, Expected Reward: {sum(episode_ER).item():.2f}, total_reward: {sum(episode_rewards).item():.2f}')
total_ER.append(sum(episode_ER).detach())
total_rewards.append(sum(episode_rewards).detach())
return total_rewards, total_ER
class LSIEnv(gym.Env):
"""Custom Environment that follows gym interface"""
metadata = {'render.modes': ['human']}
def __init__(self, RC, time_data):
super().__init__()
self.RC = RC # RCModel Class
self.time_data = time_data # timeseries
self.t_index = 0 # used to keep track of index through timeseries
# ----- GYM Stuff -----
self.n_rooms = len(self.RC.building.rooms)
self.low_state = -10
self.high_state = 50
# Define action and observation space
# They must be gym.spaces objects
# Example when using discrete actions:
self.action_space = spaces.Discrete(2)
# Observation is temperature of each room.
self.observation_space = spaces.Box(
low=self.low_state,
high=self.high_state,
shape=(self.n_rooms,),
dtype=np.float32
)
# action
def step(self, step_size):
# Execute a chunk of timeseries
t_eval = self.time_data[self.t_index:int(self.t_index + step_size)]
# actions are decided and stored by the policy while integrating the ODE:
pred = self.RC(t_eval)
return pred.detach() # No need for grad
# (observation, reward, done, info)
# self.state, reward, done, {}
def reset(self):
# Reset the state of the environment to an initial state
self.t_index = 0
self.RC.reset_iv()
self.RC.cooling_policy.on_policy_reset()
def render(self, mode='human', close=False):
# Render the environment to the screen
return
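# --- Editor's hedged usage sketch: stepping LSIEnv manually, mirroring what Reinforce.train
# does above. `rc_model` and `time_data` are assumed to be a prepared RCModel and a tensor of
# time points; step_size=24 is an arbitrary illustrative chunk length.
def _lsi_env_demo(rc_model, time_data, step_size=24):
    env = LSIEnv(rc_model, time_data)
    env.reset()
    while env.t_index < len(env.time_data) - 1:
        pred = env.step(step_size).squeeze(-1)         # integrate one chunk of the timeseries
        env.RC.iv = pred[-1, :].unsqueeze(1).detach()  # last state becomes the next initial value
        env.t_index += int(step_size)
    return env.t_index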
if __name__ == '__main__':
def initialise_model(pi):
torch.cuda.is_available = lambda: False  # force torch to report no CUDA device (run on CPU)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def change_origin(coords):
x0 = 92.07
y0 = 125.94
for i in range(len(coords)):
coords[i][0] = round((coords[i][0] - x0) / 10, 2)
coords[i][1] = round((coords[i][1] - y0) / 10, 2)
return coords
capacitance = 3000 # Variable changed later
rooms = []
name = "seminar_rm_a_t0106"
coords = change_origin(
[[92.07, 125.94], [92.07, 231.74], [129.00, 231.74], [154.45, 231.74], [172.64, 231.74], [172.64, 125.94]])
rooms.append(Room(name, capacitance, coords))
# Initialise Building
height = 1
Re = [4, 1, 0.55] # Sum of R makes Uval=0.18 #Variable changed later
Ce = [1.2 * 10 ** 3, 0.8 * 10 ** 3] # Variable changed later
Rint = 0.66 # Uval = 1/R = 1.5 #Variable changed later
bld = Building(rooms, height, Re, Ce, Rint)
rm_CA = [200, 800] # [min, max] Capacitance/area
ex_C = [1.5 * 10 ** 4, 10 ** 6] # Capacitance
R = [0.2, 1.2] # Resistance ((K.m^2)/W)
scaling = InputScaling(rm_CA, ex_C, R)
scale_fn = scaling.physical_scaling # function to scale parameters back to physical values
path_Tout = '/Users/benfourcin/OneDrive - University of Exeter/PhD/LSI/Data/Met Office Weather Files/JuneSept.csv'
df = | pd.read_csv(path_Tout) | pandas.read_csv |
from __future__ import annotations
import pytest
from pandas.errors import ParserWarning
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
to_datetime,
)
import pandas._testing as tm
from pandas.io.xml import read_xml
@pytest.fixture(params=[pytest.param("lxml", marks=td.skip_if_no("lxml")), "etree"])
def parser(request):
return request.param
@pytest.fixture(
params=[None, {"book": ["category", "title", "author", "year", "price"]}]
)
def iterparse(request):
return request.param
def read_xml_iterparse(data, **kwargs):
with tm.ensure_clean() as path:
with open(path, "w") as f:
f.write(data)
return read_xml(path, **kwargs)
xml_types = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<shape>square</shape>
<degrees>00360</degrees>
<sides>4.0</sides>
</row>
<row>
<shape>circle</shape>
<degrees>00360</degrees>
<sides/>
</row>
<row>
<shape>triangle</shape>
<degrees>00180</degrees>
<sides>3.0</sides>
</row>
</data>"""
xml_dates = """<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<shape>square</shape>
<degrees>00360</degrees>
<sides>4.0</sides>
<date>2020-01-01</date>
</row>
<row>
<shape>circle</shape>
<degrees>00360</degrees>
<sides/>
<date>2021-01-01</date>
</row>
<row>
<shape>triangle</shape>
<degrees>00180</degrees>
<sides>3.0</sides>
<date>2022-01-01</date>
</row>
</data>"""
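# --- Editor's hedged illustration: a direct call to the helper above with the same iterparse
# mapping the tests use. iterparse names the repeating element ("row") and the child fields to
# collect from it; the temp-file round trip exists because the iterparse path is designed to
# read from a file on disk rather than from a literal XML string.
def _iterparse_demo(parser="etree"):
    return read_xml_iterparse(
        xml_types,
        parser=parser,
        dtype={"degrees": "str"},
        iterparse={"row": ["shape", "degrees", "sides"]},
    )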
# DTYPE
def test_dtype_single_str(parser):
df_result = read_xml(xml_types, dtype={"degrees": "str"}, parser=parser)
df_iter = read_xml_iterparse(
xml_types,
parser=parser,
dtype={"degrees": "str"},
iterparse={"row": ["shape", "degrees", "sides"]},
)
df_expected = DataFrame(
{
"shape": ["square", "circle", "triangle"],
"degrees": ["00360", "00360", "00180"],
"sides": [4.0, float("nan"), 3.0],
}
)
tm.assert_frame_equal(df_result, df_expected)
tm.assert_frame_equal(df_iter, df_expected)
def test_dtypes_all_str(parser):
df_result = read_xml(xml_dates, dtype="string", parser=parser)
df_iter = read_xml_iterparse(
xml_dates,
parser=parser,
dtype="string",
iterparse={"row": ["shape", "degrees", "sides", "date"]},
)
df_expected = DataFrame(
{
"shape": ["square", "circle", "triangle"],
"degrees": ["00360", "00360", "00180"],
"sides": ["4.0", None, "3.0"],
"date": ["2020-01-01", "2021-01-01", "2022-01-01"],
},
dtype="string",
)
tm.assert_frame_equal(df_result, df_expected)
tm.assert_frame_equal(df_iter, df_expected)
def test_dtypes_with_names(parser):
df_result = read_xml(
xml_dates,
names=["Col1", "Col2", "Col3", "Col4"],
dtype={"Col2": "string", "Col3": "Int64", "Col4": "datetime64"},
parser=parser,
)
df_iter = read_xml_iterparse(
xml_dates,
parser=parser,
names=["Col1", "Col2", "Col3", "Col4"],
dtype={"Col2": "string", "Col3": "Int64", "Col4": "datetime64"},
iterparse={"row": ["shape", "degrees", "sides", "date"]},
)
df_expected = DataFrame(
{
"Col1": ["square", "circle", "triangle"],
"Col2": | Series(["00360", "00360", "00180"]) | pandas.Series |
#!/usr/bin/env python3
"""
Script to test the runtime of different approximation techniques
"""
import os
from datetime import datetime
from uuid import uuid4
import time
from itertools import product
from argparse import ArgumentParser
import pandas as pd
from dmt import morse_approx_binning, morse_approx_along_gradients, morse_approx_induced_matching
from dmt.data import load_complex, get_complex_names
from dmt.dmt import reduce_until_stable
from dmt.binning import bin_cechmate
from dmt.perseus import to_nmfsimtop, perseus_unreduced_count, get_tmp_fpath, cleanup
RESULTS_FOLDER = os.path.join(os.path.dirname(__file__), "..", "timings")
if not os.path.exists(RESULTS_FOLDER):
os.mkdir(RESULTS_FOLDER)
def perseus_reduction(cplx, delta, prereduced_cplx=None, deltatrick=True):
tmp_filename = get_tmp_fpath()
binned_cechmate = bin_cechmate(cplx.cechmate_complex, delta, deltatrick=deltatrick)
to_nmfsimtop(binned_cechmate, tmp_filename)
unreduced_count = perseus_unreduced_count(tmp_filename)
cleanup(tmp_filename)
return cplx.size - unreduced_count
def reduction_wrapper(fn):
def reduction_fn(cplx, delta, prereduced_cplx=None, deltatrick=True):
original_size = cplx.size
if prereduced_cplx is not None:
cplx = prereduced_cplx
return original_size - fn(cplx, delta, deltatrick=deltatrick).size
return reduction_fn
binning_reduction = reduction_wrapper(morse_approx_binning)
gradient_reduction = reduction_wrapper(morse_approx_along_gradients)
induced_reduction = reduction_wrapper(morse_approx_induced_matching)
ALGO_MAP = {"binning": binning_reduction,
"gradient": gradient_reduction,
"induced": induced_reduction,
"perseus": perseus_reduction
}
def mytimeit(fun, *args, **kwargs):
start = time.process_time()
result = fun(*args, **kwargs)
duration = time.process_time() - start
return {"result": result,
"time_s": duration}
def combine(dict1, dict2):
""" update in place """
dict1.update(dict2)
return dict1
def compute_times(complex_fname, deltas, runs=3, algorithms=None):
runs = list(range(runs))
cplx = load_complex(complex_fname)
prereduced_cplx = reduce_until_stable(cplx)
algo_names = ALGO_MAP.keys() if algorithms is None else algorithms
return pd.DataFrame([combine({"complex": str(type(cplx)),
"filename": complex_fname,
"run": uuid4().hex,
"points": cplx.points.shape[0],
"dim": cplx.points.shape[1],
"size": cplx.size,
"size_prereduced": prereduced_cplx.size,
"delta": delta,
"algorithm": algo_name},
mytimeit(lambda: ALGO_MAP[algo_name](cplx, delta, prereduced_cplx=prereduced_cplx))
)
for (algo_name, run, delta)
in product(algo_names, runs, deltas)])
def run_experiment(deltas, runs=5, complex_fnames=None, algorithms=None):
times_dfs = []
for complex_fname in complex_fnames or get_complex_names():
print("Computing times for complex %s" % complex_fname)
toc = time.time()
times_df = compute_times(complex_fname, deltas, runs=runs, algorithms=algorithms)
times_df.to_csv(times_fname(complex_fname))
times_dfs.append(times_df)
print("Done in %ss" % (time.time() - toc))
return | pd.concat(times_dfs) | pandas.concat |
#! /usr/bin/env python
import maple
import maple.data as data
import maple.audio as audio
import numpy as np
import joblib
import pandas as pd
import argparse
import datetime
import sounddevice as sd
from scipy import signal
from pathlib import Path
from sklearn import preprocessing
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
labels = {
0: 'none',
1: 'whine',
2: 'howl',
3: 'bark',
4: 'play',
5: 'scratch_cage',
6: 'scratch_door',
}
class LabelAudio(object):
"""Label audio from Audio"""
def __init__(self, args=argparse.Namespace()):
self.menu = {
'home': {
'msg': 'Press [s] to start, Press [q] to quit. Response: ',
'function': self.menu_handle,
},
'preview': {
'msg': 'Use clip? [y]es, [n]o, [r]epeat, [q]uit. Response: ',
'function': self.preview_handle,
},
'label': {
'msg': '[0] none, [1] whine, [2] howl, [3] bark, [4] play, [5] cage, [6] door, '
       '[r]epeat, [R]epeat full, [s] to skip, [q]uit. Response: ',
'function': self.label_handle,
},
}
if args.label_data is None:
raise Exception("You must supply --label-data")
self.state = 'home'
self.subevent_time = 0.25 # in seconds
self.cols = [
'session_id',
'event_id',
'subevent_id',
't_start',
't_end',
'label',
'date_labeled',
]
self.label_data_path = Path(args.label_data)
if self.label_data_path.exists():
self.data = pd.read_csv(self.label_data_path, sep='\t')
else:
self.data = pd.DataFrame({}, columns=self.cols)
print(f'You have {self.data.shape[0]} labelled data')
if args.session_paths is None:
session_paths = sorted(maple.db_dir.glob('*/events.db'))
else:
session_paths = sorted([Path(x.strip()) for x in open(args.session_paths).readlines()])
self.sessions = {}
for session_path in session_paths:
print(f'Loading session path {session_path}')
session_db = data.SessionAnalysis(path=session_path)
session_db.trim_ends(minutes=1)
if session_db.dog.empty:
continue
session_id = session_db.meta[session_db.meta['key'] == 'id'].iloc[0]['value']
session_db.dog['session_id'] = session_id
self.sessions[session_id] = session_db.dog
session_db.disconnect()
self.filter()
def run(self):
while True:
self.menu[self.state]['function'](input(self.menu[self.state]['msg']))
print()
if self.state == 'done':
self.save_data()
print('Any new data has been saved. Bye.')
break
def menu_handle(self, response):
if response == 's':
self.event = self.sample_event()
self.play_event(self.event)
self.state = 'preview'
elif response == 'q':
self.state = 'done'
else:
print('invalid input')
def preview_handle(self, response):
if response == 'y':
self.set_subevents(self.event)
self.play_subevent(self.curr_subevent)
self.state = 'label'
elif response == 'n':
self.event = self.sample_event()
self.play_event(self.event)
elif response == 'r':
self.play_event(self.event)
elif response == 'q':
self.state = 'done'
else:
print('invalid input')
def label_handle(self, response):
if response == 'r':
self.play_subevent(self.curr_subevent)
elif response == 'R':
self.play_event(self.event)
elif response == 's':
if not self.increment_subevent():
print('Finished event')
self.cache_event_labels()
self.event = self.sample_event()
self.play_event(self.event)
self.state = 'preview'
return
self.play_subevent(self.curr_subevent)
elif response in [str(x) for x in labels.keys()]:
self.append(response)
if not self.increment_subevent():
print('Finished event')
self.cache_event_labels()
self.event = self.sample_event()
self.play_event(self.event)
self.state = 'preview'
return
self.play_subevent(self.curr_subevent)
elif response == 'q':
self.state = 'done'
else:
print('invalid input')
def sample_event(self):
self.event_data = {x: [] for x in self.cols}
while True:
session_id = np.random.choice(list(self.sessions.keys()))
event = self.sessions[session_id].sample().iloc[0]
for _, row in self.data.iterrows():
if row['session_id'] == session_id and row['event_id'] == event['event_id']:
break
else:
break
return event
def cache_event_labels(self):
self.event_data = pd.DataFrame(self.event_data)
self.data = pd.concat([self.data, self.event_data], ignore_index=True)
def save_data(self):
self.data['event_id'] = self.data['event_id'].astype(int)
self.data['subevent_id'] = self.data['subevent_id'].astype(int)
self.data.to_csv(self.label_data_path, sep='\t', index=False)
def append(self, response):
self.event_data['session_id'].append(self.event['session_id'])
self.event_data['event_id'].append(self.event['event_id'])
self.event_data['subevent_id'].append(self.curr_subevent_id)
self.event_data['t_start'].append(self.curr_subevent_id * self.subevent_time)
self.event_data['t_end'].append((self.curr_subevent_id + 1) * self.subevent_time)
self.event_data['label'].append(labels[int(response)])
self.event_data['date_labeled'].append(datetime.datetime.now())
def increment_subevent(self):
if self.curr_subevent_id == self.num_subevents - 1:
return False
self.curr_subevent_id += 1
self.curr_subevent = self.subevents[self.curr_subevent_id]
return True
def set_subevents(self, event):
self.subevents = []
self.num_subevents = int(event.t_len // self.subevent_time)
subevent_len = int(self.subevent_time * maple.RATE)
event_audio = event['audio']
for i in range(self.num_subevents):
self.subevents.append(event_audio[i*subevent_len: (i+1)*subevent_len])
self.curr_subevent = self.subevents[0]
self.curr_subevent_id = 0
def play_event(self, event):
# Normalize volumes so barks aren't too loud, and grrrs aren't too soft
audio = np.copy(event['audio']).astype(float)
audio *= 10000 / np.max(audio)
audio = audio.astype(maple.ARRAY_DTYPE)
sd.play(audio, blocking=True)
def play_subevent(self, subevent):
# Normalize volumes so barks aren't too loud, and grrrs aren't too soft
audio = np.copy(subevent).astype(float)
audio *= 10000 / np.max(audio)
audio = audio.astype(maple.ARRAY_DTYPE)
sd.play(audio, blocking=True)
def filter(self, max_t_len=10):
    # Filter in place: rebinding the loop variable would leave self.sessions unchanged
    for session_id, events in self.sessions.items():
        self.sessions[session_id] = events[events['t_len'] <= max_t_len]
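# --- Editor's hedged usage sketch: how the interactive labeller above is typically driven.
# The label-data path is a placeholder; session databases are discovered under maple.db_dir
# when no explicit session paths are given.
def _label_audio_demo():
    args = argparse.Namespace(label_data='label_data.txt', session_paths=None)
    app = LabelAudio(args)  # loads sessions and any existing labels
    app.run()               # interactive menu loop; saves labels on quit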
class Train(object):
def __init__(self, args=argparse.Namespace()):
"""Train a model based off label data
Parameters
==========
args : argparse.Namespace
An `argparse.Namespace` object with `label_data` and `model_dir` paths. `label_data` is
a string pointing to the label data filepath to be used for training. `model_dir` is a string
pointing to a directory where the model will be stored. The directory must not exist.
"""
self.label_dict = {v: k for k, v in labels.items()}
if args.label_data is None:
raise Exception("Must provide --label-data in order to train!")
A = lambda x: args.__dict__.get(x, None)
self.trans = A('transformation') or 'spectrogram'
self.log = A('log_the_data')
if self.log and self.trans not in ['spectrogram', 'fourier']:
raise Exception("Cannot log transform data unless --transformation is in {'spectrogram', 'fourier'}")
self.model_dir = Path(A('model_dir') or maple.model_dir)
self.label_data_path = Path(args.label_data)
self.label_data = | pd.read_csv(self.label_data_path, sep='\t') | pandas.read_csv |
import pandas as pd
from datetime import datetime
from pjud import data
import os
from tqdm.auto import tqdm
import numpy as np
import click
def convierte_fecha(fecha):
try:
day,month,year = map(int,fecha.split(sep = "-"))
except:
#print(f"no pude ejecutar {fecha}")
return pd.NaT
return datetime(year,month,day)
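# --- Editor's hedged usage sketch: convierte_fecha expects "dd-mm-yyyy" strings and returns
# pd.NaT for anything it cannot parse.
def _convierte_fecha_demo():
    ok = convierte_fecha("05-03-2019")   # -> datetime(2019, 3, 5)
    bad = convierte_fecha("sin fecha")   # -> pd.NaT
    return ok, bad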
def obtiene_año(rit):
try:
rol,year = map(int,rit.split(sep = "-"))
except:
#print(f"no pude ejecutar {fecha}")
return pd.NaT
return (year)
def elimina_tilde(str_variable):
replacements = {'Á': 'A',
'É': 'E',
'Í': 'I',
'Ó': 'O',
'Ú': 'U',
'Ü': 'U',
}
for a, b in replacements.items():
str_variable = str_variable.astype(str).str.replace(a, b)
return str_variable
def elimina_espacios(col):
if col.dtypes == object:
return (col.astype(str).str.rstrip())
return col
def limpia_rit(str_rit):
return str_rit.replace('--','-')
def limpieza_caracteres(str_col):
replacements = {'-': '',
'\xa0': '',
'\n': ''
}
for a, b in replacements.items():
str_col = str_col.astype(str).str.replace(a, b)
return str_col
def transforma_numero(str_numero):
replacements = {"ún": "1",
"un": "1",
"dós": "2",
"dos": "2",
"tres": "3",
"cuatro": "4",
"cinco": "5",
"seis": "6",
"seís": "6",
"séis": "6",
"siete": "7",
"ocho": "8",
"nueve": "9",
"diez": "10",
"once": "11",
"doce": "12",
"trece": "13",
"dieci": "1",
"veinti": "2",
"veinte": "20"
}
for a, b in replacements.items():
str_numero = str_numero.replace(a, b)
return str_numero
def separa_regiones(str_region):
reemplazar_region = {"DECIMA REGION": "REGION",
"UNDECIMA REGION": "REGION",
"DUODECIMA REGION": "REGION",
"DECIMOCUARTA REGION": "REGION",
"DECIMOQUINTA REGION": "REGION",
"PRIMERA REGION": "REGION",
"SEGUNDA REGION": "REGION",
"TERCERA REGION": "REGION",
"CUARTA REGION": "REGION",
"QUINTA REGION": "REGION",
"SEXTA REGION": "REGION",
"SEPTIMA REGION": "REGION",
"OCTAVA REGION": "REGION",
"NOVENA REGION": "REGION",
"BIOBIO": "REGION DEL BIO BIO",
"AYSEN": "REGION DE AISEN",
"MAGALLANES Y DE LA ANTARTICA CHILENA": "REGION DE MAGALLANES Y ANTARTICA CHILENA"
}
for old, new in reemplazar_region.items():
str_region = str_region.replace(old, new)
return str_region
def transforma_asiento(str_asiento):
if str_asiento.find("JUZGADO DE GARANTIA") != -1 or str_asiento.find("TRIBUNAL DE JUICIO ORAL EN LO PENAL") != -1:
str_asiento = "SANTIAGO"
return str_asiento
def cambio_nombre_juzgados(str_tribunal):
reemplazar_texto = {"1º JUZGADO DE LETRAS": "JUZGADO DE LETRAS",
"6º TRIBUNAL DE JUICIO ORAL EN LO PENAL DE SAN MIGUEL": "SEXTO TRIBUNAL DE JUICIO ORAL EN LO PENAL SANTIAGO",
"10º JUZGADO DE GARANTIA": "DECIMO JUZGADO DE GARANTIA",
"11º JUZGADO DE GARANTIA": "UNDECIMO JUZGADO DE GARANTIA",
"12º JUZGADO DE GARANTIA": "DUODECIMO JUZGADO DE GARANTIA",
"13º JUZGADO DE GARANTIA": "DECIMOTERCER JUZGADO DE GARANTIA",
"14º JUZGADO DE GARANTIA": "DECIMOCUARTO JUZGADO DE GARANTIA",
"15º JUZGADO DE GARANTIA": "DECIMOQUINTO JUZGADO DE GARANTIA",
"TRIBUNAL ORAL EN LO PENAL DE": "TRIBUNAL DE JUICIO ORAL EN LO PENAL",
"1º": "PRIMER",
"2º": "SEGUNDO",
"3º": "TERCER",
"4º": "CUARTO",
"5º": "QUINTO",
"6º": "SEXTO",
"7º": "SEPTIMO",
"8º": "OCTAVO",
"9º": "NOVENO",
"TRIBUNAL DE JUICIO ORAL EN LO PENAL DE DE ": "TRIBUNAL DE JUICIO ORAL EN LO PENAL ",
"TRIBUNAL DE JUICIO ORAL EN LO PENAL DE": "TRIBUNAL DE JUICIO ORAL EN LO PENAL",
"JUZGADO DE GARANTIA DE DE ": "JUZGADO DE GARANTIA ",
"JUZGADO DE GARANTIA DE": "JUZGADO DE GARANTIA",
"JUZGADO DE LETRAS Y GARANTIA DE": "JUZGADO DE LETRAS Y GARANTIA",
"JUZGADO DE LETRAS DE": "JUZGADO DE LETRAS Y GARANTIA",
"LA CALERA": "CALERA",
"PUERTO NATALES": "NATALES",
"PUERTO AYSEN": "AISEN",
"PUERTO CISNES": "CISNES",
"SAN VICENTE DE TAGUA-TAGUA": "SAN VICENTE",
"ACHAO": "QUINCHAO",
"COYHAIQUE": "COIHAIQUE"
}
for old, new in reemplazar_texto.items():
str_tribunal = str_tribunal.replace(old, new)
return str_tribunal
def cambio_termino_causa(str_termino):
if pd.notnull(str_termino):
str_termino = str_termino.replace(".","")
return str_termino
def load_concatenate_by_filename(needle: str, src_path = "data/raw/pjud"):
archivos = os.listdir(src_path)
tqdm.pandas()
dataframes = []
for archivo in archivos:
if archivo.find(needle) != -1:
df = pd.read_csv(f"{src_path}/{archivo}", sep=";", encoding='cp850', dtype='unicode', low_memory=True)
dataframes.append(df)
return pd.concat(dataframes)
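# --- Editor's hedged usage sketch: the loader above concatenates every CSV under src_path
# whose filename contains the needle, exactly as the carga_limpieza_* functions below use it.
def _load_concatenate_demo():
    df = load_concatenate_by_filename('Ingresos por Materia Penal')
    return df.shape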
def carga_limpieza_ingresos_materia():
df_ingresos_materia = load_concatenate_by_filename('Ingresos por Materia Penal')
df_ingresos_materia['TOTAL INGRESOS POR MATERIAS'] = df_ingresos_materia['TOTAL INGRESOS POR MATERIAS'].fillna(
df_ingresos_materia['TOTAL INGRESOS POR MATERIAS(*)'])
df_ingresos_materia.drop(['N°', 'TOTAL INGRESOS POR MATERIAS(*)'], axis='columns', inplace=True)
df_ingresos_materia.drop([
'(*)Se agregó columna total de ingresos, dado que en algunas causas, la materia se repite (error de tramitación)'],
axis='columns', inplace=True)
# CONVERT FROM FLOAT TO INTEGER
df_ingresos_materia['COD. CORTE'] = df_ingresos_materia['COD. CORTE'].fillna(0).astype(np.int16)
df_ingresos_materia['COD. TRIBUNAL'] = df_ingresos_materia['COD. TRIBUNAL'].fillna(0).astype(np.int16)
df_ingresos_materia['COD. MATERIA'] = df_ingresos_materia['COD. MATERIA'].fillna(0).astype(np.int16)
df_ingresos_materia['AÑO INGRESO'] = df_ingresos_materia['AÑO INGRESO'].fillna(0).astype(np.int16)
df_ingresos_materia['TOTAL INGRESOS POR MATERIAS'] = df_ingresos_materia['TOTAL INGRESOS POR MATERIAS'].fillna(0).astype(np.int8)
# Convert date columns
click.echo('Transformando fechas')
df_ingresos_materia['FECHA INGRESO'] = df_ingresos_materia['FECHA INGRESO'].progress_apply(convierte_fecha)
# Strip whitespace from object-type columns
click.echo('Eliminando espacios en objetos')
df_ingresos_materia = df_ingresos_materia.progress_apply(elimina_espacios, axis=0)
# Remove accents
click.echo('Eliminando tildes')
cols = df_ingresos_materia.select_dtypes(include=["object"]).columns
df_ingresos_materia[cols] = df_ingresos_materia[cols].progress_apply(elimina_tilde)
# Categorization
df_ingresos_materia['CORTE'] = df_ingresos_materia['CORTE'].astype('category')
tipo_causa = df_ingresos_materia[df_ingresos_materia['TIPO CAUSA'] != 'Ordinaria']
df_ingresos_materia.drop(tipo_causa.index, axis=0, inplace=True)
data.save_feather(df_ingresos_materia, 'clean_IngresosMateria')
click.echo("Generado archivo Feather 'clean_IngresosMateria.feather'. Proceso Terminado")
def carga_limpieza_terminos_materia():
df_termino_materia = load_concatenate_by_filename('Términos por Materia Penal')
df_metge = df_termino_materia[df_termino_materia['SISTEMA']=='METGE']
df_termino_materia.drop(df_metge.index, axis=0, inplace=True)
# Standardize variable names
df_termino_materia.rename(columns = {'CÓD. CORTE':'COD. CORTE',
'CÓD. TRIBUNAL':'COD. TRIBUNAL',
'CÓD. MATERIA':'COD. MATERIA',
'MOTIVO DE TÉRMINO':'MOTIVO TERMINO',
'DURACIÓN CAUSA':'DURACION CAUSA',
'FECHA TÉRMINO':'FECHA TERMINO',
'MES TÉRMINO':'MES TERMINO',
'AÑO TÉRMINO':'AÑO TERMINO',
'TOTAL TÉRMINOS':'TOTAL TERMINOS'
},inplace = True)
df_termino_materia.drop(['N°','SISTEMA'], axis = 'columns', inplace = True)
# CONVERT FROM FLOAT TO INTEGER
df_termino_materia['COD. CORTE'] = df_termino_materia['COD. CORTE'].fillna(0).astype(np.int16)
df_termino_materia['COD. TRIBUNAL'] = df_termino_materia['COD. TRIBUNAL'].fillna(0).astype(np.int16)
df_termino_materia['COD. MATERIA'] = df_termino_materia['COD. MATERIA'].fillna(0).astype(np.int16)
df_termino_materia['DURACION CAUSA'] = df_termino_materia['DURACION CAUSA'].fillna(0).astype(np.int16)
df_termino_materia['AÑO TERMINO'] = df_termino_materia['AÑO TERMINO'].fillna(0).astype(np.int16)
df_termino_materia['TOTAL TERMINOS'] = df_termino_materia['TOTAL TERMINOS'].fillna(0).astype(np.int8)
# Convert date format
click.echo('Convirtiendo fechas')
df_termino_materia['FECHA INGRESO'] = df_termino_materia['FECHA INGRESO'].progress_apply(convierte_fecha)
df_termino_materia['FECHA TERMINO'] = df_termino_materia['FECHA TERMINO'].progress_apply(convierte_fecha)
# Strip whitespace from object-type columns
click.echo('Eliminando espacios')
df_termino_materia = df_termino_materia.progress_apply(elimina_espacios, axis=0)
# Remove accents from object columns
click.echo('Eliminando tilde')
cols = df_termino_materia.select_dtypes(include = ["object"]).columns
df_termino_materia[cols] = df_termino_materia[cols].progress_apply(elimina_tilde)
# Clean up the RIT field
click.echo('Limpieza de RIT')
df_termino_materia['RIT'] = df_termino_materia['RIT'].progress_apply(limpia_rit)
# Categorize variables
df_termino_materia['CORTE'] = df_termino_materia['CORTE'].astype('category')
df_termino_materia['MOTIVO TERMINO'] = df_termino_materia['MOTIVO TERMINO'].astype('category')
# Keep only 'Ordinaria' (ordinary) cases
tipo_causa = df_termino_materia[df_termino_materia['TIPO CAUSA']!='Ordinaria']
df_termino_materia.drop(tipo_causa.index, axis=0, inplace=True)
# Reset the index before writing the feather file
data.save_feather(df_termino_materia, 'clean_TerminosMateria')
click.echo("Generado archivo Feather 'clean_TerminosMateria.feather'. Proceso Terminado")
def carga_limpieza_ingresos_rol():
df_ingresos_rol = load_concatenate_by_filename('Ingresos por Rol Penal')
# Convert float64 variables to int16
df_ingresos_rol['COD. CORTE'] = df_ingresos_rol['COD. CORTE'].fillna(0).astype(np.int16)
df_ingresos_rol['COD. TRIBUNAL'] = df_ingresos_rol['COD. TRIBUNAL'].fillna(0).astype(np.int16)
df_ingresos_rol['AÑO INGRESO'] = df_ingresos_rol['AÑO INGRESO'].fillna(0).astype(np.int16)
df_ingresos_rol.drop(['N°'], axis = 'columns', inplace = True)
click.echo('Transformando Fechas')
df_ingresos_rol['FECHA INGRESO'] = df_ingresos_rol['FECHA INGRESO'].progress_apply(convierte_fecha)
click.echo('Eliminando espacios en columnas objetos')
df_ingresos_rol = df_ingresos_rol.progress_apply(elimina_espacios, axis=0)
click.echo('Eliminando tildes')
cols = df_ingresos_rol.select_dtypes(include = ["object"]).columns
df_ingresos_rol[cols] = df_ingresos_rol[cols].progress_apply(elimina_tilde)
# Convert to categorical variables
df_ingresos_rol['CORTE'] = df_ingresos_rol['CORTE'].astype('category')
# Drop cases that are not of type 'Ordinaria'
tipo_causa = df_ingresos_rol[df_ingresos_rol['TIPO CAUSA']!='Ordinaria']
df_ingresos_rol.drop(tipo_causa.index, axis=0, inplace=True)
data.save_feather(df_ingresos_rol,'clean_IngresosRol')
click.echo("Generado archivo Feather 'clena_IngresosRol.feather'. Proceso Terminado")
def carga_limpieza_terminos_rol():
df_termino_rol = load_concatenate_by_filename('Términos por Rol Penal')
# Drop cases that are not from the SIAGJ system
df_no_siagj = df_termino_rol[df_termino_rol['SISTEMA']!='SIAGJ']
df_termino_rol.drop(df_no_siagj.index, axis=0, inplace=True)
# Drop empty rows and rows with NaN values
df_termino_rol = df_termino_rol.dropna()
df_termino_rol.drop(['N°','SISTEMA'], axis = 'columns', inplace = True)
# Rename some columns so they match the other dataframes
df_termino_rol.rename(columns = {'CÓD. CORTE':'COD. CORTE',
'CÓD. TRIBUNAL':'COD. TRIBUNAL',
'DURACIÓN CAUSA ':'DURACION CAUSA',
'MOTIVO DE TÉRMINO':'MOTIVO TERMINO',
'FECHA TÉRMINO':'FECHA TERMINO',
'MES TÉRMINO':'MES TERMINO',
'AÑO TÉRMINO':'AÑO TERMINO',
'TOTAL TÉRMINOS':'TOTAL TERMINOS'
}, inplace = True)
# Convert float64 variables to int16
df_termino_rol['COD. CORTE'] = df_termino_rol['COD. CORTE'].fillna(0).astype(np.int16)
df_termino_rol['COD. TRIBUNAL'] = df_termino_rol['COD. TRIBUNAL'].fillna(0).astype(np.int16)
df_termino_rol['DURACION CAUSA'] = df_termino_rol['DURACION CAUSA'].fillna(0).astype(np.int16)
df_termino_rol['AÑO TERMINO'] = df_termino_rol['AÑO TERMINO'].fillna(0).astype(np.int16)
df_termino_rol['TOTAL TERMINOS'] = df_termino_rol['TOTAL TERMINOS'].fillna(0).astype(np.int8)
click.echo('Elimino tildes de las columnas object')
cols = df_termino_rol.select_dtypes(include = ["object"]).columns
df_termino_rol[cols] = df_termino_rol[cols].progress_apply(elimina_tilde)
click.echo('Transformando fechas')
df_termino_rol['FECHA INGRESO'] = df_termino_rol['FECHA INGRESO'].progress_apply(convierte_fecha)
df_termino_rol['FECHA TERMINO'] = df_termino_rol['FECHA TERMINO'].progress_apply(convierte_fecha)
click.echo('Elimino espacios en las columnas tipo objeto')
df_termino_rol = df_termino_rol.progress_apply(elimina_espacios, axis=0)
click.echo('Limpieza de RIT')
df_termino_rol['RIT'] = df_termino_rol['RIT'].progress_apply(limpia_rit)
# Convert to categorical variables
df_termino_rol['CORTE'] = df_termino_rol['CORTE'].astype('category')
df_termino_rol['MOTIVO TERMINO'] = df_termino_rol['MOTIVO TERMINO'].astype('category')
# Keep only 'Ordinaria' (ordinary) cases
tipo_causa = df_termino_rol[df_termino_rol['TIPO CAUSA']!='Ordinaria']
df_termino_rol.drop(tipo_causa.index, axis=0, inplace=True)
data.save_feather(df_termino_rol,'clean_TerminosRol')
click.echo("Generado archivo Feather clean_TerminosRol.feather'. Proceso Terminado")
def carga_limpieza_inventario():
df_inventario = load_concatenate_by_filename('Inventario Causas en Tramitación Penal')
# Drop METGE records
df_metge = df_inventario[df_inventario['SISTEMA']=='METGE']
df_inventario.drop(df_metge.index, axis=0, inplace=True)
# STANDARDIZE VARIABLE NAMES
df_inventario.rename(columns = {'CÓDIGO CORTE':'COD. CORTE',
'CÓDIGO TRIBUNAL':'COD. TRIBUNAL',
'CÓDIGO MATERIA':'COD. MATERIA',
' MATERIA':'MATERIA'
}, inplace = True)
df_inventario.drop(['SISTEMA'], axis = 'columns', inplace = True)
# CONVERT FROM FLOAT TO INTEGER
df_inventario['COD. CORTE'] = df_inventario['COD. CORTE'].fillna(0).astype(np.int16)
df_inventario['COD. TRIBUNAL'] = df_inventario['COD. TRIBUNAL'].fillna(0).astype(np.int16)
df_inventario['COD. MATERIA'] = df_inventario['COD. MATERIA'].fillna(0).astype(np.int16)
df_inventario['TOTAL INVENTARIO'] = df_inventario['TOTAL INVENTARIO'].fillna(0).astype(np.int8)
click.echo('Transformamos fechas')
df_inventario['FECHA INGRESO'] = df_inventario['FECHA INGRESO'].progress_apply(convierte_fecha)
df_inventario['FECHA ULT. DILIGENCIA'] = df_inventario['FECHA ULT. DILIGENCIA'].progress_apply(convierte_fecha)
click.echo('Elimino espacios en las columnas tipo objetos')
df_inventario = df_inventario.progress_apply(elimina_espacios, axis=0)
click.echo('Elimino tildes de las columnas object')
cols = df_inventario.select_dtypes(include = ["object"]).columns
df_inventario[cols] = df_inventario[cols].progress_apply(elimina_tilde)
# CATEGORIZE VARIABLES
df_inventario['CORTE'] = df_inventario['CORTE'].astype('category')
df_inventario['COMPETENCIA'] = df_inventario['COMPETENCIA'].astype('category')
df_inventario['TIPO ULT. DILIGENCIA'] = df_inventario['TIPO ULT. DILIGENCIA'].astype('category')
# Keep only 'Ordinaria' (ordinary) cases
tipo_causa = df_inventario[df_inventario['TIPO CAUSA']!='Ordinaria']
df_inventario.drop(tipo_causa.index, axis=0, inplace=True)
data.save_feather(df_inventario,'clean_Inventario')
click.echo("Generado archivo Feather 'clean_Inventario.feather'. Proceso Terminado")
def carga_limpieza_audiencias():
df_audiencias = load_concatenate_by_filename('Audiencias Realizadas Penal')
df_audiencias.rename(columns = {'CÓD. CORTE':'COD. CORTE',
'CÓD. TRIBUNAL':'COD. TRIBUNAL',
'DURACIÓN AUDIENCIA':'DURACION AUDIENCIA',
'AGENDAMIENTO (DÍAS CORRIDOS)':'DIAS AGENDAMIENTO',
'DURACIÓN AUDIENCIA (MINUTOS)':'DURACION AUDIENCIA (MIN)',
'FECHA PROGRAMACIÓN AUDIENCIA':'FECHA PROGRAMACION AUDIENCIA'
},
inplace = True)
# CONVERT FROM FLOAT TO INTEGER
df_audiencias['COD. CORTE'] = df_audiencias['COD. CORTE'].fillna(0).astype(np.int16)
df_audiencias['COD. TRIBUNAL'] = df_audiencias['COD. TRIBUNAL'].fillna(0).astype(np.int16)
df_audiencias['TOTAL AUDIENCIAS'] = df_audiencias['TOTAL AUDIENCIAS'].fillna(0).astype(np.int8)
# Some columns are duplicated: one of the pair holds NaN where the other holds the data.
# This happens for TIPO AUDIENCIA = TIPO DE AUDIENCIA, AGENDAMIENTO (DIAS CORRIDOS) =
# PLAZO AGENDAMIENTO (DIAS CORRIDOS), and DURACION AUDIENCIA (MINUTOS) = DURACION AUDIENCIA.
df_audiencias['TIPO DE AUDIENCIA'] = df_audiencias['TIPO DE AUDIENCIA'].fillna(df_audiencias['TIPO AUDIENCIA'])
df_audiencias['DIAS AGENDAMIENTO'] = df_audiencias['DIAS AGENDAMIENTO'].fillna(df_audiencias['PLAZO AGENDAMIENTO (DIAS CORRIDOS)']).astype(np.int16)
df_audiencias['DURACION AUDIENCIA (MIN)'] = df_audiencias['DURACION AUDIENCIA (MIN)'].fillna(df_audiencias['DURACION AUDIENCIA'])
# Drop the replaced columns
df_audiencias.drop(['TIPO AUDIENCIA','PLAZO AGENDAMIENTO (DIAS CORRIDOS)','DURACION AUDIENCIA'], axis = 'columns',
inplace = True)
click.echo('Transformamos fechas')
df_audiencias['FECHA PROGRAMACION AUDIENCIA'] = df_audiencias['FECHA PROGRAMACION AUDIENCIA'].progress_apply(convierte_fecha)
df_audiencias['FECHA AUDIENCIA'] = df_audiencias['FECHA AUDIENCIA'].progress_apply(convierte_fecha)
click.echo('Elimino espacios en las columnas tipo objetos')
df_audiencias = df_audiencias.progress_apply(elimina_espacios, axis=0)
click.echo('Elimino tildes')
cols = df_audiencias.select_dtypes(include = ["object"]).columns
df_audiencias[cols] = df_audiencias[cols].progress_apply(elimina_tilde)
# Categorize
df_audiencias['CORTE'] = df_audiencias['CORTE'].astype('category')
# Keep only 'Ordinaria' (ordinary) cases
tipo_causa = df_audiencias[df_audiencias['TIPO CAUSA']!='Ordinaria']
df_audiencias.drop(tipo_causa.index, axis=0, inplace=True)
data.save_feather(df_audiencias,'clean_Audiencias')
click.echo("Generado archivo Feather 'clean_Audiencias.feather'. Proceso Terminado")
def carga_limpieza_duraciones():
df_duraciones = load_concatenate_by_filename('Duraciones por Rol Penal')
# Drop cases that are not from the SIAGJ system
df_no_siagj = df_duraciones[df_duraciones['SISTEMA']!='SIAGJ']
df_duraciones.drop(df_no_siagj.index, axis=0, inplace=True)
df_duraciones.rename(columns = {'CÓD. CORTE':'COD. CORTE',
'CÓD. TRIBUNAL':'COD. TRIBUNAL',
'DURACIÓN CAUSA ':'DURACIÓN CAUSA',
'FECHA TÉRMINO':'FECHA TERMINO',
'MOTIVO DE TÉRMINO':'MOTIVO TERMINO',
'MES TÉRMINO':'MES TERMINO',
'AÑO TÉRMINO':'AÑO TERMINO',
'TOTAL TÉRMINOS':'TOTAL TERMINOS'
}, inplace = True)
df_duraciones.drop(['N°','SISTEMA'], axis = 'columns', inplace = True)
df_duraciones = df_duraciones.dropna()
# CONVERT FROM FLOAT TO INTEGER
df_duraciones['COD. CORTE'] = df_duraciones['COD. CORTE'].fillna(0).astype(np.int16)
df_duraciones['COD. TRIBUNAL'] = df_duraciones['COD. TRIBUNAL'].fillna(0).astype(np.int16)
df_duraciones['AÑO TERMINO'] = df_duraciones['AÑO TERMINO'].fillna(0).astype(np.int16)
df_duraciones['TOTAL TERMINOS'] = df_duraciones['TOTAL TERMINOS'].fillna(0).astype(np.int8)
click.echo('Transformamos fechas')
df_duraciones['FECHA INGRESO'] = df_duraciones['FECHA INGRESO'].progress_apply(convierte_fecha)
df_duraciones['FECHA TERMINO'] = df_duraciones['FECHA TERMINO'].progress_apply(convierte_fecha)
click.echo('Elimino espacios en las columnas tipo objetos')
df_duraciones = df_duraciones.progress_apply(elimina_espacios, axis=0)
click.echo('Elimino tildes')
cols = df_duraciones.select_dtypes(include = ["object"]).columns
df_duraciones[cols] = df_duraciones[cols].progress_apply(elimina_tilde)
click.echo('Transformar el formato del RIT--AÑO a RIT-AÑO')
df_duraciones['RIT'] = df_duraciones['RIT'].progress_apply(limpia_rit)
# Categorization
df_duraciones['CORTE'] = df_duraciones['CORTE'].astype('category')
df_duraciones['MOTIVO TERMINO'] = df_duraciones['MOTIVO TERMINO'].astype('category')
# Keep only 'Ordinaria' (ordinary) cases
tipo_causa = df_duraciones[df_duraciones['TIPO CAUSA']!='Ordinaria']
df_duraciones.drop(tipo_causa.index, axis=0, inplace=True)
data.save_feather(df_duraciones, 'clean_Duraciones')
click.echo("Generado archivo Feather 'clean_Duraciones.feather'. Proceso Terminado")
def carga_limpieza_delitos():
tqdm.pandas()
path_raw = "data/raw/delitos"
codigos_delitos = pd.read_excel(f"{path_raw}/codigos_penal_2020.xlsx", sheet_name = "codigos vigentes", engine='openpyxl')
# drop duplicate rows
codigos_delitos = codigos_delitos.drop_duplicates()
# drop the first three rows, which are headers
codigos_delitos = codigos_delitos.drop([0,1,2], axis = 0)
# drop columns that only contain NaN
variables = range(2,248)
columnas = []
for variable in variables:
columnas.append("Unnamed: " + str(variable))
codigos_delitos = codigos_delitos.drop(columns = columnas, axis = 1)
# rename columns
codigos_delitos = codigos_delitos.rename(columns = {'VERSION AL 01/01/2018':'COD. MATERIA', 'Unnamed: 1':'MATERIA'})
delitos_vigentes = []
for item in codigos_delitos.index:
if str(codigos_delitos['COD. MATERIA'][item]).isupper():
tipologia_delito=str(codigos_delitos['COD. MATERIA'][item])
else:
delitos_vigentes.append([codigos_delitos['COD. MATERIA'][item],
str(codigos_delitos['MATERIA'][item]).upper().rstrip(),
tipologia_delito,'VIGENTE'])
df_delitos_vigentes = pd.DataFrame(delitos_vigentes,columns = ['COD. MATERIA','MATERIA','TIPOLOGIA MATERIA','VIGENCIA MATERIA'])
click.echo('Elimino tildes de las columnas object')
cols = df_delitos_vigentes.select_dtypes(include = ["object"]).columns
df_delitos_vigentes[cols] = df_delitos_vigentes[cols].progress_apply(elimina_tilde)
df_delitos_vigentes[cols] = df_delitos_vigentes[cols].progress_apply(limpieza_caracteres)
df_delitos_vigentes['COD. MATERIA'] = df_delitos_vigentes['COD. MATERIA'].fillna(0).astype('int16')
# LOAD AND CLEAN DATA FOR OFFENSE CODES NO LONGER IN FORCE
codigos_delitos_novigentes = pd.read_excel(f"{path_raw}/codigos_penal_2020.xlsx", sheet_name = "Codigos no vigentes", engine='openpyxl')
# rename columns
codigos_delitos_novigentes = codigos_delitos_novigentes.rename(columns = {'MATERIAS PENALES NO VIGENTES':'TIPOLOGIA MATERIA',
'Unnamed: 1':'COD. MATERIA','Unnamed: 2':'MATERIA'})
# drop the first row, which is a header
codigos_delitos_novigentes = codigos_delitos_novigentes.drop([0], axis = 0)
# replace NaN with 'ST'
codigos_delitos_novigentes = codigos_delitos_novigentes.fillna('ST')
delitos_no_vigentes = []
for item in codigos_delitos_novigentes.index:
tipologia_delito = codigos_delitos_novigentes['TIPOLOGIA MATERIA'][item]
if tipologia_delito != 'ST':
tipologia = codigos_delitos_novigentes['TIPOLOGIA MATERIA'][item]
else:
tipologia_delito = tipologia
delitos_no_vigentes.append([codigos_delitos_novigentes['COD. MATERIA'][item],
codigos_delitos_novigentes['MATERIA'][item].rstrip(),
tipologia_delito,'NO VIGENTE'])
df_delitos_no_vigentes = pd.DataFrame(delitos_no_vigentes, columns = ['COD. MATERIA','MATERIA','TIPOLOGIA MATERIA','VIGENCIA MATERIA'])
click.echo('Elimino tildes de las columnas object')
cols = df_delitos_no_vigentes.select_dtypes(include = ["object"]).columns
df_delitos_no_vigentes[cols] = df_delitos_no_vigentes[cols].progress_apply(elimina_tilde)
df_delitos_no_vigentes['COD. MATERIA'] = df_delitos_no_vigentes['COD. MATERIA'].astype('int16')
# CONCATENATE BOTH DATASETS (OFFENSE CODES IN FORCE AND NO LONGER IN FORCE)
df_delitos = | pd.concat([df_delitos_vigentes,df_delitos_no_vigentes]) | pandas.concat |
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_series_equal
from src.policies.single_policy_functions import _interpolate_activity_level
from src.policies.single_policy_functions import reduce_recurrent_model
from src.policies.single_policy_functions import reduce_work_model
from src.policies.single_policy_functions import reopen_other_model
from src.policies.single_policy_functions import shut_down_model
@pytest.fixture
def fake_states():
states = pd.DataFrame(index=np.arange(10))
states["state"] = ["Bayern", "Berlin"] * 5
# date at which schools are open in Berlin but closed in Bavaria
# date with uneven week number, i.e. where group a attends school
states["date"] = pd.Timestamp("2020-04-23")
states["school_group_a"] = [0, 1] * 5
states["occupation"] = pd.Categorical(
["school"] * 8 + ["preschool_teacher", "school_teacher"]
)
states["educ_worker"] = [False] * 8 + [True] * 2
states["age"] = np.arange(10)
return states
def test_shut_down_model_non_recurrent():
contacts = pd.Series(np.arange(3))
states = pd.DataFrame(index=["a", "b", "c"])
calculated = shut_down_model(states, contacts, 123, is_recurrent=False)
expected = pd.Series(0, index=["a", "b", "c"])
assert_series_equal(calculated, expected)
def test_shut_down_model_recurrent():
contacts = pd.Series(np.arange(3))
states = pd.DataFrame(index=["a", "b", "c"])
calculated = shut_down_model(states, contacts, 123, is_recurrent=True)
expected = pd.Series(False, index=["a", "b", "c"])
assert_series_equal(calculated, expected)
def test_reduce_recurrent_model_set_zero():
states = pd.DataFrame(index=[0, 1, 2, 3])
contacts = pd.Series([True, True, False, False])
calculated = reduce_recurrent_model(states, contacts, 333, multiplier=0.0)
assert (calculated == 0).all()
def test_reduce_recurrent_model_no_change():
states = | pd.DataFrame(index=[0, 1, 2, 3]) | pandas.DataFrame |
from bs4 import BeautifulSoup
import re
import urllib3
import pandas as pd
http = urllib3.PoolManager()
url = 'https://www.sports-reference.com/cbb/'
response = http.request('GET', url)
soup = BeautifulSoup(response.data, 'html.parser')
links = soup.find('div', id = "schools").find('select', id = 'selector_1').find_all("option")
all_teams = []
for link in links:
name = re.findall(r">(.*?)<", str(link))[0]
ext = re.findall(r'value="(.*?)"', str(link))[0]
if ext != '':
out = {"team": name,
"link": ext}
all_teams.append(out)
# pd.DataFrame(all_teams).to_csv('sports_reference_teams.csv', index = False)
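# --- Editor's hedged illustration: how the two regexes above split a single <option> tag
# into a display name and a link. The tag below is made up for the example.
def _option_parse_demo():
    tag = '<option value="/cbb/schools/duke/">Duke</option>'
    name = re.findall(r">(.*?)<", tag)[0]        # 'Duke'
    link = re.findall(r'value="(.*?)"', tag)[0]  # '/cbb/schools/duke/'
    return name, link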
ranking_df_list = []
player_df_list = []
player_detail_list = []
for team in all_teams:
url = f'https://www.sports-reference.com{team["link"]}'
print(url)
response = http.request('GET', url)
soup = BeautifulSoup(response.data, 'html.parser')
ptags = soup.find('div', id = 'info').find_all('p')
for tag in ptags:
if str(tag).find("Location") != -1:
place = re.findall(r'\n(.*)', str(tag))[1].strip()
team['location'] = place
try:
data = pd.read_html(f'{url}/polls.html')[0]
data = data[~data.Rk.str.contains("Season|Rk")].drop("Rk", axis = 1)
data['team'] = team['team']
data['team_location'] = team['location']
ranking_df_list.append(data)
except:
print(f"Could Not Find Ranking Data For {team['team']}")
continue
for year in range(2003, 2022):
try:
player_data_a = pd.read_html(f'{url}/{year}.html')
player_data = player_data_a[0]
player_data['year'] = year
player_data['team'] = team['team']
player_df_list.append(player_data)
for table in player_data_a[1:]:
if table.shape[0] > 10:
table['season'] = year
table['team'] = team['team']
player_detail_list.append(table)
except:
print(f"Could Not Find Player Level Data For {team['team']} in Year {year}")
pd.concat(player_df_list, ignore_index=True).to_csv("player_stats_0321.csv", index=False)
pd.concat(player_detail_list, ignore_index=True).to_csv("player_stats_detail_0321.csv", index=False)
| pd.concat(ranking_df_list, ignore_index=True) | pandas.concat |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
import unittest
import pytest
from numpy.testing import assert_array_equal
import numpy as np
from pandas.util.testing import assert_frame_equal
import pandas as pd
import pyarrow as pa
from pyarrow.compat import guid
from pyarrow.feather import (read_feather, write_feather,
FeatherReader)
from pyarrow.lib import FeatherWriter
def random_path():
return 'feather_{}'.format(guid())
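# --- Editor's hedged usage sketch: the minimal write/read round trip that the test class
# below exercises in many variations.
def _feather_roundtrip_demo():
    path = random_path()
    df = pd.DataFrame({'a': [1.0, 2.0, 3.0]})
    write_feather(df, path)
    result = read_feather(path)
    os.remove(path)
    return result.equals(df)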
class TestFeatherReader(unittest.TestCase):
def setUp(self):
self.test_files = []
def tearDown(self):
for path in self.test_files:
try:
os.remove(path)
except os.error:
pass
def test_file_not_exist(self):
with self.assertRaises(pa.ArrowIOError):
FeatherReader('test_invalid_file')
def _get_null_counts(self, path, columns=None):
reader = FeatherReader(path)
counts = []
for i in range(reader.num_columns):
col = reader.get_column(i)
if columns is None or col.name in columns:
counts.append(col.null_count)
return counts
def _check_pandas_roundtrip(self, df, expected=None, path=None,
columns=None, null_counts=None,
nthreads=1):
if path is None:
path = random_path()
self.test_files.append(path)
write_feather(df, path)
if not os.path.exists(path):
raise Exception('file not written')
result = read_feather(path, columns, nthreads=nthreads)
if expected is None:
expected = df
assert_frame_equal(result, expected)
if null_counts is None:
null_counts = np.zeros(len(expected.columns))
np.testing.assert_array_equal(self._get_null_counts(path, columns),
null_counts)
def _assert_error_on_write(self, df, exc, path=None):
# check that we are raising the exception
# on writing
if path is None:
path = random_path()
self.test_files.append(path)
def f():
write_feather(df, path)
self.assertRaises(exc, f)
def test_num_rows_attr(self):
df = pd.DataFrame({'foo': [1, 2, 3, 4, 5]})
path = random_path()
self.test_files.append(path)
write_feather(df, path)
reader = FeatherReader(path)
assert reader.num_rows == len(df)
df = pd.DataFrame({})
path = random_path()
self.test_files.append(path)
write_feather(df, path)
reader = FeatherReader(path)
assert reader.num_rows == 0
def test_float_no_nulls(self):
data = {}
numpy_dtypes = ['f4', 'f8']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randn(num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
self._check_pandas_roundtrip(df)
def test_float_nulls(self):
num_values = 100
path = random_path()
self.test_files.append(path)
writer = FeatherWriter()
writer.open(path)
null_mask = np.random.randint(0, 10, size=num_values) < 3
dtypes = ['f4', 'f8']
expected_cols = []
null_counts = []
for name in dtypes:
values = np.random.randn(num_values).astype(name)
writer.write_array(name, values, null_mask)
values[null_mask] = np.nan
expected_cols.append(values)
null_counts.append(null_mask.sum())
writer.close()
ex_frame = pd.DataFrame(dict(zip(dtypes, expected_cols)),
columns=dtypes)
result = read_feather(path)
assert_frame_equal(result, ex_frame)
assert_array_equal(self._get_null_counts(path), null_counts)
def test_integer_no_nulls(self):
data = {}
numpy_dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randint(0, 100, size=num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
self._check_pandas_roundtrip(df)
def test_platform_numpy_integers(self):
data = {}
numpy_dtypes = ['longlong']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randint(0, 100, size=num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
self._check_pandas_roundtrip(df)
def test_integer_with_nulls(self):
# pandas requires upcast to float dtype
path = random_path()
self.test_files.append(path)
int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
num_values = 100
writer = FeatherWriter()
writer.open(path)
null_mask = np.random.randint(0, 10, size=num_values) < 3
expected_cols = []
for name in int_dtypes:
values = np.random.randint(0, 100, size=num_values)
writer.write_array(name, values, null_mask)
expected = values.astype('f8')
expected[null_mask] = np.nan
expected_cols.append(expected)
ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),
columns=int_dtypes)
writer.close()
result = read_feather(path)
assert_frame_equal(result, ex_frame)
def test_boolean_no_nulls(self):
num_values = 100
np.random.seed(0)
df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})
self._check_pandas_roundtrip(df)
def test_boolean_nulls(self):
# pandas requires upcast to object dtype
path = random_path()
self.test_files.append(path)
num_values = 100
np.random.seed(0)
writer = FeatherWriter()
writer.open(path)
mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 10, size=num_values) < 5
writer.write_array('bools', values, mask)
expected = values.astype(object)
expected[mask] = None
writer.close()
ex_frame = pd.DataFrame({'bools': expected})
result = read_feather(path)
assert_frame_equal(result, ex_frame)
def test_buffer_bounds_error(self):
# ARROW-1676
path = random_path()
self.test_files.append(path)
for i in range(16, 256):
values = pa.array([None] + list(range(i)), type=pa.float64())
writer = FeatherWriter()
writer.open(path)
writer.write_array('arr', values)
writer.close()
result = read_feather(path)
expected = pd.DataFrame({'arr': values.to_pandas()})
| assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
import re
from more_itertools import flatten
import pandas as pd
import numpy as np
def originHandle():
with open("./renmin.txt", "r") as inp, open("./renmin2.txt", "w") as outp:
for line in inp.readlines():
line = line.split(" ")
i = 1
while i < len(line) - 1:
if line[i][0] == "[": # [中央/n', '人民/n', '广播/vn', '电台/n]nt',
outp.write(line[i].split("/")[0][1:])
i += 1
while i < len(line) - 1 and line[i].find("]") == -1: # closing "]" not found yet
if line[i] != "":
outp.write(line[i].split("/")[0]) # 人民
i += 1
outp.write(line[i].split("/")[0].strip() + "/" + line[i].split("/")[1][-2:] + " ")  # keep the grouped tag, e.g. "电台/n]nt" -> "电台/nt"
elif line[i].split("/")[1] == "nr": # nr: person name
word = line[i].split("/")[0]
i += 1
if i < len(line) - 1 and line[i].split("/")[1] == "nr":
outp.write(word+line[i].split('/')[0]+'/nr ')
i += 1
else:
outp.write(word + "/nr ")  # trailing space keeps tokens separated
continue
else:
outp.write(line[i] + " ")
i += 1
outp.write("\n")
def originHandle2():
with open('./renmin2.txt', 'r') as inp, open('./renmin3.txt', 'w') as outp:
for line in inp.readlines():
line = line.split(' ')
i = 0
while i < len(line)-1:
if line[i] == '':
i += 1
continue
word = line[i].split('/')[0]
tag = line[i].split('/')[1]
if tag == 'nr' or tag == 'ns' or tag == 'nt': # person / place / organization names, e.g. 中共中央/nt
outp.write(word[0]+"/B_"+tag+" ") # 中共中央/B_
for j in word[1:len(word)-1]:
if j != ' ':
outp.write(j+"/M_"+tag+" ")
outp.write(word[-1]+"/E_"+tag+" ")
else:
for wor in word:
outp.write(wor+'/O ')
i += 1
outp.write('\n')
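# --- Editor's hedged worked example of the character-level tagging scheme written out by
# originHandle2: entity tokens become B_/M_/E_ tags, everything else becomes per-character O.
def _bmeo_demo():
    word, tag = "中共中央", "nt"   # an organization token, as in the comment above
    out = [word[0] + "/B_" + tag]
    out += [ch + "/M_" + tag for ch in word[1:-1]]
    out.append(word[-1] + "/E_" + tag)
    return " ".join(out)  # '中/B_nt 共/M_nt 中/M_nt 央/E_nt'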
def sentence2split():
with open('./renmin3.txt','r') as inp, open('./renmin4.txt','w') as outp:
texts = inp.read()
sentences = re.split(r'[,。!?、‘’“”:]/[O]', texts)
for sentence in sentences:
if sentence != " ":
outp.write(sentence.strip()+'\n')
def data2pkl():
datas = []
labels = []
tags = set()
tags.add('')
input_data = open('renmin4.txt', 'r')
for line in input_data.readlines():
line = line.split()
linedata = []
linelabel = []
numNotO = 0
for word in line:
word = word.split('/')
linedata.append(word[0])
linelabel.append(word[1])
tags.add(word[1])
if word[1] != 'O':
numNotO += 1
if numNotO != 0:
datas.append(linedata)
labels.append(linelabel)
input_data.close()
# keep only lines that contain at least one entity
print(len(datas)) # [[], [], ..]
print(len(labels))
# from compiler.ast import flatten
all_words = flatten(datas) # flatten the nested token lists
sr_allwords = pd.Series(all_words)
sr_allwords = sr_allwords.value_counts()
"""
>>> import pandas as pd
>>> data=pd.Series(['python','java','python','php','php','java','python','java'])
>>> data
0 python
1 java
2 python
3 php
4 php
5 java
6 python
7 java
dtype: object
>>>
>>> data.value_counts()
python 3
java 3
php 2
dtype: int64
"""
set_words = sr_allwords.index
set_ids = range(1, len(set_words) + 1)
"""
>>> data.index
RangeIndex(start=0, stop=8, step=1)
>>> set_words = data.index
>>> set_idx = range(1, len(set_words) + 1)
>>> set_idx
range(1, 9)
"""
tags = [i for i in tags]
tag_ids = range(len(tags))
word2id = pd.Series(set_ids, index=set_words)
id2word = | pd.Series(set_words, index=set_ids) | pandas.Series |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/10/23 0023
# @Author : justin.郑 <EMAIL>
# @File : index_toutiao.py
# @Desc    : Toutiao index
import json
import pandas as pd
import requests
from gopup.index.cons import index_toutiao_headers
def toutiao_index(keyword="python", start_date="20201016", end_date="20201022", app_name="toutiao"):
"""
Toutiao index data
:param keyword: keyword
:param start_date: start date
:param end_date: end date
:param app_name: platform
:return:
    datetime date
    index index value
"""
# list_keyword = '["%s"]' % keyword
try:
url = "https://trendinsight.oceanengine.com/api/open/index/get_multi_keyword_hot_trend"
data = {
"keyword_list": [keyword],
"start_date": start_date,
"end_date": end_date,
"app_name": app_name
}
res = requests.post(url, json=data, headers=index_toutiao_headers)
hot_list = json.loads(res.text)['data']['hot_list'][0]['hot_list']
df = pd.DataFrame(hot_list)
return df
except:
return None
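# --- Editor's hedged usage sketch: a typical call to toutiao_index. Keyword and dates are
# placeholders; the function returns None when the request or parsing fails.
def _toutiao_index_demo():
    df = toutiao_index(keyword="python", start_date="20201016", end_date="20201022")
    if df is not None:
        print(df.head())  # datetime / index columns, per the docstring above
    return df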
def toutiao_relation(keyword="python", start_date="20201012", end_date="20201018", app_name="toutiao"):
"""
Toutiao related-keyword analysis
:param keyword: keyword
:param start_date: start date
:param end_date: end date
:return:
    relation_word related word
    relation_score relevance score
    score_rank relevance score rank
    search_hot search heat value
    search_ratio search ratio
"""
try:
url = "https://trendinsight.oceanengine.com/api/open/index/get_relation_word"
data = {"param": {"keyword": keyword,
"start_date": start_date,
"end_date": end_date,
"app_name": app_name}
}
res = requests.post(url, json=data, headers=index_toutiao_headers)
relation_word_list = json.loads(res.text)['data']['relation_word_list']
df = pd.DataFrame(relation_word_list)
return df
except:
return None
# def toutiao_sentiment(keyword="python", start_date="20201012", end_date="20201018"):
# """
# 头条情感分析
# :param keyword: 关键词
# :param start_date: 开始日期
# :param end_date: 截止日期
# :return:
# keyword 关键词
# score 情感值
# """
# url = "https://index.toutiao.com/api/v1/get_keyword_sentiment"
# data = {
# "keyword": keyword,
# "start_date": start_date,
# "end_date": end_date
# }
# res = requests.get(url, params=data, headers=index_toutiao_headers)
# score = json.loads(res.text)['data']['score']
# df = pd.DataFrame([{"score": score, "keyword": keyword}])
# return df
def toutiao_province(keyword="python", start_date="20201012", end_date="20201018", app_name="toutiao"):
"""
Toutiao regional (province) analysis
:param keyword: keyword
:param start_date: start date
:param end_date: end date
:return:
    name province
    value penetration rate
"""
try:
url = "https://trendinsight.oceanengine.com/api/open/index/get_portrait"
data = {"param": {"keyword": keyword,
"start_date": start_date,
"end_date": end_date,
"app_name": app_name}
}
res = requests.post(url, json=data, headers=index_toutiao_headers)
res_text = json.loads(res.text)['data']['data'][2]['label_list']
df = pd.DataFrame(res_text)
df['name'] = df['name_zh']
df = df.drop(['label_id', 'name_zh'], axis=1)
df = df.sort_values(by="value", ascending=False)
return df
except:
return None
def toutiao_city(keyword="python", start_date="20201012", end_date="20201018", app_name="toutiao"):
"""
Toutiao city analysis
:param keyword: keyword
:param start_date: start date
:param end_date: end date
:return:
    name city
    value penetration rate
"""
try:
url = "https://trendinsight.oceanengine.com/api/open/index/get_portrait"
data = {"param": {"keyword": keyword,
"start_date": start_date,
"end_date": end_date,
"app_name": app_name}
}
res = requests.post(url, json=data, headers=index_toutiao_headers)
res_text = json.loads(res.text)['data']['data'][3]['label_list']
df = pd.DataFrame(res_text)
df['name'] = df['name_zh']
df = df.drop(['label_id', 'name_zh'], axis=1)
df = df.sort_values(by="value", ascending=False)
return df
except:
return None
def toutiao_age(keyword="python", start_date="20201012", end_date="20201018", app_name="toutiao"):
"""
Toutiao age analysis
:param keyword: keyword
:param start_date: start date
:param end_date: end date
:return:
    name age range
    value penetration rate
"""
try:
url = "https://trendinsight.oceanengine.com/api/open/index/get_portrait"
data = {"param": {"keyword": keyword,
"start_date": start_date,
"end_date": end_date,
"app_name": app_name}
}
res = requests.post(url, json=data, headers=index_toutiao_headers)
res_text = json.loads(res.text)['data']['data'][0]['label_list']
df = pd.DataFrame(res_text)
df['name'] = df['name_zh']
df = df.drop(['label_id', 'name_zh'], axis=1)
return df
except:
return None
def toutiao_gender(keyword="python", start_date="20201012", end_date="20201018", app_name="toutiao"):
"""
Toutiao gender analysis
:param keyword: keyword
:param start_date: start date
:param end_date: end date
:return:
    name gender
    value penetration rate
"""
try:
url = "https://trendinsight.oceanengine.com/api/open/index/get_portrait"
data = {"param": {"keyword": keyword,
"start_date": start_date,
"end_date": end_date,
"app_name": app_name}
}
res = requests.post(url, json=data, headers=index_toutiao_headers)
res_text = json.loads(res.text)['data']['data'][1]['label_list']
df = pd.DataFrame(res_text)
df['name'] = df['name_zh']
df = df.drop(['label_id', 'name_zh'], axis=1)
df = df.sort_values(by="value", ascending=False)
return df
except:
return None
def toutiao_interest_category(keyword="python", start_date="20201012", end_date="20201018", app_name="toutiao"):
"""
Toutiao user reading-interest categories
:param keyword: keyword
:param start_date: start date
:param end_date: end date
:return:
    name category
    value penetration rate
"""
try:
url = "https://trendinsight.oceanengine.com/api/open/index/get_portrait"
data = {"param": {"keyword": keyword,
"start_date": start_date,
"end_date": end_date,
"app_name": app_name}
}
res = requests.post(url, json=data, headers=index_toutiao_headers)
res_text = json.loads(res.text)['data']['data'][4]['label_list']
df = | pd.DataFrame(res_text) | pandas.DataFrame |
import logging
import multiprocessing
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy as sp
import seaborn as sns
from sklearn import (
cross_validation, ensemble, grid_search, learning_curve, linear_model,
metrics, naive_bayes, pipeline, preprocessing)
from imblearn.metrics import specificity_score
###############
# Classifiers #
###############
CLASSIFIERS = {
'ExtraTrees': {
'name': 'Extra Randomized Trees',
'class': ensemble.ExtraTreesClassifier,
'best_params': { # MCC: 0.582
'max_depth': 9,
'criterion': 'entropy',
'min_samples_leaf': 7,
'bootstrap': True,
'max_features': 0.47,
'min_samples_split': 4,
'n_jobs': 2
},
'param_distributions': {
'max_depth': sp.stats.randint(5, 10),
'max_features': sp.stats.uniform(0.05, 0.5),
'min_samples_split': sp.stats.randint(1, 8),
'min_samples_leaf': sp.stats.randint(6, 15),
'bootstrap': [True, False],
'criterion': ['gini', 'entropy'],
'n_jobs': [2]
}
},
'RandomForest': {
'name': 'Random Forest',
'best_params': { # MCC: 0.638
'bootstrap': True,
'criterion': 'gini',
'max_depth': 9,
'max_features': 0.49,
'min_samples_leaf': 10,
'min_samples_split': 2,
'n_estimators': 13,
'n_jobs': 4,
},
'class': ensemble.RandomForestClassifier,
'param_distributions': {
'n_estimators': sp.stats.randint(5, 20),
'criterion': ['gini', 'entropy'],
'max_features': sp.stats.uniform(0.05, 0.5),
'min_samples_split': sp.stats.randint(1, 8),
'min_samples_leaf': sp.stats.randint(6, 15),
'bootstrap': [True, False],
'n_jobs': [2]
}
},
'AdaBoost': {
'name': 'Ada Boost',
'class': ensemble.AdaBoostClassifier,
'best_params': { # MCC: 0.641
'n_estimators': 258
},
'param_distributions': {
'n_estimators': sp.stats.randint(50, 300)
}
},
'LogisticRegression': {
'name': 'Logistic Regression',
'class': linear_model.LogisticRegression,
'best_params': { # MCC: 0.487
'C': 1.425494495402806,
'dual': False,
'fit_intercept': True,
'max_iter': 146,
'n_jobs': 2,
'penalty': 'l2',
'solver': 'lbfgs'
},
'param_distributions': {
'penalty': ['l2'], # (default)
'dual': [False], # (default), better when # samples > # features
'C': sp.stats.uniform(0.5, 1.5), # (default: 1.0)
            'fit_intercept': [True],  # (default), adds bias to the decision function
'solver': ['newton-cg', 'lbfgs', 'liblinear', 'sag'], # liblinear
'max_iter': sp.stats.randint(100, 250), # (default: 100)
'n_jobs': [2]
}
},
'SGD': {
'name': 'Stochastic Gradient Descent',
'class': linear_model.SGDClassifier,
'best_params': { # MCC: 0.488
'loss': 'log',
'n_iter': 11,
'penalty': 'l1'
},
'param_distributions': {
'loss': ['hinge', 'log', 'modified_huber', 'perceptron', 'huber'],
'penalty': ['l2', 'l1', 'elasticnet'],
'n_iter': sp.stats.randint(6, 15)
}
},
'NaiveBayes': {
'name': 'Gaussian Naive Bayes',
'class': naive_bayes.GaussianNB,
'best_params': {}, # No parameters
'param_distributions': {}
}
}
# Set classifier colors
for classifier_name, color in zip(sorted(CLASSIFIERS), sns.color_palette()):
CLASSIFIERS[classifier_name]['color'] = color
# Swap nicer colors
_red = CLASSIFIERS['LogisticRegression']['color']
_gold = CLASSIFIERS['RandomForest']['color']
CLASSIFIERS['LogisticRegression']['color'] = _gold
CLASSIFIERS['RandomForest']['color'] = _red
def _get_classifier(classifier_name, classifier_params=None):
if classifier_params:
return CLASSIFIERS[classifier_name]['class'](**classifier_params)
else:
return CLASSIFIERS[classifier_name]['class'](
**CLASSIFIERS[classifier_name]['best_params'])
def _get_estimator(classifier_name, classifier_params=None):
classifier = _get_classifier(classifier_name, classifier_params)
return pipeline.Pipeline([
("imputer", preprocessing.Imputer(strategy="median")),
("scaler", preprocessing.StandardScaler()),
("classifier", classifier)])
#########
# Folds #
#########
FOLDS = {
'kfold': {
'name': 'k-fold',
'class': cross_validation.KFold,
'params': {
'n_folds': 10,
'shuffle': True,
}
},
'stratified_kfold': {
'name': 'Stratified k-fold',
'class': cross_validation.StratifiedKFold,
'params': {
'n_folds': 10,
'shuffle': True,
}
},
'label_kfold': {
'name': 'Label k-fold',
'class': cross_validation.LabelKFold,
'params': {
'n_folds': 10,
}
},
'predefined': {
'name': 'Predefined split',
'class': cross_validation.PredefinedSplit,
'params': {}
}
}
def _get_folds(folds_name, y, folds_params=None, labels=None):
# Get default params
params = FOLDS[folds_name]['params']
# Update params if given
if folds_params:
params.update(folds_params)
# Set default params depending on y and labels
if folds_name == 'kfold':
params['n'] = len(y)
elif folds_name == 'stratified_kfold':
params['y'] = y
elif folds_name == 'label_kfold':
params['labels'] = labels
# Get folds
return FOLDS[folds_name]['class'](**params)
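# Example sketch: build 10-fold stratified cross-validation splits over a label vector y:
#     folds = _get_folds('stratified_kfold', y)
#     for train_idx, test_idx in folds:
#         ...  # fit/evaluate on each split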
def _evaluate(X, y, estimator, train, test, percentiles=None, scores=None):
if not percentiles:
percentiles = [100]
estimator.fit(X[train], y[train])
prob = estimator.predict_proba(X[test])[:, 1]
pred = prob > 0.5
    results = pd.DataFrame()
"""Tests for importing and exporting data to SBDF files."""
import datetime
import decimal
import os
import unittest
from time import perf_counter
import pandas
from spotfire import fast_sbdf, sbdf
def fast_slow_parity(filename):
start = perf_counter()
slow_df = sbdf.import_data(filename)
mid = perf_counter()
fast_df = fast_sbdf.import_data(filename)
end = perf_counter()
t1 = mid - start
t2 = end - mid
print(f"Slow read took {t1:.4f}s, fast read took {t2:.4f}s")
    with pandas.option_context("display.max_columns", 30, "display.expand_frame_repr", False):
        print(slow_df)
        print(fast_df)
# coding: utf-8
# Import libraries
import pandas as pd
from pandas import ExcelWriter
def genes_mapping():
"""
    The GENES_MAPPING operation creates a mapping table for all the human genes (downloaded from HGNC), providing their current official Gene Symbols, their main IDs, and, if existing, the transcription factors they encode (with their corresponding UniProt IDs). The mapping table is returned as a Pandas dataframe and exported locally in the Excel file 'Genes Mapping.xlsx'.
:return: a Pandas dataframe
Example::
import genereg as gr
mapping_df = gr.GenesMapping.genes_mapping()
"""
# Load input data files:
# HGNC
    hgnc_df = pd.read_csv('./0_Genes_Mapping/DATA/HGNC.tsv', sep='\t', names=['HGNC_ID','GENE_SYMBOL','Previous Symbols','Synonyms','ENTREZ_GENE_ID','RefSeq_ID','UniProt_ID','ENSEMBL_GENE_ID'], dtype={'HGNC_ID':str,'ENTREZ_GENE_ID':str}, skiprows=1)
#!/usr/bin/env python
"""Train HMMs for alignment of signal data from the MinION
"""
import sys
import os
import glob
import pandas as pd
import numpy as np
from timeit import default_timer as timer
from argparse import ArgumentParser
from shutil import copyfile
from subprocess import check_call
import shutil
import tempfile
from py3helpers.utils import create_dot_dict, merge_lists, all_string_permutations, save_json, load_json, \
count_lines_in_file, merge_dicts, list_dir
from py3helpers.multiprocess import *
from signalalign.signalAlignment import multithread_signal_alignment_samples, create_signalAlignment_args, \
SignalAlignSample
from signalalign.hiddenMarkovModel import HmmModel, parse_assignment_file, parse_alignment_file, read_in_alignment_file
from signalalign.utils.fileHandlers import FolderHandler
from signalalign.utils.parsers import read_fasta
from signalalign.utils.sequenceTools import get_motif_kmers, get_sequence_kmers, CustomAmbiguityPositions
from signalalign.build_alignments import generate_top_n_kmers_from_sa_output
def make_master_assignment_table(list_of_assignment_paths, min_probability=0.0, full=False):
"""Create a master assignment table from a list of assignment paths
:param list_of_assignment_paths: list of all paths to assignment.tsv files to concat
:param min_probability: minimum probabilty to keep
:return: pandas DataFrame of all assignments
"""
assignment_dfs = []
for f in list_of_assignment_paths:
assignment_dfs.append(get_assignment_table(f, min_probability, full))
return pd.concat(assignment_dfs, ignore_index=True)
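# Usage sketch (the glob pattern and cutoff are illustrative): concatenate all assignment
# files above a probability cutoff into one master table:
#     table = make_master_assignment_table(glob.glob("signalalign_output/*.assignments.tsv"),
#                                          min_probability=0.8)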
def multiprocess_make_master_assignment_table(list_of_assignment_paths, min_probability=0.0, full=False,
worker_count=1):
"""Create a master assignment table from a list of assignment paths
:param list_of_assignment_paths: list of all paths to assignment.tsv files to concat
:param min_probability: minimum probabilty to keep
:return: pandas DataFrame of all assignments
"""
extra_args = {"min_probability": min_probability,
"full": full}
service = BasicService(get_assignment_table, service_name="multiprocess_make_master_assignment_table")
total, failure, messages, output = run_service(service.run, list_of_assignment_paths,
extra_args, ["file_path"], worker_count)
return pd.concat(output, ignore_index=True)
def get_assignment_table(file_path, min_probability, full):
if full:
data = parse_alignment_file(file_path)
else:
data = parse_assignment_file(file_path)
return data.loc[data['prob'] >= min_probability]
def multiprocess_make_kmer_assignment_tables(list_of_assignment_paths, kmers, strands, min_probability=0.0,
verbose=True, full=False, max_assignments=10,
worker_count=1):
"""Create a master assignment tables from a list of assignment paths
:param kmers:
:param strands:
:param verbose:
:param full:
:param max_assignments:
:param worker_count:
:param list_of_assignment_paths: list of all paths to assignment.tsv files to concat
:param min_probability: minimum probabilty to keep
:return: pandas DataFrame of all assignments
"""
# just in case we get a set
kmers = list(kmers)
# Multiprocess reading in files
extra_args = {"min_probability": min_probability,
"full": full,
"kmers": kmers}
service = BasicService(get_assignment_kmer_tables, service_name="get_assignment_kmer_tables")
total, failure, messages, output = run_service(service.run, list_of_assignment_paths,
extra_args, ["file_path"], worker_count)
kmer_tables = [(pd.concat(x, ignore_index=True), kmers[i]) for i, x in enumerate(zip(*output))]
# Multiprocess sorting each kmer table
extra_args = {"strands": strands,
"verbose": verbose,
"max_assignments": max_assignments}
service = BasicService(sort_dataframe_wrapper, service_name="sort_dataframe_wrapper")
total, failure, messages, output = run_service(service.run, kmer_tables,
extra_args, ["data_table", "kmer"], worker_count)
return pd.concat(output, ignore_index=True)
def get_assignment_kmer_tables(file_path, kmers, min_probability, full):
if full:
data = parse_alignment_file(file_path)
else:
data = parse_assignment_file(file_path)
data = data.loc[data['prob'] >= min_probability]
all_kmers = []
for k in kmers:
all_kmers.append(data.loc[data.kmer == k])
return all_kmers
def sort_dataframe_wrapper(data_table, kmer, max_assignments=10, verbose=False, strands=('t', 'c')):
assert len(strands) > 0, \
"strands must be a list and not be empty. strands: {}".format(strands)
final_output = []
if data_table.empty and verbose:
print("missing kmer {}, continuing".format(kmer))
final_output = data_table
else:
for strand in strands:
by_strand = data_table.loc[data_table['strand'] == strand]
kmer_assignments = by_strand.sort_values(['prob'], ascending=0)[:max_assignments]
if len(kmer_assignments) < max_assignments and verbose:
print("WARNING didn't find {max} requested assignments for {kmer} only found {found}"
"".format(max=max_assignments, kmer=kmer, found=len(kmer_assignments)))
final_output.append(kmer_assignments)
    final_output = pd.concat(final_output, ignore_index=True)
    return final_output
"""This module contains most of the pyemu.Pst object definition. This object
is the primary mechanism for dealing with PEST control files
"""
from __future__ import print_function, division
import os
import re
import copy
import warnings
import numpy as np
import pandas as pd
pd.options.display.max_colwidth = 100
from pyemu.pst.pst_controldata import ControlData, SvdData, RegData
from pyemu.pst import pst_utils
class Pst(object):
"""basic class for handling pest control files to support linear analysis
as well as replicate some of the functionality of the pest utilities
Parameters
----------
filename : str
the name of the control file
load : (boolean)
flag to load the control file. Default is True
resfile : str
corresponding residual file. If None, a residual file
with the control file base name is sought. Default is None
Returns
-------
Pst : Pst
a control file object
"""
def __init__(self, filename, load=True, resfile=None):
self.filename = filename
self.resfile = resfile
self.__res = None
self.__pi_count = 0
for key,value in pst_utils.pst_config.items():
self.__setattr__(key,copy.copy(value))
self.tied = None
self.control_data = ControlData()
self.svd_data = SvdData()
self.reg_data = RegData()
if load:
assert os.path.exists(filename),\
"pst file not found:{0}".format(filename)
self.load(filename)
def __setattr__(self, key, value):
if key == "model_command":
if isinstance(value, str):
value = [value]
super(Pst,self).__setattr__(key,value)
@property
def phi(self):
"""get the weighted total objective function
Returns
-------
phi : float
sum of squared residuals
"""
sum = 0.0
for grp, contrib in self.phi_components.items():
sum += contrib
return sum
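    # Example sketch: load a control file and inspect the weighted objective function
    # (the file name is illustrative):
    #     pst = Pst("my_model.pst")
    #     print(pst.phi)
    #     print(pst.phi_components)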
@property
def phi_components(self):
""" get the individual components of the total objective function
Returns
-------
dict : dict
dictionary of observation group, contribution to total phi
Raises
------
Assertion error if Pst.observation_data groups don't match
Pst.res groups
"""
# calculate phi components for each obs group
components = {}
ogroups = self.observation_data.groupby("obgnme").groups
rgroups = self.res.groupby("group").groups
for og in ogroups.keys():
assert og in rgroups.keys(),"Pst.adjust_weights_res() obs group " +\
"not found: " + str(og)
og_res_df = self.res.ix[rgroups[og]]
og_res_df.index = og_res_df.name
og_df = self.observation_data.ix[ogroups[og]]
og_df.index = og_df.obsnme
og_res_df = og_res_df.loc[og_df.index,:]
assert og_df.shape[0] == og_res_df.shape[0],\
" Pst.phi_components error: group residual dataframe row length" +\
"doesn't match observation data group dataframe row length" + \
str(og_df.shape) + " vs. " + str(og_res_df.shape)
components[og] = np.sum((og_res_df["residual"] *
og_df["weight"]) ** 2)
if not self.control_data.pestmode.startswith("reg") and \
self.prior_information.shape[0] > 0:
ogroups = self.prior_information.groupby("obgnme").groups
for og in ogroups.keys():
assert og in rgroups.keys(),"Pst.adjust_weights_res() obs group " +\
"not found: " + str(og)
og_res_df = self.res.ix[rgroups[og]]
og_res_df.index = og_res_df.name
og_df = self.prior_information.ix[ogroups[og]]
og_df.index = og_df.pilbl
og_res_df = og_res_df.loc[og_df.index,:]
assert og_df.shape[0] == og_res_df.shape[0],\
" Pst.phi_components error: group residual dataframe row length" +\
"doesn't match observation data group dataframe row length" + \
str(og_df.shape) + " vs. " + str(og_res_df.shape)
components[og] = np.sum((og_res_df["residual"] *
og_df["weight"]) ** 2)
return components
@property
def phi_components_normalized(self):
""" get the individual components of the total objective function
normalized to the total PHI being 1.0
Returns
-------
dict : dict
dictionary of observation group, normalized contribution to total phi
Raises
------
Assertion error if self.observation_data groups don't match
self.res groups
"""
# use a dictionary comprehension to go through and normalize each component of phi to the total
phi_components_normalized = {i: self.phi_components[i]/self.phi for i in self.phi_components}
return phi_components_normalized
def set_res(self,res):
""" reset the private Pst.res attribute
Parameters
----------
res : (varies)
something to use as Pst.res attribute
"""
self.__res = res
@property
def res(self):
"""get the residuals dataframe attribute
Returns
-------
res : pandas.DataFrame
Note
----
if the Pst.__res attribute has not been loaded,
this call loads the res dataframe from a file
"""
if self.__res is not None:
return self.__res
else:
if self.resfile is not None:
assert os.path.exists(self.resfile),"Pst.res: self.resfile " +\
str(self.resfile) + " does not exist"
else:
self.resfile = self.filename.replace(".pst", ".res")
if not os.path.exists(self.resfile):
self.resfile = self.resfile.replace(".res", ".rei")
if not os.path.exists(self.resfile):
raise Exception("Pst.res: " +
"could not residual file case.res" +
" or case.rei")
res = pst_utils.read_resfile(self.resfile)
missing_bool = self.observation_data.obsnme.apply\
(lambda x: x not in res.name)
missing = self.observation_data.obsnme[missing_bool]
if missing.shape[0] > 0:
raise Exception("Pst.res: the following observations " +
"were not found in " +
"{0}:{1}".format(self.resfile,','.join(missing)))
self.__res = res
return self.__res
@property
def nprior(self):
"""number of prior information equations
Returns
-------
nprior : int
the number of prior info equations
"""
self.control_data.nprior = self.prior_information.shape[0]
return self.control_data.nprior
@property
def nnz_obs(self):
""" get the number of non-zero weighted observations
Returns
-------
nnz_obs : int
the number of non-zeros weighted observations
"""
nnz = 0
for w in self.observation_data.weight:
if w > 0.0:
nnz += 1
return nnz
@property
def nobs(self):
"""get the number of observations
Returns
-------
nobs : int
the number of observations
"""
self.control_data.nobs = self.observation_data.shape[0]
return self.control_data.nobs
@property
def npar_adj(self):
"""get the number of adjustable parameters (not fixed or tied)
Returns
-------
npar_adj : int
the number of adjustable parameters
"""
np = 0
for t in self.parameter_data.partrans:
if t not in ["fixed", "tied"]:
np += 1
return np
@property
def npar(self):
"""get number of parameters
Returns
-------
npar : int
the number of parameters
"""
self.control_data.npar = self.parameter_data.shape[0]
return self.control_data.npar
@property
def forecast_names(self):
"""get the forecast names from the pestpp options (if any).
Returns None if no forecasts are named
Returns
-------
forecast_names : list
a list of forecast names.
"""
if "forecasts" in self.pestpp_options.keys():
return self.pestpp_options["forecasts"].lower().split(',')
elif "predictions" in self.pestpp_options.keys():
return self.pestpp_options["predictions"].lower().split(',')
else:
return None
@property
def obs_groups(self):
"""get the observation groups
Returns
-------
obs_groups : list
a list of unique observation groups
"""
og = list(self.observation_data.groupby("obgnme").groups.keys())
#og = list(map(pst_utils.SFMT, og))
return og
@property
def nnz_obs_groups(self):
""" get the observation groups that contain at least one non-zero weighted
observation
Returns
-------
nnz_obs_groups : list
a list of observation groups that contain at
least one non-zero weighted observation
"""
og = []
obs = self.observation_data
for g in self.obs_groups:
if obs.loc[obs.obgnme==g,"weight"].sum() > 0.0:
og.append(g)
return og
@property
def par_groups(self):
"""get the parameter groups
Returns
-------
par_groups : list
a list of parameter groups
"""
return list(self.parameter_data.groupby("pargp").groups.keys())
@property
def prior_groups(self):
"""get the prior info groups
Returns
-------
prior_groups : list
a list of prior information groups
"""
og = list(self.prior_information.groupby("obgnme").groups.keys())
#og = list(map(pst_utils.SFMT, og))
return og
@property
def prior_names(self):
""" get the prior information names
Returns
-------
prior_names : list
a list of prior information names
"""
return list(self.prior_information.groupby(
self.prior_information.index).groups.keys())
@property
def par_names(self):
"""get the parameter names
Returns
-------
par_names : list
a list of parameter names
"""
return list(self.parameter_data.parnme.values)
@property
def adj_par_names(self):
""" get the adjustable (not fixed or tied) parameter names
Returns
-------
adj_par_names : list
list of adjustable (not fixed or tied) parameter names
"""
adj_names = []
for t,n in zip(self.parameter_data.partrans,
self.parameter_data.parnme):
if t.lower() not in ["tied","fixed"]:
adj_names.append(n)
return adj_names
@property
def obs_names(self):
"""get the observation names
Returns
-------
obs_names : list
a list of observation names
"""
return list(self.observation_data.obsnme.values)
@property
def nnz_obs_names(self):
"""get the non-zero weight observation names
Returns
-------
nnz_obs_names : list
a list of non-zero weighted observation names
"""
nz_names = []
for w,n in zip(self.observation_data.weight,
self.observation_data.obsnme):
if w > 0.0:
nz_names.append(n)
return nz_names
@property
def zero_weight_obs_names(self):
""" get the zero-weighted observation names
Returns
-------
zero_weight_obs_names : list
a list of zero-weighted observation names
"""
self.observation_data.index = self.observation_data.obsnme
groups = self.observation_data.groupby(
self.observation_data.weight.apply(lambda x: x==0.0)).groups
if True in groups:
return list(self.observation_data.loc[groups[True],"obsnme"])
else:
return []
# @property
# def regul_section(self):
# phimlim = float(self.nnz_obs)
# #sect = "* regularisation\n"
# sect = "{0:15.6E} {1:15.6E}\n".format(phimlim, phimlim*1.15)
# sect += "1.0 1.0e-10 1.0e10 linreg continue\n"
# sect += "1.3 1.0e-2 1\n"
# return sect
@property
def estimation(self):
""" check if the control_data.pestmode is set to estimation
Returns
-------
estimation : bool
True if pestmode is estmation, False otherwise
"""
if self.control_data.pestmode == "estimation":
return True
return False
@staticmethod
def _read_df(f,nrows,names,converters,defaults=None):
""" a private method to read part of an open file into a pandas.DataFrame.
Parameters
----------
f : file object
nrows : int
number of rows to read
names : list
names to set the columns of the dataframe with
converters : dict
dictionary of lambda functions to convert strings
to numerical format
defaults : dict
dictionary of default values to assign columns.
Default is None
Returns
-------
pandas.DataFrame : pandas.DataFrame
"""
seek_point = f.tell()
df = pd.read_csv(f, header=None,names=names,
nrows=nrows,delim_whitespace=True,
converters=converters, index_col=False)
# in case there was some extra junk at the end of the lines
if df.shape[1] > len(names):
            df = df.iloc[:, :len(names)]
df.columns = names
if defaults is not None:
for name in names:
df.loc[:,name] = df.loc[:,name].fillna(defaults[name])
        elif np.any(pd.isnull(df)):
            raise Exception("NaNs found in dataframe read from control file")
        return df
# -*- coding: utf-8 -*-
"""
===============================================================================
FINANCIAL IMPACT FILE
===============================================================================
Most recent update:
21 January 2019
===============================================================================
Made by:
<NAME>
Copyright:
<NAME>, 2018
For more information, please email:
<EMAIL>
===============================================================================
"""
import numpy as np
import pandas as pd
import sys
sys.path.insert(0, '/***YOUR LOCAL FILE PATH***/CLOVER 4.0/Scripts/Conversion scripts')
from Conversion import Conversion
class Finance():
def __init__(self):
self.location = 'Bahraich'
self.CLOVER_filepath = '/***YOUR LOCAL FILE PATH***/CLOVER 4.0'
self.location_filepath = self.CLOVER_filepath + '/Locations/' + self.location
self.location_inputs = pd.read_csv(self.location_filepath + '/Location Data/Location inputs.csv',header=None,index_col=0)[1]
self.finance_filepath = self.location_filepath + '/Impact/Finance inputs.csv'
self.finance_inputs = pd.read_csv(self.finance_filepath,header=None,index_col=0).round(decimals=3)[1]
self.inverter_inputs = pd.read_csv(self.location_filepath + '/Load/Device load/yearly_load_statistics.csv',index_col=0)
#%%
#==============================================================================
# EQUIPMENT EXPENDITURE (NOT DISCOUNTED)
# Installation costs (not discounted) for new equipment installations
#==============================================================================
# PV array costs
def get_PV_cost(self,PV_array_size,year=0):
'''
Function:
Calculates cost of PV
Inputs:
PV_array_size Capacity of PV being installed
year Installation year
Outputs:
Undiscounted cost
'''
PV_cost = PV_array_size * self.finance_inputs.loc['PV cost']
annual_reduction = 0.01 * self.finance_inputs.loc['PV cost decrease']
return PV_cost * (1.0 - annual_reduction)**year
# PV balance of systems costs
def get_BOS_cost(self,PV_array_size,year=0):
'''
Function:
Calculates cost of PV BOS
Inputs:
PV_array_size Capacity of PV being installed
year Installation year
Outputs:
Undiscounted cost
'''
BOS_cost = PV_array_size * self.finance_inputs.loc['BOS cost']
annual_reduction = 0.01 * self.finance_inputs.loc['BOS cost decrease']
return BOS_cost * (1.0 - annual_reduction)**year
# Battery storage costs
def get_storage_cost(self,storage_size,year=0):
'''
Function:
Calculates cost of battery storage
Inputs:
storage_size Capacity of battery storage being installed
year Installation year
Outputs:
Undiscounted cost
'''
storage_cost = storage_size * self.finance_inputs.loc['Storage cost']
annual_reduction = 0.01 * self.finance_inputs.loc['Storage cost decrease']
return storage_cost * (1.0 - annual_reduction)**year
# Diesel generator costs
def get_diesel_cost(self,diesel_size,year=0):
'''
Function:
Calculates cost of diesel generator
Inputs:
diesel_size Capacity of diesel generator being installed
year Installation year
Outputs:
Undiscounted cost
'''
diesel_cost = diesel_size * self.finance_inputs.loc['Diesel generator cost']
annual_reduction = 0.01 * self.finance_inputs.loc['Diesel generator cost decrease']
return diesel_cost * (1.0 - annual_reduction)**year
# Installation costs
def get_installation_cost(self,PV_array_size,diesel_size,year=0):
'''
Function:
Calculates cost of installation
Inputs:
PV_array_size Capacity of PV being installed
diesel_size Capacity of diesel generator being installed
year Installation year
Outputs:
Undiscounted cost
'''
PV_installation = PV_array_size * self.finance_inputs.loc['PV installation cost']
annual_reduction_PV = 0.01 * self.finance_inputs.loc['PV installation cost decrease']
diesel_installation = diesel_size * self.finance_inputs.loc['Diesel installation cost']
annual_reduction_diesel = 0.01 * self.finance_inputs.loc['Diesel installation cost decrease']
return PV_installation * (1.0 - annual_reduction_PV)**year + diesel_installation * (1.0 - annual_reduction_diesel)**year
# Miscellaneous costs
def get_misc_costs(self,PV_array_size,diesel_size):
'''
Function:
Calculates cost of miscellaneous capacity-related costs
Inputs:
PV_array_size Capacity of PV being installed
diesel_size Capacity of diesel generator being installed
Outputs:
Undiscounted cost
'''
misc_costs = (PV_array_size + diesel_size) * self.finance_inputs.loc['Misc. costs']
return misc_costs
# Total cost of newly installed equipment
def get_total_equipment_cost(self,PV_array_size,storage_size,diesel_size,year=0):
'''
Function:
Calculates cost of all equipment costs
Inputs:
PV_array_size Capacity of PV being installed
storage_size Capacity of battery storage being installed
diesel_size Capacity of diesel generator being installed
year Installation year
Outputs:
Undiscounted cost
'''
PV_cost = self.get_PV_cost(PV_array_size,year)
BOS_cost = self.get_BOS_cost(PV_array_size,year)
storage_cost = self.get_storage_cost(storage_size,year)
diesel_cost = self.get_diesel_cost(diesel_size,year)
installation_cost = self.get_installation_cost(PV_array_size,diesel_size,year)
misc_costs = self.get_misc_costs(PV_array_size,diesel_size)
return PV_cost + BOS_cost + storage_cost + diesel_cost + installation_cost + misc_costs
#%%
#==============================================================================
# EQUIPMENT EXPENDITURE (DISCOUNTED)
# Find system equipment capital expenditure (discounted) for new equipment
#==============================================================================
def discounted_equipment_cost(self,PV_array_size,storage_size,diesel_size,year=0):
'''
Function:
Calculates cost of all equipment costs
Inputs:
PV_array_size Capacity of PV being installed
storage_size Capacity of battery storage being installed
diesel_size Capacity of diesel generator being installed
year Installation year
Outputs:
Discounted cost
'''
undiscounted_cost = self.get_total_equipment_cost(PV_array_size,storage_size,diesel_size,year)
discount_fraction = (1.0 - self.finance_inputs.loc['Discount rate'])**year
return undiscounted_cost * discount_fraction
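    # Example sketch: discounted capital cost of a 5 kWp PV array, 10 kWh of storage and a
    # 2 kW diesel generator installed in year 5 (actual values depend on 'Finance inputs.csv'):
    #     finance = Finance()
    #     capex = finance.discounted_equipment_cost(PV_array_size=5, storage_size=10,
    #                                               diesel_size=2, year=5)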
def get_connections_expenditure(self,households,year=0):
'''
Function:
Calculates cost of connecting households to the system
Inputs:
households DataFrame of households from Energy_System().simulation(...)
year Installation year
Outputs:
Discounted cost
'''
households = pd.DataFrame(households)
connection_cost = self.finance_inputs.loc['Connection cost']
new_connections = np.max(households) - np.min(households)
undiscounted_cost = float(connection_cost * new_connections)
discount_fraction = (1.0 - self.finance_inputs.loc['Discount rate'])**year
total_discounted_cost = undiscounted_cost * discount_fraction
# Section in comments allows a more accurate consideration of the discounted
# cost for new connections, but substantially increases the processing time.
# new_connections = [0]
# for t in range(int(households.shape[0])-1):
# new_connections.append(households['Households'][t+1] - households['Households'][t])
# new_connections = pd.DataFrame(new_connections)
# new_connections_daily = Conversion().hourly_profile_to_daily_sum(new_connections)
# total_daily_cost = connection_cost * new_connections_daily
# total_discounted_cost = self.discounted_cost_total(total_daily_cost,start_year,end_year)
return total_discounted_cost
# Grid extension components
def get_grid_extension_cost(self,grid_extension_distance,year):
'''
Function:
Calculates cost of extending the grid network to a community
Inputs:
grid_extension_distance Distance to the existing grid network
year Installation year
Outputs:
Discounted cost
'''
grid_extension_cost = self.finance_inputs.loc['Grid extension cost'] # per km
grid_infrastructure_cost = self.finance_inputs.loc['Grid infrastructure cost']
discount_fraction = (1.0 - self.finance_inputs.loc['Discount rate'])**year
return grid_extension_distance * grid_extension_cost * discount_fraction + grid_infrastructure_cost
#%%
# =============================================================================
# EQUIPMENT EXPENDITURE (DISCOUNTED) ON INDEPENDENT EXPENDITURE
# Find expenditure (discounted) on items independent of simulation periods
# =============================================================================
def get_independent_expenditure(self,start_year,end_year):
'''
Function:
Calculates cost of equipment which is independent of simulation periods
Inputs:
start_year Start year of simulation period
end_year End year of simulation period
Outputs:
Discounted cost
'''
inverter_expenditure = self.get_inverter_expenditure(start_year,end_year)
total_expenditure = inverter_expenditure # ... + other components as required
return total_expenditure
def get_inverter_expenditure(self,start_year,end_year):
'''
Function:
Calculates cost of inverters based on load calculations
Inputs:
start_year Start year of simulation period
end_year End year of simulation period
Outputs:
Discounted cost
'''
# Initialise inverter replacement periods
replacement_period = int(self.finance_inputs.loc['Inverter lifetime'])
system_lifetime = int(self.location_inputs['Years'])
replacement_intervals = pd.DataFrame(np.arange(0,system_lifetime,replacement_period))
replacement_intervals.columns = ['Installation year']
# Check if inverter should be replaced in the specified time interval
if replacement_intervals.loc[replacement_intervals['Installation year'].isin(
range(start_year,end_year))].empty == True:
inverter_discounted_cost = float(0.0)
return inverter_discounted_cost
# Initialise inverter sizing calculation
max_power = []
inverter_step = float(self.finance_inputs.loc['Inverter size increment'])
inverter_size = []
for i in range(len(replacement_intervals)):
# Calculate maximum power in interval years
start = replacement_intervals['Installation year'].iloc[i]
end = start + replacement_period
max_power_interval = self.inverter_inputs['Maximum'].iloc[start:end].max()
max_power.append(max_power_interval)
# Calculate resulting inverter size
inverter_size_interval = np.ceil(0.001*max_power_interval / inverter_step) * inverter_step
inverter_size.append(inverter_size_interval)
inverter_size = pd.DataFrame(inverter_size)
inverter_size.columns = ['Inverter size (kW)']
inverter_info = pd.concat([replacement_intervals,inverter_size],axis=1)
# Calculate
inverter_info['Discount rate'] = [(1 - self.finance_inputs.loc['Discount rate']) **
inverter_info['Installation year'].iloc[i] for i in range(len(inverter_info))]
inverter_info['Inverter cost ($/kW)'] = [self.finance_inputs.loc['Inverter cost'] *
(1 - 0.01*self.finance_inputs.loc['Inverter cost decrease'])
**inverter_info['Installation year'].iloc[i] for i in range(len(inverter_info))]
inverter_info['Discounted expenditure ($)'] = [inverter_info['Discount rate'].iloc[i] *
inverter_info['Inverter size (kW)'].iloc[i] * inverter_info['Inverter cost ($/kW)'].iloc[i]
for i in range(len(inverter_info))]
inverter_discounted_cost = np.sum(inverter_info.loc[inverter_info['Installation year'].
isin(np.array(range(start_year,end_year)))
]['Discounted expenditure ($)']).round(2)
return inverter_discounted_cost
#%%
#==============================================================================
# EXPENDITURE (DISCOUNTED) ON RUNNING COSTS
# Find expenditure (discounted) incurred during the simulation period
#==============================================================================
def get_kerosene_expenditure(self,kerosene_lamps_in_use_hourly,start_year=0,end_year=20):
'''
Function:
Calculates cost of kerosene usage
Inputs:
kerosene_lamps_in_use_hourly Output from Energy_System().simulation(...)
start_year Start year of simulation period
end_year End year of simulation period
Outputs:
Discounted cost
'''
kerosene_cost = kerosene_lamps_in_use_hourly * self.finance_inputs.loc['Kerosene cost']
total_daily_cost = Conversion().hourly_profile_to_daily_sum(kerosene_cost)
total_discounted_cost = self.discounted_cost_total(total_daily_cost,start_year,end_year)
return total_discounted_cost
def get_kerosene_expenditure_mitigated(self,kerosene_lamps_mitigated_hourly,start_year=0,end_year=20):
'''
Function:
Calculates cost of kerosene usage that has been avoided by using the system
Inputs:
kerosene_lamps_mitigated_hourly Output from Energy_System().simulation(...)
start_year Start year of simulation period
end_year End year of simulation period
Outputs:
Discounted cost
'''
kerosene_cost = kerosene_lamps_mitigated_hourly * self.finance_inputs.loc['Kerosene cost']
total_daily_cost = Conversion().hourly_profile_to_daily_sum(kerosene_cost)
total_discounted_cost = self.discounted_cost_total(total_daily_cost,start_year,end_year)
return total_discounted_cost
def get_grid_expenditure(self,grid_energy_hourly,start_year=0,end_year=20):
'''
Function:
Calculates cost of grid electricity used by the system
Inputs:
grid_energy_hourly Output from Energy_System().simulation(...)
start_year Start year of simulation period
end_year End year of simulation period
Outputs:
Discounted cost
'''
grid_cost = grid_energy_hourly * self.finance_inputs.loc['Grid cost']
total_daily_cost = Conversion().hourly_profile_to_daily_sum(grid_cost)
total_discounted_cost = self.discounted_cost_total(total_daily_cost,start_year,end_year)
return total_discounted_cost
def get_diesel_fuel_expenditure(self,diesel_fuel_usage_hourly,start_year=0,end_year=20):
'''
Function:
Calculates cost of diesel fuel used by the system
Inputs:
diesel_fuel_usage_hourly Output from Energy_System().simulation(...)
start_year Start year of simulation period
end_year End year of simulation period
Outputs:
Discounted cost
'''
diesel_fuel_usage_daily = Conversion().hourly_profile_to_daily_sum(diesel_fuel_usage_hourly)
start_day = start_year * 365
end_day = end_year * 365
diesel_price_daily = []
original_diesel_price = self.finance_inputs.loc['Diesel fuel cost']
r_y = 0.01 * self.finance_inputs.loc['Diesel fuel cost decrease']
r_d = ((1.0 + r_y) ** (1.0/365.0)) - 1.0
for t in range(start_day,end_day):
diesel_price = original_diesel_price * (1.0 - r_d)**t
diesel_price_daily.append(diesel_price)
diesel_price_daily = pd.DataFrame(diesel_price_daily)
total_daily_cost = pd.DataFrame(diesel_fuel_usage_daily.values * diesel_price_daily.values)
total_discounted_cost = self.discounted_cost_total(total_daily_cost,start_year,end_year)
return total_discounted_cost
#%%
#==============================================================================
# OPERATION AND MAINTENANCE EXPENDITURE (DISCOUNTED)
# Find O&M costs (discounted) incurred during simulation
#==============================================================================
# PV O&M for entire PV array
def get_PV_OM(self,PV_array_size,start_year=0,end_year=20):
'''
Function:
            Calculates O&M cost of PV over the simulation period
Inputs:
PV_array_size Capacity of PV installed
start_year Start year of simulation period
end_year End year of simulation period
Outputs:
Discounted cost
'''
PV_OM_cost = PV_array_size * self.finance_inputs.loc['PV O&M'] # $ per year
PV_OM_cost_daily = PV_OM_cost / 365.0 # $ per day
total_daily_cost = pd.DataFrame([PV_OM_cost_daily]*(end_year-start_year)*365)
return self.discounted_cost_total(total_daily_cost,start_year,end_year)
# Storage O&M for entire storage system
def get_storage_OM(self,storage_size,start_year=0,end_year=20):
'''
Function:
            Calculates O&M cost of storage over the simulation period
Inputs:
storage_size Capacity of battery storage installed
start_year Start year of simulation period
end_year End year of simulation period
Outputs:
Discounted cost
'''
storage_OM_cost = storage_size * self.finance_inputs.loc['Storage O&M'] # $ per year
storage_OM_cost_daily = storage_OM_cost / 365.0 # $ per day
total_daily_cost = pd.DataFrame([storage_OM_cost_daily]*(end_year-start_year)*365)
return self.discounted_cost_total(total_daily_cost,start_year,end_year)
# Diesel O&M for entire diesel genset
def get_diesel_OM(self,diesel_size,start_year=0,end_year=20):
'''
Function:
            Calculates O&M cost of diesel generation over the simulation period
Inputs:
diesel_size Capacity of diesel generator installed
start_year Start year of simulation period
end_year End year of simulation period
Outputs:
Discounted cost
'''
diesel_OM_cost = diesel_size * self.finance_inputs.loc['Diesel O&M'] # $ per year
diesel_OM_cost_daily = diesel_OM_cost / 365.0 # $ per day
        total_daily_cost = pd.DataFrame([diesel_OM_cost_daily]*(end_year-start_year)*365)
        return self.discounted_cost_total(total_daily_cost,start_year,end_year)
import streamlit as st
import pandas as pd
import datetime
import altair as alt
now = datetime.datetime.now()
def curva_activos_recuperados(f_inicio,f_termino):
df = obtener_datos()
data = pd.DataFrame(data=df)
data = data.groupby(['Fecha']).sum()
return data.loc[str(f_inicio):str(f_termino)]
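# Usage sketch inside the Streamlit app (dates are illustrative):
#     datos = curva_activos_recuperados(datetime.date(2021, 1, 1), datetime.date(2021, 3, 31))
#     st.line_chart(datos)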
def obtener_datos():
url = 'https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto46/activos_vs_recuperados.csv'
    return pd.read_csv(url, header=0, names=['Fecha','Activos','Recuperados'])
# Passed arguments:
# $1 - The input database file. Used for subsetting the master precomputed one
# $2 - A file (path) which to write as a subset of master database
# $3 - The folder where to look for a precomputed database
# $4 - The database name to gather the _all and _one files
# $5 - A file (path) which to write as a subset of "one" database. So we gather
# the organisms that have only one 16S in their genome
# Import libraries
import pandas as pd
import sys
import os
def process_dataframe(file):
'''
Returns processed dataframe from standard csv file, as downloaded from NCBI
    The dataframe contains two columns, Code and Organism
Code is a filename, as the last /string in GenBank FTP field
Organism is an organism name, as name + strains, if strain is not listed in name.
'''
    # Need the full column width to display complete organism names; smaller values truncate the strings
pd.options.display.max_colwidth = 100
df = pd.read_csv(file)
#Make tmp dataframe with splited GenBank FNT field
df1 = df["GenBank FTP"].str.split(pat = '/')
#Make new dataframe to append to
df_clean = pd.DataFrame(columns = ['Code', 'Organism'])
i = 0
for row in df1.iteritems():
#Checks if information in strain field is already in name field,
        #If not, concatenate the name and strain fields
if df['#Organism Name'].iloc[i].split(' ')[-1] == str(str(df['Strain'].iloc[i]).split(' ')[-1]):
name = df['#Organism Name'].iloc[i]
else:
name = df['#Organism Name'].iloc[i] +' '+str(df['Strain'].iloc[i]).replace("/",'_')
#Appends information to clean dataframe
df_clean = df_clean.append(pd.Series([row[-1][-1], name], index = df_clean.columns),
ignore_index = True)
i+=1
return df_clean
def check_two_dataframes(master, subset):
'''
    Compare two dataframes - the master one (compared to) and the subset one (which to
    compare).
The master dataframe should contain 'Organism' column. The subset is a
csv from Genbank, organisms from which are going to be renamed and checked
with the master 'Organism' column
'''
cleaned_subset = [name.replace(' ', '_').replace('/', '_').replace(':', '_')\
.replace(';', '_').replace(',', '_').replace('[','').replace(']','') \
for name in list(subset['Organism'])]
subset_final = master[master.Names.isin(cleaned_subset)]
return subset_final
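# Usage sketch: subset the precomputed master table with the organisms listed in the
# input CSV (argument order follows the header comment at the top of this script):
#     subset_df = check_two_dataframes(master_csv, process_dataframe(sys.argv[1]))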
# Read master dataframe files and subset one
master_csv = pd.read_csv(sys.argv[3] + "/" + sys.argv[4] + "_all.csv")
from typing import Optional
import numpy as np
import pandas as pd
import pytest
from pandas import testing as pdt
from rle_array.autoconversion import auto_convert_to_rle, decompress
from rle_array.dtype import RLEDtype
pytestmark = pytest.mark.filterwarnings("ignore:performance")
@pytest.mark.parametrize(
"orig, threshold, expected",
[
(
# orig
pd.DataFrame(
{
"int64": pd.Series([1], dtype=np.int64),
"int32": pd.Series([1], dtype=np.int32),
"uint64": pd.Series([1], dtype=np.uint64),
"float64": pd.Series([1.2], dtype=np.float64),
"bool": pd.Series([True], dtype=np.bool_),
"object": pd.Series(["foo"], dtype=np.object_),
"datetime64": pd.Series(
[pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"
),
}
),
# threshold
None,
# expected
pd.DataFrame(
{
"int64": pd.Series([1], dtype=RLEDtype(np.int64)),
"int32": pd.Series([1], dtype=RLEDtype(np.int32)),
"uint64": pd.Series([1], dtype=RLEDtype(np.uint64)),
"float64": pd.Series([1.2], dtype=RLEDtype(np.float64)),
"bool": pd.Series([True], dtype=RLEDtype(np.bool_)),
"object": pd.Series(["foo"]).astype(RLEDtype(np.object_)),
"datetime64": pd.Series(
[pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"
),
}
),
),
(
# orig
pd.DataFrame(
{
"int64": pd.Series([1], dtype=np.int64),
"int32": pd.Series([1], dtype=np.int32),
"uint64": pd.Series([1], dtype=np.uint64),
"float64": pd.Series([1.2], dtype=np.float64),
"bool": pd.Series([True], dtype=np.bool_),
"object": pd.Series(["foo"], dtype=np.object_),
"datetime64": pd.Series(
[pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"
),
}
),
# threshold
2.0,
# expected
pd.DataFrame(
{
"int64": pd.Series([1], dtype=RLEDtype(np.int64)),
"int32": pd.Series([1], dtype=np.int32),
"uint64": pd.Series([1], dtype=RLEDtype(np.uint64)),
"float64": pd.Series([1.2], dtype=RLEDtype(np.float64)),
"bool": pd.Series([True], dtype=np.bool_),
"object": pd.Series(["foo"]).astype(RLEDtype(np.object_)),
"datetime64": pd.Series(
[pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"
),
}
),
),
(
# orig
pd.DataFrame(
{
"single_value": pd.Series([1, 1, 1, 1, 1, 1], dtype=np.int64),
"two_values": pd.Series([1, 1, 1, 2, 2, 2], dtype=np.int64),
"increasing": pd.Series([1, 2, 3, 4, 5, 6], dtype=np.int64),
}
),
# threshold
None,
# expected
pd.DataFrame(
{
"single_value": pd.Series(
[1, 1, 1, 1, 1, 1], dtype=RLEDtype(np.int64)
),
"two_values": pd.Series(
[1, 1, 1, 2, 2, 2], dtype=RLEDtype(np.int64)
),
"increasing": pd.Series(
[1, 2, 3, 4, 5, 6], dtype=RLEDtype(np.int64)
),
}
),
),
(
# orig
pd.DataFrame(
{
"single_value": pd.Series([1, 1, 1, 1, 1, 1], dtype=np.int64),
"two_values": pd.Series([1, 1, 1, 2, 2, 2], dtype=np.int64),
"increasing": pd.Series([1, 2, 3, 4, 5, 6], dtype=np.int64),
}
),
# threshold
0.9,
# expected
pd.DataFrame(
{
"single_value": pd.Series(
[1, 1, 1, 1, 1, 1], dtype=RLEDtype(np.int64)
),
"two_values": pd.Series(
[1, 1, 1, 2, 2, 2], dtype=RLEDtype(np.int64)
),
"increasing": pd.Series([1, 2, 3, 4, 5, 6], dtype=np.int64),
}
),
),
(
# orig
pd.DataFrame(
{
"single_value": pd.Series([1, 1, 1, 1, 1, 1], dtype=np.int64),
"two_values": pd.Series([1, 1, 1, 2, 2, 2], dtype=np.int64),
"increasing": pd.Series([1, 2, 3, 4, 5, 6], dtype=np.int64),
}
),
# threshold
0.5,
# expected
pd.DataFrame(
{
"single_value": pd.Series(
[1, 1, 1, 1, 1, 1], dtype=RLEDtype(np.int64)
),
"two_values": pd.Series([1, 1, 1, 2, 2, 2], dtype=np.int64),
"increasing": pd.Series([1, 2, 3, 4, 5, 6], dtype=np.int64),
}
),
),
(
# orig
pd.DataFrame(
{
"single_value": pd.Series([1, 1, 1, 1, 1, 1], dtype=np.int64),
"two_values": pd.Series([1, 1, 1, 2, 2, 2], dtype=np.int64),
"increasing": pd.Series([1, 2, 3, 4, 5, 6], dtype=np.int64),
}
),
# threshold
0.0,
# expected
pd.DataFrame(
{
"single_value": pd.Series([1, 1, 1, 1, 1, 1], dtype=np.int64),
"two_values": pd.Series([1, 1, 1, 2, 2, 2], dtype=np.int64),
"increasing": pd.Series([1, 2, 3, 4, 5, 6], dtype=np.int64),
}
),
),
(
# orig
pd.DataFrame({"x": pd.Series([], dtype=np.int64)}),
# threshold
0.0,
# expected
pd.DataFrame({"x": pd.Series([], dtype=np.int64)}),
),
(
# orig
pd.DataFrame({"x": pd.Series([], dtype=np.int64)}),
# threshold
0.1,
# expected
pd.DataFrame({"x": pd.Series([], dtype=RLEDtype(np.int64))}),
),
(
# orig
pd.DataFrame(
{
"single_value": pd.Series([1, 1, 1, 1, 1, 1], dtype=np.int64),
"two_values": pd.Series([1, 1, 1, 2, 2, 2], dtype=np.int64),
"increasing": pd.Series(
[1, 2, 3, 4, 5, 6], dtype=RLEDtype(np.int64)
),
}
),
# threshold
0.5,
# expected
pd.DataFrame(
{
"single_value": pd.Series(
[1, 1, 1, 1, 1, 1], dtype=RLEDtype(np.int64)
),
"two_values": pd.Series([1, 1, 1, 2, 2, 2], dtype=np.int64),
"increasing": pd.Series(
[1, 2, 3, 4, 5, 6], dtype=RLEDtype(np.int64)
),
}
),
),
(
# orig
pd.DataFrame({"x": pd.Series(range(10), dtype=np.int64)}),
# threshold
1.0,
# expected
pd.DataFrame({"x": pd.Series(range(10), dtype=np.int64)}),
),
(
# orig
pd.DataFrame(),
# threshold
None,
# expected
pd.DataFrame(),
),
],
)
@pytest.mark.filterwarnings("ignore:.*would use a DatetimeBlock:UserWarning")
def test_auto_convert_to_rle_ok(
orig: pd.DataFrame, threshold: Optional[float], expected: pd.DataFrame
) -> None:
actual = auto_convert_to_rle(orig, threshold)
pdt.assert_frame_equal(actual, expected)
def test_datetime_warns() -> None:
df = pd.DataFrame(
{
"i1": pd.Series([1], dtype=np.int64),
"d1": pd.Series([pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"),
"i2": pd.Series([1], dtype=np.int64),
"d2": pd.Series([pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"),
}
)
with pytest.warns(None) as record:
auto_convert_to_rle(df, 0.5)
assert len(record) == 2
assert (
str(record[0].message)
== "Column d1 would use a DatetimeBlock and can currently not be RLE compressed."
)
assert (
str(record[1].message)
== "Column d2 would use a DatetimeBlock and can currently not be RLE compressed."
)
def test_auto_convert_to_rle_threshold_out_of_range() -> None:
df = pd.DataFrame({"x": [1]})
with pytest.raises(ValueError, match=r"threshold \(-0.1\) must be non-negative"):
auto_convert_to_rle(df, -0.1)
@pytest.mark.parametrize(
"orig, expected",
[
(
# orig
pd.DataFrame(
{
"int64": pd.Series([1], dtype=RLEDtype(np.int64)),
"int32": pd.Series([1], dtype=RLEDtype(np.int32)),
"uint64": pd.Series([1], dtype=RLEDtype(np.uint64)),
"float64": pd.Series([1.2], dtype=RLEDtype(np.float64)),
"bool": pd.Series([True], dtype=RLEDtype(np.bool_)),
"object": pd.Series(["foo"]).astype(RLEDtype(np.object_)),
"datetime64": pd.Series(
[pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"
),
}
),
# expected
pd.DataFrame(
{
"int64": pd.Series([1], dtype=np.int64),
"int32": pd.Series([1], dtype=np.int32),
"uint64": pd.Series([1], dtype=np.uint64),
"float64": pd.Series([1.2], dtype=np.float64),
"bool": pd.Series([True], dtype=np.bool_),
"object": pd.Series(["foo"], dtype=np.object_),
"datetime64": pd.Series(
[pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"
),
}
),
),
(
# orig
pd.DataFrame(
{
"int64": pd.Series([1], dtype=np.int64),
"int32": pd.Series([1], dtype=np.int32),
"uint64": pd.Series([1], dtype=np.uint64),
"float64": pd.Series([1.2], dtype=np.float64),
"bool": pd.Series([True], dtype=np.bool_),
"object": pd.Series(["foo"], dtype=np.object_),
"datetime64": pd.Series(
[pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"
),
}
),
# expected
pd.DataFrame(
{
"int64": pd.Series([1], dtype=np.int64),
"int32": pd.Series([1], dtype=np.int32),
"uint64": pd.Series([1], dtype=np.uint64),
"float64": pd.Series([1.2], dtype=np.float64),
"bool": pd.Series([True], dtype=np.bool_),
"object": pd.Series(["foo"], dtype=np.object_),
"datetime64": pd.Series(
                        [pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"
                    ),
                }
            ),
        ),
    ],
)
def test_decompress_ok(orig: pd.DataFrame, expected: pd.DataFrame) -> None:
    actual = decompress(orig)
    pdt.assert_frame_equal(actual, expected)
import pandas as pd
import numpy as np
from sklearn import impute
data = {
'size': ['XL','L','M', np.nan ,'M','M'],
'color': ['red','green','blue','green','red','green'],
'gender': ['female','male', np.nan,'female','female','male'],'price': [ 199.0 , 89.0, np.nan,129.0, 79.0, 89.0],
'weight': [ 500,450,300, np.nan, 410,np.nan ],
'bought': ['yes','no','yes','no','yes','no']
}
df = pd.DataFrame(data)
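# A minimal sketch of how the missing values could then be filled in (assumed continuation;
# the imputation strategies below are illustrative, not part of the original script):
num_imputer = impute.SimpleImputer(strategy='mean')
df[['price', 'weight']] = num_imputer.fit_transform(df[['price', 'weight']])
cat_imputer = impute.SimpleImputer(strategy='most_frequent')
df[['size', 'gender']] = cat_imputer.fit_transform(df[['size', 'gender']])
print(df)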
import logging
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
from decimal import Decimal
from functools import cached_property
from pathlib import Path
from typing import Dict, List, Tuple, Union
import pandas as pd
from brownie import Contract, chain, convert, multicall, web3
from joblib.parallel import Parallel, delayed
from pandas import DataFrame
from pandas.core.tools.datetimes import DatetimeScalar
from pony.orm import OperationalError, db_session
from rich import print
from rich.progress import track
from web3._utils.abi import filter_by_name
from web3._utils.events import construct_event_topic_set
from yearn.events import decode_logs, get_logs_asap
from yearn.exceptions import UnsupportedNetwork
from yearn.multicall2 import batch_call
from yearn.networks import Network
from yearn.partners.charts import make_partner_charts
from yearn.partners.constants import OPEX_COST, get_tier
from yearn.prices import magic
from yearn.utils import contract, contract_creation_block, get_block_timestamp
from yearn.v2.registry import Registry
from yearn.v2.vaults import Vault
try:
from yearn.entities import PartnerHarvestEvent
from yearn.outputs.postgres.utils import cache_address
USE_POSTGRES_CACHE = True
except OperationalError as e:
if "Is the server running on that host and accepting TCP/IP connections?" in str(e):
USE_POSTGRES_CACHE = False
else:
raise
logger = logging.getLogger(__name__)
def get_timestamps(blocks: Tuple[int,...]) -> DatetimeScalar:
data = Parallel(10, 'threading')(
delayed(get_block_timestamp)(block) for block in blocks
)
return pd.to_datetime([x * 1e9 for x in data])
def get_protocol_fees(address: str, start_block: int = None) -> Dict[int,Decimal]:
"""
Get all protocol fee payouts for a given vault.
Fees can be found as vault share transfers to the rewards address.
"""
vault = Vault.from_address(address)
rewards = vault.vault.rewards()
topics = construct_event_topic_set(
filter_by_name('Transfer', vault.vault.abi)[0],
web3.codec,
{'sender': address, 'receiver': rewards},
)
logs = decode_logs(get_logs_asap(address, topics, from_block=start_block))
return {log.block_number: Decimal(log['value']) / Decimal(vault.scale) for log in logs}
@dataclass
class Wrapper:
def __init__(self, name: str, vault: str, wrapper: str) -> None:
self.name = name
self.vault = convert.to_address(vault)
self.wrapper = convert.to_address(wrapper)
@db_session
def read_cache(self) -> DataFrame:
entities = PartnerHarvestEvent.select(lambda e: e.vault == self.vault and e.wrapper.address == self.wrapper and e.wrapper.chainid == chain.id)[:]
cache = [
{
'block': e.block,
'timestamp': pd.to_datetime(e.timestamp,unit='s'),
'balance': e.balance,
'total_supply': e.total_supply,
'vault_price': e.vault_price,
'balance_usd': e.balance_usd,
'share': e.share,
'payout_base': e.payout_base,
'protocol_fee': e.protocol_fee,
'wrapper': e.wrapper.address,
'vault': e.vault,
} for e in entities
]
return DataFrame(cache)
def protocol_fees(self, start_block: int = None) -> Dict[int,Decimal]:
return get_protocol_fees(self.vault, start_block=start_block)
def balances(self, blocks: Tuple[int,...]) -> List[Decimal]:
vault = Vault.from_address(self.vault)
balances = batch_call(
[[vault.vault, 'balanceOf', self.wrapper, block] for block in blocks]
)
return [Decimal(balance) / Decimal(vault.scale) for balance in balances]
def total_supplies(self, blocks: Tuple[int,...]) -> List[Decimal]:
vault = Vault.from_address(self.vault)
supplies = batch_call([[vault.vault, 'totalSupply', block] for block in blocks])
return [Decimal(supply) / Decimal(vault.scale) for supply in supplies]
def vault_prices(self, blocks: Tuple[int,...]) -> List[Decimal]:
prices = Parallel(10, 'threading')(
delayed(magic.get_price)(self.vault, block=block) for block in blocks
)
return [Decimal(price) for price in prices]
class BentoboxWrapper(Wrapper):
"""
Use BentoBox deposits by wrapper.
"""
def balances(self, blocks) -> List[Decimal]:
bentobox = contract('0xF5BCE5077908a1b7370B9ae04AdC565EBd643966')
vault = Vault.from_address(self.vault)
balances = batch_call(
[
[bentobox, 'balanceOf', self.vault, self.wrapper, block]
for block in blocks
]
)
return [Decimal(balance or 0) / Decimal(vault.scale) for balance in balances]
@dataclass
class WildcardWrapper:
"""
Automatically find and generate all valid (wrapper, vault) pairs.
"""
name: str
wrapper: Union[str, List[str]] # can unpack multiple wrappers
def unwrap(self) -> List[Wrapper]:
registry = Registry()
wrappers = [self.wrapper] if isinstance(self.wrapper, str) else self.wrapper
topics = construct_event_topic_set(
filter_by_name('Transfer', registry.vaults[0].vault.abi)[0],
web3.codec,
{'receiver': wrappers},
)
addresses = [str(vault.vault) for vault in registry.vaults]
from_block = min(ThreadPoolExecutor().map(contract_creation_block, addresses))
# wrapper -> {vaults}
deposits = defaultdict(set)
for log in decode_logs(get_logs_asap(addresses, topics, from_block)):
deposits[log['receiver']].add(log.address)
return [
Wrapper(name=vault.name, vault=str(vault.vault), wrapper=wrapper)
for wrapper in wrappers
for vault in registry.vaults
if str(vault.vault) in deposits[wrapper]
]
@dataclass
class YApeSwapFactoryWrapper(WildcardWrapper):
name: str
wrapper: str
def unwrap(self) -> List[Wrapper]:
factory = contract(self.wrapper)
with multicall:
pairs = [factory.allPairs(i) for i in range(factory.allPairsLength())]
ratios = [Contract(pair).farmingRatio() for pair in pairs]
# pools with ratio.min > 0 deploy to yearn vaults
farming = [str(pair) for pair, ratio in zip(pairs, ratios) if ratio['min'] > 0]
return WildcardWrapper(self.name, farming).unwrap()
class GearboxWrapper(Wrapper):
"""
Use Gearbox CAs as wrappers.
"""
def balances(self, blocks) -> List[Decimal]:
GearboxAccountFactory = contract(self.wrapper)
vault = Vault.from_address(self.vault)
with multicall:
CAs = [GearboxAccountFactory.creditAccounts(i) for i in range(GearboxAccountFactory.countCreditAccounts())]
balances = []
for block in blocks:
balances_at_block = batch_call(
[
[
vault.vault,
'balanceOf',
ca,
block
]
for ca in CAs
]
)
tvl = sum(balance / Decimal(vault.scale) for balance in balances_at_block)
balances.append(tvl)
return balances
@dataclass
class Partner:
name: str
wrappers: List[Wrapper]
treasury: str = None
@cached_property
def flat_wrappers(self) -> List[Wrapper]:
# unwrap wildcard wrappers to a flat list
flat_wrappers = []
for wrapper in self.wrappers:
if isinstance(wrapper, Wrapper):
flat_wrappers.append(wrapper)
elif isinstance(wrapper, WildcardWrapper):
flat_wrappers.extend(wrapper.unwrap())
return flat_wrappers
def process(self, use_postgres_cache: bool = USE_POSTGRES_CACHE) -> Tuple[DataFrame,DataFrame]:
# snapshot wrapper share at each harvest
wrappers = []
for wrapper in track(self.flat_wrappers, self.name):
if use_postgres_cache:
cache = wrapper.read_cache()
try:
max_cached_block = int(cache['block'].max())
start_block = max_cached_block + 1
logger.debug(f'{self.name} {wrapper.name} is cached thru block {max_cached_block}')
except KeyError:
start_block = None
logger.debug(f'no harvests cached for {self.name} {wrapper.name}')
logger.debug(f'start block: {start_block}')
else:
start_block = None
protocol_fees = wrapper.protocol_fees(start_block=start_block)
try:
blocks, protocol_fees = zip(*protocol_fees.items())
wrap = DataFrame(
{
'block': blocks,
'timestamp': get_timestamps(blocks),
'protocol_fee': protocol_fees,
'balance': wrapper.balances(blocks),
'total_supply': wrapper.total_supplies(blocks),
'vault_price': wrapper.vault_prices(blocks),
}
)
wrap['balance_usd'] = wrap.balance * wrap.vault_price
wrap['share'] = wrap.balance / wrap.total_supply
wrap['payout_base'] = wrap.share * wrap.protocol_fee * Decimal(1 - OPEX_COST)
wrap['protocol_fee'] = wrap.protocol_fee
wrap['wrapper'] = wrapper.wrapper
wrap['vault'] = wrapper.vault
except ValueError as e:
if str(e) != 'not enough values to unpack (expected 2, got 0)':
raise
wrap = | DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""ml_package_code.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1slXIOwuXRSfAQpsAAsGs1DIkqMU3hHYZ
"""
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from sklearn.impute import SimpleImputer
from keras.models import Sequential
from keras.layers import Dense
import seaborn as sns
import matplotlib.pyplot as plt
def read_dataset(param):
    # param = [path, target_column, index_column]; an index column of ' '
    # (a single space) means "use the default RangeIndex".
    path = param[0]
    y_var = param[1]
    ind = param[2]
    if ind == ' ':
        df = pd.read_csv(path)
    else:
        df = pd.read_csv(path, index_col=ind)
    y = df[y_var]
    x = df.drop(columns=y_var)
    print(df.head())
    return df, x, y
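# Hedged usage sketch for read_dataset above. The CSV paths and column names are
# hypothetical placeholders, not files from this project:
# df, x, y = read_dataset(['data/iris.csv', 'species', ' '])    # ' ' -> keep default index
# df, x, y = read_dataset(['data/sales.csv', 'revenue', 'id'])  # use 'id' as the index column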
def one_hot(x, y, cat=False, exc=False, onlyoutcome=False, onlyx=False):
    # cat='~' one-hot encodes every feature column; exc=True encodes all feature
    # columns except those listed in cat; onlyoutcome=True encodes only the target y.
    if cat == '~':
for j in x.columns:
onehot = pd.get_dummies(x[j],prefix=j)
x = x.drop(columns=[j])
x = pd.concat([x,onehot],axis=1)
y = pd.get_dummies(y)
return x,y
elif exc == True:
col = x.columns
col1 = list(col)
for i in cat:
col1.remove(i)
for j in col1:
onehot = pd.get_dummies(x[j],prefix=j)
x = x.drop(columns=[j])
x = pd.concat([x,onehot],axis=1)
y = pd.get_dummies(y)
return x,y
elif onlyoutcome == True:
y = | pd.get_dummies(y) | pandas.get_dummies |
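# Hedged usage sketch for one_hot above (toy data, assumed column names):
# x_demo = pd.DataFrame({'color': ['red', 'blue'], 'size': ['S', 'L']})
# y_demo = pd.Series(['yes', 'no'])
# x_enc, y_enc = one_hot(x_demo, y_demo, cat='~')                  # encode every feature column
# x_enc, y_enc = one_hot(x_demo, y_demo, cat=['size'], exc=True)   # encode all except 'size'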
import pandas as pd
import glob
from pathlib import Path
class DataProcessor:
def __init__(self):
pass
def write_data(self, save_file):
"""Write processed data to directory."""
self.data.to_pickle(save_file)
class GameLogProcessor(DataProcessor):
def read_data(self, input_filepath):
"""Read raw data into DataProcessor."""
game_logs = Path(input_filepath) / 'gl1871_2020'
all_files = glob.glob(str(game_logs / "GL201*.TXT"))
header_file = Path(input_filepath) / 'game_log_header.csv'
fields = pd.read_csv(header_file)
li = []
for filename in all_files:
df = pd.read_csv(filename, header=None, names=fields.columns)
li.append(df)
df = pd.concat(li, axis=0, ignore_index=True)
self.data = df
def process_data(self, stable=True):
"""Process raw data into useful files for model."""
self.data['GAME_ID'] = self.data['HomeTeam'] + \
self.data['Date'].map(str) + \
self.data['DoubleHeader'].map(str)
self.data['Date'] = pd.to_datetime(self.data['Date'], format='%Y%m%d')
self.data['year'] = self.data.Date.dt.year
class EventsProcessor(DataProcessor):
def read_data(self, input_filepath):
"""Read raw data into DataProcessor."""
header_file = Path(input_filepath) / 'fields.csv'
all_files = glob.glob(str(Path(input_filepath) / "Event201*.txt"))
fields = pd.read_csv(header_file)
header = fields['Header'].to_numpy()
li = []
for filename in all_files:
year = int(filename[-8:-4])
df = pd.read_csv(filename, header=None, names=header,
low_memory=False)
df['year'] = year
li.append(df)
df = | pd.concat(li, axis=0, ignore_index=True) | pandas.concat |
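# Hedged usage sketch for the processors above; the paths are hypothetical and
# the raw Retrosheet-style input files are assumed to exist under them:
# glp = GameLogProcessor()
# glp.read_data('data/raw')
# glp.process_data()
# glp.write_data('data/interim/game_logs.pkl')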
import pandas as pd
import numpy as np
import datetime
from contextlib import contextmanager
from xbbg import const, pipeline
from xbbg.io import logs, files, storage
from xbbg.core import utils, conn, process
def bsrch(tickers, flds, domain, variables, **kwargs):
    """
    Bloomberg Excel grid / SRCH-style request via the //blp/exrsvc service
    """
    logger = logs.get_logger(bsrch, **kwargs)
service = conn.bbg_service(service='//blp/exrsvc', **kwargs)
request = service.createRequest("ExcelGetGridRequest")
request.set("Domain", domain)
overrides = request.getElement("Overrides")
for key, value in variables.items():
override1 = overrides.appendElement()
override1.setElement("name", key)
override1.setElement("value", value)
process.init_request(request=request, tickers=tickers, flds=flds, **kwargs)
logger.debug(f'Sending request to Bloomberg ...\n{request}')
conn.send_request(request=request, **kwargs)
res = pd.DataFrame(process.rec_events(func=process.process_ref))
if kwargs.get('raw', False): return res
if res.empty or any(fld not in res for fld in ['ticker', 'field']):
return pd.DataFrame()
col_maps = kwargs.get('col_maps', None)
cols = res.field.unique()
return (
res
.set_index(['ticker', 'field'])
.unstack(level=1)
.rename_axis(index=None, columns=[None, None])
.droplevel(axis=1, level=0)
.loc[:, cols]
.pipe(pipeline.standard_cols, col_maps=col_maps)
)
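# Hedged usage sketch for bsrch above. The domain string and override names are
# illustrative placeholders only, not verified //blp/exrsvc parameters:
# res = bsrch(tickers=[], flds=[], domain='FI:EXAMPLE_SCREEN',
#             variables={'EXAMPLE_OVERRIDE': 'value'})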
def bdp(tickers, flds, **kwargs) -> pd.DataFrame:
"""
Bloomberg reference data
Args:
tickers: tickers
flds: fields to query
**kwargs: Bloomberg overrides
Returns:
pd.DataFrame
"""
logger = logs.get_logger(bdp, **kwargs)
if isinstance(tickers, str): tickers = [tickers]
if isinstance(flds, str): flds = [flds]
service = conn.bbg_service(service='//blp/refdata', **kwargs)
request = service.createRequest('ReferenceDataRequest')
process.init_request(request=request, tickers=tickers, flds=flds, **kwargs)
logger.debug(f'Sending request to Bloomberg ...\n{request}')
conn.send_request(request=request, **kwargs)
res = pd.DataFrame(process.rec_events(func=process.process_ref))
if kwargs.get('raw', False): return res
if res.empty or any(fld not in res for fld in ['ticker', 'field']):
return pd.DataFrame()
col_maps = kwargs.get('col_maps', None)
cols = res.field.unique()
return (
res
.set_index(['ticker', 'field'])
.unstack(level=1)
.rename_axis(index=None, columns=[None, None])
.droplevel(axis=1, level=0)
.loc[:, cols]
.pipe(pipeline.standard_cols, col_maps=col_maps)
)
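# Hedged usage sketch for bdp above (ticker and field names are illustrative):
# ref = bdp('AAPL US Equity', flds=['Security_Name', 'GICS_Sector_Name'])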
def bds(tickers, flds, **kwargs) -> pd.DataFrame:
"""
Bloomberg block data
Args:
tickers: ticker(s)
flds: field
**kwargs: other overrides for query
Returns:
pd.DataFrame: block data
"""
logger = logs.get_logger(bds, **kwargs)
service = conn.bbg_service(service='//blp/refdata', **kwargs)
request = service.createRequest('ReferenceDataRequest')
if isinstance(tickers, str):
data_file = storage.ref_file(
ticker=tickers, fld=flds, has_date=True, ext='pkl', **kwargs
)
if files.exists(data_file):
logger.debug(f'Loading Bloomberg data from: {data_file}')
return pd.DataFrame(pd.read_pickle(data_file))
process.init_request(request=request, tickers=tickers, flds=flds, **kwargs)
logger.debug(f'Sending request to Bloomberg ...\n{request}')
conn.send_request(request=request, **kwargs)
res = pd.DataFrame(process.rec_events(func=process.process_ref))
if kwargs.get('raw', False): return res
if res.empty or any(fld not in res for fld in ['ticker', 'field']):
return pd.DataFrame()
data = (
res
.set_index(['ticker', 'field'])
.droplevel(axis=0, level=1)
.rename_axis(index=None)
.pipe(pipeline.standard_cols, col_maps=kwargs.get('col_maps', None))
)
if data_file:
logger.debug(f'Saving Bloomberg data to: {data_file}')
files.create_folder(data_file, is_file=True)
data.to_pickle(data_file)
return data
else:
return pd.DataFrame(pd.concat([
bds(tickers=ticker, flds=flds, **kwargs) for ticker in tickers
], sort=False))
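# Hedged usage sketch for bds above (ticker, field and override are illustrative):
# divs = bds('AAPL US Equity', flds='DVD_Hist_All', DVD_Start_Dt='20200101')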
def bdh(
tickers, flds=None, start_date=None, end_date='today', adjust=None, **kwargs
) -> pd.DataFrame:
"""
Bloomberg historical data
Args:
tickers: ticker(s)
flds: field(s)
start_date: start date
end_date: end date - default today
adjust: `all`, `dvd`, `normal`, `abn` (=abnormal), `split`, `-` or None
exact match of above words will adjust for corresponding events
Case 0: `-` no adjustment for dividend or split
Case 1: `dvd` or `normal|abn` will adjust for all dividends except splits
            Case 2: `split` will adjust for splits and ignore all dividends
Case 3: `all` == `dvd|split` == adjust for all
Case 4: None == Bloomberg default OR use kwargs
**kwargs: overrides
Returns:
pd.DataFrame
"""
logger = logs.get_logger(bdh, **kwargs)
if flds is None: flds = ['Last_Price']
e_dt = utils.fmt_dt(end_date, fmt='%Y%m%d')
if start_date is None: start_date = pd.Timestamp(e_dt) - pd.Timedelta(weeks=8)
s_dt = utils.fmt_dt(start_date, fmt='%Y%m%d')
service = conn.bbg_service(service='//blp/refdata', **kwargs)
request = service.createRequest('HistoricalDataRequest')
process.init_request(
request=request, tickers=tickers, flds=flds,
start_date=s_dt, end_date=e_dt, adjust=adjust, **kwargs
)
logger.debug(f'Sending request to Bloomberg ...\n{request}')
conn.send_request(request=request, **kwargs)
res = pd.DataFrame(process.rec_events(process.process_hist))
if kwargs.get('raw', False): return res
if res.empty or any(fld not in res for fld in ['ticker', 'date']):
return pd.DataFrame()
return (
res
.set_index(['ticker', 'date'])
.unstack(level=0)
.rename_axis(index=None, columns=[None, None])
.swaplevel(0, 1, axis=1)
)
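# Hedged usage sketch for bdh above, illustrating the `adjust` cases documented
# in the docstring (ticker, field and dates are illustrative):
# px_raw = bdh('AAPL US Equity', 'Px_Last', '2020-01-01', '2020-03-31', adjust='-')    # Case 0: no adjustment
# px_dvd = bdh('AAPL US Equity', 'Px_Last', '2020-01-01', '2020-03-31', adjust='dvd')  # Case 1: dividends only
# px_all = bdh('AAPL US Equity', 'Px_Last', '2020-01-01', '2020-03-31', adjust='all')  # Case 3: dividends + splits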
def bdib(
ticker: str, dt, session='allday', typ='TRADE', **kwargs
) -> pd.DataFrame:
"""
Bloomberg intraday bar data
Args:
ticker: ticker name
dt: date to download
session: [allday, day, am, pm, pre, post]
typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]
**kwargs:
batch: whether is batch process to download data
log: level of logs
Returns:
pd.DataFrame
"""
from xbbg.core import missing
logger = logs.get_logger(bdib, **kwargs)
exch = const.exch_info(ticker=ticker)
if exch.empty: raise KeyError(f'Cannot find exchange info for {ticker}')
ss_rng = process.time_range(dt=dt, ticker=ticker, session=session, tz=exch.tz)
data_file = storage.bar_file(ticker=ticker, dt=dt, typ=typ)
if files.exists(data_file) and kwargs.get('cache', True) \
and (not kwargs.get('reload', False)):
res = (
pd.read_parquet(data_file)
.pipe(pipeline.add_ticker, ticker=ticker)
.loc[ss_rng[0]:ss_rng[1]]
)
if not res.empty:
logger.debug(f'Loading Bloomberg intraday data from: {data_file}')
return res
t_1 = pd.Timestamp('today').date() - pd.Timedelta('1D')
whole_day = pd.Timestamp(dt).date() < t_1
batch = kwargs.pop('batch', False)
if (not whole_day) and batch:
logger.warning(f'Querying date {t_1} is too close, ignoring download ...')
return pd.DataFrame()
cur_dt = pd.Timestamp(dt).strftime('%Y-%m-%d')
info_log = f'{ticker} / {cur_dt} / {typ}'
q_tckr = ticker
if exch.get('is_fut', False):
if 'freq' not in exch:
logger.error(f'[freq] missing in info for {info_log} ...')
is_sprd = exch.get('has_sprd', False) and (len(ticker[:-1]) != exch['tickers'][0])
if not is_sprd:
q_tckr = fut_ticker(gen_ticker=ticker, dt=dt, freq=exch['freq'])
if q_tckr == '':
logger.error(f'cannot find futures ticker for {ticker} ...')
return pd.DataFrame()
info_log = f'{q_tckr} / {cur_dt} / {typ}'
miss_kw = dict(ticker=ticker, dt=dt, typ=typ, func='bdib')
cur_miss = missing.current_missing(**miss_kw)
if cur_miss >= 2:
if batch: return pd.DataFrame()
logger.info(f'{cur_miss} trials with no data {info_log}')
return | pd.DataFrame() | pandas.DataFrame |
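# Hedged usage sketch for bdib above (ticker and date are illustrative):
# bars = bdib('SPY US Equity', dt='2021-06-01', session='day', typ='TRADE')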
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
from pandas._libs.tslibs.period import IncompatibleFrequency
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import PeriodArray, period_array
@pytest.mark.parametrize(
"data, freq, expected",
[
([pd.Period("2017", "D")], None, [17167]),
([pd.Period("2017", "D")], "D", [17167]),
([2017], "D", [17167]),
(["2017"], "D", [17167]),
([ | pd.Period("2017", "D") | pandas.Period |
import ipyvuetify as v
import pandas as pd
from ipywidgets import Output
from matplotlib import pyplot as plt
from component import parameter as cp
class LayerFull(v.Layout):
COLORS = cp.gradient(5) + ['grey']
def __init__(self, layer_name, values, aoi_names, colors):
# read the layer list and find the layer information based on the layer name
layer_list = pd.read_csv(cp.layer_list).fillna('')
layer_row = layer_list[layer_list.layer_name == layer_name]
if len(layer_row) != 1:
raise IndexError(f"The layer {layer_name} is not part of the existing layers of the application. Please contact our maintainer.")
# build the internal details
details = v.ExpansionPanels(xs12=True, class_="mt-3", children= [
v.ExpansionPanel(children = [
v.ExpansionPanelHeader(children=['Details'], expand_icon='mdi-help-circle-outline', disable_icon_rotate=True),
v.ExpansionPanelContent(children=[layer_row.layer_info.values[0]])
])
])
# create a title with the layer name
title = v.Html(class_="mt-2 mb-2", xs12= True, tag="h3", children=[f'{layer_name} ({layer_row.unit.values[0]})'])
# taken from https://stackoverflow.com/questions/579310/formatting-long-numbers-as-strings-in-python
def human_format(num, round_to=2):
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num = round(num / 1000.0, round_to)
return '{:.{}f}{}'.format(round(num, round_to), round_to, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
# create a matplotlib stack horizontal bar chart
chart = Output()
with chart:
# change pyplot style
plt.style.use('dark_background')
# create the chart
fig, ax = plt.subplots(figsize=[50, len(values)*2], facecolor=((0,0,0,0)))
# set the datas
max_value = max(values)
norm_values = [v/max_value*100 for v in reversed(values)]
human_values = [f"{human_format(val)}" for val in reversed(values)]
colors = [colors[i-1] if i else v.theme.themes.dark.primary for i in range(len(values))][::-1]
# add the axes
ax.barh(aoi_names, norm_values, color=colors)
# add the text
for i, (norm, name, val, color) in enumerate(zip(norm_values, aoi_names, human_values, colors)):
ax.text(norm+1, i, val, fontsize=40, color=color)
# cosmetic tuning
ax.set_xlim(0, 110)
ax.tick_params(axis='y', which='major', pad=30, labelsize=40, left=False)
ax.tick_params(axis='x', bottom=False, labelbottom=False)
ax.set_frame_on(False)
plt.show()
super().__init__(
class_ = "ma-5",
row=True,
children=[
v.Flex(xs12=True, children = [title]),
v.Flex(xs12=True, children=[chart]),
v.Flex(xs12=True, children=[details])
]
)
class LayerPercentage(v.Layout):
def __init__(self, layer_name, pcts, colors):
# read the layer list and find the layer information based on the layer name
layer_list = | pd.read_csv(cp.layer_list) | pandas.read_csv |
import base64
import io
import pandas as pd
from dash import html
def check_non_default_index(df):
if not ((type(df.index) == pd.RangeIndex) and (df.index.name is None)):
return True
else:
return False
def numeric_cols_in_df(df):
numeric_cols = ~ df.apply(lambda s: pd.to_numeric(s, errors='coerce').isna().all())
return numeric_cols
def datetime_strcols_in_df(df, return_numeric_cols=False):
numeric_cols = numeric_cols_in_df(df)
datetime_strcols = ~ df.loc[:, ~numeric_cols].apply(lambda s: pd.to_datetime(s, errors='coerce').isna().all())
datetime_strcols = (~ numeric_cols ) & datetime_strcols
if return_numeric_cols:
return datetime_strcols, numeric_cols
else:
return datetime_strcols
def monotonic_cols_in_df(df):
monotonic_cols = df.apply(lambda s: s.is_monotonic)
return monotonic_cols
def find_closest(value, df, colname, return_lower='True'):
if check_non_default_index(df):
df = df.reset_index()
exactmatch = df[df[colname] == value]
if not exactmatch.empty:
return exactmatch.index[0]
elif return_lower:
lowerneighbour_ind = df[df[colname] < value][colname].idxmax()
return lowerneighbour_ind
else:
upperneighbour_ind = df[df[colname] > value][colname].idxmin()
return upperneighbour_ind
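# Hedged worked example for find_closest above (toy data):
# df_demo = pd.DataFrame({'depth': [0.0, 1.5, 3.0, 4.5]})
# find_closest(2.0, df_demo, 'depth')                      # -> 1 (row with depth 1.5, lower neighbour)
# find_closest(2.0, df_demo, 'depth', return_lower=False)  # -> 2 (row with depth 3.0, upper neighbour)
# find_closest(3.0, df_demo, 'depth')                      # -> 2 (exact match)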
def convert_to_numeric_datetime(df):
datetime_strcols, numeric_cols = datetime_strcols_in_df(df, return_numeric_cols=True)
numeric_colnames = numeric_cols[numeric_cols].index
datetime_strcolnames = datetime_strcols[datetime_strcols].index
df[numeric_colnames] = df[numeric_colnames].apply(lambda s: | pd.to_numeric(s, errors='coerce') | pandas.to_numeric |
import pandas as pd
import logging as log
import collections
import re
import random
class dprep(pd.DataFrame):
df = | pd.DataFrame(index=[""], columns=[""]) | pandas.DataFrame |
import pandas as pd
import glob
import numpy as np
from pandas_profiling import ProfileReport
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
# Loading HERA data
path = r'./hera/' # use your path
USE_COLS = ['ISO_3', 'DATE', 'REGION', 'CONTAMINES', 'DECES', 'GUERIS', 'CONTAMINES_FEMME', 'CONTAMINES_HOMME', 'CONTAMINES_GENRE_NON_SPECIFIE']
all_files = glob.glob(path + "/*.csv")
li = []
for filename in all_files:
df = pd.read_csv(filename, index_col=None, header=0, delimiter=";", usecols=USE_COLS)
li.append(df)
frame = pd.concat(li, axis=0, ignore_index=True)
# Loading Africa Admin Level 1 boundaries
FILE_LOCATION = "africa_admin1.csv"
admin = pd.read_csv(FILE_LOCATION, delimiter=",")
admin = admin.rename(columns={'parent_cod': 'ISO_3'})
# Loading COVID19za data
FILE_LOCATION_ZA_CASES = "./covid19za/covid19za_provincial_cumulative_timeline_confirmed.csv"
FILE_LOCATION_ZA_DEATHS = "./covid19za/covid19za_provincial_cumulative_timeline_deaths.csv"
FILE_LOCATION_ZA_RECOVERIES = "./covid19za/covid19za_provincial_cumulative_timeline_recoveries.csv"
cases_za = pd.read_csv(FILE_LOCATION_ZA_CASES, delimiter=",", low_memory=False)
deaths_za = | pd.read_csv(FILE_LOCATION_ZA_DEATHS, delimiter=",", low_memory=False) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 15 19:43:26 2021
@author: llothar
"""
from sens_tape import tape
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use(['science','no-latex'])
data = pd.read_csv('f9ad.csv')
drops = ['Unnamed: 0', 'Unnamed: 0.1', 'RHX_RT unitless', 'Pass Name unitless',
'nameWellbore', 'name','RGX_RT unitless',
'MWD Continuous Azimuth dega']
dfs = data.iloc[2000:10000]
index = 'Measured Depth m'
target = 'MWD Continuous Inclination dega'
fig, axs = plt.subplots(1, 2, sharey=True, figsize=(8,3))
index_dr = np.diff(dfs[index])
index_mean = np.mean(index_dr)
index_std = np.std(index_dr)
index_maxgap = np.max(index_dr)
h = 5
x = np.arange(np.min(dfs[index].to_numpy()),
np.max(dfs[index].to_numpy()),
index_maxgap*h)
from sklearn.neighbors import RadiusNeighborsRegressor
# raw = dfs['MWD Continuous Inclination dega'].interpolate().ffill().bfill().to_numpy()
# reg.fit(dfs[index].to_numpy().reshape(-1,1),raw)
# y = reg.predict(x.reshape(-1,1))
# plt.xlim(650,700)
# plt.plot(x,y)
# plt.plot(dfs[index].to_numpy(),raw)
# plt.show()
reg = RadiusNeighborsRegressor(radius=index_maxgap*1, weights='uniform')
raw = dfs['Rate of Penetration m/h'].interpolate().ffill().bfill().to_numpy()
reg.fit(dfs[index].to_numpy().reshape(-1,1),raw)
y = reg.predict(x.reshape(-1,1))
axs[0].plot(x,y, c='blue', linewidth=1, label='r = 1 max step', linestyle="-")
reg = RadiusNeighborsRegressor(radius=index_maxgap*20, weights='uniform')
raw = dfs['Rate of Penetration m/h'].interpolate().ffill().bfill().to_numpy()
reg.fit(dfs[index].to_numpy().reshape(-1,1),raw)
y = reg.predict(x.reshape(-1,1))
axs[0].plot(x,y, c='black', linewidth=1, label='r = 20 max step', linestyle="-")
reg = RadiusNeighborsRegressor(radius=index_maxgap*100, weights='uniform')
raw = dfs['Rate of Penetration m/h'].interpolate().ffill().bfill().to_numpy()
reg.fit(dfs[index].to_numpy().reshape(-1,1),raw)
y = reg.predict(x.reshape(-1,1))
axs[0].plot(x,y, c='black', linewidth=1, label='r = 100 max step', linestyle="--")
raw_x = dfs[index].to_numpy()
axs[0].plot(raw_x,raw, c='red', linestyle=':', label='raw data')
axs[0].grid()
plt.tight_layout()
axs[0].set_xlim(650,690)
plt.ylim(0,60)
axs[0].legend()
axs[0].set_title('Uniform weight')
axs[0].set_ylabel('Rate of Penetration [m/h]')
axs[0].set_xlabel('Measured Depth [m]')
reg = RadiusNeighborsRegressor(radius=index_maxgap*1, weights='distance')
raw = dfs['Rate of Penetration m/h'].interpolate().ffill().bfill().to_numpy()
reg.fit(dfs[index].to_numpy().reshape(-1,1),raw)
y = reg.predict(x.reshape(-1,1))
axs[1].plot(x,y, c='blue', linewidth=1, label='r = 1 max step', linestyle="-")
reg = RadiusNeighborsRegressor(radius=index_maxgap*20, weights='distance')
raw = dfs['Rate of Penetration m/h'].interpolate().ffill().bfill().to_numpy()
reg.fit(dfs[index].to_numpy().reshape(-1,1),raw)
y = reg.predict(x.reshape(-1,1))
axs[1].plot(x,y, c='black', linewidth=1, label='r = 20 max step', linestyle="-")
reg = RadiusNeighborsRegressor(radius=index_maxgap*100, weights='distance')
raw = dfs['Rate of Penetration m/h'].interpolate().ffill().bfill().to_numpy()
reg.fit(dfs[index].to_numpy().reshape(-1,1),raw)
y = reg.predict(x.reshape(-1,1))
axs[1].plot(x,y, c='black', linewidth=1, label='r = 100 max step', linestyle="--")
raw_x = dfs[index].to_numpy()
axs[1].plot(raw_x,raw, c='red', linestyle=':', label='raw data')
axs[1].grid()
plt.tight_layout()
axs[1].set_xlim(650,690)
plt.ylim(0,60)
axs[1].legend()
axs[1].set_title('Distance weight')
axs[1].set_xlabel('Measured Depth [m]')
plt.savefig('resampling_radius_rnr.pdf')
plt.show()
#%%
data = pd.read_csv('f9ad.csv')
drops = ['Unnamed: 0', 'Unnamed: 0.1', 'RHX_RT unitless', 'Pass Name unitless',
'nameWellbore', 'name','RGX_RT unitless',
'MWD Continuous Azimuth dega']
dfs = data.iloc[2000:10000]
index = 'Measured Depth m'
target = 'MWD Continuous Inclination dega'
fig, axs = plt.subplots(1, 2, sharey=True, figsize=(8,3))
index_dr = np.diff(dfs[index])
index_mean = np.mean(index_dr)
index_std = np.std(index_dr)
index_maxgap = np.max(index_dr)
h = 5
x = np.arange(np.min(dfs[index].to_numpy()),
np.max(dfs[index].to_numpy()),
index_maxgap*h)
from sklearn.neighbors import KNeighborsRegressor
# raw = dfs['MWD Continuous Inclination dega'].interpolate().ffill().bfill().to_numpy()
# reg.fit(dfs[index].to_numpy().reshape(-1,1),raw)
# y = reg.predict(x.reshape(-1,1))
# plt.xlim(650,700)
# plt.plot(x,y)
# plt.plot(dfs[index].to_numpy(),raw)
# plt.show()
reg = KNeighborsRegressor(n_neighbors=1, weights='uniform')
raw = dfs['Rate of Penetration m/h'].interpolate().ffill().bfill().to_numpy()
reg.fit(dfs[index].to_numpy().reshape(-1,1),raw)
y = reg.predict(x.reshape(-1,1))
axs[0].plot(x,y, c='blue', linewidth=1, label='K = 1', linestyle="-")
reg = KNeighborsRegressor(n_neighbors=20, weights='uniform')
raw = dfs['Rate of Penetration m/h'].interpolate().ffill().bfill().to_numpy()
reg.fit(dfs[index].to_numpy().reshape(-1,1),raw)
y = reg.predict(x.reshape(-1,1))
axs[0].plot(x,y, c='black', linewidth=1, label='K = 20', linestyle="-")
reg = KNeighborsRegressor(n_neighbors=100, weights='uniform')
raw = dfs['Rate of Penetration m/h'].interpolate().ffill().bfill().to_numpy()
reg.fit(dfs[index].to_numpy().reshape(-1,1),raw)
y = reg.predict(x.reshape(-1,1))
axs[0].plot(x,y, c='black', linewidth=1, label='K = 100', linestyle="--")
raw_x = dfs[index].to_numpy()
axs[0].plot(raw_x,raw, c='red', linestyle=':', label='raw data')
axs[0].grid()
plt.tight_layout()
axs[0].set_xlim(650,690)
plt.ylim(0,60)
axs[0].legend()
axs[0].set_title('Uniform weight')
axs[0].set_ylabel('Rate of Penetration [m/h]')
axs[0].set_xlabel('Measured Depth [m]')
reg = KNeighborsRegressor(n_neighbors=1, weights='distance')
raw = dfs['Rate of Penetration m/h'].interpolate().ffill().bfill().to_numpy()
reg.fit(dfs[index].to_numpy().reshape(-1,1),raw)
y = reg.predict(x.reshape(-1,1))
axs[1].plot(x,y, c='blue', linewidth=1, label='K = 1', linestyle="-")
reg = KNeighborsRegressor(n_neighbors=20, weights='distance')
raw = dfs['Rate of Penetration m/h'].interpolate().ffill().bfill().to_numpy()
reg.fit(dfs[index].to_numpy().reshape(-1,1),raw)
y = reg.predict(x.reshape(-1,1))
axs[1].plot(x,y, c='black', linewidth=1, label='K = 20', linestyle="-")
reg = KNeighborsRegressor(n_neighbors=100, weights='distance')
raw = dfs['Rate of Penetration m/h'].interpolate().ffill().bfill().to_numpy()
reg.fit(dfs[index].to_numpy().reshape(-1,1),raw)
y = reg.predict(x.reshape(-1,1))
axs[1].plot(x,y, c='black', linewidth=1, label='K = 100', linestyle="--")
raw_x = dfs[index].to_numpy()
axs[1].plot(raw_x,raw, c='red', linestyle=':', label='raw data')
axs[1].grid()
plt.tight_layout()
axs[1].set_xlim(650,690)
plt.ylim(0,60)
axs[1].legend()
axs[1].set_title('Distance weight')
axs[1].set_xlabel('Measured Depth [m]')
plt.savefig('resampling_radius_knn.pdf')
plt.show()
#%%
from shapely.geometry import Polygon
from shapely.geometry import LineString
from shapely.ops import unary_union, polygonize
data = pd.read_csv('f9ad.csv')
drops = ['Unnamed: 0',
'Pass Name unitless',
'MWD Magnetic Toolface dega',
'nameWellbore',
'name',
'IMP/ARC Attenuation Conductivity 40-in. at 2 MHz mS/m',
'ARC Annular Pressure kPa',
'MWD Collar RPM rpm',
'IMP/ARC Non-BHcorr Phase-Shift Resistivity 28-in. at 2 MHz ohm.m',
'IMP/ARC Phase-Shift Conductivity 40-in. at 2 MHz mS/m',
'Annular Temperature degC',
'IMP/ARC Non-BHcorr Phase-Shift Resistivity 40-in. at 2 MHz ohm.m',
'ARC Gamma Ray (BH corrected) gAPI',
'IMP/ARC Non-BHcorr Attenuation Resistivity 40-in. at 2 MHz ohm.m',
'MWD Stick-Slip PKtoPK RPM rpm',
'IMP/ARC Non-BHcorr Attenuation Resistivity 28-in. at 2 MHz ohm.m',
'IMP/ARC Phase-Shift Conductivity 28-in. at 2 MHz mS/m'
]
data = data.drop(drops, axis=1)
dfs = data.iloc[2000:10000]
index = 'Measured Depth m'
target = 'Rate of Penetration m/h' #'MWD Continuous Inclination dega'
index_dr = np.diff(dfs[index])
index_mean = np.mean(index_dr)
index_std = np.std(index_dr)
index_maxgap = np.max(index_dr)
h = 5
data_x = np.arange(np.min(dfs[index].to_numpy()),
np.max(dfs[index].to_numpy()),
index_maxgap*h)
#%%
for target in list(data):
# try:
areas = []
samples = np.arange(1,200,10)
weightss = ['uniform', 'distance']
for weights in weightss:
areas = []
for i in samples:
reg = RadiusNeighborsRegressor(radius=index_maxgap*i, weights=weights)
raw = dfs[target].interpolate().ffill().bfill().to_numpy()
reg.fit(dfs[index].to_numpy().reshape(-1,1),raw)
data_y = reg.predict(data_x.reshape(-1,1))
x_y_curve1 = np.rot90([data_x,data_y])
x_y_curve2 = np.rot90([dfs[index].to_numpy(), raw])
polygon_points = [] #creates a empty list where we will append the points to create the polygon
for xyvalue in x_y_curve1:
polygon_points.append([xyvalue[0],xyvalue[1]]) #append all xy points for curve 1
for xyvalue in x_y_curve2[::-1]:
polygon_points.append([xyvalue[0],xyvalue[1]]) #append all xy points for curve 2 in the reverse order (from last point to first point)
for xyvalue in x_y_curve1[0:1]:
polygon_points.append([xyvalue[0],xyvalue[1]]) #append the first point in curve 1 again, to it "closes" the polygon
polygon = Polygon(polygon_points)
area = polygon.area
x,y = polygon.exterior.xy
# original data
ls = LineString(np.c_[x, y])
# closed, non-simple
lr = LineString(ls.coords[:] + ls.coords[0:1])
lr.is_simple # False
mls = unary_union(lr)
mls.geom_type # MultiLineString'
Area_cal =[]
for polygon in polygonize(mls):
Area_cal.append(polygon.area)
Area_poly = (np.asarray(Area_cal).sum())
areas.append(Area_poly)
plt.plot(samples,areas, label=f'RNR, {weights}')
from sklearn.neighbors import KNeighborsRegressor
ks = np.arange(1,200,10)
for weights in weightss:
areas = []
for i in ks:
reg = KNeighborsRegressor(n_neighbors=i, weights=weights)
raw = dfs[target].interpolate().ffill().bfill().to_numpy()
reg.fit(dfs[index].to_numpy().reshape(-1,1),raw)
data_y = reg.predict(data_x.reshape(-1,1))
x_y_curve1 = np.rot90([data_x,data_y])
x_y_curve2 = np.rot90([dfs[index].to_numpy(), raw])
polygon_points = [] #creates a empty list where we will append the points to create the polygon
for xyvalue in x_y_curve1:
polygon_points.append([xyvalue[0],xyvalue[1]]) #append all xy points for curve 1
for xyvalue in x_y_curve2[::-1]:
polygon_points.append([xyvalue[0],xyvalue[1]]) #append all xy points for curve 2 in the reverse order (from last point to first point)
for xyvalue in x_y_curve1[0:1]:
polygon_points.append([xyvalue[0],xyvalue[1]]) #append the first point in curve 1 again, to it "closes" the polygon
polygon = Polygon(polygon_points)
area = polygon.area
x,y = polygon.exterior.xy
# original data
ls = LineString(np.c_[x, y])
# closed, non-simple
lr = LineString(ls.coords[:] + ls.coords[0:1])
lr.is_simple # False
mls = unary_union(lr)
mls.geom_type # MultiLineString'
Area_cal =[]
for polygon in polygonize(mls):
Area_cal.append(polygon.area)
Area_poly = (np.asarray(Area_cal).sum())
areas.append(Area_poly)
plt.plot(ks,areas, label=f'KNN, {weights}')
plt.legend()
plt.title(target)
plt.grid()
plt.show()
# except:
# print(f'{target} failed for some reason')
#%%
# no poly version
def myr2(x, y, data_x, data_y):
    # Squared deviation of the resampled point (x, y) from the piecewise-linear
    # interpolation of the raw curve (data_x, data_y): find the raw samples that
    # bracket x, interpolate linearly between them, and return (y - y_interp)**2.
    try:
        x1 = np.max(data_x[data_x < x])
        x2 = np.min(data_x[data_x > x])
        loc1 = np.where(data_x == x1)
        loc2 = np.where(data_x == x2)
        y1 = data_y[loc1][-1]
        y2 = data_y[loc2][0]
        m = (y1 - y2) / (x1 - x2)
        b = (x1 * y2 - x2 * y1) / (x1 - x2)
        y_inter = m * x + b
        return np.power(y - y_inter, 2)
    except:
        # outside the raw data range (or no bracketing samples) -> no error contribution
        return 0
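# Hedged worked example for myr2 above: with data_x = [0, 2] and data_y = [0, 2]
# the interpolated value at x = 1 is 1, so myr2(1, 3, ...) returns (3 - 1)**2 = 4.
# myr2(1, 3, np.array([0., 2.]), np.array([0., 2.]))  # -> 4.0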
n = 0
for target in list(data):
# try:
plt.figure(figsize=(5,5))
areas = []
samples = np.arange(1,31,1)
weightss = ['uniform', 'distance']
for weights in weightss:
areas = []
for i in samples:
reg = RadiusNeighborsRegressor(radius=index_maxgap*i, weights=weights)
raw = dfs[target].interpolate().ffill().bfill().to_numpy()
reg.fit(dfs[index].to_numpy().reshape(-1,1),raw)
data_y = reg.predict(data_x.reshape(-1,1))
totals = []
for row in np.rot90([data_x,data_y]):
x = row[0]
y = row[1]
totals.append(myr2(x,y,dfs[index].to_numpy(), raw))
Area_poly = np.power((np.sum(totals)/len(totals)),0.5)
areas.append(Area_poly)
plt.plot(samples,areas, label=f'RNR, {weights}')
from sklearn.neighbors import KNeighborsRegressor
ks = np.arange(1,31,1)
for weights in weightss:
areas = []
for i in ks:
reg = KNeighborsRegressor(n_neighbors=i, weights=weights)
raw = dfs[target].interpolate().ffill().bfill().to_numpy()
reg.fit(dfs[index].to_numpy().reshape(-1,1),raw)
data_y = reg.predict(data_x.reshape(-1,1))
totals = []
for row in np.rot90([data_x,data_y]):
x = row[0]
y = row[1]
totals.append(myr2(x,y,dfs[index].to_numpy(), raw))
Area_poly = np.power((np.sum(totals)/len(totals)),0.5)
areas.append(Area_poly)
plt.plot(ks,areas, label=f'KNN, {weights}')
plt.xlabel('K \ radius multiplier')
plt.ylabel('Error [RMS]')
plt.legend()
plt.title(target)
plt.grid()
plt.yscale('log')
plt.savefig(f'{n}.pdf')
n += 1
plt.show()
# except:
# print(f'{target} failed for some reason')
#%%
# no poly version, Riemann squared
def myr2multi(x_start, y_start, x_stop, y_stop, data_x, data_y, res):
    # Like myr2, but samples `res` evenly spaced points along the resampled
    # segment (x_start, y_start) -> (x_stop, y_stop) and returns the list of
    # squared deviations from the piecewise-linear raw curve.
    try:
loc_results = []
x_range = np.linspace(x_start, x_stop, res+1)[:-1]
y_range = np.linspace(y_start, y_stop, res+1)[:-1]
for i in range(res):
x = x_range[i]
y = y_range[i]
x1 = np.max(data_x[data_x <= x])
x2 = np.min(data_x[data_x > x])
loc1 = np.where(data_x == x1)
loc2 = np.where(data_x == x2)
y1 = data_y[loc1][-1]
y2 = data_y[loc2][0]
m = (y1-y2)/(x1-x2)
b = (x1*y2 - x2*y1)/(x1-x2)
y_inter = m * x + b
loc_results.append(np.power(y-y_inter, 2))
return loc_results
    except:
        # print before returning, otherwise the message is unreachable
        print('oops')
        return 0
n = 0
res = 10
global_results = []
colors = ['red', 'green', 'blue', 'black']
linestyles = ['-','--', '-.', ':']
for target in list(data):
# try:
c = 0
local_result = [[],[],[],[]]
plt.figure(figsize=(4,4))
areas = []
samples = np.arange(1,31,1)
weightss = ['uniform', 'distance']
plotno = 0
for weights in weightss:
areas = []
for i in samples:
reg = RadiusNeighborsRegressor(radius=index_maxgap*i, weights=weights)
raw = dfs[target].interpolate().ffill().bfill().to_numpy()
reg.fit(dfs[index].to_numpy().reshape(-1,1),raw)
data_y = reg.predict(data_x.reshape(-1,1))
totals = []
newdata = np.rot90([data_x,data_y])
for i in range(1,len(newdata)):
x_start = newdata[i-1][0]
y_start = newdata[i-1][1]
x_stop = newdata[i][0]
y_stop = newdata[i][1]
result = myr2multi(x_start, y_start,
x_stop, y_stop,
dfs[index].to_numpy(), raw,
res)
totals.append(result) # added /np.mean(raw)
totals = np.asarray(totals)
Area_poly = np.power((np.sum(totals)/totals.size),0.5)
areas.append(Area_poly)
plt.plot(samples,areas, label=f'RNR\n{weights}',
c = colors[c], linestyle = linestyles[c],linewidth=1.5 )
c += 1
local_result[plotno] = areas
plotno += 1
from sklearn.neighbors import KNeighborsRegressor
ks = np.arange(1,31,1)
for weights in weightss:
areas = []
for i in ks:
reg = KNeighborsRegressor(n_neighbors=i, weights=weights)
raw = dfs[target].interpolate().ffill().bfill().to_numpy()
reg.fit(dfs[index].to_numpy().reshape(-1,1),raw)
data_y = reg.predict(data_x.reshape(-1,1))
totals = []
newdata = np.rot90([data_x,data_y])
for i in range(1,len(newdata)):
x_start = newdata[i-1][0]
y_start = newdata[i-1][1]
x_stop = newdata[i][0]
y_stop = newdata[i][1]
totals.append(myr2multi(x_start, y_start,
x_stop, y_stop,
dfs[index].to_numpy(), raw,
res))
totals = np.asarray(totals)
Area_poly = np.power((np.sum(totals)/totals.size),0.5)
areas.append(Area_poly)
plt.plot(ks,areas, label=f'KNN\n{weights}',
c = colors[c], linestyle = linestyles[c],linewidth=1.5 )
c += 1
local_result[plotno] = areas
plotno += 1
local_result = local_result/np.min(local_result)
global_results.append(local_result)
plt.xlabel('neigbor count / radius multiplier')
plt.ylabel('error [RMRS]')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.title(target)
plt.grid()
plt.yscale('log')
plt.savefig(f'multi_{n}.pdf')
n += 1
plt.show()
plt.plot(local_result[0])
plt.plot(local_result[1])
plt.plot(local_result[2])
plt.plot(local_result[3])
plt.show()
# except:
# print(f'{target} failed for some reason')
np.save('global_results.npy', global_results)
#%%
global_results = np.load('global_results.npy')
plt.figure(figsize=(4,4))
global_results = np.asarray(global_results)
methods_plot = [
'RNR\nuniform',
'RNR\ndistance',
'KNN\nuniform',
'KNN\ndistance'
]
colors = ['red', 'green', 'blue', 'black']
linestyles = ['-','--', '-.', ':']
for i in range(4):
plt.plot(np.nanmean(global_results[:,i,:], axis=0), label=methods_plot[i],
c=colors[i], linewidth=1.5, linestyle = linestyles[i])
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.yscale('log')
ymax = 3.1
plt.yticks(np.arange(1,ymax,0.2), np.arange(100,ymax*100,20).astype(int))
plt.grid()
plt.xlabel('neighbor count / radius multiplier')
plt.ylabel('RMRS error compared\nto best selection [%]')
plt.ylim(1,3)
plt.xticks(np.arange(-1,31,5), np.arange(0,32,5))
plt.savefig('algocompare.pdf')
#%%
plt.figure(figsize=(5,4))
plt.rc('axes', axisbelow=True)
plt.grid(linewidth=1, color='gray')
x = np.arange(1,101,1)
y = 1/x
import matplotlib
cmap = matplotlib.cm.get_cmap('hsv')
n = 15
for i in range(n+1):
for j in range(i):
if i == n:
plt.bar(x=i,
height=y[j]/np.sum(y[:i]),
bottom=np.sum(y[:j])/np.sum(y[:i]),
color = cmap(j/(n+1)),
label=f'd = {j+1}',
edgecolor='black')
else:
plt.bar(x=i,
height=y[j]/np.sum(y[:i]),
bottom=np.sum(y[:j])/np.sum(y[:i]),
color = cmap(j/(n+2)),
edgecolor='black')
plt.xlim(0,n+1)
plt.xticks(np.arange(1,n+1,1), rotation=90)
plt.yticks(np.linspace(0,1,11), np.linspace(0,100,11).astype(int))
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.xlabel('Radius Neighbour Regressor radius limit')
plt.ylabel('Datapoint weights, percent')
plt.tight_layout()
#plt.grid()
plt.savefig('Cumulative weights.pdf')
#%%
plt.figure(figsize=(5,4))
plt.rc('axes', axisbelow=True)
plt.grid(linewidth=1, color='gray')
x = np.ones(100)
y = x
import matplotlib
cmap = matplotlib.cm.get_cmap('hsv')
n = 15
for i in range(n+1):
for j in range(i):
if i == n:
plt.bar(x=i,
height=y[j]/np.sum(y[:i]),
bottom=np.sum(y[:j])/np.sum(y[:i]),
color = cmap(j/(n+1)),
label=f'd = {j+1}',
edgecolor='black')
else:
plt.bar(x=i,
height=y[j]/np.sum(y[:i]),
bottom=np.sum(y[:j])/np.sum(y[:i]),
color = cmap(j/(n+2)),
edgecolor='black')
plt.xlim(0,n+1)
plt.xticks(np.arange(1,n+1,1), rotation=90)
plt.yticks(np.linspace(0,1,11), np.linspace(0,100,11).astype(int))
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.xlabel('Radius Neighbour Regressor radius limit')
plt.ylabel('Datapoint weights, percent')
plt.tight_layout()
plt.savefig('Cumulative weights2.pdf')
#%%
import glob
filelist = (glob.glob("full_log*.npy"))
data_array = []
for file in filelist:
data = np.load(file, allow_pickle=True)
if len(data) > 0:
data_array.append(data)
data = np.vstack(data_array)
plt.figure(figsize=(4,2.5))
df = pd.DataFrame(data=data, columns=["method", "n", "param"])
df['n'] = df['n'].astype(int)
methods = ['KNN uniform', 'KNN distance', 'RNR uniform', 'RNR distance']
ns = np.arange(1,11,1)
ms = np.arange(0,4,1)
summary = []
for m in ms:
for n in ns:
dft = df[df['method'] == methods[m]]
dft = dft[dft['n'] == n]
summary.append([m,n,len(dft)])
summary = np.asarray(summary)
methods_plot = ['KNN\nuniform',
'KNN\ndistance',
'RNR\nuniform',
'RNR\ndistance']
scaler = 1.5
plt.scatter(x=summary[:,1], y=summary[:,0], s=summary[:,2]*scaler, c='steelblue')
plt.xticks(ns)
plt.yticks(ms, methods_plot)
sizes = np.arange(100,401,100)
sizes = np.hstack((1,sizes))
for s in sizes:
plt.scatter([],[],s=s*scaler, c='steelblue', label=f'{s}\n ')
plt.legend(title='winner count', bbox_to_anchor=(1.0, 1), loc='upper left')
plt.xlabel('Neighbor count / Radius multiplier')
#%%
import glob
filelist = (glob.glob("simann*.npy"))
data_array = []
for file in filelist:
data = np.load(file, allow_pickle=True)
if len(data) > 0:
data_array.append(data)
data = np.vstack(data_array)
plt.figure(figsize=(4,2.5))
df = | pd.DataFrame(data=data, columns=["method", "n", "param"]) | pandas.DataFrame |
# Import all required modules
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import streamlit as st
#-------------------------------------------
#Open file
f = open("kode_negara_lengkap.json")
file_json = json.load(f)
df_csv = | pd.read_csv("produksi_minyak_mentah.csv") | pandas.read_csv |
import numpy as np
import pandas as pd
import datetime as dt
from datetime import datetime
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
from flask import Flask, jsonify, request
def toDate(dateString):
return dt.datetime.strptime(dateString, "%Y-%m-%d").date()
#################################################
# Database Setup
#################################################
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
print(Base.classes.keys())
# Save reference to the table
# Assign the measurement class to a variable called `Measurement`
Measurement = Base.classes.measurement
# Assign the station class to a variable called `Station`
Station = Base.classes.station
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Flask Routes
#################################################
@app.route("/")
def welcome():
"""List all available api routes."""
return (
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/YYYY-MM-DD (trip start date - enter any date before 2017-08-23)<br/>"
f"/api/v1.0/YYYY-MM-DD (trip start date)/YYYY-MM-DD (trip end date)<br/>"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Return a list of all dates and precipitations"""
    # Query all dates and precipitation values
results = session.query(Measurement.date, Measurement.prcp).order_by(Measurement.date.desc()).all()
session.close()
# Convert list of tuples into normal list
prcp_list = list(np.ravel(results))
print(len(prcp_list))
# Create a dictionary from normal list
all_prcp = []
for date, prcp in results:
prcp_dict = {}
prcp_dict["date"] = date
prcp_dict["prcp"] = prcp
all_prcp.append(prcp_dict)
return jsonify(all_prcp)
@app.route("/api/v1.0/stations")
def stations():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Return a list of all stations"""
# Query all stations
results = session.query(Measurement.station, func.avg(Measurement.tobs))\
.group_by(Measurement.station)\
.order_by(Measurement.station).all()
session.close()
# Convert list of tuples into normal list
stations = list(np.ravel(results))
# Create a dictionary from normal list
all_stations = []
for station, avg_temp in stations:
stations_dict = {}
stations_dict["station"] = station
all_stations.append(stations_dict)
return jsonify(all_stations)
@app.route("/api/v1.0/tobs")
def tobs():
# Create our session (link) from Python to the DB
session = Session(engine)
results_last_date = session.query(Measurement.date, Measurement.prcp)\
.order_by(Measurement.date.desc()).first()
last_date = | pd.to_datetime(results_last_date[0]) | pandas.to_datetime |
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from typing import List
class Animator:
def __init__(
self,
pred_df: pd.DataFrame,
actual_df: pd.DataFrame,
animation_interval: int,
number_of_plots: int,
pairs: List[str],
ops_freq: int,
):
self._animation_interval = animation_interval
self._number_of_plots = number_of_plots
self._fig = plt.figure()
self._pred_df = pred_df
self._actual_df = actual_df
self._batch_size = f"{ops_freq}T"
self._pairs = pairs
self._generate_pair_df_plot_dict()
plt.show(block=False)
def _generate_pair_df_plot_dict(self):
self._plots = {}
for i, pair in enumerate(self._pairs):
self._plots[pair] = self._fig.add_subplot(
len(self._pairs), 1, i + 1, ylabel=pair
)
def animate(self, i):
for pair in self._pairs:
actual = self._actual_df[self._actual_df["pair"] == pair]
actual["datetime"] = pd.to_datetime(actual["ts"], unit="s")
actual = actual.set_index("datetime").resample(self._batch_size).mean()
pred = self._pred_df[self._pred_df["pair"] == pair]
pred["datetime"] = | pd.to_datetime(pred["ts"], unit="s") | pandas.to_datetime |
import argparse
import os
import warnings
import subprocess
subprocess.call(['pip', 'install', 'sagemaker-experiments'])
import pandas as pd
import numpy as np
import tarfile
from smexperiments.tracker import Tracker
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.compose import make_column_transformer
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
columns = ["status","duration","credit_history","purpose","amount","savings","employment_duration","installment_rate","personal_status_sex","other_debtors","present_residence","property","age","other_installment_plans","housing","number_credits","job","people_liable","telephone","foreign_worker","credit_risk"]
if __name__=='__main__':
# Read the arguments passed to the script.
parser = argparse.ArgumentParser()
parser.add_argument('--train-test-split-ratio', type=float, default=0.3)
args, _ = parser.parse_known_args()
# Tracking specific parameter value during job.
tracker = Tracker.load()
tracker.log_parameter('train-test-split-ratio', args.train_test_split_ratio)
print('Received arguments {}'.format(args))
# Read input data into a Pandas dataframe.
input_data_path = os.path.join('/opt/ml/processing/input', 'SouthGermanCredit.txt')
print('Reading input data from {}'.format(input_data_path))
df = | pd.read_csv(input_data_path,names=columns,header=0,sep=r' ') | pandas.read_csv |
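    # Hedged sketch of how this preprocessing script presumably continues (not from
    # the original file): split the data, then one-hot encode a few categorical
    # features with the imported helpers. Column choices are assumptions.
    # split_ratio = args.train_test_split_ratio
    # X_train, X_test, y_train, y_test = train_test_split(
    #     df.drop('credit_risk', axis=1), df['credit_risk'],
    #     test_size=split_ratio, random_state=0)
    # preprocess = make_column_transformer(
    #     (OneHotEncoder(sparse=False), ['status', 'credit_history', 'purpose']),
    #     remainder='passthrough')
    # train_features = preprocess.fit_transform(X_train)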